# -*- coding: utf-8 -*-
from django.db import models
from south.db import db
class Migration:
def forwards(self, orm):
# Adding model 'PayPalIPN'
db.create_table('paypal_ipn', (
('id', models.AutoField(primary_key=True)),
('business', models.CharField(max_length=127, blank=True)),
('charset', models.CharField(max_length=32, blank=True)),
('custom', models.CharField(max_length=255, blank=True)),
('notify_version', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
('parent_txn_id', models.CharField("Parent Transaction ID", max_length=19, blank=True)),
('receiver_email', models.EmailField(max_length=127, blank=True)),
('receiver_id', models.CharField(max_length=127, blank=True)),
('residence_country', models.CharField(max_length=2, blank=True)),
('test_ipn', models.BooleanField(default=False, blank=True)),
('txn_id', models.CharField("Transaction ID", max_length=19, blank=True)),
('txn_type', models.CharField("Transaction Type", max_length=128, blank=True)),
('verify_sign', models.CharField(max_length=255, blank=True)),
('address_country', models.CharField(max_length=64, blank=True)),
('address_city', models.CharField(max_length=40, blank=True)),
('address_country_code', models.CharField(max_length=64, blank=True)),
('address_name', models.CharField(max_length=128, blank=True)),
('address_state', models.CharField(max_length=40, blank=True)),
('address_status', models.CharField(max_length=11, blank=True)),
('address_street', models.CharField(max_length=200, blank=True)),
('address_zip', models.CharField(max_length=20, blank=True)),
('contact_phone', models.CharField(max_length=20, blank=True)),
('first_name', models.CharField(max_length=64, blank=True)),
('last_name', models.CharField(max_length=64, blank=True)),
('payer_business_name', models.CharField(max_length=127, blank=True)),
('payer_email', models.CharField(max_length=127, blank=True)),
('payer_id', models.CharField(max_length=13, blank=True)),
('auth_amount', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
('auth_exp', models.CharField(max_length=28, blank=True)),
('auth_id', models.CharField(max_length=19, blank=True)),
('auth_status', models.CharField(max_length=9, blank=True)),
('exchange_rate', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=16, blank=True)),
('invoice', models.CharField(max_length=127, blank=True)),
('item_name', models.CharField(max_length=127, blank=True)),
('item_number', models.CharField(max_length=127, blank=True)),
('mc_currency', models.CharField(default='USD', max_length=32, blank=True)),
('mc_fee', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
('mc_gross', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
('mc_handling', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
('mc_shipping', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
('memo', models.CharField(max_length=255, blank=True)),
('num_cart_items', models.IntegerField(default=0, null=True, blank=True)),
('option_name1', models.CharField(max_length=64, blank=True)),
('option_name2', models.CharField(max_length=64, blank=True)),
('payer_status', models.CharField(max_length=10, blank=True)),
('payment_date', models.DateTimeField(null=True, blank=True)),
('payment_gross', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
('payment_status', models.CharField(max_length=9, blank=True)),
('payment_type', models.CharField(max_length=7, blank=True)),
('pending_reason', models.CharField(max_length=14, blank=True)),
('protection_eligibility', models.CharField(max_length=32, blank=True)),
('quantity', models.IntegerField(default=1, null=True, blank=True)),
('reason_code', models.CharField(max_length=15, blank=True)),
('remaining_settle', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
('settle_amount', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
('settle_currency', models.CharField(max_length=32, blank=True)),
('shipping', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
('shipping_method', models.CharField(max_length=255, blank=True)),
('tax', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
('transaction_entity', models.CharField(max_length=7, blank=True)),
('auction_buyer_id', models.CharField(max_length=64, blank=True)),
('auction_closing_date', models.DateTimeField(null=True, blank=True)),
('auction_multi_item', models.IntegerField(default=0, null=True, blank=True)),
('for_auction', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
('amount', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
('amount_per_cycle', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
('initial_payment_amount', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
('next_payment_date', models.DateTimeField(null=True, blank=True)),
('outstanding_balance', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
('payment_cycle', models.CharField(max_length=32, blank=True)),
('period_type', models.CharField(max_length=32, blank=True)),
('product_name', models.CharField(max_length=128, blank=True)),
('product_type', models.CharField(max_length=128, blank=True)),
('profile_status', models.CharField(max_length=32, blank=True)),
('recurring_payment_id', models.CharField(max_length=128, blank=True)),
('rp_invoice_id', models.CharField(max_length=127, blank=True)),
('time_created', models.DateTimeField(null=True, blank=True)),
('amount1', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
('amount2', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
('amount3', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
('mc_amount1', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
('mc_amount2', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
('mc_amount3', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
('password', models.CharField(max_length=24, blank=True)),
('period1', models.CharField(max_length=32, blank=True)),
('period2', models.CharField(max_length=32, blank=True)),
('period3', models.CharField(max_length=32, blank=True)),
('reattempt', models.CharField(max_length=1, blank=True)),
('recur_times', models.IntegerField(default=0, null=True, blank=True)),
('recurring', models.CharField(max_length=1, blank=True)),
('retry_at', models.DateTimeField(null=True, blank=True)),
('subscr_date', models.DateTimeField(null=True, blank=True)),
('subscr_effective', models.DateTimeField(null=True, blank=True)),
('subscr_id', models.CharField(max_length=19, blank=True)),
('username', models.CharField(max_length=64, blank=True)),
('case_creation_date', models.DateTimeField(null=True, blank=True)),
('case_id', models.CharField(max_length=14, blank=True)),
('case_type', models.CharField(max_length=24, blank=True)),
('receipt_id', models.CharField(max_length=64, blank=True)),
('currency_code', models.CharField(default='USD', max_length=32, blank=True)),
('handling_amount', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
('transaction_subject', models.CharField(max_length=255, blank=True)),
('ipaddress', models.IPAddressField(blank=True)),
('flag', models.BooleanField(default=False, blank=True)),
('flag_code', models.CharField(max_length=16, blank=True)),
('flag_info', models.TextField(blank=True)),
('query', models.TextField(blank=True)),
('response', models.TextField(blank=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('from_view', models.CharField(max_length=6, null=True, blank=True)),
))
db.send_create_signal('ipn', ['PayPalIPN'])
def backwards(self, orm):
# Deleting model 'PayPalIPN'
db.delete_table('paypal_ipn')
models = {
'ipn.paypalipn': {
'Meta': {'db_table': '"paypal_ipn"'},
'address_city': ('models.CharField', [], {'max_length': '40', 'blank': 'True'}),
'address_country': ('models.CharField', [], {'max_length': '64', 'blank': 'True'}),
'address_country_code': ('models.CharField', [], {'max_length': '64', 'blank': 'True'}),
'address_name': ('models.CharField', [], {'max_length': '128', 'blank': 'True'}),
'address_state': ('models.CharField', [], {'max_length': '40', 'blank': 'True'}),
'address_status': ('models.CharField', [], {'max_length': '11', 'blank': 'True'}),
'address_street': ('models.CharField', [], {'max_length': '200', 'blank': 'True'}),
'address_zip': ('models.CharField', [], {'max_length': '20', 'blank': 'True'}),
'amount': ('models.DecimalField', [],
{'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amount1': ('models.DecimalField', [],
{'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amount2': ('models.DecimalField', [],
{'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amount3': ('models.DecimalField', [],
{'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amount_per_cycle': ('models.DecimalField', [],
{'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2',
'blank': 'True'}),
'auction_buyer_id': ('models.CharField', [], {'max_length': '64', 'blank': 'True'}),
'auction_closing_date': ('models.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'auction_multi_item': ('models.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'auth_amount': ('models.DecimalField', [],
{'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2',
'blank': 'True'}),
'auth_exp': ('models.CharField', [], {'max_length': '28', 'blank': 'True'}),
'auth_id': ('models.CharField', [], {'max_length': '19', 'blank': 'True'}),
'auth_status': ('models.CharField', [], {'max_length': '9', 'blank': 'True'}),
'business': ('models.CharField', [], {'max_length': '127', 'blank': 'True'}),
'case_creation_date': ('models.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'case_id': ('models.CharField', [], {'max_length': '14', 'blank': 'True'}),
'case_type': ('models.CharField', [], {'max_length': '24', 'blank': 'True'}),
'charset': ('models.CharField', [], {'max_length': '32', 'blank': 'True'}),
'contact_phone': ('models.CharField', [], {'max_length': '20', 'blank': 'True'}),
'created_at': ('models.DateTimeField', [], {'auto_now_add': 'True'}),
'currency_code': ('models.CharField', [], {'default': "'USD'", 'max_length': '32', 'blank': 'True'}),
'custom': ('models.CharField', [], {'max_length': '255', 'blank': 'True'}),
'exchange_rate': ('models.DecimalField', [],
{'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '16',
'blank': 'True'}),
'first_name': ('models.CharField', [], {'max_length': '64', 'blank': 'True'}),
'flag': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'flag_code': ('models.CharField', [], {'max_length': '16', 'blank': 'True'}),
'flag_info': ('models.TextField', [], {'blank': 'True'}),
'for_auction': ('models.DecimalField', [],
{'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2',
'blank': 'True'}),
'from_view': ('models.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'handling_amount': ('models.DecimalField', [],
{'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2',
'blank': 'True'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'initial_payment_amount': ('models.DecimalField', [],
{'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2',
'blank': 'True'}),
'invoice': ('models.CharField', [], {'max_length': '127', 'blank': 'True'}),
'ipaddress': ('models.IPAddressField', [], {'blank': 'True'}),
'item_name': ('models.CharField', [], {'max_length': '127', 'blank': 'True'}),
'item_number': ('models.CharField', [], {'max_length': '127', 'blank': 'True'}),
'last_name': ('models.CharField', [], {'max_length': '64', 'blank': 'True'}),
'mc_amount1': ('models.DecimalField', [],
{'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2',
'blank': 'True'}),
'mc_amount2': ('models.DecimalField', [],
{'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2',
'blank': 'True'}),
'mc_amount3': ('models.DecimalField', [],
{'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2',
'blank': 'True'}),
'mc_currency': ('models.CharField', [], {'default': "'USD'", 'max_length': '32', 'blank': 'True'}),
'mc_fee': ('models.DecimalField', [],
{'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_gross': ('models.DecimalField', [],
{'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_handling': ('models.DecimalField', [],
{'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2',
'blank': 'True'}),
'mc_shipping': ('models.DecimalField', [],
{'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2',
'blank': 'True'}),
'memo': ('models.CharField', [], {'max_length': '255', 'blank': 'True'}),
'next_payment_date': ('models.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'notify_version': ('models.DecimalField', [],
{'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2',
'blank': 'True'}),
'num_cart_items': ('models.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'option_name1': ('models.CharField', [], {'max_length': '64', 'blank': 'True'}),
'option_name2': ('models.CharField', [], {'max_length': '64', 'blank': 'True'}),
'outstanding_balance': ('models.DecimalField', [],
{'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2',
'blank': 'True'}),
'parent_txn_id': ('models.CharField', ['"Parent Transaction ID"'], {'max_length': '19', 'blank': 'True'}),
'password': ('models.CharField', [], {'max_length': '24', 'blank': 'True'}),
'payer_business_name': ('models.CharField', [], {'max_length': '127', 'blank': 'True'}),
'payer_email': ('models.CharField', [], {'max_length': '127', 'blank': 'True'}),
'payer_id': ('models.CharField', [], {'max_length': '13', 'blank': 'True'}),
'payer_status': ('models.CharField', [], {'max_length': '10', 'blank': 'True'}),
'payment_cycle': ('models.CharField', [], {'max_length': '32', 'blank': 'True'}),
'payment_date': ('models.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'payment_gross': ('models.DecimalField', [],
{'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2',
'blank': 'True'}),
'payment_status': ('models.CharField', [], {'max_length': '9', 'blank': 'True'}),
'payment_type': ('models.CharField', [], {'max_length': '7', 'blank': 'True'}),
'pending_reason': ('models.CharField', [], {'max_length': '14', 'blank': 'True'}),
'period1': ('models.CharField', [], {'max_length': '32', 'blank': 'True'}),
'period2': ('models.CharField', [], {'max_length': '32', 'blank': 'True'}),
'period3': ('models.CharField', [], {'max_length': '32', 'blank': 'True'}),
'period_type': ('models.CharField', [], {'max_length': '32', 'blank': 'True'}),
'product_name': ('models.CharField', [], {'max_length': '128', 'blank': 'True'}),
'product_type': ('models.CharField', [], {'max_length': '128', 'blank': 'True'}),
'profile_status': ('models.CharField', [], {'max_length': '32', 'blank': 'True'}),
'protection_eligibility': ('models.CharField', [], {'max_length': '32', 'blank': 'True'}),
'quantity': ('models.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'query': ('models.TextField', [], {'blank': 'True'}),
'reason_code': ('models.CharField', [], {'max_length': '15', 'blank': 'True'}),
'reattempt': ('models.CharField', [], {'max_length': '1', 'blank': 'True'}),
'receipt_id': ('models.CharField', [], {'max_length': '64', 'blank': 'True'}),
'receiver_email': ('models.EmailField', [], {'max_length': '127', 'blank': 'True'}),
'receiver_id': ('models.CharField', [], {'max_length': '127', 'blank': 'True'}),
'recur_times': ('models.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'recurring': ('models.CharField', [], {'max_length': '1', 'blank': 'True'}),
'recurring_payment_id': ('models.CharField', [], {'max_length': '128', 'blank': 'True'}),
'remaining_settle': ('models.DecimalField', [],
{'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2',
'blank': 'True'}),
'residence_country': ('models.CharField', [], {'max_length': '2', 'blank': 'True'}),
'response': ('models.TextField', [], {'blank': 'True'}),
'retry_at': ('models.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'rp_invoice_id': ('models.CharField', [], {'max_length': '127', 'blank': 'True'}),
'settle_amount': ('models.DecimalField', [],
{'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2',
'blank': 'True'}),
'settle_currency': ('models.CharField', [], {'max_length': '32', 'blank': 'True'}),
'shipping': ('models.DecimalField', [],
{'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'shipping_method': ('models.CharField', [], {'max_length': '255', 'blank': 'True'}),
'subscr_date': ('models.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subscr_effective': ('models.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subscr_id': ('models.CharField', [], {'max_length': '19', 'blank': 'True'}),
'tax': ('models.DecimalField', [],
{'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'test_ipn': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'time_created': ('models.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'transaction_entity': ('models.CharField', [], {'max_length': '7', 'blank': 'True'}),
'transaction_subject': ('models.CharField', [], {'max_length': '255', 'blank': 'True'}),
'txn_id': ('models.CharField', ['"Transaction ID"'], {'max_length': '19', 'blank': 'True'}),
'txn_type': ('models.CharField', ['"Transaction Type"'], {'max_length': '128', 'blank': 'True'}),
'updated_at': ('models.DateTimeField', [], {'auto_now': 'True'}),
'username': ('models.CharField', [], {'max_length': '64', 'blank': 'True'}),
'verify_sign': ('models.CharField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['ipn']
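# Illustrative only, not part of the original migration: a minimal sketch of
# querying the PayPalIPN model whose 'paypal_ipn' table is created above. The
# import path assumes the stock django-paypal layout, and the txn_id value is
# purely hypothetical.
def _example_flagged_ipns(txn_id):
    # Deferred import so this sketch has no effect at migration import time.
    from paypal.standard.ipn.models import PayPalIPN  # assumed app layout
    # Return any flagged notifications recorded for the given transaction id.
    return PayPalIPN.objects.filter(txn_id=txn_id, flag=True)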
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import (
TYPE_CHECKING,
Dict,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from matrix_common.versionstring import get_distribution_version_string
from typing_extensions import Literal
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersions
from synapse.api.urls import FEDERATION_UNSTABLE_PREFIX, FEDERATION_V2_PREFIX
from synapse.federation.transport.server._base import (
Authenticator,
BaseFederationServlet,
)
from synapse.http.servlet import (
parse_boolean_from_args,
parse_integer_from_args,
parse_string_from_args,
parse_strings_from_args,
)
from synapse.types import JsonDict
from synapse.util.ratelimitutils import FederationRateLimiter
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
issue_8631_logger = logging.getLogger("synapse.8631_debug")
class BaseFederationServerServlet(BaseFederationServlet):
"""Abstract base class for federation servlet classes which provides a federation server handler.
See BaseFederationServlet for more information.
"""
def __init__(
self,
hs: "HomeServer",
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self.handler = hs.get_federation_server()
class FederationSendServlet(BaseFederationServerServlet):
PATH = "/send/(?P<transaction_id>[^/]*)/?"
# We ratelimit manually in the handler as we queue up the requests and we
# don't want to fill up the ratelimiter with blocked requests.
RATELIMIT = False
# This is when someone is trying to send us a bunch of data.
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
transaction_id: str,
) -> Tuple[int, JsonDict]:
"""Called on PUT /send/<transaction_id>/
Args:
transaction_id: The transaction_id associated with this request. This
is *not* None.
Returns:
Tuple of `(code, response)`, where
`response` is a python dict to be converted into JSON that is
used as the response body.
"""
# Parse the request
try:
transaction_data = content
logger.debug("Decoded %s: %s", transaction_id, str(transaction_data))
logger.info(
"Received txn %s from %s. (PDUs: %d, EDUs: %d)",
transaction_id,
origin,
len(transaction_data.get("pdus", [])),
len(transaction_data.get("edus", [])),
)
if issue_8631_logger.isEnabledFor(logging.DEBUG):
DEVICE_UPDATE_EDUS = ["m.device_list_update", "m.signing_key_update"]
device_list_updates = [
edu.get("content", {})
for edu in transaction_data.get("edus", [])
if edu.get("edu_type") in DEVICE_UPDATE_EDUS
]
if device_list_updates:
issue_8631_logger.debug(
"received transaction [%s] including device list updates: %s",
transaction_id,
device_list_updates,
)
except Exception as e:
logger.exception(e)
return 400, {"error": "Invalid transaction"}
code, response = await self.handler.on_incoming_transaction(
origin, transaction_id, self.server_name, transaction_data
)
return code, response
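# Illustrative only (not part of Synapse): a minimal, hypothetical transaction
# body of the shape on_PUT above inspects -- a list of PDUs plus one
# device-list-update EDU, matching the keys read via transaction_data.get().
_EXAMPLE_TRANSACTION: JsonDict = {
    "pdus": [],
    "edus": [{"edu_type": "m.device_list_update", "content": {}}],
}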
class FederationEventServlet(BaseFederationServerServlet):
PATH = "/event/(?P<event_id>[^/]*)/?"
# This is when someone asks for a data item for a given server data_id pair.
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
event_id: str,
) -> Tuple[int, Union[JsonDict, str]]:
return await self.handler.on_pdu_request(origin, event_id)
class FederationStateV1Servlet(BaseFederationServerServlet):
PATH = "/state/(?P<room_id>[^/]*)/?"
# This is when someone asks for all data for a given room.
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
return await self.handler.on_room_state_request(
origin,
room_id,
parse_string_from_args(query, "event_id", None, required=False),
)
class FederationStateIdsServlet(BaseFederationServerServlet):
PATH = "/state_ids/(?P<room_id>[^/]*)/?"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
return await self.handler.on_state_ids_request(
origin,
room_id,
parse_string_from_args(query, "event_id", None, required=True),
)
class FederationBackfillServlet(BaseFederationServerServlet):
PATH = "/backfill/(?P<room_id>[^/]*)/?"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
versions = [x.decode("ascii") for x in query[b"v"]]
limit = parse_integer_from_args(query, "limit", None)
if not limit:
return 400, {"error": "Did not include limit param"}
return await self.handler.on_backfill_request(origin, room_id, versions, limit)
class FederationTimestampLookupServlet(BaseFederationServerServlet):
"""
API endpoint to fetch the `event_id` of the closest event to the given
timestamp (`ts` query parameter) in the given direction (`dir` query
parameter).
Useful for other homeservers when they're unable to find an event locally.
`ts` is a timestamp in milliseconds where we will find the closest event in
the given direction.
`dir` can be `f` or `b` to indicate forwards and backwards in time from the
given timestamp.
GET /_matrix/federation/unstable/org.matrix.msc3030/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction>
{
"event_id": ...
}
"""
PATH = "/timestamp_to_event/(?P<room_id>[^/]*)/?"
PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc3030"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
timestamp = parse_integer_from_args(query, "ts", required=True)
direction = parse_string_from_args(
query, "dir", default="f", allowed_values=["f", "b"], required=True
)
return await self.handler.on_timestamp_to_event_request(
origin, room_id, timestamp, direction
)
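# Illustrative only (not part of Synapse): a hedged sketch of a
# timestamp_to_event exchange as described in the docstring above; the room id
# and event id below are purely hypothetical.
_EXAMPLE_TIMESTAMP_TO_EVENT_PATH = (
    "/_matrix/federation/unstable/org.matrix.msc3030"
    "/timestamp_to_event/!somewhere:example.org?ts=1650000000000&dir=f"
)
_EXAMPLE_TIMESTAMP_TO_EVENT_RESPONSE: JsonDict = {
    "event_id": "$hypothetical_event_id"
}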
class FederationQueryServlet(BaseFederationServerServlet):
PATH = "/query/(?P<query_type>[^/]*)"
# This is when we receive a server-server Query
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
query_type: str,
) -> Tuple[int, JsonDict]:
args = {k.decode("utf8"): v[0].decode("utf-8") for k, v in query.items()}
args["origin"] = origin
return await self.handler.on_query_request(query_type, args)
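# Illustrative only (not part of Synapse): for a hypothetical query mapping of
# {b"field": [b"displayname"]} arriving from origin "remote.example.org", the
# decoding above hands the handler
# {"field": "displayname", "origin": "remote.example.org"}.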
class FederationMakeJoinServlet(BaseFederationServerServlet):
PATH = "/make_join/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
"""
Args:
origin: The authenticated server_name of the calling server
content: (GETs don't have bodies)
query: Query params from the request.
**kwargs: the dict mapping keys to path components as specified in
the path match regexp.
Returns:
Tuple of (response code, response object)
"""
supported_versions = parse_strings_from_args(query, "ver", encoding="utf-8")
if supported_versions is None:
supported_versions = ["1"]
result = await self.handler.on_make_join_request(
origin, room_id, user_id, supported_versions=supported_versions
)
return 200, result
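# Illustrative only (not part of Synapse): a hypothetical raw query mapping for
# a make_join request advertising room-version support; parse_strings_from_args
# above would turn it into ["1", "6"] before calling on_make_join_request.
_EXAMPLE_MAKE_JOIN_QUERY: Dict[bytes, List[bytes]] = {b"ver": [b"1", b"6"]}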
class FederationMakeLeaveServlet(BaseFederationServerServlet):
PATH = "/make_leave/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
result = await self.handler.on_make_leave_request(origin, room_id, user_id)
return 200, result
class FederationV1SendLeaveServlet(BaseFederationServerServlet):
PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
event_id: str,
) -> Tuple[int, Tuple[int, JsonDict]]:
result = await self.handler.on_send_leave_request(origin, content, room_id)
return 200, (200, result)
class FederationV2SendLeaveServlet(BaseFederationServerServlet):
PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
PREFIX = FEDERATION_V2_PREFIX
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
event_id: str,
) -> Tuple[int, JsonDict]:
result = await self.handler.on_send_leave_request(origin, content, room_id)
return 200, result
class FederationMakeKnockServlet(BaseFederationServerServlet):
PATH = "/make_knock/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
# Retrieve the room versions the remote homeserver claims to support
supported_versions = parse_strings_from_args(
query, "ver", required=True, encoding="utf-8"
)
result = await self.handler.on_make_knock_request(
origin, room_id, user_id, supported_versions=supported_versions
)
return 200, result
class FederationV1SendKnockServlet(BaseFederationServerServlet):
PATH = "/send_knock/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
event_id: str,
) -> Tuple[int, JsonDict]:
result = await self.handler.on_send_knock_request(origin, content, room_id)
return 200, result
class FederationEventAuthServlet(BaseFederationServerServlet):
PATH = "/event_auth/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
event_id: str,
) -> Tuple[int, JsonDict]:
return await self.handler.on_event_auth(origin, room_id, event_id)
class FederationV1SendJoinServlet(BaseFederationServerServlet):
PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
event_id: str,
) -> Tuple[int, Tuple[int, JsonDict]]:
# TODO(paul): assert that the event_id parsed from the path actually
# matches the one given in content
result = await self.handler.on_send_join_request(origin, content, room_id)
return 200, (200, result)
class FederationV2SendJoinServlet(BaseFederationServerServlet):
PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
PREFIX = FEDERATION_V2_PREFIX
def __init__(
self,
hs: "HomeServer",
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self._msc3706_enabled = hs.config.experimental.msc3706_enabled
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
event_id: str,
) -> Tuple[int, JsonDict]:
# TODO(paul): assert that the event_id parsed from the path actually
# matches the one given in content
partial_state = False
if self._msc3706_enabled:
partial_state = parse_boolean_from_args(
query, "org.matrix.msc3706.partial_state", default=False
)
result = await self.handler.on_send_join_request(
origin, content, room_id, caller_supports_partial_state=partial_state
)
return 200, result
class FederationV1InviteServlet(BaseFederationServerServlet):
PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
event_id: str,
) -> Tuple[int, Tuple[int, JsonDict]]:
# We don't get a room version, so we have to assume it's EITHER v1 or
# v2. This is "fine" as the only difference between V1 and V2 is the
# state resolution algorithm, and we don't use that for processing
# invites.
result = await self.handler.on_invite_request(
origin, content, room_version_id=RoomVersions.V1.identifier
)
# V1 federation API is defined to return a content of `[200, {...}]`
# due to a historical bug.
return 200, (200, result)
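# Illustrative only (not part of Synapse): because of the historical bug noted
# above, the V1 invite response body is the two-element form below rather than
# a bare object; the inner dict is just a placeholder here.
_EXAMPLE_V1_INVITE_RESPONSE_BODY = [200, {}]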
class FederationV2InviteServlet(BaseFederationServerServlet):
PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
PREFIX = FEDERATION_V2_PREFIX
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
event_id: str,
) -> Tuple[int, JsonDict]:
# TODO(paul): assert that the room_id/event_id parsed from the path
# actually match those given in content
room_version = content["room_version"]
event = content["event"]
invite_room_state = content["invite_room_state"]
# Synapse expects invite_room_state to be in unsigned, as it is in v1
# API
event.setdefault("unsigned", {})["invite_room_state"] = invite_room_state
result = await self.handler.on_invite_request(
origin, event, room_version_id=room_version
)
return 200, result
class FederationThirdPartyInviteExchangeServlet(BaseFederationServerServlet):
PATH = "/exchange_third_party_invite/(?P<room_id>[^/]*)"
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
await self.handler.on_exchange_third_party_invite_request(content)
return 200, {}
class FederationClientKeysQueryServlet(BaseFederationServerServlet):
PATH = "/user/keys/query"
async def on_POST(
self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
) -> Tuple[int, JsonDict]:
return await self.handler.on_query_client_keys(origin, content)
class FederationUserDevicesQueryServlet(BaseFederationServerServlet):
PATH = "/user/devices/(?P<user_id>[^/]*)"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
user_id: str,
) -> Tuple[int, JsonDict]:
return await self.handler.on_query_user_devices(origin, user_id)
class FederationClientKeysClaimServlet(BaseFederationServerServlet):
PATH = "/user/keys/claim"
async def on_POST(
self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
) -> Tuple[int, JsonDict]:
response = await self.handler.on_claim_client_keys(origin, content)
return 200, response
class FederationGetMissingEventsServlet(BaseFederationServerServlet):
# TODO(paul): Why is the trailing "/?" optional on this path alone?
PATH = "/get_missing_events/(?P<room_id>[^/]*)/?"
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
limit = int(content.get("limit", 10))
earliest_events = content.get("earliest_events", [])
latest_events = content.get("latest_events", [])
result = await self.handler.on_get_missing_events(
origin,
room_id=room_id,
earliest_events=earliest_events,
latest_events=latest_events,
limit=limit,
)
return 200, result
class On3pidBindServlet(BaseFederationServerServlet):
PATH = "/3pid/onbind"
REQUIRE_AUTH = False
async def on_POST(
self, origin: Optional[str], content: JsonDict, query: Dict[bytes, List[bytes]]
) -> Tuple[int, JsonDict]:
if "invites" in content:
last_exception = None
for invite in content["invites"]:
try:
if "signed" not in invite or "token" not in invite["signed"]:
message = (
"Rejecting received notification of third-"
"party invite without signed: %s" % (invite,)
)
logger.info(message)
raise SynapseError(400, message)
await self.handler.exchange_third_party_invite(
invite["sender"],
invite["mxid"],
invite["room_id"],
invite["signed"],
)
except Exception as e:
last_exception = e
if last_exception:
raise last_exception
return 200, {}
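# Illustrative only (not part of Synapse): a hypothetical onbind body of the
# shape the loop above iterates over -- each invite carries sender, mxid,
# room_id and a "signed" block that must contain a token.
_EXAMPLE_ONBIND_CONTENT: JsonDict = {
    "invites": [
        {
            "sender": "@alice:example.org",
            "mxid": "@bob:example.org",
            "room_id": "!room:example.org",
            "signed": {"token": "abc123"},
        }
    ]
}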
class FederationVersionServlet(BaseFederationServlet):
PATH = "/version"
REQUIRE_AUTH = False
async def on_GET(
self,
origin: Optional[str],
content: Literal[None],
query: Dict[bytes, List[bytes]],
) -> Tuple[int, JsonDict]:
return (
200,
{
"server": {
"name": "Synapse",
"version": get_distribution_version_string("matrix-synapse"),
}
},
)
class FederationRoomHierarchyServlet(BaseFederationServlet):
PATH = "/hierarchy/(?P<room_id>[^/]*)"
def __init__(
self,
hs: "HomeServer",
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self.handler = hs.get_room_summary_handler()
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Mapping[bytes, Sequence[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
suggested_only = parse_boolean_from_args(query, "suggested_only", default=False)
return 200, await self.handler.get_federation_hierarchy(
origin, room_id, suggested_only
)
class FederationRoomHierarchyUnstableServlet(FederationRoomHierarchyServlet):
PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc2946"
class RoomComplexityServlet(BaseFederationServlet):
"""
Indicates to other servers how complex (and therefore likely
resource-intensive) a public room this server knows about is.
"""
PATH = "/rooms/(?P<room_id>[^/]*)/complexity"
PREFIX = FEDERATION_UNSTABLE_PREFIX
def __init__(
self,
hs: "HomeServer",
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self._store = self.hs.get_datastores().main
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
is_public = await self._store.is_room_world_readable_or_publicly_joinable(
room_id
)
if not is_public:
raise SynapseError(404, "Room not found", errcode=Codes.INVALID_PARAM)
complexity = await self._store.get_room_complexity(room_id)
return 200, complexity
class FederationAccountStatusServlet(BaseFederationServerServlet):
PATH = "/query/account_status"
PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc3720"
def __init__(
self,
hs: "HomeServer",
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self._account_handler = hs.get_account_handler()
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Mapping[bytes, Sequence[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
if "user_ids" not in content:
raise SynapseError(
400, "Required parameter 'user_ids' is missing", Codes.MISSING_PARAM
)
statuses, failures = await self._account_handler.get_account_statuses(
content["user_ids"],
allow_remote=False,
)
return 200, {"account_statuses": statuses, "failures": failures}
FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
FederationSendServlet,
FederationEventServlet,
FederationStateV1Servlet,
FederationStateIdsServlet,
FederationBackfillServlet,
FederationTimestampLookupServlet,
FederationQueryServlet,
FederationMakeJoinServlet,
FederationMakeLeaveServlet,
FederationEventServlet,
FederationV1SendJoinServlet,
FederationV2SendJoinServlet,
FederationV1SendLeaveServlet,
FederationV2SendLeaveServlet,
FederationV1InviteServlet,
FederationV2InviteServlet,
FederationGetMissingEventsServlet,
FederationEventAuthServlet,
FederationClientKeysQueryServlet,
FederationUserDevicesQueryServlet,
FederationClientKeysClaimServlet,
FederationThirdPartyInviteExchangeServlet,
On3pidBindServlet,
FederationVersionServlet,
RoomComplexityServlet,
FederationRoomHierarchyServlet,
FederationRoomHierarchyUnstableServlet,
FederationV1SendKnockServlet,
FederationMakeKnockServlet,
FederationAccountStatusServlet,
)
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import subprocess
import tempfile
import textwrap
import unittest
from collections import namedtuple
from contextlib import contextmanager
from twitter.common.collections import maybe_list
from pants.base.revision import Revision
from pants.java.distribution.distribution import Distribution, DistributionLocator
from pants.util.contextutil import environment_as, temporary_dir
from pants.util.dirutil import chmod_plus_x, safe_open, safe_rmtree, touch
from pants_test.subsystem.subsystem_util import subsystem_instance
EXE = namedtuple('Exe', ['relpath', 'contents'])
def exe(relpath, version=None):
contents = textwrap.dedent("""
#!/bin/sh
if [ $# -ne 3 ]; then
# Sanity check a classpath switch with a value plus the classname for main
echo "Expected 3 arguments, got $#: $@" >&2
exit 1
fi
echo "java.home=${{DIST_ROOT}}"
{}
""".format('echo "java.version={}"'.format(version) if version else '')).strip()
return EXE(relpath, contents=contents)
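# Illustrative only (not part of the original tests): a hedged sketch of how
# exe() is used below; the relpath and version are hypothetical. The resulting
# EXE's contents are a tiny shell script that echoes java.home and, because a
# version is given, java.version=1.7.0_25 when invoked.
_EXAMPLE_EXE = exe('bin/java', version='1.7.0_25')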
@contextmanager
def distribution(files=None, executables=None, java_home=None):
with subsystem_instance(DistributionLocator):
with temporary_dir() as dist_root:
with environment_as(DIST_ROOT=os.path.join(dist_root, java_home) if java_home else dist_root):
for f in maybe_list(files or ()):
touch(os.path.join(dist_root, f))
for executable in maybe_list(executables or (), expected_type=EXE):
path = os.path.join(dist_root, executable.relpath)
with safe_open(path, 'w') as fp:
fp.write(executable.contents or '')
chmod_plus_x(path)
yield dist_root
@contextmanager
def env(**kwargs):
environment = dict(JDK_HOME=None, JAVA_HOME=None, PATH=None)
environment.update(**kwargs)
with environment_as(**environment):
yield
class DistributionValidationTest(unittest.TestCase):
def test_validate_basic(self):
with distribution() as dist_root:
with self.assertRaises(ValueError):
Distribution(bin_path=os.path.join(dist_root, 'bin')).validate()
with distribution(files='bin/java') as dist_root:
with self.assertRaises(Distribution.Error):
Distribution(bin_path=os.path.join(dist_root, 'bin')).validate()
with distribution(executables=exe('bin/java')) as dist_root:
Distribution(bin_path=os.path.join(dist_root, 'bin')).validate()
def test_validate_jre(self):
with distribution(executables=exe('bin/java')) as dist_root:
Distribution(bin_path=os.path.join(dist_root, 'bin'), jdk=False).validate()
def test_validate_jdk(self):
with distribution(executables=exe('bin/java')) as dist_root:
with self.assertRaises(Distribution.Error):
Distribution(bin_path=os.path.join(dist_root, 'bin'), jdk=True).validate()
with distribution(executables=[exe('bin/java'), exe('bin/javac')]) as dist_root:
Distribution(bin_path=os.path.join(dist_root, 'bin'), jdk=True).validate()
with distribution(executables=[exe('jre/bin/java'), exe('bin/javac')],
java_home='jre') as dist_root:
Distribution(bin_path=os.path.join(dist_root, 'jre/bin'), jdk=True).validate()
def test_validate_version(self):
with distribution(executables=exe('bin/java', '1.7.0_25')) as dist_root:
with self.assertRaises(Distribution.Error):
Distribution(bin_path=os.path.join(dist_root, 'bin'), minimum_version='1.7.0_45').validate()
with distribution(executables=exe('bin/java', '1.8.0_1')) as dist_root:
with self.assertRaises(Distribution.Error):
Distribution(bin_path=os.path.join(dist_root, 'bin'), maximum_version='1.8').validate()
with distribution(executables=exe('bin/java', '1.7.0_25')) as dist_root:
Distribution(bin_path=os.path.join(dist_root, 'bin'), minimum_version='1.7.0_25').validate()
Distribution(bin_path=os.path.join(dist_root, 'bin'),
minimum_version=Revision.lenient('1.6')).validate()
Distribution(bin_path=os.path.join(dist_root, 'bin'),
minimum_version='1.7.0_25',
maximum_version='1.7.999').validate()
def test_validated_binary(self):
with distribution(files='bin/jar', executables=exe('bin/java')) as dist_root:
with self.assertRaises(Distribution.Error):
Distribution(bin_path=os.path.join(dist_root, 'bin')).binary('jar')
with distribution(executables=[exe('bin/java'), exe('bin/jar')]) as dist_root:
Distribution(bin_path=os.path.join(dist_root, 'bin')).binary('jar')
with distribution(executables=[exe('jre/bin/java'), exe('bin/jar')],
java_home='jre') as dist_root:
with self.assertRaises(Distribution.Error):
Distribution(bin_path=os.path.join(dist_root, 'jre', 'bin')).binary('jar')
with distribution(executables=[exe('jre/bin/java'), exe('bin/jar'), exe('bin/javac')],
java_home='jre') as dist_root:
Distribution(bin_path=os.path.join(dist_root, 'jre', 'bin')).binary('jar')
with distribution(executables=[exe('jre/bin/java'), exe('jre/bin/java_vm'), exe('bin/javac')],
java_home='jre') as dist_root:
Distribution(bin_path=os.path.join(dist_root, 'jre', 'bin')).binary('java_vm')
def test_validated_library(self):
with distribution(executables=exe('bin/java')) as dist_root:
with self.assertRaises(Distribution.Error):
Distribution(bin_path=os.path.join(dist_root, 'bin')).find_libs(['tools.jar'])
with distribution(executables=exe('bin/java'), files='lib/tools.jar') as dist_root:
dist = Distribution(bin_path=os.path.join(dist_root, 'bin'))
self.assertEqual([os.path.join(dist_root, 'lib', 'tools.jar')],
dist.find_libs(['tools.jar']))
with distribution(executables=[exe('jre/bin/java'), exe('bin/javac')],
files=['lib/tools.jar', 'jre/lib/rt.jar'],
java_home='jre') as dist_root:
dist = Distribution(bin_path=os.path.join(dist_root, 'jre/bin'))
self.assertEqual([os.path.join(dist_root, 'lib', 'tools.jar'),
os.path.join(dist_root, 'jre', 'lib', 'rt.jar')],
dist.find_libs(['tools.jar', 'rt.jar']))
class BaseDistributionLocationTest(unittest.TestCase):
def make_tmp_dir(self):
tmpdir = tempfile.mkdtemp()
self.addCleanup(safe_rmtree, tmpdir)
return tmpdir
def set_up_no_linux_discovery(self):
orig_java_dist_dir = DistributionLocator._JAVA_DIST_DIR
def restore_java_dist_dir():
DistributionLocator._JAVA_DIST_DIR = orig_java_dist_dir
DistributionLocator._JAVA_DIST_DIR = self.make_tmp_dir()
self.addCleanup(restore_java_dist_dir)
def set_up_no_osx_discovery(self):
osx_java_home_exe = DistributionLocator._OSX_JAVA_HOME_EXE
def restore_osx_java_home_exe():
DistributionLocator._OSX_JAVA_HOME_EXE = osx_java_home_exe
DistributionLocator._OSX_JAVA_HOME_EXE = os.path.join(self.make_tmp_dir(), 'java_home')
self.addCleanup(restore_osx_java_home_exe)
class BaseDistributionLocationEnvOnlyTest(BaseDistributionLocationTest):
def setUp(self):
self.set_up_no_linux_discovery()
self.set_up_no_osx_discovery()
class DistributionEnvLocationTest(BaseDistributionLocationEnvOnlyTest):
def test_locate_none(self):
with env():
with self.assertRaises(Distribution.Error):
with subsystem_instance(DistributionLocator):
DistributionLocator.locate()
def test_locate_java_not_executable(self):
with distribution(files='bin/java') as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
with self.assertRaises(Distribution.Error):
DistributionLocator.locate()
def test_locate_jdk_is_jre(self):
with distribution(executables=exe('bin/java')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
with self.assertRaises(Distribution.Error):
DistributionLocator.locate(jdk=True)
def test_locate_version_too_low(self):
with distribution(executables=exe('bin/java', '1.6.0')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
with self.assertRaises(Distribution.Error):
DistributionLocator.locate(minimum_version='1.7.0')
def test_locate_version_too_high(self):
with distribution(executables=exe('bin/java', '1.8.0')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
with self.assertRaises(Distribution.Error):
DistributionLocator.locate(maximum_version='1.7.999')
def test_locate_invalid_jdk_home(self):
with distribution(executables=exe('java')) as dist_root:
with env(JDK_HOME=dist_root):
with self.assertRaises(Distribution.Error):
DistributionLocator.locate()
def test_locate_invalid_java_home(self):
with distribution(executables=exe('java')) as dist_root:
with env(JAVA_HOME=dist_root):
with self.assertRaises(Distribution.Error):
DistributionLocator.locate()
def test_locate_jre_by_path(self):
with distribution(executables=exe('bin/java')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
DistributionLocator.locate()
def test_locate_jdk_by_path(self):
with distribution(executables=[exe('bin/java'), exe('bin/javac')]) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
DistributionLocator.locate(jdk=True)
def test_locate_jdk_via_jre_path(self):
with distribution(executables=[exe('jre/bin/java'), exe('bin/javac')],
java_home='jre') as dist_root:
with env(PATH=os.path.join(dist_root, 'jre', 'bin')):
DistributionLocator.locate(jdk=True)
def test_locate_version_greater_than_or_equal(self):
with distribution(executables=exe('bin/java', '1.7.0')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
DistributionLocator.locate(minimum_version='1.6.0')
def test_locate_version_less_than_or_equal(self):
with distribution(executables=exe('bin/java', '1.7.0')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
DistributionLocator.locate(maximum_version='1.7.999')
def test_locate_version_within_range(self):
with distribution(executables=exe('bin/java', '1.7.0')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
DistributionLocator.locate(minimum_version='1.6.0', maximum_version='1.7.999')
def test_locate_via_jdk_home(self):
with distribution(executables=exe('bin/java')) as dist_root:
with env(JDK_HOME=dist_root):
DistributionLocator.locate()
def test_locate_via_java_home(self):
with distribution(executables=exe('bin/java')) as dist_root:
with env(JAVA_HOME=dist_root):
DistributionLocator.locate()
class DistributionLinuxLocationTest(BaseDistributionLocationTest):
def setUp(self):
self.set_up_no_osx_discovery()
@contextmanager
def java_dist_dir(self):
with distribution(executables=exe('bin/java', version='1')) as jdk1_home:
with distribution(executables=exe('bin/java', version='2')) as jdk2_home:
with temporary_dir() as java_dist_dir:
jdk1_home_link = os.path.join(java_dist_dir, 'jdk1_home')
jdk2_home_link = os.path.join(java_dist_dir, 'jdk2_home')
os.symlink(jdk1_home, jdk1_home_link)
os.symlink(jdk2_home, jdk2_home_link)
original_java_dist_dir = DistributionLocator._JAVA_DIST_DIR
DistributionLocator._JAVA_DIST_DIR = java_dist_dir
try:
yield jdk1_home_link, jdk2_home_link
finally:
DistributionLocator._JAVA_DIST_DIR = original_java_dist_dir
def test_locate_jdk1(self):
with env():
with self.java_dist_dir() as (jdk1_home, _):
dist = DistributionLocator.locate(maximum_version='1')
self.assertEqual(jdk1_home, dist.home)
def test_locate_jdk2(self):
with env():
with self.java_dist_dir() as (_, jdk2_home):
dist = DistributionLocator.locate(minimum_version='2')
self.assertEqual(jdk2_home, dist.home)
def test_locate_trumps_path(self):
with self.java_dist_dir() as (jdk1_home, jdk2_home):
with distribution(executables=exe('bin/java', version='3')) as path_jdk:
with env(PATH=os.path.join(path_jdk, 'bin')):
dist = DistributionLocator.locate(minimum_version='2')
self.assertEqual(jdk2_home, dist.home)
dist = DistributionLocator.locate(minimum_version='3')
self.assertEqual(path_jdk, dist.home)
def test_locate_jdk_home_trumps(self):
with self.java_dist_dir() as (jdk1_home, jdk2_home):
with distribution(executables=exe('bin/java', version='3')) as jdk_home:
with env(JDK_HOME=jdk_home):
dist = DistributionLocator.locate()
self.assertEqual(jdk_home, dist.home)
dist = DistributionLocator.locate(maximum_version='1.1')
self.assertEqual(jdk1_home, dist.home)
dist = DistributionLocator.locate(minimum_version='1.1', maximum_version='2')
self.assertEqual(jdk2_home, dist.home)
def test_locate_java_home_trumps(self):
with self.java_dist_dir() as (jdk1_home, jdk2_home):
with distribution(executables=exe('bin/java', version='3')) as java_home:
with env(JAVA_HOME=java_home):
dist = DistributionLocator.locate()
self.assertEqual(java_home, dist.home)
dist = DistributionLocator.locate(maximum_version='1.1')
self.assertEqual(jdk1_home, dist.home)
dist = DistributionLocator.locate(minimum_version='1.1', maximum_version='2')
self.assertEqual(jdk2_home, dist.home)
class DistributionOSXLocationTest(BaseDistributionLocationTest):
def setUp(self):
self.set_up_no_linux_discovery()
@contextmanager
def java_home_exe(self):
with distribution(executables=exe('bin/java', version='1')) as jdk1_home:
with distribution(executables=exe('bin/java', version='2')) as jdk2_home:
with temporary_dir() as tmpdir:
osx_java_home_exe = os.path.join(tmpdir, 'java_home')
with safe_open(osx_java_home_exe, 'w') as fp:
fp.write(textwrap.dedent("""
#!/bin/sh
echo '<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"
"http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<array>
<dict>
<key>JVMHomePath</key>
<string>{jdk1_home}</string>
</dict>
<dict>
<key>JVMHomePath</key>
<string>{jdk2_home}</string>
</dict>
</array>
</plist>
'
""".format(jdk1_home=jdk1_home, jdk2_home=jdk2_home)).strip())
chmod_plus_x(osx_java_home_exe)
original_osx_java_home_exe = DistributionLocator._OSX_JAVA_HOME_EXE
DistributionLocator._OSX_JAVA_HOME_EXE = osx_java_home_exe
try:
yield jdk1_home, jdk2_home
finally:
DistributionLocator._OSX_JAVA_HOME_EXE = original_osx_java_home_exe
def test_locate_jdk1(self):
with env():
with self.java_home_exe() as (jdk1_home, _):
dist = DistributionLocator.locate()
self.assertEqual(jdk1_home, dist.home)
def test_locate_jdk2(self):
with env():
with self.java_home_exe() as (_, jdk2_home):
dist = DistributionLocator.locate(minimum_version='2')
self.assertEqual(jdk2_home, dist.home)
def test_locate_trumps_path(self):
with self.java_home_exe() as (jdk1_home, jdk2_home):
with distribution(executables=exe('bin/java', version='3')) as path_jdk:
with env(PATH=os.path.join(path_jdk, 'bin')):
dist = DistributionLocator.locate()
self.assertEqual(jdk1_home, dist.home)
dist = DistributionLocator.locate(minimum_version='3')
self.assertEqual(path_jdk, dist.home)
def test_locate_jdk_home_trumps(self):
with self.java_home_exe() as (jdk1_home, jdk2_home):
with distribution(executables=exe('bin/java', version='3')) as jdk_home:
with env(JDK_HOME=jdk_home):
dist = DistributionLocator.locate()
self.assertEqual(jdk_home, dist.home)
dist = DistributionLocator.locate(maximum_version='1.1')
self.assertEqual(jdk1_home, dist.home)
dist = DistributionLocator.locate(minimum_version='1.1', maximum_version='2')
self.assertEqual(jdk2_home, dist.home)
def test_locate_java_home_trumps(self):
with self.java_home_exe() as (jdk1_home, jdk2_home):
with distribution(executables=exe('bin/java', version='3')) as java_home:
with env(JAVA_HOME=java_home):
dist = DistributionLocator.locate()
self.assertEqual(java_home, dist.home)
dist = DistributionLocator.locate(maximum_version='1.1')
self.assertEqual(jdk1_home, dist.home)
dist = DistributionLocator.locate(minimum_version='1.1', maximum_version='2')
self.assertEqual(jdk2_home, dist.home)
class DistributionCachedTest(BaseDistributionLocationEnvOnlyTest):
def setUp(self):
super(DistributionCachedTest, self).setUp()
# Save local cache and then flush so tests get a clean environment.
local_cache = DistributionLocator._CACHE
def restore_cache():
DistributionLocator._CACHE = local_cache
DistributionLocator._CACHE = {}
self.addCleanup(restore_cache)
def test_cached_good_min(self):
with distribution(executables=exe('bin/java', '1.7.0_33')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
DistributionLocator.cached(minimum_version='1.7.0_25')
def test_cached_good_max(self):
with distribution(executables=exe('bin/java', '1.7.0_33')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
DistributionLocator.cached(maximum_version='1.7.0_50')
def test_cached_good_bounds(self):
with distribution(executables=exe('bin/java', '1.7.0_33')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
DistributionLocator.cached(minimum_version='1.6.0_35', maximum_version='1.7.0_55')
def test_cached_too_low(self):
with distribution(executables=exe('bin/java', '1.7.0_33')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
with self.assertRaises(Distribution.Error):
DistributionLocator.cached(minimum_version='1.7.0_40')
def test_cached_too_high(self):
with distribution(executables=exe('bin/java', '1.7.0_83')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
with self.assertRaises(Distribution.Error):
DistributionLocator.cached(maximum_version='1.7.0_55')
def test_cached_low_fault(self):
with distribution(executables=exe('bin/java', '1.7.0_33')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
with self.assertRaises(Distribution.Error):
DistributionLocator.cached(minimum_version='1.7.0_35', maximum_version='1.7.0_55')
def test_cached_high_fault(self):
with distribution(executables=exe('bin/java', '1.7.0_33')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
with self.assertRaises(Distribution.Error):
DistributionLocator.cached(minimum_version='1.6.0_00', maximum_version='1.6.0_50')
def test_cached_conflicting(self):
with distribution(executables=exe('bin/java', '1.7.0_33')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
with self.assertRaises(Distribution.Error):
DistributionLocator.cached(minimum_version='1.7.0_00', maximum_version='1.6.0_50')
def test_cached_bad_input(self):
with distribution(executables=exe('bin/java', '1.7.0_33')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
with self.assertRaises(ValueError):
DistributionLocator.cached(minimum_version=1.7, maximum_version=1.8)
def exe_path(name):
process = subprocess.Popen(['which', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, _ = process.communicate()
if process.returncode != 0:
return None
path = stdout.strip()
return path if os.path.exists(path) and os.access(path, os.X_OK) else None
class LiveDistributionTest(unittest.TestCase):
JAVA = exe_path('java')
JAVAC = exe_path('javac')
@unittest.skipIf(not JAVA, reason='No java executable on the PATH.')
def test_validate_live(self):
with self.assertRaises(Distribution.Error):
Distribution(bin_path=os.path.dirname(self.JAVA), minimum_version='999.9.9').validate()
with self.assertRaises(Distribution.Error):
Distribution(bin_path=os.path.dirname(self.JAVA), maximum_version='0.0.1').validate()
Distribution(bin_path=os.path.dirname(self.JAVA)).validate()
Distribution(bin_path=os.path.dirname(self.JAVA), minimum_version='1.3.1').validate()
Distribution(bin_path=os.path.dirname(self.JAVA), maximum_version='999.999.999').validate()
Distribution(bin_path=os.path.dirname(self.JAVA), minimum_version='1.3.1',
maximum_version='999.999.999').validate()
with subsystem_instance(DistributionLocator):
DistributionLocator.locate(jdk=False)
@unittest.skipIf(not JAVAC, reason='No javac executable on the PATH.')
def test_validate_live_jdk(self):
Distribution(bin_path=os.path.dirname(self.JAVAC), jdk=True).validate()
Distribution(bin_path=os.path.dirname(self.JAVAC), jdk=True).binary('javap')
with subsystem_instance(DistributionLocator):
DistributionLocator.locate(jdk=True)
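# Hedged usage sketch (not part of the test suite): how the cached locator is
# typically driven outside of tests. All names below are already imported by this
# module; the version bound is illustrative, and this assumes ``cached`` returns the
# located Distribution, as ``locate`` does above.
def _example_locate_cached_jdk():
  with subsystem_instance(DistributionLocator):
    dist = DistributionLocator.cached(minimum_version='1.7.0_25')
    return dist.binary('java')  # absolute path of the selected java executable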
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class ClustersOperations(object):
"""ClustersOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The version of the ServiceFabric resource provider API. Constant value: "2016-09-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-09-01"
self.config = config
def update(
self, resource_group_name, cluster_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Update cluster configuration.
:param resource_group_name: The name of the resource group to which
the resource belongs or gets created
:type resource_group_name: str
:param cluster_name: The name of the cluster resource
:type cluster_name: str
:param parameters: The parameters containing the property names and
values used to update the cluster configuration
:type parameters: :class:`ClusterUpdateParameters
<azure.mgmt.servicefabric.models.ClusterUpdateParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`Cluster
<azure.mgmt.servicefabric.models.Cluster>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises:
:class:`ErrorModelException<azure.mgmt.servicefabric.models.ErrorModelException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'ClusterUpdateParameters')
# Construct and send request
def long_running_send():
request = self._client.patch(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
raise models.ErrorModelException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Cluster', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get(
self, resource_group_name, cluster_name, custom_headers=None, raw=False, **operation_config):
"""Get cluster resource.
:param resource_group_name: The name of the resource group to which
the resource belongs or gets created
:type resource_group_name: str
:param cluster_name: The name of the cluster resource
:type cluster_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: :class:`Cluster <azure.mgmt.servicefabric.models.Cluster>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: :class:`Cluster <azure.mgmt.servicefabric.models.Cluster>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises:
:class:`ErrorModelException<azure.mgmt.servicefabric.models.ErrorModelException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorModelException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Cluster', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create(
self, resource_group_name, cluster_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Create cluster resource.
:param resource_group_name: The name of the resource group to which
the resource belongs or gets created
:type resource_group_name: str
:param cluster_name: The name of the cluster resource
:type cluster_name: str
:param parameters: The cluster resource to create (the request body)
:type parameters: :class:`Cluster
<azure.mgmt.servicefabric.models.Cluster>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`Cluster
<azure.mgmt.servicefabric.models.Cluster>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises:
:class:`ErrorModelException<azure.mgmt.servicefabric.models.ErrorModelException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'Cluster')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
raise models.ErrorModelException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Cluster', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def delete(
self, resource_group_name, cluster_name, custom_headers=None, raw=False, **operation_config):
"""Delete cluster resource.
:param resource_group_name: The name of the resource group to which
the resource belongs or gets created
:type resource_group_name: str
:param cluster_name: The name of the cluster resource
:type cluster_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: None or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises:
:class:`ErrorModelException<azure.mgmt.servicefabric.models.ErrorModelException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
raise models.ErrorModelException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def list_by_resource_group(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""List cluster resource by resource group.
:param resource_group_name: The name of the resource group to which
the resource belongs or gets created
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of :class:`Cluster
<azure.mgmt.servicefabric.models.Cluster>`
:rtype: :class:`ClusterPaged
<azure.mgmt.servicefabric.models.ClusterPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ClusterPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ClusterPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list(
self, custom_headers=None, raw=False, **operation_config):
"""List cluster resource.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of :class:`Cluster
<azure.mgmt.servicefabric.models.Cluster>`
:rtype: :class:`ClusterPaged
<azure.mgmt.servicefabric.models.ClusterPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.ServiceFabric/clusters'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ClusterPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ClusterPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
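# Hedged usage sketch (not AutoRest output): ClustersOperations is normally reached
# through the generated service client's ``clusters`` attribute rather than built by
# hand. The client class name and import path below are assumptions about the
# surrounding package; the resource names are placeholders.
def _example_update_cluster(credentials, subscription_id, update_parameters):
    from azure.mgmt.servicefabric import ServiceFabricManagementClient
    client = ServiceFabricManagementClient(credentials, subscription_id)
    # ``update`` returns an AzureOperationPoller; ``result()`` blocks until the
    # long-running PATCH finishes and yields the deserialized Cluster model.
    poller = client.clusters.update('my-resource-group', 'my-cluster', update_parameters)
    return poller.result()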
|
|
# Copyright 2016 Massachusetts Open Cloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import json
import smtplib
import uuid
from email.mime.text import MIMEText
from flask import Response
from flask import request, render_template
from keystoneauth1.identity import v3
from keystoneauth1 import session
from keystoneauth1.exceptions import http as ksa_exceptions
from setpass import config
from setpass import model
from setpass import wsgi
from setpass import exception
application = wsgi.app
CONF = config.CONF
@wsgi.app.route('/', methods=['GET'])
def view_form():
token = request.args.get('token', None)
if not token:
return Response(response='Token not found', status=404)
return render_template('password_form.html')
@wsgi.app.route('/', methods=['POST'])
def set_password():
token = request.args.get('token')
password = request.form['password']
confirm_password = request.form['confirm_password']
pin = request.form['pin']
if not token or not password or not confirm_password or not pin:
return Response(response='Missing required field!', status=400)
if password != confirm_password:
return Response(response='Passwords do not match', status=400)
try:
_set_password(token, pin, password)
except exception.TokenNotFoundException:
return Response(response='Token not found', status=404)
except exception.TokenExpiredException:
return Response(response='Token expired', status=403)
except exception.WrongPinException:
return Response(response='Wrong pin', status=403)
except exception.OpenStackError as e:
return Response(response=str(e), status=500)
except exception.AccountLocked:
return Response(response='Account locked, too many wrong attempts!',
status=403)
return Response(response='Password set.', status=200)
def _set_openstack_password(user_id, old_password, new_password):
auth = v3.Password(auth_url=CONF.auth_url,
user_id=user_id,
password=old_password)
sess = session.Session(auth=auth)
url = '%s/users/%s/password' % (CONF.auth_url, user_id)
payload = {'user': {'password': new_password,
'original_password': old_password}}
header = {'Content-Type': 'application/json'}
r = sess.post(url, headers=header, data=json.dumps(payload))
if 200 <= r.status_code < 300:
return True
else:
raise exception.OpenStackError(r.text)
def _check_admin_token(token):
auth = v3.Token(auth_url=CONF.auth_url,
token=token,
project_name=CONF.admin_project_name,
project_domain_id=CONF.admin_project_domain_id)
sess = session.Session(auth=auth)
# If we're able to scope successfully to the admin project with this
# token, assume admin.
try:
sess.get_token()
return True
except ksa_exceptions.Unauthorized:
return False
def _increase_attempts(user):
user.attempts += 1
model.db.session.commit()
def _set_password(token, pin, password):
# Find user for token
user = model.User.find(token=token)
if user is None:
raise exception.TokenNotFoundException
if user.attempts > CONF.max_attempts:
raise exception.AccountLocked
if pin != user.pin:
_increase_attempts(user)
raise exception.WrongPinException
delta = datetime.datetime.utcnow() - user.updated_at
if delta.total_seconds() > CONF.token_expiration:
raise exception.TokenExpiredException
_set_openstack_password(user.user_id, user.password, password)
model.db.session.delete(user)
model.db.session.commit()
@wsgi.app.route('/token/<user_id>', methods=['PUT'])
def add(user_id):
token = request.headers.get('x-auth-token', None)
if not token:
return Response(response='Unauthorized', status=401)
if not _check_admin_token(token):
return Response(response='Forbidden', status=403)
payload = json.loads(request.data)
user = model.User.find(user_id=user_id)
if user:
if 'pin' in payload:
user.pin = payload['pin']
if 'password' in payload:
user.password = payload['password']
user.token = str(uuid.uuid4())
user.update_timestamp_and_attempts()
else:
user = model.User(
user_id=user_id,
token=str(uuid.uuid4()),
pin=payload['pin'],
password=payload['password']
)
model.db.session.add(user)
model.db.session.commit()
return Response(response=user.token, status=200)
@wsgi.app.route('/reset', methods=['GET'])
def view_reset_form():
return render_template('reset_form.html')
@wsgi.app.route('/reset', methods=['POST'])
def reset_password():
name = request.form['name']
email = request.form['email']
confirm_email = request.form['confirm_email']
pin = request.form['pin']
if not name or not email or not confirm_email or not pin:
return Response(response='Missing required field!', status=400)
if email != confirm_email:
return Response(response="Email addresses do not match.", status=400)
_notify_helpdesk(name=name, username=email, pin=pin)
return Response(response='The request has been forwarded to the helpdesk.',
status=200)
def _notify_helpdesk(**kwargs):
with open(CONF.helpdesk_template, 'r') as f:
msg_body = f.read()
msg_body = msg_body.format(**kwargs)
sender = CONF.ticket_sender
recipient = CONF.helpdesk_email
msg = MIMEText(msg_body)
msg['Subject'] = CONF.ticket_subject
msg['From'] = sender
msg['To'] = recipient
server = smtplib.SMTP(CONF.mail_ip, CONF.mail_port)
server.ehlo()
server.starttls()
server.sendmail(sender, recipient, msg.as_string())
if __name__ == '__main__':
wsgi.app.run(port=CONF.port, host='0.0.0.0')
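# Hedged usage sketch (not part of the service): exercising the two endpoints above
# with Flask's built-in test client. The admin token, user id, pin and passwords are
# placeholders, and the PUT call still needs a reachable Keystone so that
# _check_admin_token() can validate the admin token.
def _example_setpass_flow():
    client = wsgi.app.test_client()
    # Admin provisions a one-time token for the user (PUT /token/<user_id>).
    resp = client.put('/token/abc123',
                      headers={'x-auth-token': 'ADMIN_TOKEN'},
                      data=json.dumps({'pin': '1234', 'password': 'temporary-pass'}))
    token = resp.get_data(as_text=True)
    # The user redeems the token to set a new password (POST /?token=...).
    return client.post('/?token=' + token,
                       data={'password': 'new-pass',
                             'confirm_password': 'new-pass',
                             'pin': '1234'})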
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ApplicationGatewaysOperations(object):
"""ApplicationGatewaysOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
application_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
application_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified application gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
application_gateway_name=application_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
application_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ApplicationGateway"
"""Gets the specified application gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_06_01.models.ApplicationGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'} # type: ignore
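# Hedged usage sketch (kept as a comment so the generated class body stays intact):
# ``get`` is a plain synchronous call once this operations group is reached through
# the service client. The client class name and the printed attributes are
# assumptions about the surrounding package; resource names are placeholders.
#
#     from azure.mgmt.network import NetworkManagementClient
#     client = NetworkManagementClient(credential, subscription_id)
#     gateway = client.application_gateways.get('my-rg', 'my-gateway')
#     print(gateway.name, gateway.provisioning_state)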
def _create_or_update_initial(
self,
resource_group_name, # type: str
application_gateway_name, # type: str
parameters, # type: "_models.ApplicationGateway"
**kwargs # type: Any
):
# type: (...) -> "_models.ApplicationGateway"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ApplicationGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ApplicationGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ApplicationGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
application_gateway_name, # type: str
parameters, # type: "_models.ApplicationGateway"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ApplicationGateway"]
"""Creates or updates the specified application gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:param parameters: Parameters supplied to the create or update application gateway operation.
:type parameters: ~azure.mgmt.network.v2017_06_01.models.ApplicationGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ApplicationGateway or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2017_06_01.models.ApplicationGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
application_gateway_name=application_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ApplicationGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'} # type: ignore
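# Hedged usage sketch (comment only): driving the long-running create/update through
# the LROPoller returned above. ``parameters`` is an ApplicationGateway model (or a
# dict of the same shape); the client variable and resource names are placeholders.
#
#     poller = client.application_gateways.begin_create_or_update(
#         'my-rg', 'my-gateway', parameters)
#     gateway = poller.result()   # blocks until the LRO reaches a terminal state
#     finished = poller.done()    # or check progress without blocking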
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ApplicationGatewayListResult"]
"""Lists all application gateways in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ApplicationGatewayListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2017_06_01.models.ApplicationGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGatewayListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ApplicationGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways'} # type: ignore
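# Hedged usage sketch (comment only): ``list`` returns an ItemPaged that fetches
# pages lazily, so iterating it yields ApplicationGateway models across all pages.
# The client variable and resource group name are placeholders.
#
#     for gateway in client.application_gateways.list('my-rg'):
#         print(gateway.name)
#     # or walk the raw pages one at a time:
#     pages = client.application_gateways.list('my-rg').by_page()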
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ApplicationGatewayListResult"]
"""Gets all the application gateways in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ApplicationGatewayListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2017_06_01.models.ApplicationGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGatewayListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ApplicationGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGateways'} # type: ignore
def _start_initial(
self,
resource_group_name, # type: str
application_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
# Construct URL
url = self._start_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/start'} # type: ignore
def begin_start(
self,
resource_group_name, # type: str
application_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Starts the specified application gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._start_initial(
resource_group_name=resource_group_name,
application_gateway_name=application_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/start'} # type: ignore
def _stop_initial(
self,
resource_group_name, # type: str
application_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
# Construct URL
url = self._stop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/stop'} # type: ignore
def begin_stop(
self,
resource_group_name, # type: str
application_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Stops the specified application gateway in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._stop_initial(
resource_group_name=resource_group_name,
application_gateway_name=application_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/stop'} # type: ignore
def _backend_health_initial(
self,
resource_group_name, # type: str
application_gateway_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ApplicationGatewayBackendHealth"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ApplicationGatewayBackendHealth"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json"
# Construct URL
url = self._backend_health_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ApplicationGatewayBackendHealth', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_backend_health_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/backendhealth'} # type: ignore
def begin_backend_health(
self,
resource_group_name, # type: str
application_gateway_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ApplicationGatewayBackendHealth"]
"""Gets the backend health of the specified application gateway in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:param expand: Expands BackendAddressPool and BackendHttpSettings referenced in backend health.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ApplicationGatewayBackendHealth or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2017_06_01.models.ApplicationGatewayBackendHealth]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGatewayBackendHealth"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._backend_health_initial(
resource_group_name=resource_group_name,
application_gateway_name=application_gateway_name,
expand=expand,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ApplicationGatewayBackendHealth', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_backend_health.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/backendhealth'} # type: ignore
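# Illustrative usage sketch (not part of the generated client; the client and
# resource names below are assumptions): the method returns an LROPoller, so
# the long-running backend-health probe is typically consumed as
#
#   poller = network_client.application_gateways.begin_backend_health(
#       resource_group_name="my-rg",
#       application_gateway_name="my-appgw",
#   )
#   backend_health = poller.result()  # ApplicationGatewayBackendHealth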
def list_available_waf_rule_sets(
self,
**kwargs # type: Any
):
# type: (...) -> "_models.ApplicationGatewayAvailableWafRuleSetsResult"
"""Lists all available web application firewall rule sets.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationGatewayAvailableWafRuleSetsResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_06_01.models.ApplicationGatewayAvailableWafRuleSetsResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGatewayAvailableWafRuleSetsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json"
# Construct URL
url = self.list_available_waf_rule_sets.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationGatewayAvailableWafRuleSetsResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_available_waf_rule_sets.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableWafRuleSets'} # type: ignore
def list_available_ssl_options(
self,
**kwargs # type: Any
):
# type: (...) -> "_models.ApplicationGatewayAvailableSslOptions"
"""Lists available Ssl options for configuring Ssl policy.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationGatewayAvailableSslOptions, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_06_01.models.ApplicationGatewayAvailableSslOptions
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGatewayAvailableSslOptions"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json"
# Construct URL
url = self.list_available_ssl_options.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationGatewayAvailableSslOptions', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_available_ssl_options.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableSslOptions/default'} # type: ignore
def list_available_ssl_predefined_policies(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ApplicationGatewayAvailableSslPredefinedPolicies"]
"""Lists all SSL predefined policies for configuring Ssl policy.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ApplicationGatewayAvailableSslPredefinedPolicies or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2017_06_01.models.ApplicationGatewayAvailableSslPredefinedPolicies]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGatewayAvailableSslPredefinedPolicies"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_available_ssl_predefined_policies.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ApplicationGatewayAvailableSslPredefinedPolicies', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_available_ssl_predefined_policies.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableSslOptions/default/predefinedPolicies'} # type: ignore
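# Illustrative usage sketch (the ``network_client`` name is an assumption):
# the method returns an ItemPaged iterator, so callers can simply loop over
# it and the next_link-based paging above is handled transparently, e.g.
#
#   for policy in network_client.application_gateways.list_available_ssl_predefined_policies():
#       print(policy.name)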
def get_ssl_predefined_policy(
self,
predefined_policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ApplicationGatewaySslPredefinedPolicy"
"""Gets Ssl predefined policy with the specified policy name.
:param predefined_policy_name: Name of Ssl predefined policy.
:type predefined_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationGatewaySslPredefinedPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_06_01.models.ApplicationGatewaySslPredefinedPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGatewaySslPredefinedPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json"
# Construct URL
url = self.get_ssl_predefined_policy.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'predefinedPolicyName': self._serialize.url("predefined_policy_name", predefined_policy_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationGatewaySslPredefinedPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_ssl_predefined_policy.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableSslOptions/default/predefinedPolicies/{predefinedPolicyName}'} # type: ignore
|
|
import re, inspect, textwrap, pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
    # Signature rendering is intentionally disabled; if re-enabled, it would
    # return ``self['Signature']`` formatted as an inline literal.
    return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::', ' :toctree:', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
#for param_list in ('Attributes', 'Methods'):
# out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
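# Illustrative usage sketch (not part of the original module; ``my_function``
# is a placeholder for any object with a numpydoc-style docstring):
#
#   doc = get_doc_object(my_function, config={'use_plots': True})
#   rst_text = str(doc)   # Sphinx-flavoured reST with rubrics and field lists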
|
|
import numpy as np
import os
try:
import netCDF4 as netCDF
except ImportError:
import netCDF3 as netCDF
import matplotlib.pyplot as plt
import time
from datetime import datetime
from matplotlib.dates import date2num, num2date
import pyroms
import pyroms_toolbox
import _remapping
class nctime(object):
pass
def remap_uv(src_file, src_grd, dst_grd, dmax=0, cdepth=0, kk=0, dst_dir='./'):
ystart=690
# get time
nctime.long_name = 'time'
nctime.units = 'days since 1900-01-01 00:00:00'
# time reference "days since 1900-01-01 00:00:00"
ref = datetime(1900, 1, 1, 0, 0, 0)
ref = date2num(ref)
tag = src_file.rsplit('/')[-1].rsplit('_')[2]
print 'tag:', tag
year = int(tag[:4])
month = int(tag[4:6])
day = int(tag[6:])
time = datetime(year, month, day, 0, 0, 0)
time = date2num(time)
time = time - ref
time = time + 0.5 # 1-day average
# get dimensions
Mp, Lp = dst_grd.hgrid.mask_rho.shape
# create destination file
dst_file = src_file.rsplit('/')[-1]
dst_fileu = dst_dir + dst_file[:-4] + '_u_ic_' + dst_grd.name + '.nc'
print '\nCreating destination file', dst_fileu
if os.path.exists(dst_fileu):
os.remove(dst_fileu)
pyroms_toolbox.nc_create_roms_file(dst_fileu, dst_grd, nctime)
dst_filev = dst_dir + dst_file[:-4] + '_v_ic_' + dst_grd.name + '.nc'
print 'Creating destination file', dst_filev
if os.path.exists(dst_filev):
os.remove(dst_filev)
pyroms_toolbox.nc_create_roms_file(dst_filev, dst_grd, nctime)
# open destination file
ncu = netCDF.Dataset(dst_fileu, 'a', format='NETCDF3_64BIT')
ncv = netCDF.Dataset(dst_filev, 'a', format='NETCDF3_64BIT')
#load var
cdf = netCDF.Dataset(src_file)
src_varu = cdf.variables['vozocrtx']
src_varv = cdf.variables['vomecrty']
print "dims", src_varu.dimensions, src_varv.dimensions
#get missing value
spval = src_varu._FillValue
# ARCTIC grid sub-sample
src_varu = src_varu[:]
src_varv = src_varv[:]
print "shape 1", src_varu.shape, src_varv.shape
src_varu = np.squeeze(src_varu)
src_varv = np.squeeze(src_varv)
print "shape 2", src_varu.shape, src_varv.shape
src_varu = src_varu[:,np.r_[ystart:np.size(src_varu,1),-1],:]
src_varv = src_varv[:,np.r_[ystart:np.size(src_varv,1),-1],:]
print "shape 3", src_varu.shape, src_varv.shape
# get weights file
wts_file_a = 'remap_weights_GLORYS_to_ARCTIC2_bilinear_t_to_rho.nc'
wts_file_u = 'remap_weights_GLORYS_to_ARCTIC2_bilinear_u_to_rho.nc'
wts_file_v = 'remap_weights_GLORYS_to_ARCTIC2_bilinear_v_to_rho.nc'
# build intermediate zgrid
zlevel = -src_grd.z_t[::-1,0,0]
nzlevel = len(zlevel)
dst_zcoord = pyroms.vgrid.z_coordinate(dst_grd.vgrid.h, zlevel, nzlevel)
dst_grdz = pyroms.grid.ROMS_Grid(dst_grd.name+'_Z', dst_grd.hgrid, dst_zcoord)
# create variable in destination file
print 'Creating variable u'
ncu.createVariable('u', 'f8', ('ocean_time', 's_rho', 'eta_u', 'xi_u'), fill_value=spval)
ncu.variables['u'].long_name = '3D u-momentum component'
ncu.variables['u'].units = 'meter second-1'
ncu.variables['u'].field = 'u-velocity, scalar, series'
# create variable in destination file
print 'Creating variable ubar'
ncu.createVariable('ubar', 'f8', ('ocean_time', 'eta_u', 'xi_u'), fill_value=spval)
ncu.variables['ubar'].long_name = '2D u-momentum component'
ncu.variables['ubar'].units = 'meter second-1'
ncu.variables['ubar'].field = 'ubar-velocity, scalar, series'
print 'Creating variable v'
ncv.createVariable('v', 'f8', ('ocean_time', 's_rho', 'eta_v', 'xi_v'), fill_value=spval)
ncv.variables['v'].long_name = '3D v-momentum component'
ncv.variables['v'].units = 'meter second-1'
ncv.variables['v'].field = 'v-velocity, scalar, series'
print 'Creating variable vbar'
ncv.createVariable('vbar', 'f8', ('ocean_time', 'eta_v', 'xi_v'), fill_value=spval)
ncv.variables['vbar'].long_name = '2D v-momentum component'
ncv.variables['vbar'].units = 'meter second-1'
ncv.variables['vbar'].field = 'vbar-velocity, scalar, series'
# remaping
print 'remapping and rotating u and v from', src_grd.name, \
'to', dst_grd.name
print 'time =', time
# flood the grid
print 'flood the grid', src_varu.shape
src_uz = pyroms_toolbox.CGrid_GLORYS.flood(src_varu, src_grd, Cpos='u', \
spval=spval, dmax=dmax, cdepth=cdepth, kk=kk)
src_vz = pyroms_toolbox.CGrid_GLORYS.flood(src_varv, src_grd, Cpos='v', \
spval=spval, dmax=dmax, cdepth=cdepth, kk=kk)
# horizontal interpolation using scrip weights
print 'horizontal interpolation using scrip weights'
dst_uz = pyroms.remapping.remap(src_uz, wts_file_u, \
spval=spval)
dst_vz = pyroms.remapping.remap(src_vz, wts_file_v, \
spval=spval)
# vertical interpolation from standard z level to sigma
print 'vertical interpolation from standard z level to sigma'
dst_u = pyroms.remapping.z2roms(dst_uz[::-1,:,:], dst_grdz, \
dst_grd, Cpos='rho', spval=spval, flood=False)
dst_v = pyroms.remapping.z2roms(dst_vz[::-1,:,:], dst_grdz, \
dst_grd, Cpos='rho', spval=spval, flood=False)
# rotate u,v fields
src_angle = src_grd.angle
src_angle = pyroms.remapping.remap(src_angle, wts_file_a)
dst_angle = dst_grd.hgrid.angle_rho
angle = dst_angle - src_angle
angle = np.tile(angle, (dst_grd.vgrid.N, 1, 1))
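# Rotation via complex multiplication: with U = u + i*v and theta the angle
# difference between the two grids, U * exp(-i*theta) expresses the velocity
# in the destination grid orientation, i.e.
#   u' = u*cos(theta) + v*sin(theta),  v' = -u*sin(theta) + v*cos(theta).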
U = dst_u + dst_v*1j
eitheta = np.exp(-1j*angle[:,:,:])
U = U * eitheta
dst_u = np.real(U)
dst_v = np.imag(U)
# move back to u,v points
dst_u = 0.5 * (dst_u[:,:,:-1] + dst_u[:,:,1:])
dst_v = 0.5 * (dst_v[:,:-1,:] + dst_v[:,1:,:])
# spval
idxu = np.where(dst_grd.hgrid.mask_u == 0)
idxv = np.where(dst_grd.hgrid.mask_v == 0)
for n in range(dst_grd.vgrid.N):
dst_u[n,idxu[0], idxu[1]] = spval
dst_v[n,idxv[0], idxv[1]] = spval
# compute depth average velocity ubar and vbar
# get z at the right position
z_u = 0.5 * (dst_grd.vgrid.z_w[0,:,:,:-1] + dst_grd.vgrid.z_w[0,:,:,1:])
z_v = 0.5 * (dst_grd.vgrid.z_w[0,:,:-1,:] + dst_grd.vgrid.z_w[0,:,1:,:])
dst_ubar = np.zeros((dst_u.shape[1], dst_u.shape[2]))
dst_vbar = np.zeros((dst_v.shape[1], dst_v.shape[2]))
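# Depth-averaged velocities: ubar = sum_k(u_k * dz_k) / H, where dz comes from
# np.diff of the z_w interfaces at u/v points and H = -z_u[0] (resp. -z_v[0])
# is the local water depth, assuming z_w is ordered from bottom to surface.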
for i in range(dst_ubar.shape[1]):
for j in range(dst_ubar.shape[0]):
dst_ubar[j,i] = (dst_u[:,j,i] * np.diff(z_u[:,j,i])).sum() / -z_u[0,j,i]
for i in range(dst_vbar.shape[1]):
for j in range(dst_vbar.shape[0]):
dst_vbar[j,i] = (dst_v[:,j,i] * np.diff(z_v[:,j,i])).sum() / -z_v[0,j,i]
# spval
dst_ubar[idxu[0], idxu[1]] = spval
dst_vbar[idxv[0], idxv[1]] = spval
# write data in destination file
print 'write data in destination file'
ncu.variables['ocean_time'][0] = time
ncu.variables['u'][0] = dst_u
ncu.variables['ubar'][0] = dst_ubar
ncv.variables['ocean_time'][0] = time
ncv.variables['v'][0] = dst_v
ncv.variables['vbar'][0] = dst_vbar
print dst_u.shape
print dst_ubar.shape
print dst_v.shape
print dst_vbar.shape
# close destination file
ncu.close()
ncv.close()
cdf.close()
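# Illustrative usage sketch (file name and grid objects are assumptions, not
# from the original script; src_grd and dst_grd would be built with the usual
# pyroms grid helpers earlier in the workflow). The source file name must
# carry a YYYYMMDD tag as its third underscore-separated token:
#
#   remap_uv('data/GLORYS_1dAV_20070101_gridUV.nc', src_grd, dst_grd,
#            dst_dir='./ic/')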
|
|
# -*- coding: utf-8 -*-
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of update command for updating gsutil."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import shutil
import signal
import stat
import sys
import tarfile
import tempfile
import textwrap
from six.moves import input
import gslib
from gslib.command import Command
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.metrics import CheckAndMaybePromptForAnalyticsEnabling
from gslib.sig_handling import RegisterSignalHandler
from gslib.utils import system_util
from gslib.utils.boto_util import GetConfigFilePaths
from gslib.utils.boto_util import CERTIFICATE_VALIDATION_ENABLED
from gslib.utils.constants import RELEASE_NOTES_URL
from gslib.utils.text_util import CompareVersions
from gslib.utils.update_util import DisallowUpdateIfDataInGsutilDir
from gslib.utils.update_util import LookUpGsutilVersion
from gslib.utils.update_util import GsutilPubTarball
_SYNOPSIS = """
gsutil update [-f] [-n] [url]
"""
_DETAILED_HELP_TEXT = ("""
<B>SYNOPSIS</B>
""" + _SYNOPSIS + """
<B>DESCRIPTION</B>
NOTE: This command is not available if you're using a gsutil installation
from a package manager or the Cloud SDK. When using the Cloud SDK, use
``gcloud components update``.
The gsutil update command downloads the latest gsutil release, checks its
version, and offers to let you update to it if it differs from the version
you're currently running.
Once you say "Y" to the prompt of whether to install the update, the gsutil
update command locates where the running copy of gsutil is installed,
unpacks the new version into an adjacent directory, moves the previous version
aside, moves the new version to where the previous version was installed,
and removes the moved-aside old version. Because of this, users are cautioned
not to store data in the gsutil directory, since that data will be lost
when you update gsutil. (Some users change directories into the gsutil
directory to run the command. We advise against doing that, for this reason.)
Note also that the gsutil update command will refuse to run if it finds user
data in the gsutil directory.
By default gsutil update will retrieve the new code from
%s, but you can optionally specify a URL to use
instead. This is primarily used for distributing pre-release versions of
the code to a small group of early test users.
NOTE: gsutil periodically checks whether a more recent software update is
available. By default this check is performed every 30 days; you can change
(or disable) this check by editing the software_update_check_period variable
in the .boto config file. Note also that gsutil will only check for software
updates if stdin, stdout, and stderr are all connected to a TTY, to avoid
interfering with cron jobs, streaming transfers, and other cases where gsutil
input or output are redirected from/to files or pipes. Software update
periodic checks are also disabled by the gsutil -q option (see
'gsutil help options')
<B>OPTIONS</B>
-f Forces the update command to offer to let you update, even if you
have the most current copy already. This can be useful if you have
a corrupted local copy.
-n Causes update command to run without prompting [Y/n] whether to
continue if an update is available.
""" % GsutilPubTarball())
class UpdateCommand(Command):
"""Implementation of gsutil update command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'update',
command_name_aliases=['refresh'],
usage_synopsis=_SYNOPSIS,
min_args=0,
max_args=1,
supported_sub_args='fn',
file_url_ok=True,
provider_url_ok=False,
urls_start_arg=0,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
)
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='update',
help_name_aliases=['refresh'],
help_type='command_help',
help_one_line_summary='Update to the latest gsutil release',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
def _ExplainIfSudoNeeded(self, tf, dirs_to_remove, old_cwd):
"""Explains what to do if sudo needed to update gsutil software.
Happens if gsutil was previously installed by a different user (typically if
someone originally installed in a shared file system location, using sudo).
Args:
tf: Opened TarFile.
dirs_to_remove: List of directories to remove.
old_cwd: Path to the working directory we should chdir back to if sudo is
needed. It's possible that we've chdir'd to a temp directory that's
been deleted, which can cause odd behavior (e.g. OSErrors when opening
the metrics subprocess). If this is not truthy, we won't attempt to
chdir back to this value.
Raises:
CommandException: if errors encountered.
"""
# If running under Windows or Cygwin we don't need (or have) sudo.
if system_util.IS_CYGWIN or system_util.IS_WINDOWS:
return
user_id = os.getuid()
if os.stat(gslib.GSUTIL_DIR).st_uid == user_id:
return
# Won't fail - this command runs after main startup code that insists on
# having a config file.
config_file_list = GetConfigFilePaths()
config_files = ' '.join(config_file_list)
self._CleanUpUpdateCommand(tf, dirs_to_remove, old_cwd)
# Pick current protection of each boto config file for command that restores
# protection (rather than fixing at 600) to support use cases like how GCE
# installs a service account with an /etc/boto.cfg file protected to 644.
chmod_cmds = []
for config_file in config_file_list:
mode = oct(stat.S_IMODE((os.stat(config_file)[stat.ST_MODE])))
chmod_cmds.append('\n\tsudo chmod %s %s' % (mode, config_file))
raise CommandException('\n'.join(
textwrap.wrap(
'Since it was installed by a different user previously, you will need '
'to update using the following commands. You will be prompted for your '
'password, and the install will run as "root". If you\'re unsure what '
'this means please ask your system administrator for help:')) + (
'\n\tsudo chmod 0644 %s\n\tsudo env BOTO_CONFIG="%s" %s update'
'%s') % (config_files, config_files, self.gsutil_path,
' '.join(chmod_cmds)),
informational=True)
# This list is checked during gsutil update by doing a lowercased
# slash-left-stripped check. For example "/Dev" would match the "dev" entry.
unsafe_update_dirs = [
'applications',
'auto',
'bin',
'boot',
'desktop',
'dev',
'documents and settings',
'etc',
'export',
'home',
'kernel',
'lib',
'lib32',
'library',
'lost+found',
'mach_kernel',
'media',
'mnt',
'net',
'null',
'network',
'opt',
'private',
'proc',
'program files',
'python',
'root',
'sbin',
'scripts',
'srv',
'sys',
'system',
'tmp',
'users',
'usr',
'var',
'volumes',
'win',
'win32',
'windows',
'winnt',
]
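  # For instance, in _EnsureDirsSafeForUpdate below a candidate directory such
  # as '/Dev' is normalized with d.lstrip(os.sep).lower() -> 'dev', which
  # matches the 'dev' entry above and aborts the update.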
def _EnsureDirsSafeForUpdate(self, dirs):
"""Raises Exception if any of dirs is known to be unsafe for gsutil update.
This provides a fail-safe check to ensure we don't try to overwrite
or delete any important directories. (That shouldn't happen given the
way we construct tmp dirs, etc., but since the gsutil update cleanup
uses shutil.rmtree() it's prudent to add extra checks.)
Args:
dirs: List of directories to check.
Raises:
CommandException: If unsafe directory encountered.
"""
for d in dirs:
if not d:
d = 'null'
if d.lstrip(os.sep).lower() in self.unsafe_update_dirs:
raise CommandException('EnsureDirsSafeForUpdate: encountered unsafe '
'directory (%s); aborting update' % d)
def _CleanUpUpdateCommand(self, tf, dirs_to_remove, old_cwd):
"""Cleans up temp files etc. from running update command.
Args:
tf: Opened TarFile, or None if none currently open.
dirs_to_remove: List of directories to remove.
old_cwd: Path to the working directory we should chdir back to. It's
possible that we've chdir'd to a temp directory that's been deleted,
which can cause odd behavior (e.g. OSErrors when opening the metrics
subprocess). If this is not truthy, we won't attempt to chdir back
to this value.
"""
if tf:
tf.close()
self._EnsureDirsSafeForUpdate(dirs_to_remove)
for directory in dirs_to_remove:
try:
shutil.rmtree(directory)
except OSError:
# Ignore errors while attempting to remove old dirs under Windows. They
# happen because of Windows exclusive file locking, and the update
# actually succeeds but just leaves the old versions around in the
# user's temp dir.
if not system_util.IS_WINDOWS:
raise
if old_cwd:
try:
os.chdir(old_cwd)
except OSError:
pass
def RunCommand(self):
"""Command entry point for the update command."""
if gslib.IS_PACKAGE_INSTALL:
raise CommandException(
'The update command is only available for gsutil installed from a '
'tarball. If you installed gsutil via another method, use the same '
'method to update it.')
if system_util.InvokedViaCloudSdk():
raise CommandException(
'The update command is disabled for Cloud SDK installs. Please run '
'"gcloud components update" to update it. Note: the Cloud SDK '
'incorporates updates to the underlying tools approximately every 2 '
'weeks, so if you are attempting to update to a recently created '
'release / pre-release of gsutil it may not yet be available via '
'the Cloud SDK.')
https_validate_certificates = CERTIFICATE_VALIDATION_ENABLED
if not https_validate_certificates:
raise CommandException(
'Your boto configuration has https_validate_certificates = False.\n'
'The update command cannot be run this way, for security reasons.')
DisallowUpdateIfDataInGsutilDir()
force_update = False
no_prompt = False
if self.sub_opts:
for o, unused_a in self.sub_opts:
if o == '-f':
force_update = True
if o == '-n':
no_prompt = True
dirs_to_remove = []
tmp_dir = tempfile.mkdtemp()
dirs_to_remove.append(tmp_dir)
old_cwd = os.getcwd()
os.chdir(tmp_dir)
if not no_prompt:
self.logger.info('Checking for software update...')
if self.args:
update_from_url_str = self.args[0]
if not update_from_url_str.endswith('.tar.gz'):
raise CommandException(
'The update command only works with tar.gz files.')
for i, result in enumerate(self.WildcardIterator(update_from_url_str)):
if i > 0:
raise CommandException(
'Invalid update URL. Must name a single .tar.gz file.')
storage_url = result.storage_url
if storage_url.IsFileUrl() and not storage_url.IsDirectory():
if not force_update:
raise CommandException(
('"update" command does not support "file://" URLs without the '
'-f option.'))
elif not (storage_url.IsCloudUrl() and storage_url.IsObject()):
raise CommandException(
'Invalid update object URL. Must name a single .tar.gz file.')
else:
update_from_url_str = GsutilPubTarball()
# Try to retrieve version info from tarball metadata; failing that, download
# the tarball and extract the VERSION file. The version lookup will fail
# when running the update system test, because it retrieves the tarball from
# a temp file rather than a cloud URL (files lack the version metadata).
tarball_version = LookUpGsutilVersion(self.gsutil_api, update_from_url_str)
if tarball_version:
tf = None
else:
tf = self._FetchAndOpenGsutilTarball(update_from_url_str)
tf.extractall()
with open(os.path.join('gsutil', 'VERSION'), 'r') as ver_file:
tarball_version = ver_file.read().strip()
if not force_update and gslib.VERSION == tarball_version:
self._CleanUpUpdateCommand(tf, dirs_to_remove, old_cwd)
if self.args:
raise CommandException('You already have %s installed.' %
update_from_url_str,
informational=True)
else:
raise CommandException(
'You already have the latest gsutil release '
'installed.',
informational=True)
if not no_prompt:
CheckAndMaybePromptForAnalyticsEnabling()
(_, major) = CompareVersions(tarball_version, gslib.VERSION)
if major:
print(('\n'.join(
textwrap.wrap(
'This command will update to the "%s" version of gsutil at %s. '
'NOTE: This is a major new version, so it is strongly recommended '
'that you review the release note details at %s before updating to '
'this version, especially if you use gsutil in scripts.' %
(tarball_version, gslib.GSUTIL_DIR, RELEASE_NOTES_URL)))))
else:
print(('This command will update to the "%s" version of\ngsutil at %s' %
(tarball_version, gslib.GSUTIL_DIR)))
self._ExplainIfSudoNeeded(tf, dirs_to_remove, old_cwd)
if no_prompt:
answer = 'y'
else:
answer = input('Proceed? [y/N] ')
if not answer or answer.lower()[0] != 'y':
self._CleanUpUpdateCommand(tf, dirs_to_remove, old_cwd)
raise CommandException('Not running update.', informational=True)
if not tf:
tf = self._FetchAndOpenGsutilTarball(update_from_url_str)
# Ignore keyboard interrupts during the update to reduce the chance someone
# hitting ^C leaves gsutil in a broken state.
RegisterSignalHandler(signal.SIGINT, signal.SIG_IGN)
# gslib.GSUTIL_DIR lists the path where the code should end up (like
# /usr/local/gsutil), which is one level down from the relative path in the
# tarball (since the latter creates files in ./gsutil). So, we need to
# extract at the parent directory level.
gsutil_bin_parent_dir = os.path.normpath(
os.path.join(gslib.GSUTIL_DIR, '..'))
# Extract tarball to a temporary directory in a sibling to GSUTIL_DIR.
old_dir = tempfile.mkdtemp(dir=gsutil_bin_parent_dir)
new_dir = tempfile.mkdtemp(dir=gsutil_bin_parent_dir)
dirs_to_remove.append(old_dir)
dirs_to_remove.append(new_dir)
self._EnsureDirsSafeForUpdate(dirs_to_remove)
try:
tf.extractall(path=new_dir)
except Exception as e:
self._CleanUpUpdateCommand(tf, dirs_to_remove, old_cwd)
raise CommandException('Update failed: %s.' % e)
# For enterprise mode (shared/central) installation, users with
# different user/group than the installation user/group must be
# able to run gsutil so we need to do some permissions adjustments
# here. Since enterprise mode is not supported for Windows
# users, we can skip this step when running on Windows, which
# avoids the problem that Windows has no find or xargs command.
if not system_util.IS_WINDOWS:
# Make all files and dirs in updated area owner-RW and world-R, and make
# all directories owner-RWX and world-RX.
for dirname, subdirs, filenames in os.walk(new_dir):
for filename in filenames:
fd = os.open(os.path.join(dirname, filename), os.O_RDONLY)
os.fchmod(fd,
stat.S_IWRITE | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
os.close(fd)
for subdir in subdirs:
fd = os.open(os.path.join(dirname, subdir), os.O_RDONLY)
os.fchmod(
fd, stat.S_IRWXU | stat.S_IXGRP | stat.S_IXOTH | stat.S_IRGRP |
stat.S_IROTH)
os.close(fd)
# Make main gsutil script owner-RWX and world-RX.
fd = os.open(os.path.join(new_dir, 'gsutil', 'gsutil'), os.O_RDONLY)
os.fchmod(
fd, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH |
stat.S_IXOTH)
os.close(fd)
# Move old installation aside and new into place.
os.rename(gslib.GSUTIL_DIR, os.path.join(old_dir, 'old'))
os.rename(os.path.join(new_dir, 'gsutil'), gslib.GSUTIL_DIR)
self._CleanUpUpdateCommand(tf, dirs_to_remove, old_cwd)
RegisterSignalHandler(signal.SIGINT, signal.SIG_DFL)
self.logger.info('Update complete.')
return 0
def _FetchAndOpenGsutilTarball(self, update_from_url_str):
self.command_runner.RunNamedCommand(
'cp',
[update_from_url_str, 'file://gsutil.tar.gz'],
self.headers,
self.debug,
skip_update_check=True,
)
# Note: tf is closed in _CleanUpUpdateCommand.
tf = tarfile.open('gsutil.tar.gz')
tf.errorlevel = 1 # So fatal tarball unpack errors raise exceptions.
return tf
|
|
from __future__ import division, print_function, absolute_import
from os.path import join, dirname
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_equal
import pytest
from pytest import raises as assert_raises
from scipy.fftpack.realtransforms import (
dct, idct, dst, idst, dctn, idctn, dstn, idstn)
# Matlab reference data
MDATA = np.load(join(dirname(__file__), 'test.npz'))
X = [MDATA['x%d' % i] for i in range(8)]
Y = [MDATA['y%d' % i] for i in range(8)]
# FFTW reference data: the data are organized as follows:
# * SIZES is an array containing all available sizes
# * for every type (1, 2, 3, 4) and every size, the array dct_type_size
# contains the output of the DCT applied to the input np.linspace(0, size-1,
# size)
FFTWDATA_DOUBLE = np.load(join(dirname(__file__), 'fftw_double_ref.npz'))
FFTWDATA_SINGLE = np.load(join(dirname(__file__), 'fftw_single_ref.npz'))
FFTWDATA_SIZES = FFTWDATA_DOUBLE['sizes']
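# For example (assuming size 16 appears in FFTWDATA_SIZES), the key 'dct_2_16'
# holds the type-2 DCT of np.linspace(0, 15, 16) as computed by FFTW, i.e.
# FFTWDATA_DOUBLE['dct_2_16'] is the reference output that
# fftw_dct_ref(2, 16, np.double) returns.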
def fftw_dct_ref(type, size, dt):
x = np.linspace(0, size-1, size).astype(dt)
dt = np.result_type(np.float32, dt)
if dt == np.double:
data = FFTWDATA_DOUBLE
elif dt == np.float32:
data = FFTWDATA_SINGLE
else:
raise ValueError()
y = (data['dct_%d_%d' % (type, size)]).astype(dt)
return x, y, dt
def fftw_dst_ref(type, size, dt):
x = np.linspace(0, size-1, size).astype(dt)
dt = np.result_type(np.float32, dt)
if dt == np.double:
data = FFTWDATA_DOUBLE
elif dt == np.float32:
data = FFTWDATA_SINGLE
else:
raise ValueError()
y = (data['dst_%d_%d' % (type, size)]).astype(dt)
return x, y, dt
def dct_2d_ref(x, **kwargs):
"""Calculate reference values for testing dct2."""
x = np.array(x, copy=True)
for row in range(x.shape[0]):
x[row, :] = dct(x[row, :], **kwargs)
for col in range(x.shape[1]):
x[:, col] = dct(x[:, col], **kwargs)
return x
def idct_2d_ref(x, **kwargs):
"""Calculate reference values for testing idct2."""
x = np.array(x, copy=True)
for row in range(x.shape[0]):
x[row, :] = idct(x[row, :], **kwargs)
for col in range(x.shape[1]):
x[:, col] = idct(x[:, col], **kwargs)
return x
def dst_2d_ref(x, **kwargs):
"""Calculate reference values for testing dst2."""
x = np.array(x, copy=True)
for row in range(x.shape[0]):
x[row, :] = dst(x[row, :], **kwargs)
for col in range(x.shape[1]):
x[:, col] = dst(x[:, col], **kwargs)
return x
def idst_2d_ref(x, **kwargs):
"""Calculate reference values for testing idst2."""
x = np.array(x, copy=True)
for row in range(x.shape[0]):
x[row, :] = idst(x[row, :], **kwargs)
for col in range(x.shape[1]):
x[:, col] = idst(x[:, col], **kwargs)
return x
def naive_dct1(x, norm=None):
"""Calculate textbook definition version of DCT-I."""
x = np.array(x, copy=True)
N = len(x)
M = N-1
y = np.zeros(N)
m0, m = 1, 2
if norm == 'ortho':
m0 = np.sqrt(1.0/M)
m = np.sqrt(2.0/M)
for k in range(N):
for n in range(1, N-1):
y[k] += m*x[n]*np.cos(np.pi*n*k/M)
y[k] += m0 * x[0]
y[k] += m0 * x[N-1] * (1 if k % 2 == 0 else -1)
if norm == 'ortho':
y[0] *= 1/np.sqrt(2)
y[N-1] *= 1/np.sqrt(2)
return y
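# In the unnormalized case the loops above evaluate the textbook DCT-I:
#   y[k] = x[0] + (-1)**k * x[N-1] + 2 * sum_{n=1}^{N-2} x[n]*cos(pi*n*k/(N-1))
# while the 'ortho' branch rescales the boundary terms and the first/last rows
# so that the transform matrix is orthonormal.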
def naive_dst1(x, norm=None):
"""Calculate textbook definition version of DST-I."""
x = np.array(x, copy=True)
N = len(x)
M = N+1
y = np.zeros(N)
for k in range(N):
for n in range(N):
y[k] += 2*x[n]*np.sin(np.pi*(n+1.0)*(k+1.0)/M)
if norm == 'ortho':
y *= np.sqrt(0.5/M)
return y
def naive_dct4(x, norm=None):
"""Calculate textbook definition version of DCT-IV."""
x = np.array(x, copy=True)
N = len(x)
y = np.zeros(N)
for k in range(N):
for n in range(N):
y[k] += x[n]*np.cos(np.pi*(n+0.5)*(k+0.5)/(N))
if norm == 'ortho':
y *= np.sqrt(2.0/N)
else:
y *= 2
return y
def naive_dst4(x, norm=None):
"""Calculate textbook definition version of DST-IV."""
x = np.array(x, copy=True)
N = len(x)
y = np.zeros(N)
for k in range(N):
for n in range(N):
y[k] += x[n]*np.sin(np.pi*(n+0.5)*(k+0.5)/(N))
if norm == 'ortho':
y *= np.sqrt(2.0/N)
else:
y *= 2
return y
class TestComplex(object):
def test_dct_complex64(self):
y = dct(1j*np.arange(5, dtype=np.complex64))
x = 1j*dct(np.arange(5))
assert_array_almost_equal(x, y)
def test_dct_complex(self):
y = dct(np.arange(5)*1j)
x = 1j*dct(np.arange(5))
assert_array_almost_equal(x, y)
def test_idct_complex(self):
y = idct(np.arange(5)*1j)
x = 1j*idct(np.arange(5))
assert_array_almost_equal(x, y)
def test_dst_complex64(self):
y = dst(np.arange(5, dtype=np.complex64)*1j)
x = 1j*dst(np.arange(5))
assert_array_almost_equal(x, y)
def test_dst_complex(self):
y = dst(np.arange(5)*1j)
x = 1j*dst(np.arange(5))
assert_array_almost_equal(x, y)
def test_idst_complex(self):
y = idst(np.arange(5)*1j)
x = 1j*idst(np.arange(5))
assert_array_almost_equal(x, y)
class _TestDCTBase(object):
def setup_method(self):
self.rdt = None
self.dec = 14
self.type = None
def test_definition(self):
for i in FFTWDATA_SIZES:
x, yr, dt = fftw_dct_ref(self.type, i, self.rdt)
y = dct(x, type=self.type)
assert_equal(y.dtype, dt)
# XXX: we divide by np.max(y) because the tests fail otherwise. We
# should really use something like assert_array_approx_equal. The
# difference is due to fftw using a better algorithm w.r.t error
# propagation compared to the ones from fftpack.
assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec,
err_msg="Size %d failed" % i)
def test_axis(self):
nt = 2
for i in [7, 8, 9, 16, 32, 64]:
x = np.random.randn(nt, i)
y = dct(x, type=self.type)
for j in range(nt):
assert_array_almost_equal(y[j], dct(x[j], type=self.type),
decimal=self.dec)
x = x.T
y = dct(x, axis=0, type=self.type)
for j in range(nt):
assert_array_almost_equal(y[:,j], dct(x[:,j], type=self.type),
decimal=self.dec)
class _TestDCTIBase(_TestDCTBase):
def test_definition_ortho(self):
# Test orthonormal mode.
for i in range(len(X)):
x = np.array(X[i], dtype=self.rdt)
dt = np.result_type(np.float32, self.rdt)
y = dct(x, norm='ortho', type=1)
y2 = naive_dct1(x, norm='ortho')
assert_equal(y.dtype, dt)
assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec)
class _TestDCTIIBase(_TestDCTBase):
def test_definition_matlab(self):
# Test correspondence with MATLAB (orthonormal mode).
for i in range(len(X)):
dt = np.result_type(np.float32, self.rdt)
x = np.array(X[i], dtype=dt)
yr = Y[i]
y = dct(x, norm="ortho", type=2)
assert_equal(y.dtype, dt)
assert_array_almost_equal(y, yr, decimal=self.dec)
class _TestDCTIIIBase(_TestDCTBase):
def test_definition_ortho(self):
# Test orthonormal mode.
for i in range(len(X)):
x = np.array(X[i], dtype=self.rdt)
dt = np.result_type(np.float32, self.rdt)
y = dct(x, norm='ortho', type=2)
xi = dct(y, norm="ortho", type=3)
assert_equal(xi.dtype, dt)
assert_array_almost_equal(xi, x, decimal=self.dec)
class _TestDCTIVBase(_TestDCTBase):
def test_definition_ortho(self):
# Test orthonormal mode.
for i in range(len(X)):
x = np.array(X[i], dtype=self.rdt)
dt = np.result_type(np.float32, self.rdt)
y = dct(x, norm='ortho', type=4)
y2 = naive_dct4(x, norm='ortho')
assert_equal(y.dtype, dt)
assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec)
class TestDCTIDouble(_TestDCTIBase):
def setup_method(self):
self.rdt = np.double
self.dec = 10
self.type = 1
class TestDCTIFloat(_TestDCTIBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 4
self.type = 1
class TestDCTIInt(_TestDCTIBase):
def setup_method(self):
self.rdt = int
self.dec = 5
self.type = 1
class TestDCTIIDouble(_TestDCTIIBase):
def setup_method(self):
self.rdt = np.double
self.dec = 10
self.type = 2
class TestDCTIIFloat(_TestDCTIIBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 5
self.type = 2
class TestDCTIIInt(_TestDCTIIBase):
def setup_method(self):
self.rdt = int
self.dec = 5
self.type = 2
class TestDCTIIIDouble(_TestDCTIIIBase):
def setup_method(self):
self.rdt = np.double
self.dec = 14
self.type = 3
class TestDCTIIIFloat(_TestDCTIIIBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 5
self.type = 3
class TestDCTIIIInt(_TestDCTIIIBase):
def setup_method(self):
self.rdt = int
self.dec = 5
self.type = 3
class TestDCTIVDouble(_TestDCTIVBase):
def setup_method(self):
self.rdt = np.double
self.dec = 12
self.type = 4
class TestDCTIVFloat(_TestDCTIVBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 5
self.type = 4
class TestDCTIVInt(_TestDCTIVBase):
def setup_method(self):
self.rdt = int
self.dec = 5
self.type = 4
class _TestIDCTBase(object):
def setup_method(self):
self.rdt = None
self.dec = 14
self.type = None
def test_definition(self):
for i in FFTWDATA_SIZES:
xr, yr, dt = fftw_dct_ref(self.type, i, self.rdt)
x = idct(yr, type=self.type)
if self.type == 1:
x /= 2 * (i-1)
else:
x /= 2 * i
assert_equal(x.dtype, dt)
# XXX: we divide by np.max(y) because the tests fail otherwise. We
# should really use something like assert_array_approx_equal. The
# difference is due to fftw using a better algorithm w.r.t error
# propagation compared to the ones from fftpack.
assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec,
err_msg="Size %d failed" % i)
class TestIDCTIDouble(_TestIDCTBase):
def setup_method(self):
self.rdt = np.double
self.dec = 10
self.type = 1
class TestIDCTIFloat(_TestIDCTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 4
self.type = 1
class TestIDCTIInt(_TestIDCTBase):
def setup_method(self):
self.rdt = int
self.dec = 4
self.type = 1
class TestIDCTIIDouble(_TestIDCTBase):
def setup_method(self):
self.rdt = np.double
self.dec = 10
self.type = 2
class TestIDCTIIFloat(_TestIDCTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 5
self.type = 2
class TestIDCTIIInt(_TestIDCTBase):
def setup_method(self):
self.rdt = int
self.dec = 5
self.type = 2
class TestIDCTIIIDouble(_TestIDCTBase):
def setup_method(self):
self.rdt = np.double
self.dec = 14
self.type = 3
class TestIDCTIIIFloat(_TestIDCTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 5
self.type = 3
class TestIDCTIIIInt(_TestIDCTBase):
def setup_method(self):
self.rdt = int
self.dec = 5
self.type = 3
class TestIDCTIVDouble(_TestIDCTBase):
def setup_method(self):
self.rdt = np.double
self.dec = 12
self.type = 4
class TestIDCTIVFloat(_TestIDCTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 5
self.type = 4
class TestIDCTIVInt(_TestIDCTBase):
def setup_method(self):
self.rdt = int
self.dec = 5
self.type = 4
class _TestDSTBase(object):
def setup_method(self):
self.rdt = None # dtype
self.dec = None # number of decimals to match
self.type = None # dst type
def test_definition(self):
for i in FFTWDATA_SIZES:
xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt)
y = dst(xr, type=self.type)
assert_equal(y.dtype, dt)
# XXX: we divide by np.max(y) because the tests fail otherwise. We
# should really use something like assert_array_approx_equal. The
# difference is due to fftw using a better algorithm w.r.t error
# propagation compared to the ones from fftpack.
assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec,
err_msg="Size %d failed" % i)
class _TestDSTIBase(_TestDSTBase):
def test_definition_ortho(self):
# Test orthonormal mode.
for i in range(len(X)):
x = np.array(X[i], dtype=self.rdt)
dt = np.result_type(np.float32, self.rdt)
y = dst(x, norm='ortho', type=1)
y2 = naive_dst1(x, norm='ortho')
assert_equal(y.dtype, dt)
assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec)
class _TestDSTIVBase(_TestDSTBase):
def test_definition_ortho(self):
# Test orthonormal mode.
for i in range(len(X)):
x = np.array(X[i], dtype=self.rdt)
dt = np.result_type(np.float32, self.rdt)
y = dst(x, norm='ortho', type=4)
y2 = naive_dst4(x, norm='ortho')
assert_equal(y.dtype, dt)
assert_array_almost_equal(y, y2, decimal=self.dec)
class TestDSTIDouble(_TestDSTIBase):
def setup_method(self):
self.rdt = np.double
self.dec = 12
self.type = 1
class TestDSTIFloat(_TestDSTIBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 4
self.type = 1
class TestDSTIInt(_TestDSTIBase):
def setup_method(self):
self.rdt = int
self.dec = 5
self.type = 1
class TestDSTIIDouble(_TestDSTBase):
def setup_method(self):
self.rdt = np.double
self.dec = 14
self.type = 2
class TestDSTIIFloat(_TestDSTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 6
self.type = 2
class TestDSTIIInt(_TestDSTBase):
def setup_method(self):
self.rdt = int
self.dec = 6
self.type = 2
class TestDSTIIIDouble(_TestDSTBase):
def setup_method(self):
self.rdt = np.double
self.dec = 14
self.type = 3
class TestDSTIIIFloat(_TestDSTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 7
self.type = 3
class TestDSTIIIInt(_TestDSTBase):
def setup_method(self):
self.rdt = int
self.dec = 7
self.type = 3
class TestDSTIVDouble(_TestDSTIVBase):
def setup_method(self):
self.rdt = np.double
self.dec = 12
self.type = 4
class TestDSTIVFloat(_TestDSTIVBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 4
self.type = 4
class TestDSTIVInt(_TestDSTIVBase):
def setup_method(self):
self.rdt = int
self.dec = 5
self.type = 4
class _TestIDSTBase(object):
def setup_method(self):
self.rdt = None
self.dec = None
self.type = None
def test_definition(self):
for i in FFTWDATA_SIZES:
xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt)
x = idst(yr, type=self.type)
if self.type == 1:
x /= 2 * (i+1)
else:
x /= 2 * i
assert_equal(x.dtype, dt)
# XXX: we divide by np.max(x) because the tests fail otherwise. We
# should really use something like assert_array_approx_equal. The
# difference is due to fftw using a better algorithm w.r.t error
# propagation compared to the ones from fftpack.
assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec,
err_msg="Size %d failed" % i)
class TestIDSTIDouble(_TestIDSTBase):
def setup_method(self):
self.rdt = np.double
self.dec = 12
self.type = 1
class TestIDSTIFloat(_TestIDSTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 4
self.type = 1
class TestIDSTIInt(_TestIDSTBase):
def setup_method(self):
self.rdt = int
self.dec = 4
self.type = 1
class TestIDSTIIDouble(_TestIDSTBase):
def setup_method(self):
self.rdt = np.double
self.dec = 14
self.type = 2
class TestIDSTIIFloat(_TestIDSTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 6
self.type = 2
class TestIDSTIIInt(_TestIDSTBase):
def setup_method(self):
self.rdt = int
self.dec = 6
self.type = 2
class TestIDSTIIIDouble(_TestIDSTBase):
def setup_method(self):
self.rdt = np.double
self.dec = 14
self.type = 3
class TestIDSTIIIFloat(_TestIDSTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 6
self.type = 3
class TestIDSTIIIInt(_TestIDSTBase):
def setup_method(self):
self.rdt = int
self.dec = 6
self.type = 3
class TestIDSTIVDouble(_TestIDSTBase):
def setup_method(self):
self.rdt = np.double
self.dec = 12
self.type = 4
class TestIDSTIVFloat(_TestIDSTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 6
self.type = 4
class TestIDSTIVInt(_TestIDSTBase):
def setup_method(self):
self.rdt = int
self.dec = 6
self.type = 4
class TestOverwrite(object):
"""Check input overwrite behavior."""
real_dtypes = [np.float32, np.float64]
def _check(self, x, routine, type, fftsize, axis, norm, overwrite_x, **kw):
x2 = x.copy()
routine(x2, type, fftsize, axis, norm, overwrite_x=overwrite_x)
sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % (
routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
if not overwrite_x:
assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
def _check_1d(self, routine, dtype, shape, axis):
np.random.seed(1234)
if np.issubdtype(dtype, np.complexfloating):
data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
else:
data = np.random.randn(*shape)
data = data.astype(dtype)
for type in [1, 2, 3, 4]:
for overwrite_x in [True, False]:
for norm in [None, 'ortho']:
self._check(data, routine, type, None, axis, norm,
overwrite_x)
def test_dct(self):
for dtype in self.real_dtypes:
self._check_1d(dct, dtype, (16,), -1)
self._check_1d(dct, dtype, (16, 2), 0)
self._check_1d(dct, dtype, (2, 16), 1)
def test_idct(self):
for dtype in self.real_dtypes:
self._check_1d(idct, dtype, (16,), -1)
self._check_1d(idct, dtype, (16, 2), 0)
self._check_1d(idct, dtype, (2, 16), 1)
def test_dst(self):
for dtype in self.real_dtypes:
self._check_1d(dst, dtype, (16,), -1)
self._check_1d(dst, dtype, (16, 2), 0)
self._check_1d(dst, dtype, (2, 16), 1)
def test_idst(self):
for dtype in self.real_dtypes:
self._check_1d(idst, dtype, (16,), -1)
self._check_1d(idst, dtype, (16, 2), 0)
self._check_1d(idst, dtype, (2, 16), 1)
class Test_DCTN_IDCTN(object):
dec = 14
dct_type = [1, 2, 3, 4]
norms = [None, 'ortho']
rstate = np.random.RandomState(1234)
shape = (32, 16)
data = rstate.randn(*shape)
@pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
(dstn, idstn)])
@pytest.mark.parametrize('axes', [None,
1, (1,), [1],
0, (0,), [0],
(0, 1), [0, 1],
(-2, -1), [-2, -1]])
@pytest.mark.parametrize('dct_type', dct_type)
@pytest.mark.parametrize('norm', ['ortho'])
def test_axes_round_trip(self, fforward, finverse, axes, dct_type, norm):
tmp = fforward(self.data, type=dct_type, axes=axes, norm=norm)
tmp = finverse(tmp, type=dct_type, axes=axes, norm=norm)
assert_array_almost_equal(self.data, tmp, decimal=12)
@pytest.mark.parametrize('fforward,fforward_ref', [(dctn, dct_2d_ref),
(dstn, dst_2d_ref)])
@pytest.mark.parametrize('dct_type', dct_type)
@pytest.mark.parametrize('norm', norms)
def test_dctn_vs_2d_reference(self, fforward, fforward_ref,
dct_type, norm):
y1 = fforward(self.data, type=dct_type, axes=None, norm=norm)
y2 = fforward_ref(self.data, type=dct_type, norm=norm)
assert_array_almost_equal(y1, y2, decimal=11)
@pytest.mark.parametrize('finverse,finverse_ref', [(idctn, idct_2d_ref),
(idstn, idst_2d_ref)])
@pytest.mark.parametrize('dct_type', dct_type)
@pytest.mark.parametrize('norm', [None, 'ortho'])
def test_idctn_vs_2d_reference(self, finverse, finverse_ref,
dct_type, norm):
fdata = dctn(self.data, type=dct_type, norm=norm)
y1 = finverse(fdata, type=dct_type, norm=norm)
y2 = finverse_ref(fdata, type=dct_type, norm=norm)
assert_array_almost_equal(y1, y2, decimal=11)
@pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
(dstn, idstn)])
def test_axes_and_shape(self, fforward, finverse):
with assert_raises(ValueError,
match="when given, axes and shape arguments"
" have to be of the same length"):
fforward(self.data, shape=self.data.shape[0], axes=(0, 1))
with assert_raises(ValueError,
match="when given, axes and shape arguments"
" have to be of the same length"):
fforward(self.data, shape=self.data.shape[0], axes=None)
with assert_raises(ValueError,
match="when given, axes and shape arguments"
" have to be of the same length"):
fforward(self.data, shape=self.data.shape, axes=0)
@pytest.mark.parametrize('fforward', [dctn, dstn])
def test_shape(self, fforward):
tmp = fforward(self.data, shape=(128, 128), axes=None)
assert_equal(tmp.shape, (128, 128))
@pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
(dstn, idstn)])
@pytest.mark.parametrize('axes', [1, (1,), [1],
0, (0,), [0]])
def test_shape_is_none_with_axes(self, fforward, finverse, axes):
tmp = fforward(self.data, shape=None, axes=axes, norm='ortho')
tmp = finverse(tmp, shape=None, axes=axes, norm='ortho')
assert_array_almost_equal(self.data, tmp, decimal=self.dec)
|
|
import csv, cgi
import json
import dxr.plugins
import dxr.schema
import dxr.utils
import os, sys
import re, urllib
from dxr.languages import language_schema
PLUGIN_NAME = 'clang'
__all__ = dxr.plugins.indexer_exports()
def pre_process(tree, env):
# Set up environment variables for invoking clang at build time.
# We'll store all the harvested metadata in the plugin's temporary folder.
temp_folder = os.path.join(tree.temp_folder, 'plugins', PLUGIN_NAME)
plugin_folder = os.path.join(tree.config.plugin_folder, PLUGIN_NAME)
flags = [
'-load', os.path.join(plugin_folder, 'libclang-index-plugin.so'),
'-add-plugin', 'dxr-index',
'-plugin-arg-dxr-index', tree.source_folder
]
flags_str = ""
for flag in flags:
flags_str += ' -Xclang ' + flag
env['CC'] = "clang %s" % flags_str
env['CXX'] = "clang++ %s" % flags_str
env['DXR_CC'] = env['CC']
env['DXR_CXX'] = env['CXX']
env['DXR_CLANG_FLAGS'] = flags_str
env['DXR_CXX_CLANG_OBJECT_FOLDER'] = tree.object_folder
env['DXR_CXX_CLANG_TEMP_FOLDER'] = temp_folder
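# For illustration only (the paths are hypothetical): with the flags above, the
# resulting compiler wrapper looks roughly like
#   CC="clang -Xclang -load -Xclang /path/to/plugins/clang/libclang-index-plugin.so
#       -Xclang -add-plugin -Xclang dxr-index
#       -Xclang -plugin-arg-dxr-index -Xclang /path/to/source"
# so every compiler invocation in the build also runs the dxr-index plugin.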
def post_process(tree, conn):
print "cxx-clang post-processing:"
print " - Adding tables"
conn.executescript(schema.get_create_sql())
print " - Processing files"
temp_folder = os.path.join(tree.temp_folder, 'plugins', PLUGIN_NAME)
for f in os.listdir(temp_folder):
csv_path = os.path.join(temp_folder, f)
dump_indexer_output(conn, csv_path)
fixup_scope(conn)
print " - Generating callgraph"
generate_callgraph(conn)
print " - Generating inheritance graph"
generate_inheritance(conn)
print " - Updating definitions"
update_defids(conn)
print " - Updating references"
update_refs(conn)
print " - Committing changes"
conn.commit()
schema = dxr.schema.Schema({
# Typedef information in the tables
"typedefs": [
("id", "INTEGER", False), # The typedef's id
("name", "VARCHAR(256)", False), # Simple name of the typedef
("qualname", "VARCHAR(256)", False), # Fully-qualified name of the typedef
("extent_start", "INTEGER", True),
("extent_end", "INTEGER", True),
("_location", True),
("_key", "id"),
("_index", "qualname"),
],
# References to functions
"function_refs": [
("refid", "INTEGER", True), # ID of the function being referenced
("extent_start", "INTEGER", True),
("extent_end", "INTEGER", True),
("_location", True),
("_location", True, 'referenced'),
("_fkey", "refid", "functions", "id"),
("_index", "refid"),
],
# References to macros
"macro_refs": [
("refid", "INTEGER", True), # ID of the macro being referenced
("extent_start", "INTEGER", True),
("extent_end", "INTEGER", True),
("_location", True),
("_location", True, 'referenced'),
("_fkey", "refid", "macros", "id"),
("_index", "refid"),
],
# References to types
"type_refs": [
("refid", "INTEGER", True), # ID of the type being referenced
("extent_start", "INTEGER", True),
("extent_end", "INTEGER", True),
("_location", True),
("_location", True, 'referenced'),
("_fkey", "refid", "types", "id"),
("_index", "refid"),
],
# References to typedefs
"typedef_refs": [
("refid", "INTEGER", True), # ID of the typedef being referenced
("extent_start", "INTEGER", True),
("extent_end", "INTEGER", True),
("_location", True),
("_location", True, 'referenced'),
("_fkey", "refid", "typedefs", "id"),
("_index", "refid"),
],
# References to variables
"variable_refs": [
("refid", "INTEGER", True), # ID of the variable being referenced
("extent_start", "INTEGER", True),
("extent_end", "INTEGER", True),
("_location", True),
("_location", True, 'referenced'),
("_fkey", "refid", "variables", "id"),
("_index", "refid"),
],
# Warnings found while compiling
"warnings": [
("msg", "VARCHAR(256)", False), # Text of the warning
("opt", "VARCHAR(64)", True), # option controlling this warning (-Wxxx)
("extent_start", "INTEGER", True),
("extent_end", "INTEGER", True),
("_location", True),
],
# Declaration/definition mapping for functions
"function_decldef": [
("defid", "INTEGER", True), # ID of the definition instance
("_location", True),
("_location", True, 'definition'),
# Extents of the declaration
("extent_start", "INTEGER", True),
("extent_end", "INTEGER", True),
("_fkey", "defid", "functions", "id"),
("_index", "defid"),
],
# Declaration/definition mapping for types
"type_decldef": [
("defid", "INTEGER", True), # ID of the definition instance
("_location", True),
("_location", True, 'definition'),
# Extents of the declaration
("extent_start", "INTEGER", True),
("extent_end", "INTEGER", True),
("_fkey", "defid", "types", "id"),
("_index", "defid"),
],
# Declaration/definition mapping for variables
"variable_decldef": [
("defid", "INTEGER", True), # ID of the definition instance
("_location", True),
("_location", True, 'definition'),
# Extents of the declaration
("extent_start", "INTEGER", True),
("extent_end", "INTEGER", True),
("_fkey", "defid", "variables", "id"),
("_index", "defid"),
],
# Macros: this is a table of all of the macros we come across in the code.
"macros": [
("id", "INTEGER", False), # The macro id, for references
("name", "VARCHAR(256)", False), # The name of the macro
("args", "VARCHAR(256)", True), # The args of the macro (if any)
("text", "TEXT", True), # The macro contents
("extent_start", "INTEGER", True),
("extent_end", "INTEGER", True),
("_location", True)
],
# The following two tables combine to form the callgraph implementation.
# In essence, the callgraph can be viewed as a kind of hypergraph, where the
# edges go from functions to sets of functions and variables. For use in the
# database, we make some big assumptions: the targetid is going to be either
# a function or a variable (the direct thing we called); if the function is
# virtual or the target is a variable, we use the targets table to identify
# what the possible implementations could be. An illustrative example follows
# the schema definition below.
"callers": [
("callerid", "INTEGER", False), # The function in which the call occurs
("targetid", "INTEGER", False), # The target of the call
("_key", "callerid", "targetid"),
("_fkey", "callerid", "functions", "id")
],
"targets": [
("targetid", "INTEGER", False), # The target of the call
("funcid", "INTEGER", False), # One of the functions in the target set
("_key", "targetid", "funcid"),
("_fkey", "targetid", "functions", "id")
]
})
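# Illustrative sketch only (the function names are hypothetical), based on how
# generate_callgraph() below fills these tables:
#
#   A direct call Foo::run() -> Bar::helper() becomes a single row
#       callers(callerid=<id of Foo::run>, targetid=<id of Bar::helper>)
#
#   A virtual call through Base::work() gets a negated target id, and the
#   targets table enumerates the candidate implementations:
#       callers(callerid=<caller id>, targetid=-<id of Base::work>)
#       targets(targetid=-<id of Base::work>, funcid=<id of Base::work>)
#       targets(targetid=-<id of Base::work>, funcid=<id of Derived::work>)
#
# Consumers expand a virtual call by joining callers.targetid to targets.targetid.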
file_cache = {}
decl_master = {}
inheritance = {}
calls = {}
overrides = {}
def getFileID(conn, path):
global file_cache
file_id = file_cache.get(path, False)
if file_id is not False:
return file_id
cur = conn.cursor()
row = cur.execute("SELECT id FROM files where path=?", (path,)).fetchone()
file_id = None
if row:
file_id = row[0]
file_cache[path] = file_id
return file_id
def splitLoc(conn, value):
arr = value.split(':')
return (getFileID(conn, arr[0]), int(arr[1]), int(arr[2]))
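# For example (hypothetical path), splitLoc(conn, 'src/foo.cpp:12:4') returns
# (<file id of src/foo.cpp>, 12, 4); the file id is None if the path is not
# present in the files table.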
def fixupEntryPath(args, file_key, conn, prefix=None):
value = args[file_key]
loc = splitLoc(conn, value)
if prefix is not None:
prefix = prefix + "_"
else:
prefix = ''
args[prefix + 'file_id'] = loc[0]
args[prefix + 'file_line'] = loc[1]
args[prefix + 'file_col'] = loc[2]
return loc[0] is not None
def fixupExtent(args, extents_key):
if extents_key not in args:
return
value = args[extents_key]
arr = value.split(':')
args['extent_start'] = int(arr[0])
args['extent_end'] = int(arr[1])
del args[extents_key]
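# For example, fixupExtent(args, 'extent') turns args = {'extent': '120:168'}
# into args = {'extent_start': 120, 'extent_end': 168}, dropping the original
# 'extent' key.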
def getScope(args, conn):
row = conn.execute("SELECT id FROM scopes WHERE file_id=? AND file_line=? AND file_col=?",
(args['file_id'], args['file_line'], args['file_col'])).fetchone()
if row is not None:
return row[0]
return None
def addScope(args, conn, name, id):
scope = {}
scope['name'] = args[name]
scope['id'] = args[id]
scope['file_id'] = args['file_id']
scope['file_line'] = args['file_line']
scope['file_col'] = args['file_col']
scope['language'] = 'native'
stmt = language_schema.get_insert_sql('scopes', scope)
conn.execute(stmt[0], stmt[1])
def handleScope(args, conn, canonicalize=False):
scope = {}
if 'scopename' not in args:
return
scope['name'] = args['scopename']
scope['loc'] = args['scopeloc']
scope['language'] = 'native'
if not fixupEntryPath(scope, 'loc', conn):
return None
if canonicalize is True:
decl = canonicalize_decl(scope['name'], scope['file_id'], scope['file_line'], scope['file_col'])
scope['file_id'], scope['file_line'], scope['file_col'] = decl[1], decl[2], decl[3]
scopeid = getScope(scope, conn)
if scopeid is None:
scope['id'] = scopeid = dxr.utils.next_global_id()
stmt = language_schema.get_insert_sql('scopes', scope)
conn.execute(stmt[0], stmt[1])
if scopeid is not None:
args['scopeid'] = scopeid
def process_decldef(args, conn):
if 'kind' not in args:
return None
# Store declaration map basics in memory
name, defloc, declloc = args['name'], args['defloc'], args['declloc']
defid, defline, defcol = splitLoc(conn, args['defloc'])
declid, declline, declcol = splitLoc(conn, args['declloc'])
if defid is None or declid is None:
return None
# FIXME: should kind be included in this mapping?
decl_master[(name, declid, declline, declcol)] = (defid, defline, defcol)
decl_master[(name, defid, defline, defcol)] = (defid, defline, defcol)
if not fixupEntryPath(args, 'declloc', conn):
return None
if not fixupEntryPath(args, 'defloc', conn, 'definition'):
return None
fixupExtent(args, 'extent')
return schema.get_insert_sql(args['kind'] + '_decldef', args)
def process_type(args, conn):
if not fixupEntryPath(args, 'loc', conn):
return None
# The scope might have been added previously to satisfy another process_* call
scopeid = getScope(args, conn)
if scopeid is not None:
args['id'] = scopeid
else:
args['id'] = dxr.utils.next_global_id()
addScope(args, conn, 'name', 'id')
handleScope(args, conn)
fixupExtent(args, 'extent')
return language_schema.get_insert_sql('types', args)
def process_typedef(args, conn):
args['id'] = dxr.utils.next_global_id()
if not fixupEntryPath(args, 'loc', conn):
return None
fixupExtent(args, 'extent')
# handleScope(args, conn)
return schema.get_insert_sql('typedefs', args)
def process_function(args, conn):
if not fixupEntryPath(args, 'loc', conn):
return None
scopeid = getScope(args, conn)
if scopeid is not None:
args['id'] = scopeid
else:
args['id'] = dxr.utils.next_global_id()
addScope(args, conn, 'name', 'id')
if 'overridename' in args:
overrides[args['id']] = (args['overridename'], args['overrideloc'])
handleScope(args, conn)
fixupExtent(args, 'extent')
return language_schema.get_insert_sql('functions', args)
def process_impl(args, conn):
inheritance[args['tbname'], args['tbloc'], args['tcname'], args['tcloc']] = args
return None
def process_variable(args, conn):
args['id'] = dxr.utils.next_global_id()
if not fixupEntryPath(args, 'loc', conn):
return None
handleScope(args, conn)
fixupExtent(args, 'extent')
return language_schema.get_insert_sql('variables', args)
def process_ref(args, conn):
if 'extent' not in args:
return None
if 'kind' not in args:
return None
if not fixupEntryPath(args, 'loc', conn):
return None
if not fixupEntryPath(args, 'declloc', conn, 'referenced'):
return None
fixupExtent(args, 'extent')
return schema.get_insert_sql(args['kind'] + '_refs', args)
def process_warning(args, conn):
if not fixupEntryPath(args, 'loc', conn):
return None
fixupExtent(args, 'extent')
return schema.get_insert_sql('warnings', args)
def process_macro(args, conn):
args['id'] = dxr.utils.next_global_id()
if 'text' in args:
args['text'] = args['text'].replace("\\\n", "\n").strip()
if not fixupEntryPath(args, 'loc', conn):
return None
fixupExtent(args, 'extent')
return schema.get_insert_sql('macros', args)
def process_call(args, conn):
if 'callername' in args:
calls[args['callername'], args['callerloc'],
args['calleename'], args['calleeloc']] = args
else:
calls[args['calleename'], args['calleeloc']] = args
return None
def load_indexer_output(fname):
f = open(fname, "rb")
try:
parsed_iter = csv.reader(f)
for line in parsed_iter:
# The first column is the record type being read; the remaining columns are
# key/value pairs collected into the args dict
argobj = {}
for i in range(1, len(line), 2):
argobj[line[i]] = line[i + 1]
globals()['process_' + line[0]](argobj)
except:
print fname, line
raise
finally:
f.close()
def dump_indexer_output(conn, fname):
f = open(fname, 'r')
limit = 0
try:
parsed_iter = csv.reader(f)
for line in parsed_iter:
args = {}
# The first column is the record type being read; the remaining columns are
# key/value pairs to be passed in as arguments
for i in range(1, len(line), 2):
args[line[i]] = line[i + 1]
stmt = globals()['process_' + line[0]](args, conn)
if stmt is None:
continue
if isinstance(stmt, list):
for elem in stmt:
conn.execute(elem[0], elem[1])
elif isinstance(stmt, tuple):
try:
conn.execute(stmt[0], stmt[1])
except:
print line
print stmt
raise
else:
conn.execute(stmt)
limit = limit + 1
if limit > 10000:
limit = 0
conn.commit()
except IndexError, e:
raise e
finally:
f.close()
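# A hypothetical CSV row such as
#   function,name,bar,qualname,Foo::bar(),loc,src/foo.cpp:12:4,extent,120:168
# is dispatched to process_function({'name': 'bar', 'qualname': 'Foo::bar()',
# 'loc': 'src/foo.cpp:12:4', 'extent': '120:168'}, conn), and the returned
# insert statement (if any) is executed against the database above.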
def canonicalize_decl(name, id, line, col):
value = decl_master.get((name, id, line, col), None)
if value is None:
return (name, id, line, col)
else:
return (name, value[0], value[1], value[2])
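# For example (hypothetical locations): if process_decldef recorded that the
# declaration ('Foo::bar()', <header file id>, 3, 10) maps to the definition
# (<source file id>, 42, 6), then canonicalize_decl('Foo::bar()', <header file id>, 3, 10)
# returns ('Foo::bar()', <source file id>, 42, 6); unknown tuples pass through
# unchanged.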
def recanon_decl(name, loc):
decl_master[name, loc] = loc
return (name, loc)
def fixup_scope(conn):
conn.execute ("UPDATE types SET scopeid = (SELECT id FROM scopes WHERE " +
"scopes.file_id = types.file_id AND scopes.file_line = types.file_line " +
"AND scopes.file_col = types.file_col) WHERE scopeid IS NULL")
conn.execute ("UPDATE functions SET scopeid = (SELECT id from scopes where " +
"scopes.file_id = functions.file_id AND scopes.file_line = functions.file_line " +
"AND scopes.file_col = functions.file_col) WHERE scopeid IS NULL")
conn.execute ("UPDATE variables SET scopeid = (SELECT id from scopes where " +
"scopes.file_id = variables.file_id AND scopes.file_line = variables.file_line " +
"AND scopes.file_col = variables.file_col) WHERE scopeid IS NULL")
def build_inherits(base, child, direct):
db = { 'tbase': base, 'tderived': child }
if direct is not None:
db['inhtype'] = direct
return db
def generate_inheritance(conn):
childMap, parentMap = {}, {}
types = {}
for row in conn.execute("SELECT qualname, file_id, file_line, file_col, id from types").fetchall():
types[(row[0], row[1], row[2], row[3])] = row[4]
for infoKey in inheritance:
info = inheritance[infoKey]
try:
base_loc = splitLoc(conn, info['tbloc'])
child_loc = splitLoc(conn, info['tcloc'])
if base_loc[0] is None or child_loc[0] is None:
continue
base = types[canonicalize_decl(info['tbname'], base_loc[0], base_loc[1], base_loc[2])]
child = types[canonicalize_decl(info['tcname'], child_loc[0], child_loc[1], child_loc[2])]
except KeyError:
continue
conn.execute("INSERT OR IGNORE INTO impl(tbase, tderived, inhtype) VALUES (?, ?, ?)",
(base, child, info.get('access', '')))
# Get all known relations
subs = childMap.setdefault(child, [])
supers = parentMap.setdefault(base, [])
# Use this information
for sub in subs:
conn.execute("INSERT OR IGNORE INTO impl(tbase, tderived) VALUES (?, ?)",
(base, sub))
parentMap[sub].append(base)
for sup in supers:
conn.execute("INSERT OR IGNORE INTO impl(tbase, tderived) VALUES (?, ?)",
(sup, child))
childMap[sup].append(child)
# Carry through these relations
newsubs = childMap.setdefault(base, [])
newsubs.append(child)
newsubs.extend(subs)
newsupers = parentMap.setdefault(child, [])
newsupers.append(base)
newsupers.extend(supers)
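# For example (class names are hypothetical): if C derives from B and B derives
# from A, the loop above inserts the direct rows impl(A, B, <access>) and
# impl(B, C, <access>), and also propagates the indirect relation impl(A, C)
# with no inhtype, so transitive inheritance can be queried directly.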
def generate_callgraph(conn):
global calls
functions = {}
variables = {}
callgraph = []
for row in conn.execute("SELECT qualname, file_id, file_line, file_col, id FROM functions").fetchall():
functions[(row[0], row[1], row[2], row[3])] = row[4]
for row in conn.execute("SELECT name, file_id, file_line, file_col, id FROM variables").fetchall():
variables[(row[0], row[1], row[2], row[3])] = row[4]
# Generate callers table
for call in calls.values():
if 'callername' in call:
caller_loc = splitLoc(conn, call['callerloc'])
if caller_loc[0] is None:
continue
source = canonicalize_decl(call['callername'], caller_loc[0], caller_loc[1], caller_loc[2])
call['callerid'] = functions.get(source)
if call['callerid'] is None:
continue
else:
call['callerid'] = 0
target_loc = splitLoc(conn, call['calleeloc'])
if target_loc[0] is None:
continue
target = canonicalize_decl(call['calleename'], target_loc[0], target_loc[1], target_loc[2])
targetid = functions.get(target)
if targetid is None:
targetid = variables.get(target)
if targetid is not None:
call['targetid'] = targetid
callgraph.append(call)
del variables
# Generate targets table
overridemap = {}
for func, funcid in functions.iteritems():
override = overrides.get(funcid)
if override is None:
continue
override_loc = splitLoc(conn, override[1])
if override_loc[0] is None:
continue
base = canonicalize_decl(override[0], override_loc[0], override_loc[1], override_loc[2])
basekey = functions.get(base)
if basekey is None:
continue
overridemap.setdefault(basekey, set()).add(funcid)
rescan = [x for x in overridemap]
while len(rescan) > 0:
base = rescan.pop(0)
childs = overridemap[base]
prev = len(childs)
temp = childs.union(*(overridemap.get(sub, []) for sub in childs))
childs.update(temp)
if len(childs) != prev:
rescan.append(base)
for base, childs in overridemap.iteritems():
conn.execute("INSERT OR IGNORE INTO targets (targetid, funcid) VALUES (?, ?)",
(-base, base))
for child in childs:
conn.execute("INSERT OR IGNORE INTO targets (targetid, funcid) VALUES (?, ?)",
(-base, child))
for call in callgraph:
if call['calltype'] == 'virtual':
targetid = call['targetid']
call['targetid'] = -targetid
if targetid not in overridemap:
overridemap[targetid] = set()
conn.execute("INSERT OR IGNORE INTO targets (targetid, funcid) VALUES (?, ?)",
(-targetid, targetid))
conn.execute("INSERT OR IGNORE INTO callers (callerid, targetid) VALUES (?, ?)",
(call['callerid'], call['targetid']))
def update_defids(conn):
sql = """
UPDATE type_decldef SET defid = (
SELECT id
FROM types AS def
WHERE def.file_id = definition_file_id
AND def.file_line = definition_file_line
AND def.file_col = definition_file_col
)"""
conn.execute(sql)
sql = """
UPDATE function_decldef SET defid = (
SELECT id
FROM functions AS def
WHERE def.file_id = definition_file_id
AND def.file_line = definition_file_line
AND def.file_col = definition_file_col
)"""
conn.execute(sql)
sql = """
UPDATE variable_decldef SET defid = (
SELECT id
FROM variables AS def
WHERE def.file_id = definition_file_id
AND def.file_line = definition_file_line
AND def.file_col = definition_file_col
)"""
conn.execute(sql)
def update_refs(conn):
# References to declarations
sql = """
UPDATE type_refs SET refid = (
SELECT defid
FROM type_decldef AS decl
WHERE decl.file_id = referenced_file_id
AND decl.file_line = referenced_file_line
AND decl.file_col = referenced_file_col
) WHERE refid IS NULL"""
conn.execute(sql)
sql = """
UPDATE function_refs SET refid = (
SELECT defid
FROM function_decldef AS decl
WHERE decl.file_id = referenced_file_id
AND decl.file_line = referenced_file_line
AND decl.file_col = referenced_file_col
) WHERE refid IS NULL"""
conn.execute(sql)
sql = """
UPDATE variable_refs SET refid = (
SELECT defid
FROM variable_decldef AS decl
WHERE decl.file_id = referenced_file_id
AND decl.file_line = referenced_file_line
AND decl.file_col = referenced_file_col
) WHERE refid IS NULL"""
conn.execute(sql)
# References to definitions
sql = """
UPDATE macro_refs SET refid = (
SELECT id
FROM macros AS def
WHERE def.file_id = referenced_file_id
AND def.file_line = referenced_file_line
AND def.file_col = referenced_file_col
) WHERE refid IS NULL"""
conn.execute(sql)
sql = """
UPDATE type_refs SET refid = (
SELECT id
FROM types AS def
WHERE def.file_id = referenced_file_id
AND def.file_line = referenced_file_line
AND def.file_col = referenced_file_col
) WHERE refid IS NULL"""
conn.execute(sql)
sql = """
UPDATE typedef_refs SET refid = (
SELECT id
FROM typedefs AS def
WHERE def.file_id = referenced_file_id
AND def.file_line = referenced_file_line
AND def.file_col = referenced_file_col
) WHERE refid IS NULL"""
conn.execute(sql)
sql = """
UPDATE function_refs SET refid = (
SELECT id
FROM functions AS def
WHERE def.file_id = referenced_file_id
AND def.file_line = referenced_file_line
AND def.file_col = referenced_file_col
) WHERE refid IS NULL"""
conn.execute(sql)
sql = """
UPDATE variable_refs SET refid = (
SELECT id
FROM variables AS def
WHERE def.file_id = referenced_file_id
AND def.file_line = referenced_file_line
AND def.file_col = referenced_file_col
) WHERE refid IS NULL"""
conn.execute(sql)
|
|
from sympy import (pi, sin, cos, Symbol, Integral, Sum, sqrt, log, exp, Ne,
oo, LambertW, I, meijerg, exp_polar, Max, Piecewise, And,
real_root)
from sympy.plotting import (plot, plot_parametric, plot3d_parametric_line,
plot3d, plot3d_parametric_surface)
from sympy.plotting.plot import unset_show, plot_contour, PlotGrid
from sympy.utilities import lambdify as lambdify_
from sympy.utilities.pytest import skip, raises, warns
from sympy.plotting.experimental_lambdify import lambdify
from sympy.external import import_module
from tempfile import NamedTemporaryFile
import os
unset_show()
# XXX: We could implement this as a context manager instead
# That would need rewriting the plot_and_save() function
# entirely
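# A minimal sketch of that context-manager idea (hypothetical; the tests below
# keep using the class-based manager):
#
#   from contextlib import contextmanager
#
#   @contextmanager
#   def tmp_file_manager():
#       names = []
#       def tmp_file(name=''):
#           names.append(NamedTemporaryFile(prefix=name, suffix='.png').name)
#           return names[-1]
#       try:
#           yield tmp_file
#       finally:
#           for n in names:
#               try:
#                   os.remove(n)
#               except OSError:
#                   pass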
class TmpFileManager:
tmp_files = []
@classmethod
def tmp_file(cls, name=''):
cls.tmp_files.append(NamedTemporaryFile(prefix=name, suffix='.png').name)
return cls.tmp_files[-1]
@classmethod
def cleanup(cls):
for file in cls.tmp_files:
try:
os.remove(file)
except OSError:
# If the file doesn't exist, for instance, if the test failed.
pass
def plot_and_save_1(name):
tmp_file = TmpFileManager.tmp_file
x = Symbol('x')
y = Symbol('y')
###
# Examples from the 'introduction' notebook
###
p = plot(x)
p = plot(x*sin(x), x*cos(x))
p.extend(p)
p[0].line_color = lambda a: a
p[1].line_color = 'b'
p.title = 'Big title'
p.xlabel = 'the x axis'
p[1].label = 'straight line'
p.legend = True
p.aspect_ratio = (1, 1)
p.xlim = (-15, 20)
p.save(tmp_file('%s_basic_options_and_colors' % name))
p._backend.close()
p.extend(plot(x + 1))
p.append(plot(x + 3, x**2)[1])
p.save(tmp_file('%s_plot_extend_append' % name))
p[2] = plot(x**2, (x, -2, 3))
p.save(tmp_file('%s_plot_setitem' % name))
p._backend.close()
p = plot(sin(x), (x, -2*pi, 4*pi))
p.save(tmp_file('%s_line_explicit' % name))
p._backend.close()
p = plot(sin(x))
p.save(tmp_file('%s_line_default_range' % name))
p._backend.close()
p = plot((x**2, (x, -5, 5)), (x**3, (x, -3, 3)))
p.save(tmp_file('%s_line_multiple_range' % name))
p._backend.close()
raises(ValueError, lambda: plot(x, y))
#Piecewise plots
p = plot(Piecewise((1, x > 0), (0, True)), (x, -1, 1))
p.save(tmp_file('%s_plot_piecewise' % name))
p._backend.close()
p = plot(Piecewise((x, x < 1), (x**2, True)), (x, -3, 3))
p.save(tmp_file('%s_plot_piecewise_2' % name))
p._backend.close()
# test issue 7471
p1 = plot(x)
p2 = plot(3)
p1.extend(p2)
p1.save(tmp_file('%s_horizontal_line' % name))
p1._backend.close()
# test issue 10925
f = Piecewise((-1, x < -1), (x, And(-1 <= x, x < 0)), \
(x**2, And(0 <= x, x < 1)), (x**3, x >= 1))
p = plot(f, (x, -3, 3))
p.save(tmp_file('%s_plot_piecewise_3' % name))
p._backend.close()
def plot_and_save_2(name):
tmp_file = TmpFileManager.tmp_file
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
#parametric 2d plots.
#Single plot with default range.
plot_parametric(sin(x), cos(x)).save(tmp_file())
#Single plot with range.
p = plot_parametric(sin(x), cos(x), (x, -5, 5))
p.save(tmp_file('%s_parametric_range' % name))
p._backend.close()
#Multiple plots with same range.
p = plot_parametric((sin(x), cos(x)), (x, sin(x)))
p.save(tmp_file('%s_parametric_multiple' % name))
p._backend.close()
#Multiple plots with different ranges.
p = plot_parametric((sin(x), cos(x), (x, -3, 3)), (x, sin(x), (x, -5, 5)))
p.save(tmp_file('%s_parametric_multiple_ranges' % name))
p._backend.close()
#depth of recursion specified.
p = plot_parametric(x, sin(x), depth=13)
p.save(tmp_file('%s_recursion_depth' % name))
p._backend.close()
#No adaptive sampling.
p = plot_parametric(cos(x), sin(x), adaptive=False, nb_of_points=500)
p.save(tmp_file('%s_adaptive' % name))
p._backend.close()
#3d parametric plots
p = plot3d_parametric_line(sin(x), cos(x), x)
p.save(tmp_file('%s_3d_line' % name))
p._backend.close()
p = plot3d_parametric_line(
(sin(x), cos(x), x, (x, -5, 5)), (cos(x), sin(x), x, (x, -3, 3)))
p.save(tmp_file('%s_3d_line_multiple' % name))
p._backend.close()
p = plot3d_parametric_line(sin(x), cos(x), x, nb_of_points=30)
p.save(tmp_file('%s_3d_line_points' % name))
p._backend.close()
# 3d surface single plot.
p = plot3d(x * y)
p.save(tmp_file('%s_surface' % name))
p._backend.close()
# Multiple 3D plots with same range.
p = plot3d(-x * y, x * y, (x, -5, 5))
p.save(tmp_file('%s_surface_multiple' % name))
p._backend.close()
# Multiple 3D plots with different ranges.
p = plot3d(
(x * y, (x, -3, 3), (y, -3, 3)), (-x * y, (x, -3, 3), (y, -3, 3)))
p.save(tmp_file('%s_surface_multiple_ranges' % name))
p._backend.close()
# Single Parametric 3D plot
p = plot3d_parametric_surface(sin(x + y), cos(x - y), x - y)
p.save(tmp_file('%s_parametric_surface' % name))
p._backend.close()
# Multiple Parametric 3D plots.
p = plot3d_parametric_surface(
(x*sin(z), x*cos(z), z, (x, -5, 5), (z, -5, 5)),
(sin(x + y), cos(x - y), x - y, (x, -5, 5), (y, -5, 5)))
p.save(tmp_file('%s_parametric_surface' % name))
p._backend.close()
# Single Contour plot.
p = plot_contour(sin(x)*sin(y), (x, -5, 5), (y, -5, 5))
p.save(tmp_file('%s_contour_plot' % name))
p._backend.close()
# Multiple Contour plots with same range.
p = plot_contour(x**2 + y**2, x**3 + y**3, (x, -5, 5), (y, -5, 5))
p.save(tmp_file('%s_contour_plot' % name))
p._backend.close()
# Multiple Contour plots with different range.
p = plot_contour((x**2 + y**2, (x, -5, 5), (y, -5, 5)), (x**3 + y**3, (x, -3, 3), (y, -3, 3)))
p.save(tmp_file('%s_contour_plot' % name))
p._backend.close()
def plot_and_save_3(name):
tmp_file = TmpFileManager.tmp_file
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
###
# Examples from the 'colors' notebook
###
p = plot(sin(x))
p[0].line_color = lambda a: a
p.save(tmp_file('%s_colors_line_arity1' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_line_arity2' % name))
p._backend.close()
p = plot(x*sin(x), x*cos(x), (x, 0, 10))
p[0].line_color = lambda a: a
p.save(tmp_file('%s_colors_param_line_arity1' % name))
p[0].line_color = lambda a, b: a
p.save(tmp_file('%s_colors_param_line_arity2a' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_param_line_arity2b' % name))
p._backend.close()
p = plot3d_parametric_line(sin(x) + 0.1*sin(x)*cos(7*x),
cos(x) + 0.1*cos(x)*cos(7*x),
0.1*sin(7*x),
(x, 0, 2*pi))
p[0].line_color = lambdify_(x, sin(4*x))
p.save(tmp_file('%s_colors_3d_line_arity1' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_3d_line_arity2' % name))
p[0].line_color = lambda a, b, c: c
p.save(tmp_file('%s_colors_3d_line_arity3' % name))
p._backend.close()
p = plot3d(sin(x)*y, (x, 0, 6*pi), (y, -5, 5))
p[0].surface_color = lambda a: a
p.save(tmp_file('%s_colors_surface_arity1' % name))
p[0].surface_color = lambda a, b: b
p.save(tmp_file('%s_colors_surface_arity2' % name))
p[0].surface_color = lambda a, b, c: c
p.save(tmp_file('%s_colors_surface_arity3a' % name))
p[0].surface_color = lambdify_((x, y, z), sqrt((x - 3*pi)**2 + y**2))
p.save(tmp_file('%s_colors_surface_arity3b' % name))
p._backend.close()
p = plot3d_parametric_surface(x * cos(4 * y), x * sin(4 * y), y,
(x, -1, 1), (y, -1, 1))
p[0].surface_color = lambda a: a
p.save(tmp_file('%s_colors_param_surf_arity1' % name))
p[0].surface_color = lambda a, b: a*b
p.save(tmp_file('%s_colors_param_surf_arity2' % name))
p[0].surface_color = lambdify_((x, y, z), sqrt(x**2 + y**2 + z**2))
p.save(tmp_file('%s_colors_param_surf_arity3' % name))
p._backend.close()
def plot_and_save_4(name):
tmp_file = TmpFileManager.tmp_file
x = Symbol('x')
y = Symbol('y')
###
# Examples from the 'advanced' notebook
###
# XXX: This raises the warning "The evaluation of the expression is
# problematic. We are trying a failback method that may still work. Please
# report this as a bug." It has to use the fallback because using evalf()
# is the only way to evaluate the integral. We should perhaps just remove
# that warning.
with warns(UserWarning, match="The evaluation of the expression is problematic"):
i = Integral(log((sin(x)**2 + 1)*sqrt(x**2 + 1)), (x, 0, y))
p = plot(i, (y, 1, 5))
p.save(tmp_file('%s_advanced_integral' % name))
p._backend.close()
def plot_and_save_5(name):
tmp_file = TmpFileManager.tmp_file
x = Symbol('x')
y = Symbol('y')
s = Sum(1/x**y, (x, 1, oo))
p = plot(s, (y, 2, 10))
p.save(tmp_file('%s_advanced_inf_sum' % name))
p._backend.close()
p = plot(Sum(1/x, (x, 1, y)), (y, 2, 10), show=False)
p[0].only_integers = True
p[0].steps = True
p.save(tmp_file('%s_advanced_fin_sum' % name))
p._backend.close()
def plot_and_save_6(name):
tmp_file = TmpFileManager.tmp_file
x = Symbol('x')
###
# Test expressions that cannot be translated to np and generate complex
# results.
###
plot(sin(x) + I*cos(x)).save(tmp_file())
plot(sqrt(sqrt(-x))).save(tmp_file())
plot(LambertW(x)).save(tmp_file())
plot(sqrt(LambertW(x))).save(tmp_file())
#Characteristic function of a StudentT distribution with nu=10
plot((meijerg(((1 / 2,), ()), ((5, 0, 1 / 2), ()), 5 * x**2 * exp_polar(-I*pi)/2)
+ meijerg(((1/2,), ()), ((5, 0, 1/2), ()),
5*x**2 * exp_polar(I*pi)/2)) / (48 * pi), (x, 1e-6, 1e-2)).save(tmp_file())
def plotgrid_and_save(name):
tmp_file = TmpFileManager.tmp_file
x = Symbol('x')
y = Symbol('y')
p1 = plot(x)
p2 = plot_parametric((sin(x), cos(x)), (x, sin(x)), show=False)
p3 = plot_parametric(cos(x), sin(x), adaptive=False, nb_of_points=500, show=False)
p4 = plot3d_parametric_line(sin(x), cos(x), x, show=False)
# symmetric grid
p = PlotGrid(2, 2, p1, p2, p3, p4)
p.save(tmp_file('%s_grid1' % name))
p._backend.close()
# grid size greater than the number of subplots
p = PlotGrid(3, 4, p1, p2, p3, p4)
p.save(tmp_file('%s_grid2' % name))
p._backend.close()
p5 = plot(cos(x),(x, -pi, pi), show=False)
p5[0].line_color = lambda a: a
p6 = plot(Piecewise((1, x > 0), (0, True)), (x, -1, 1), show=False)
p7 = plot_contour((x**2 + y**2, (x, -5, 5), (y, -5, 5)), (x**3 + y**3, (x, -3, 3), (y, -3, 3)), show=False)
# unsymmetric grid (subplots in one line)
p = PlotGrid(1, 3, p5, p6, p7)
p.save(tmp_file('%s_grid3' % name))
p._backend.close()
def test_matplotlib_1():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
try:
plot_and_save_1('test')
finally:
# clean up
TmpFileManager.cleanup()
else:
skip("Matplotlib not the default backend")
def test_matplotlib_2():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
try:
plot_and_save_2('test')
finally:
# clean up
TmpFileManager.cleanup()
else:
skip("Matplotlib not the default backend")
def test_matplotlib_3():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
try:
plot_and_save_3('test')
finally:
# clean up
TmpFileManager.cleanup()
else:
skip("Matplotlib not the default backend")
def test_matplotlib_4():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
try:
plot_and_save_4('test')
finally:
# clean up
TmpFileManager.cleanup()
else:
skip("Matplotlib not the default backend")
def test_matplotlib_5():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
try:
plot_and_save_5('test')
finally:
# clean up
TmpFileManager.cleanup()
else:
skip("Matplotlib not the default backend")
def test_matplotlib_6():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
try:
plot_and_save_6('test')
finally:
# clean up
TmpFileManager.cleanup()
else:
skip("Matplotlib not the default backend")
def test_matplotlib_7():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
try:
plotgrid_and_save('test')
finally:
# clean up
TmpFileManager.cleanup()
else:
skip("Matplotlib not the default backend")
# Tests for exception handling in experimental_lambdify
def test_experimental_lambdify():
x = Symbol('x')
f = lambdify([x], Max(x, 5))
# XXX should f be tested? If f(2) is attempted, an
# error is raised because a complex produced during wrapping of the arg
# is being compared with an int.
assert Max(2, 5) == 5
assert Max(5, 7) == 7
x = Symbol('x-3')
f = lambdify([x], x + 1)
assert f(1) == 2
def test_append_issue_7140():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if not matplotlib:
skip("Matplotlib not the default backend")
x = Symbol('x')
p1 = plot(x)
p2 = plot(x**2)
p3 = plot(x + 2)
# append a series
p2.append(p1[0])
assert len(p2._series) == 2
with raises(TypeError):
p1.append(p2)
with raises(TypeError):
p1.append(p2._series)
def test_issue_15265():
from sympy.core.sympify import sympify
from sympy.core.singleton import S
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if not matplotlib:
skip("Matplotlib not the default backend")
x = Symbol('x')
eqn = sin(x)
p = plot(eqn, xlim=(-S.Pi, S.Pi), ylim=(-1, 1))
p._backend.close()
p = plot(eqn, xlim=(-1, 1), ylim=(-S.Pi, S.Pi))
p._backend.close()
p = plot(eqn, xlim=(-1, 1), ylim=(sympify('-3.14'), sympify('3.14')))
p._backend.close()
p = plot(eqn, xlim=(sympify('-3.14'), sympify('3.14')), ylim=(-1, 1))
p._backend.close()
raises(ValueError,
lambda: plot(eqn, xlim=(-S.ImaginaryUnit, 1), ylim=(-1, 1)))
raises(ValueError,
lambda: plot(eqn, xlim=(-1, 1), ylim=(-1, S.ImaginaryUnit)))
raises(ValueError,
lambda: plot(eqn, xlim=(S.NegativeInfinity, 1), ylim=(-1, 1)))
raises(ValueError,
lambda: plot(eqn, xlim=(-1, 1), ylim=(-1, S.Infinity)))
def test_empty_Plot():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if not matplotlib:
skip("Matplotlib not the default backend")
from sympy.plotting.plot import Plot
p = Plot()
# No exception showing an empty plot
p.show()
def test_empty_plot():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if not matplotlib:
skip("Matplotlib not the default backend")
# No exception showing an empty plot
plot()
def test_issue_17405():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if not matplotlib:
skip("Matplotlib not the default backend")
x = Symbol('x')
f = x**0.3 - 10*x**3 + x**2
p = plot(f, (x, -10, 10), show=False)
# The number of segments is nondeterministic (typically well over 100); we
# only check that segments are generated at all, unlike when the bug was present
assert len(p[0].get_segments()) >= 30
def test_logplot_PR_16796():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if not matplotlib:
skip("Matplotlib not the default backend")
x = Symbol('x')
p = plot(x, (x, .001, 100), xscale='log', show=False)
# The number of segments is nondeterministic (typically well over 100); we
# only check that segments are generated at all, unlike when the bug was present
assert len(p[0].get_segments()) >= 30
assert p[0].end == 100.0
assert p[0].start == .001
def test_issue_16572():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if not matplotlib:
skip("Matplotlib not the default backend")
x = Symbol('x')
p = plot(LambertW(x), show=False)
# The number of segments is nondeterministic (typically more than 50); we
# only check that segments are generated at all, unlike when the bug was present
assert len(p[0].get_segments()) >= 30
def test_issue_11865():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if not matplotlib:
skip("Matplotlib not the default backend")
k = Symbol('k', integer=True)
f = Piecewise((-I*exp(I*pi*k)/k + I*exp(-I*pi*k)/k, Ne(k, 0)), (2*pi, True))
p = plot(f, show=False)
# The number of segments is nondeterministic (typically well over 100); we
# only check that segments are generated (unlike when the bug was present)
# and that no exceptions are raised.
assert len(p[0].get_segments()) >= 30
def test_issue_11461():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if not matplotlib:
skip("Matplotlib not the default backend")
x = Symbol('x')
p = plot(real_root((log(x/(x-2))), 3), show=False)
# The number of segments is nondeterministic (typically well over 100); we
# only check that segments are generated (unlike when the bug was present)
# and that no exceptions are raised.
assert len(p[0].get_segments()) >= 30
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from neutronclient.common import exceptions as q_exceptions
from neutronclient.v2_0 import client as neutronclient
import six
from heat.common import exception
from heat.common import short_id
from heat.common import template_format
from heat.engine.clients.os import nova
from heat.engine import node_data
from heat.engine import resource
from heat.engine.resources.aws.ec2 import eip
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.engine import stack as parser
from heat.engine import stk_defn
from heat.engine import template as tmpl
from heat.tests import common
from heat.tests.openstack.nova import fakes as fakes_nova
from heat.tests import utils
eip_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "EIP Test",
"Parameters" : {},
"Resources" : {
"IPAddress" : {
"Type" : "AWS::EC2::EIP",
"Properties" : {
"InstanceId" : { "Ref" : "WebServer" }
}
},
"WebServer": {
"Type": "AWS::EC2::Instance",
}
}
}
'''
eip_template_ipassoc = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "EIP Test",
"Parameters" : {},
"Resources" : {
"IPAddress" : {
"Type" : "AWS::EC2::EIP"
},
"IPAssoc" : {
"Type" : "AWS::EC2::EIPAssociation",
"Properties" : {
"InstanceId" : { "Ref" : "WebServer" },
"EIP" : { "Ref" : "IPAddress" }
}
},
"WebServer": {
"Type": "AWS::EC2::Instance",
}
}
}
'''
eip_template_ipassoc2 = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "EIP Test",
"Parameters" : {},
"Resources" : {
"the_eip" : {
"Type" : "AWS::EC2::EIP",
"Properties" : {
"Domain": "vpc"
}
},
"IPAssoc" : {
"Type" : "AWS::EC2::EIPAssociation",
"Properties" : {
"AllocationId" : 'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
"NetworkInterfaceId" : { "Ref" : "the_nic" }
}
},
"the_vpc" : {
"Type" : "AWS::EC2::VPC",
"Properties" : {
"CidrBlock" : "10.0.0.0/16"
}
},
"the_subnet" : {
"Type" : "AWS::EC2::Subnet",
"Properties" : {
"CidrBlock" : "10.0.0.0/24",
"VpcId" : { "Ref" : "the_vpc" }
}
},
"the_nic" : {
"Type" : "AWS::EC2::NetworkInterface",
"Properties" : {
"PrivateIpAddress": "10.0.0.100",
"SubnetId": { "Ref": "the_subnet" }
}
},
}
}
'''
eip_template_ipassoc3 = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "EIP Test",
"Parameters" : {},
"Resources" : {
"the_eip" : {
"Type" : "AWS::EC2::EIP",
"Properties" : {
"Domain": "vpc"
}
},
"IPAssoc" : {
"Type" : "AWS::EC2::EIPAssociation",
"Properties" : {
"AllocationId" : 'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
"InstanceId" : '1fafbe59-2332-4f5f-bfa4-517b4d6c1b65'
}
}
}
}
'''
ipassoc_template_validate = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "EIP Test",
"Parameters" : {},
"Resources" : {
"eip" : {
"Type" : "AWS::EC2::EIP",
"Properties" : {
"Domain": "vpc"
}
},
"IPAssoc" : {
"Type" : "AWS::EC2::EIPAssociation",
"Properties" : {
"EIP" : {'Ref': 'eip'},
"InstanceId" : '1fafbe59-2332-4f5f-bfa4-517b4d6c1b65'
}
}
}
}
'''
class EIPTest(common.HeatTestCase):
def setUp(self):
# force Nova, will test Neutron below
super(EIPTest, self).setUp()
self.fc = fakes_nova.FakeClient()
self.patchobject(nova.NovaClientPlugin, 'client',
return_value=self.fc)
self.mock_list_net = self.patchobject(neutronclient.Client,
'list_networks')
self.mock_create_fip = self.patchobject(neutronclient.Client,
'create_floatingip')
self.mock_show_fip = self.patchobject(neutronclient.Client,
'show_floatingip')
self.patchobject(neutronclient.Client, 'update_floatingip')
self.patchobject(neutronclient.Client, 'delete_floatingip')
self.mock_list_fips = self.patchobject(neutronclient.Client,
'list_floatingips')
def mock_interface(self, port, ip):
class MockIface(object):
def __init__(self, port_id, fixed_ip):
self.port_id = port_id
self.fixed_ips = [{'ip_address': fixed_ip}]
return MockIface(port, ip)
def mock_list_floatingips(self):
self.mock_list_fips.return_value = {
'floatingips': [{'id':
"fc68ea2c-b60b-4b4f-bd82-94ec81110766"}]}
def mock_create_floatingip(self):
self.mock_list_net.return_value = {'networks': [{
'status': 'ACTIVE',
'subnets': [],
'name': 'nova',
'router:external': True,
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f',
'admin_state_up': True,
'shared': True,
'id': 'eeee'
}]}
self.mock_create_fip.return_value = {'floatingip': {
"status": "ACTIVE",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
"floating_ip_address": "11.0.0.1"
}}
def mock_show_floatingip(self):
self.mock_show_fip.return_value = {'floatingip': {
'router_id': None,
'tenant_id': 'e936e6cd3e0b48dcb9ff853a8f253257',
'floating_network_id': 'eeee',
'fixed_ip_address': None,
'floating_ip_address': '11.0.0.1',
'port_id': None,
'id': 'ffff'
}}
def create_eip(self, t, stack, resource_name):
resource_defns = stack.t.resource_definitions(stack)
rsrc = eip.ElasticIp(resource_name,
resource_defns[resource_name],
stack)
self.assertIsNone(rsrc.validate())
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
stk_defn.update_resource_data(stack.defn, resource_name,
rsrc.node_data())
return rsrc
def create_association(self, t, stack, resource_name):
resource_defns = stack.t.resource_definitions(stack)
rsrc = eip.ElasticIpAssociation(resource_name,
resource_defns[resource_name],
stack)
self.assertIsNone(rsrc.validate())
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
stk_defn.update_resource_data(stack.defn, resource_name,
rsrc.node_data())
return rsrc
def test_eip(self):
mock_server = self.fc.servers.list()[0]
self.patchobject(self.fc.servers, 'get',
return_value=mock_server)
self.mock_create_floatingip()
iface = self.mock_interface('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'1.2.3.4')
self.patchobject(mock_server, 'interface_list', return_value=[iface])
t = template_format.parse(eip_template)
stack = utils.parse_stack(t)
rsrc = self.create_eip(t, stack, 'IPAddress')
try:
self.assertEqual('11.0.0.1', rsrc.FnGetRefId())
rsrc.refid = None
self.assertEqual('11.0.0.1', rsrc.FnGetRefId())
self.assertEqual('fc68ea2c-b60b-4b4f-bd82-94ec81110766',
rsrc.FnGetAtt('AllocationId'))
self.assertRaises(exception.InvalidTemplateAttribute,
rsrc.FnGetAtt, 'Foo')
finally:
scheduler.TaskRunner(rsrc.destroy)()
def test_eip_update(self):
server_old = self.fc.servers.list()[0]
self.patchobject(self.fc.servers, 'get',
return_value=server_old)
iface = self.mock_interface('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'1.2.3.4')
self.patchobject(server_old, 'interface_list', return_value=[iface])
self.mock_create_floatingip()
t = template_format.parse(eip_template)
stack = utils.parse_stack(t)
rsrc = self.create_eip(t, stack, 'IPAddress')
self.assertEqual('11.0.0.1', rsrc.FnGetRefId())
# update with the new InstanceId
server_update = self.fc.servers.list()[1]
self.patchobject(self.fc.servers, 'get',
return_value=server_update)
self.patchobject(server_update, 'interface_list', return_value=[iface])
props = copy.deepcopy(rsrc.properties.data)
update_server_id = '5678'
props['InstanceId'] = update_server_id
update_snippet = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(),
props)
scheduler.TaskRunner(rsrc.update, update_snippet)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual('11.0.0.1', rsrc.FnGetRefId())
# update without InstanceId
props = copy.deepcopy(rsrc.properties.data)
props.pop('InstanceId')
update_snippet = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(),
props)
scheduler.TaskRunner(rsrc.update, update_snippet)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
def test_association_eip(self):
mock_server = self.fc.servers.list()[0]
self.patchobject(self.fc.servers, 'get',
return_value=mock_server)
iface = self.mock_interface('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'1.2.3.4')
self.patchobject(mock_server, 'interface_list', return_value=[iface])
self.mock_create_floatingip()
self.mock_show_floatingip()
self.mock_list_floatingips()
t = template_format.parse(eip_template_ipassoc)
stack = utils.parse_stack(t)
rsrc = self.create_eip(t, stack, 'IPAddress')
association = self.create_association(t, stack, 'IPAssoc')
try:
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual((association.CREATE, association.COMPLETE),
association.state)
self.assertEqual(utils.PhysName(stack.name, association.name),
association.FnGetRefId())
self.assertEqual('11.0.0.1', association.properties['EIP'])
finally:
scheduler.TaskRunner(association.delete)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.assertEqual((association.DELETE, association.COMPLETE),
association.state)
def test_eip_with_exception(self):
self.mock_list_net.return_value = {'networks': [{
'status': 'ACTIVE',
'subnets': [],
'name': 'nova',
'router:external': True,
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f',
'admin_state_up': True,
'shared': True,
'id': 'eeee'
}]}
self.patchobject(neutronclient.Client, 'create_floatingip',
side_effect=neutronclient.exceptions.NotFound)
t = template_format.parse(eip_template)
stack = utils.parse_stack(t)
resource_name = 'IPAddress'
resource_defns = stack.t.resource_definitions(stack)
rsrc = eip.ElasticIp(resource_name,
resource_defns[resource_name],
stack)
self.assertRaises(neutronclient.exceptions.NotFound,
rsrc.handle_create)
@mock.patch.object(eip.ElasticIp, '_ipaddress')
def test_FnGetRefId_resource_name(self, mock_ipaddr):
t = template_format.parse(ipassoc_template_validate)
stack = utils.parse_stack(t)
rsrc = stack['eip']
mock_ipaddr.return_value = None
self.assertEqual('eip', rsrc.FnGetRefId())
@mock.patch.object(eip.ElasticIp, '_ipaddress')
def test_FnGetRefId_resource_ip(self, mock_ipaddr):
t = template_format.parse(ipassoc_template_validate)
stack = utils.parse_stack(t)
rsrc = stack['eip']
mock_ipaddr.return_value = 'x.x.x.x'
self.assertEqual('x.x.x.x', rsrc.FnGetRefId())
def test_FnGetRefId_convergence_cache_data(self):
t = template_format.parse(ipassoc_template_validate)
template = tmpl.Template(t)
stack = parser.Stack(utils.dummy_context(), 'test', template,
cache_data={
'eip': node_data.NodeData.from_dict({
'uuid': mock.ANY,
'id': mock.ANY,
'action': 'CREATE',
'status': 'COMPLETE',
'reference_id': '1.1.1.1'})})
rsrc = stack.defn['eip']
self.assertEqual('1.1.1.1', rsrc.FnGetRefId())
class AllocTest(common.HeatTestCase):
def setUp(self):
super(AllocTest, self).setUp()
self.fc = fakes_nova.FakeClient()
self.patchobject(nova.NovaClientPlugin, 'client',
return_value=self.fc)
self.mock_list_net = self.patchobject(neutronclient.Client,
'list_networks')
self.mock_create_fip = self.patchobject(neutronclient.Client,
'create_floatingip')
self.mock_show_fip = self.patchobject(neutronclient.Client,
'show_floatingip')
self.patchobject(neutronclient.Client, 'update_floatingip')
self.patchobject(neutronclient.Client, 'delete_floatingip')
self.mock_list_fips = self.patchobject(neutronclient.Client,
'list_floatingips')
self.patchobject(neutronclient.Client, 'add_gateway_router')
self.mock_list_ports = self.patchobject(neutronclient.Client,
'list_ports')
self.mock_show_net = self.patchobject(neutronclient.Client,
'show_network')
self.mock_list_routers = self.patchobject(neutronclient.Client,
'list_routers')
self.patchobject(neutronclient.Client,
'remove_gateway_router')
def mock_interface(self, port, ip):
class MockIface(object):
def __init__(self, port_id, fixed_ip):
self.port_id = port_id
self.fixed_ips = [{'ip_address': fixed_ip}]
return MockIface(port, ip)
def _setup_test_stack_validate(self, stack_name):
t = template_format.parse(ipassoc_template_validate)
template = tmpl.Template(t)
stack = parser.Stack(utils.dummy_context(), stack_name,
template, stack_id='12233',
stack_user_project_id='8888')
stack.validate()
return template, stack
def _validate_properties(self, stack, template, expected):
resource_defns = template.resource_definitions(stack)
rsrc = eip.ElasticIpAssociation('validate_eip_ass',
resource_defns['IPAssoc'],
stack)
exc = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
self.assertIn(expected, six.text_type(exc))
def mock_show_network(self):
vpc_name = utils.PhysName('test_stack', 'the_vpc')
self.mock_show_net.return_value = {"network": {
"status": "BUILD",
"subnets": [],
"name": vpc_name,
"admin_state_up": False,
"shared": False,
"tenant_id": "c1210485b2424d48804aad5d39c61b8f",
"id": "22c26451-cf27-4d48-9031-51f5e397b84e"
}}
def create_eip(self, t, stack, resource_name):
rsrc = eip.ElasticIp(resource_name,
stack.defn.resource_definition(resource_name),
stack)
self.assertIsNone(rsrc.validate())
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
stk_defn.update_resource_data(stack.defn, resource_name,
rsrc.node_data())
return rsrc
def create_association(self, t, stack, resource_name):
resource_defn = stack.defn.resource_definition(resource_name)
rsrc = eip.ElasticIpAssociation(resource_name,
resource_defn,
stack)
self.assertIsNone(rsrc.validate())
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
stk_defn.update_resource_data(stack.defn, resource_name,
rsrc.node_data())
return rsrc
def mock_create_floatingip(self):
self.mock_list_net.return_value = {'networks': [{
'status': 'ACTIVE',
'subnets': [],
'name': 'nova',
'router:external': True,
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f',
'admin_state_up': True,
'shared': True,
'id': 'eeee'
}]}
self.mock_create_fip.return_value = {'floatingip': {
"status": "ACTIVE",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
"floating_ip_address": "11.0.0.1"
}}
def mock_list_floatingips(self):
self.mock_list_fips.return_value = {
'floatingips': [{'id':
"fc68ea2c-b60b-4b4f-bd82-94ec81110766"}]}
def mock_show_floatingip(self):
self.mock_show_fip.return_value = {'floatingip': {
'router_id': None,
'tenant_id': 'e936e6cd3e0b48dcb9ff853a8f253257',
'floating_network_id': 'eeee',
'fixed_ip_address': None,
'floating_ip_address': '11.0.0.1',
'port_id': None,
'id': 'ffff'
}}
def mock_list_ports(self):
self.mock_list_ports.return_value = {"ports": [{
"status": "DOWN",
"binding:host_id": "null",
"name": "wp-NIC-yu7fc7l4g5p6",
"admin_state_up": True,
"network_id": "22c26451-cf27-4d48-9031-51f5e397b84e",
"tenant_id": "ecf538ec1729478fa1f97f1bf4fdcf7b",
"binding:vif_type": "ovs",
"device_owner": "",
"binding:capabilities": {"port_filter": True},
"mac_address": "fa:16:3e:62:2d:4f",
"fixed_ips": [{"subnet_id": "mysubnetid-70ec",
"ip_address": "192.168.9.2"}],
"id": "a000228d-b40b-4124-8394-a4082ae1b76b",
"security_groups": ["5c6f529d-3186-4c36-84c0-af28b8daac7b"],
"device_id": ""
}]}
def mock_list_instance_ports(self):
self.mock_list_ports.return_value = {"ports": [{
"status": "DOWN",
"binding:host_id": "null",
"name": "wp-NIC-yu7fc7l4g5p6",
"admin_state_up": True,
"network_id": "22c26451-cf27-4d48-9031-51f5e397b84e",
"tenant_id": "ecf538ec1729478fa1f97f1bf4fdcf7b",
"binding:vif_type": "ovs",
"device_owner": "",
"binding:capabilities": {"port_filter": True},
"mac_address": "fa:16:3e:62:2d:4f",
"fixed_ips": [{"subnet_id": "mysubnetid-70ec",
"ip_address": "192.168.9.2"}],
"id": "a000228d-b40b-4124-8394-a4082ae1b76c",
"security_groups": ["5c6f529d-3186-4c36-84c0-af28b8daac7b"],
"device_id": ""
}]}
def mock_router_for_vpc(self):
vpc_name = utils.PhysName('test_stack', 'the_vpc')
self.mock_list_routers.return_value = {
"routers": [{
"status": "ACTIVE",
"external_gateway_info": {
"network_id": "zzzz",
"enable_snat": True},
"name": vpc_name,
"admin_state_up": True,
"tenant_id": "3e21026f2dc94372b105808c0e721661",
"routes": [],
"id": "bbbb"
}]
}
def mock_no_router_for_vpc(self):
self.mock_list_routers.return_value = {
"routers": []
}
def test_association_allocationid(self):
self.mock_create_floatingip()
self.mock_router_for_vpc()
self.mock_show_network()
self.mock_list_ports()
self.mock_show_floatingip()
t = template_format.parse(eip_template_ipassoc2)
stack = utils.parse_stack(t)
rsrc = self.create_eip(t, stack, 'the_eip')
association = self.create_association(t, stack, 'IPAssoc')
scheduler.TaskRunner(association.delete)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((association.DELETE, association.COMPLETE),
association.state)
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
def test_association_allocationid_with_instance(self):
server = self.fc.servers.list()[0]
self.patchobject(self.fc.servers, 'get', return_value=server)
self.mock_show_network()
self.mock_create_floatingip()
self.mock_list_instance_ports()
self.mock_no_router_for_vpc()
t = template_format.parse(eip_template_ipassoc3)
stack = utils.parse_stack(t)
rsrc = self.create_eip(t, stack, 'the_eip')
association = self.create_association(t, stack, 'IPAssoc')
scheduler.TaskRunner(association.delete)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((association.DELETE, association.COMPLETE),
association.state)
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
def test_validate_properties_EIP_and_AllocationId(self):
server = self.fc.servers.list()[0]
self.patchobject(self.fc.servers, 'get', return_value=server)
template, stack = self._setup_test_stack_validate(
stack_name='validate_EIP_AllocationId')
properties = template.t['Resources']['IPAssoc']['Properties']
# test with EIP and AllocationId
properties['AllocationId'] = 'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
expected = ("Either 'EIP' or 'AllocationId' must be provided.")
self._validate_properties(stack, template, expected)
# test without EIP and AllocationId
properties.pop('AllocationId')
properties.pop('EIP')
self._validate_properties(stack, template, expected)
def test_validate_EIP_and_InstanceId(self):
server = self.fc.servers.list()[0]
self.patchobject(self.fc.servers, 'get', return_value=server)
template, stack = self._setup_test_stack_validate(
stack_name='validate_EIP_InstanceId')
properties = template.t['Resources']['IPAssoc']['Properties']
# test with EIP and no InstanceId
properties.pop('InstanceId')
expected = ("Must specify 'InstanceId' if you specify 'EIP'.")
self._validate_properties(stack, template, expected)
def test_validate_without_NetworkInterfaceId_and_InstanceId(self):
server = self.fc.servers.list()[0]
self.patchobject(self.fc.servers, 'get', return_value=server)
template, stack = self._setup_test_stack_validate(
stack_name='validate_EIP_InstanceId')
properties = template.t['Resources']['IPAssoc']['Properties']
# test without NetworkInterfaceId and InstanceId
properties.pop('InstanceId')
properties.pop('EIP')
allocation_id = '1fafbe59-2332-4f5f-bfa4-517b4d6c1b65'
properties['AllocationId'] = allocation_id
resource_defns = template.resource_definitions(stack)
rsrc = eip.ElasticIpAssociation('validate_eip_ass',
resource_defns['IPAssoc'],
stack)
exc = self.assertRaises(exception.PropertyUnspecifiedError,
rsrc.validate)
self.assertIn('At least one of the following properties '
'must be specified: InstanceId, NetworkInterfaceId',
six.text_type(exc))
def test_delete_association_successful_if_create_failed(self):
server = self.fc.servers.list()[0]
self.patchobject(self.fc.servers, 'get', return_value=server)
self.mock_create_floatingip()
self.mock_show_floatingip()
self.patchobject(server, 'interface_list',
side_effect=[q_exceptions.NotFound('Not FOund')])
t = template_format.parse(eip_template_ipassoc)
stack = utils.parse_stack(t)
self.create_eip(t, stack, 'IPAddress')
resource_defns = stack.t.resource_definitions(stack)
rsrc = eip.ElasticIpAssociation('IPAssoc',
resource_defns['IPAssoc'],
stack)
self.assertIsNone(rsrc.validate())
self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.create))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
def test_update_association_with_InstanceId(self):
server = self.fc.servers.list()[0]
self.patchobject(self.fc.servers, 'get', return_value=server)
iface = self.mock_interface('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'1.2.3.4')
self.patchobject(server, 'interface_list', return_value=[iface])
self.mock_create_floatingip()
self.mock_list_floatingips()
t = template_format.parse(eip_template_ipassoc)
stack = utils.parse_stack(t)
self.create_eip(t, stack, 'IPAddress')
ass = self.create_association(t, stack, 'IPAssoc')
self.assertEqual('11.0.0.1', ass.properties['EIP'])
server_update = self.fc.servers.list()[1]
self.patchobject(self.fc.servers, 'get', return_value=server_update)
self.patchobject(server_update, 'interface_list', return_value=[iface])
# update with the new InstanceId
props = copy.deepcopy(ass.properties.data)
update_server_id = '5678'
props['InstanceId'] = update_server_id
update_snippet = rsrc_defn.ResourceDefinition(ass.name, ass.type(),
stack.t.parse(stack.defn,
props))
scheduler.TaskRunner(ass.update, update_snippet)()
self.assertEqual((ass.UPDATE, ass.COMPLETE), ass.state)
def test_update_association_with_EIP(self):
server = self.fc.servers.list()[0]
self.patchobject(self.fc.servers, 'get', return_value=server)
iface = self.mock_interface('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'1.2.3.4')
self.patchobject(server, 'interface_list', return_value=[iface])
self.mock_create_floatingip()
self.mock_list_floatingips()
t = template_format.parse(eip_template_ipassoc)
stack = utils.parse_stack(t)
self.create_eip(t, stack, 'IPAddress')
ass = self.create_association(t, stack, 'IPAssoc')
# update with the new EIP
props = copy.deepcopy(ass.properties.data)
update_eip = '11.0.0.2'
props['EIP'] = update_eip
update_snippet = rsrc_defn.ResourceDefinition(ass.name, ass.type(),
stack.t.parse(stack.defn,
props))
scheduler.TaskRunner(ass.update, update_snippet)()
self.assertEqual((ass.UPDATE, ass.COMPLETE), ass.state)
def test_update_association_with_AllocationId_or_EIP(self):
server = self.fc.servers.list()[0]
self.patchobject(self.fc.servers, 'get', return_value=server)
iface = self.mock_interface('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'1.2.3.4')
self.patchobject(server, 'interface_list', return_value=[iface])
self.mock_create_floatingip()
self.mock_list_floatingips()
self.mock_list_instance_ports()
self.mock_show_network()
self.mock_no_router_for_vpc()
t = template_format.parse(eip_template_ipassoc)
stack = utils.parse_stack(t)
self.create_eip(t, stack, 'IPAddress')
ass = self.create_association(t, stack, 'IPAssoc')
self.assertEqual('11.0.0.1', ass.properties['EIP'])
# change EIP to AllocationId
props = copy.deepcopy(ass.properties.data)
update_allocationId = 'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
props['AllocationId'] = update_allocationId
props.pop('EIP')
update_snippet = rsrc_defn.ResourceDefinition(ass.name, ass.type(),
stack.t.parse(stack.defn,
props))
scheduler.TaskRunner(ass.update, update_snippet)()
self.assertEqual((ass.UPDATE, ass.COMPLETE), ass.state)
stk_defn.update_resource_data(stack.defn, ass.name, ass.node_data())
# change AllocationId to EIP
props = copy.deepcopy(ass.properties.data)
update_eip = '11.0.0.2'
props['EIP'] = update_eip
props.pop('AllocationId')
update_snippet = rsrc_defn.ResourceDefinition(ass.name, ass.type(),
stack.t.parse(stack.defn,
props))
scheduler.TaskRunner(ass.update, update_snippet)()
self.assertEqual((ass.UPDATE, ass.COMPLETE), ass.state)
def test_update_association_needs_update_InstanceId(self):
server = self.fc.servers.list()[0]
self.patchobject(self.fc.servers, 'get', return_value=server)
iface = self.mock_interface('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'1.2.3.4')
self.patchobject(server, 'interface_list', return_value=[iface])
self.mock_create_floatingip()
self.mock_list_floatingips()
t = template_format.parse(eip_template_ipassoc)
stack = utils.parse_stack(t)
self.create_eip(t, stack, 'IPAddress')
before_props = {'InstanceId': {'Ref': 'WebServer'},
'EIP': '11.0.0.1'}
after_props = {'InstanceId': {'Ref': 'WebServer2'},
'EIP': '11.0.0.1'}
before = self.create_association(t, stack, 'IPAssoc')
after = rsrc_defn.ResourceDefinition(before.name, before.type(),
after_props)
self.assertTrue(resource.UpdateReplace,
before._needs_update(after, before, after_props,
before_props, None))
def test_update_association_needs_update_InstanceId_EIP(self):
server = self.fc.servers.list()[0]
self.patchobject(self.fc.servers, 'get', return_value=server)
iface = self.mock_interface('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'1.2.3.4')
self.patchobject(server, 'interface_list', return_value=[iface])
self.mock_list_floatingips()
self.mock_create_floatingip()
t = template_format.parse(eip_template_ipassoc)
stack = utils.parse_stack(t)
self.create_eip(t, stack, 'IPAddress')
after_props = {'InstanceId': '5678',
'EIP': '11.0.0.2'}
before = self.create_association(t, stack, 'IPAssoc')
after = rsrc_defn.ResourceDefinition(before.name, before.type(),
after_props)
updater = scheduler.TaskRunner(before.update, after)
self.assertRaises(resource.UpdateReplace, updater)
def test_update_association_with_NetworkInterfaceId_or_InstanceId(self):
server = self.fc.servers.list()[0]
self.patchobject(self.fc.servers, 'get', return_value=server)
iface = self.mock_interface('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'1.2.3.4')
self.patchobject(server, 'interface_list', return_value=[iface])
self.mock_create_floatingip()
self.mock_list_ports()
self.mock_show_network()
self.mock_no_router_for_vpc()
t = template_format.parse(eip_template_ipassoc2)
stack = utils.parse_stack(t)
self.create_eip(t, stack, 'the_eip')
ass = self.create_association(t, stack, 'IPAssoc')
upd_server = self.fc.servers.list()[1]
self.patchobject(self.fc.servers, 'get', return_value=upd_server)
self.mock_list_instance_ports()
# update with the new NetworkInterfaceId
props = copy.deepcopy(ass.properties.data)
update_networkInterfaceId = 'a000228d-b40b-4124-8394-a4082ae1b76b'
props['NetworkInterfaceId'] = update_networkInterfaceId
update_snippet = rsrc_defn.ResourceDefinition(ass.name, ass.type(),
stack.t.parse(stack.defn,
props))
scheduler.TaskRunner(ass.update, update_snippet)()
self.assertEqual((ass.UPDATE, ass.COMPLETE), ass.state)
# update with the InstanceId
props = copy.deepcopy(ass.properties.data)
instance_id = '5678'
props.pop('NetworkInterfaceId')
props['InstanceId'] = instance_id
update_snippet = rsrc_defn.ResourceDefinition(ass.name, ass.type(),
stack.t.parse(stack.defn,
props))
scheduler.TaskRunner(ass.update, update_snippet)()
self.assertEqual((ass.UPDATE, ass.COMPLETE), ass.state)
def test_eip_allocation_refid_resource_name(self):
t = template_format.parse(eip_template_ipassoc)
stack = utils.parse_stack(t)
rsrc = stack['IPAssoc']
rsrc.id = '123'
rsrc.uuid = '9bfb9456-3fe8-41f4-b318-9dba18eeef74'
rsrc.action = 'CREATE'
expected = '%s-%s-%s' % (rsrc.stack.name,
rsrc.name,
short_id.get_id(rsrc.uuid))
self.assertEqual(expected, rsrc.FnGetRefId())
def test_eip_allocation_refid_resource_id(self):
t = template_format.parse(eip_template_ipassoc)
stack = utils.parse_stack(t)
rsrc = stack['IPAssoc']
rsrc.resource_id = 'phy-rsrc-id'
self.assertEqual('phy-rsrc-id', rsrc.FnGetRefId())
def test_eip_allocation_refid_convergence_cache_data(self):
t = template_format.parse(eip_template_ipassoc)
cache_data = {'IPAssoc': node_data.NodeData.from_dict({
'uuid': mock.ANY,
'id': mock.ANY,
'action': 'CREATE',
'status': 'COMPLETE',
'reference_id': 'convg_xyz'
})}
stack = utils.parse_stack(t, cache_data=cache_data)
rsrc = stack.defn['IPAssoc']
self.assertEqual('convg_xyz', rsrc.FnGetRefId())
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import struct
import gzip
import itertools
from pkg_resources import resource_filename, resource_exists
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
try:
    from cStringIO import StringIO
except ImportError:
    # Python 3: the downloaded MNIST payload is bytes, so a bytes buffer is needed.
    from io import BytesIO as StringIO
try:
    _range = xrange
    _input = raw_input
except NameError:
    _range = range
    _input = input
import numpy as np
from PIL import Image
from sudokuextract.methods.map import _extraction_iterator_map
from sudokuextract.ml.features import extract_efd_features
from sudokuextract.imgproc.blob import blobify
from sudokuextract.utils import download_image
_url_to_mnist_train_data = "http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz"
_url_to_mnist_train_labels = "http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz"
def _toS32(bits):
return struct.unpack_from(">i", bits)[0]
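# For reference, every header field parsed below is a big-endian 32-bit word;
# for example the magic numbers checked further down decode as
#     _toS32(b'\x00\x00\x08\x03') == 2051   # images file
#     _toS32(b'\x00\x00\x08\x01') == 2049   # labels file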
def get_mnist_raw_data():
X, y = _mnist_raw_data(), _mnist_raw_labels()
for k in _range(len(X)):
X[k] = 255 - X[k]
return X, y
def _mnist_raw_data():
fname = resource_filename('sudokuextract.data', "train-images-idx3-ubyte.gz")
if resource_exists('sudokuextract.data', "train-images-idx3-ubyte.gz"):
f = gzip.open(fname, mode='rb')
data = f.read()
f.close()
else:
sio = StringIO(urlopen(_url_to_mnist_train_data).read())
sio.seek(0)
f = gzip.GzipFile(fileobj=sio, mode='rb')
data = f.read()
f.close()
try:
sio.seek(0)
with open(fname, 'wb') as f:
f.write(sio.read())
        except Exception:
pass
correct_magic_number = 2051
magic_number = _toS32(data[:4])
if magic_number != correct_magic_number:
raise ValueError("Error parsing images file. Read magic number {0} != {1}!".format(
magic_number, correct_magic_number))
n_images = _toS32(data[4:8])
n_rows = _toS32(data[8:12])
n_cols = _toS32(data[12:16])
images = np.fromstring(data[16:], 'uint8').reshape(n_images, n_rows*n_cols)
return [imrow.reshape(28, 28) for imrow in images]
def _mnist_raw_labels():
fname = resource_filename('sudokuextract.data', "train-labels-idx1-ubyte.gz")
if resource_exists('sudokuextract.data', "train-labels-idx1-ubyte.gz"):
f = gzip.open(fname, mode='rb')
data = f.read()
f.close()
else:
sio = StringIO(urlopen(_url_to_mnist_train_labels).read())
sio.seek(0)
f = gzip.GzipFile(fileobj=sio, mode='rb')
data = f.read()
f.close()
try:
sio.seek(0)
with open(fname, 'wb') as f:
f.write(sio.read())
        except Exception:
pass
correct_magic_number = 2049
magic_number = _toS32(data[:4])
if magic_number != correct_magic_number:
raise ValueError("Error parsing labels file. Read magic number {0} != {1}!".format(
magic_number, correct_magic_number))
n_labels = _toS32(data[4:8])
return np.fromstring(data[8:], 'uint8')
def get_sudokuextract_data():
return _sudokuextract_data(), _sudokuextract_labels()
def _sudokuextract_data():
fname = resource_filename('sudokuextract.data', "se-train-data.gz")
if resource_exists('sudokuextract.data', "se-train-data.gz"):
f = gzip.open(fname, mode='rb')
data = np.load(f)
f.close()
else:
raise IOError("SudokuExtract Training data file was not present!")
return data
def _sudokuextract_labels():
fname = resource_filename('sudokuextract.data', "se-train-labels.gz")
if resource_exists('sudokuextract.data', "se-train-labels.gz"):
f = gzip.open(fname, mode='rb')
data = np.load(f)
f.close()
else:
raise IOError("SudokuExtract Training labels file was not present!")
return data
def get_mnist_data():
return _mnist_data(), _mnist_labels()
def _mnist_data():
fname = resource_filename('sudokuextract.data', "mnist-train-data.gz")
if resource_exists('sudokuextract.data', "mnist-train-data.gz"):
f = gzip.open(fname, mode='rb')
data = np.load(f)
f.close()
else:
raise IOError("MNIST Training data file was not present!")
return data
def _mnist_labels():
fname = resource_filename('sudokuextract.data', "mnist-train-labels.gz")
if resource_exists('sudokuextract.data', "mnist-train-labels.gz"):
f = gzip.open(fname, mode='rb')
data = np.load(f)
f.close()
else:
raise IOError("MNIST Training labels file was not present!")
return data
def create_data_set_from_images(path_to_data_dir, force=False):
try:
import matplotlib.pyplot as plt
except ImportError:
print("This method requires matplotlib installed...")
return
images = []
labels = []
path_to_data_dir = os.path.abspath(os.path.expanduser(path_to_data_dir))
_, _, files = next(os.walk(path_to_data_dir))
for f in files:
file_name, file_ext = os.path.splitext(f)
if file_ext in ('.jpg', '.png', '.bmp') and "{0}.txt".format(file_name) in files:
# The current file is an image and it has a corresponding text file as reference.
# Use it as data.
print("Handling {0}...".format(f))
image = Image.open(os.path.join(path_to_data_dir, f))
with open(os.path.join(path_to_data_dir, "{0}.txt".format(file_name)), 'rt') as f:
parsed_img = f.read().strip().split('\n')
for sudoku, subimage in _extraction_iterator_map(np.array(image.convert('L'))):
if not force:
for k in range(len(sudoku)):
for kk in range(len(sudoku[k])):
ax = plt.subplot2grid((9, 9), (k, kk))
ax.imshow(sudoku[k][kk], plt.cm.gray)
ax.set_title(str(parsed_img[k][kk]))
ax.axis('off')
plt.show()
                    ok = _input("Is this OK (Y/n/a)? ")
if ok.lower() == 'a':
break
elif ok.lower() == 'n':
continue
else:
for k in range(len(sudoku)):
for kk in range(len(sudoku[k])):
images.append(sudoku[k][kk].copy())
labels.append(int(parsed_img[k][kk]))
break
else:
for k in range(len(sudoku)):
for kk in range(len(sudoku[k])):
images.append(sudoku[k][kk].copy())
labels.append(int(parsed_img[k][kk]))
break
for sudoku, subimage in _extraction_iterator_map(np.array(image.convert('L')), use_local_thresholding=True):
if not force:
for k in range(len(sudoku)):
for kk in range(len(sudoku[k])):
ax = plt.subplot2grid((9, 9), (k, kk))
ax.imshow(sudoku[k][kk], plt.cm.gray)
ax.set_title(str(parsed_img[k][kk]))
ax.axis('off')
plt.show()
                    ok = _input("Is this OK (Y/n/a)? ")
if ok.lower() == 'a':
break
elif ok.lower() == 'n':
continue
else:
for k in range(len(sudoku)):
for kk in range(len(sudoku[k])):
images.append(sudoku[k][kk].copy())
labels.append(int(parsed_img[k][kk]))
break
else:
for k in range(len(sudoku)):
for kk in range(len(sudoku[k])):
images.append(sudoku[k][kk].copy())
labels.append(int(parsed_img[k][kk]))
break
try:
os.makedirs(os.path.expanduser('~/sudokuextract'))
    except Exception:
pass
try:
for i, (img, lbl) in enumerate(zip(images, labels)):
img = Image.fromarray(img, 'L')
            with open(os.path.expanduser('~/sudokuextract/{1}_{0:04d}.jpg'.format(i+1, lbl)), 'wb') as f:
img.save(f)
except Exception as e:
print(e)
print("Pre-blobify: Label / N : {0}".format([(v, c) for v, c in zip(_range(10), np.bincount(labels))]))
y = np.array(labels, 'int8')
images, mask = blobify(images)
y = y[mask]
print("Post-blobify: Label / N : {0}".format([(v, c) for v, c in zip(_range(10), np.bincount(y))]))
print("Extract features...")
X = np.array([extract_efd_features(img) for img in images])
return images, labels, X, y
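# Input layout expected by create_data_set_from_images (a sketch inferred from
# the parsing above and from fetch_all_xanadoku_images below): every image,
# e.g. sudoku1.jpg, needs a sibling sudoku1.txt holding the ground-truth digits
# as nine lines of nine characters, so that parsed_img[k][kk] is the label of
# the cell in row k, column kk (presumably '0' for an empty cell).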
def create_mnist_dataset():
images, labels = get_mnist_raw_data()
mask = labels != 0
print("Pre-zero removal: Label / N : {0}".format([(v, c) for v, c in zip(_range(10), np.bincount(labels))]))
images = list(itertools.compress(images, mask))
labels = labels[mask]
images = images[3::20]
labels = labels[3::20]
print("Pre-blobify: Label / N : {0}".format([(v, c) for v, c in zip(_range(10), np.bincount(labels))]))
y = np.array(labels, 'int8')
images, mask = blobify(images)
y = y[mask]
print("Post-blobify: Label / N : {0}".format([(v, c) for v, c in zip(_range(10), np.bincount(y))]))
print("Extract features...")
X = np.array([extract_efd_features(img) for img in images])
try:
os.makedirs(os.path.expanduser('~/sudokuextract'))
    except Exception:
pass
try:
for i, (img, lbl) in enumerate(zip(images, labels)):
img = Image.fromarray(img, 'L')
            with open(os.path.expanduser('~/sudokuextract/{1}_{0:04d}.jpg'.format(i + 1, lbl)), 'wb') as f:
img.save(f)
except Exception as e:
print(e)
return images, labels, X, y
def save_training_data(X, y, data_source='se'):
_save_data('train', X, y, data_source)
def save_test_data(X, y, data_source='se'):
_save_data('test', X, y, data_source)
def _save_data(which, X, y, data_source):
if data_source.lower() == 'mnist':
data_source = 'mnist'
else:
data_source = 'se'
if X.shape[0] != len(y):
raise TypeError("Length of data samples ({0}) was not identical "
"to length of labels ({1})".format(X.shape[0], len(y)))
# Convert to numpy array.
if not isinstance(X, np.ndarray):
X = np.array(X)
if not isinstance(y, np.ndarray):
y = np.array(y)
# Write feature_data
fname = resource_filename('sudokuextract.data', "{0}-{1}-data.gz".format(data_source, which))
with gzip.GzipFile(fname, mode='wb') as f:
np.save(f, X)
# Write labels
fname = resource_filename('sudokuextract.data', "{0}-{1}-labels.gz".format(data_source, which))
with gzip.GzipFile(fname, mode='wb') as f:
np.save(f, y)
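# Minimal round-trip sketch for the save/load helpers above. It is illustrative
# only and assumes the package data directory is writable; the arrays are made up.
def _example_save_and_reload():
    X = np.zeros((10, 2))
    y = np.arange(10, dtype='int8')
    # Writes se-train-data.gz / se-train-labels.gz into sudokuextract.data ...
    save_training_data(X, y, data_source='se')
    # ... which is exactly what get_sudokuextract_data() reads back.
    X2, y2 = get_sudokuextract_data()
    assert X2.shape == X.shape and (y2 == y).all()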
def fetch_all_xanadoku_images(folder_to_store_in, api_token):
import json
doc = json.loads(urlopen("https://xanadoku.herokuapp.com/getallsudokus/{0}".format(
api_token)).read().decode('utf8'))
for d in doc.get('sudokus'):
if not d.get('verified'):
continue
print("Saving {0}...".format(d.get('_id')))
img = download_image(d.get('raw_image_url'))
        with open(os.path.join(os.path.abspath(os.path.expanduser(
                folder_to_store_in)), d.get('_id') + '.jpg'), 'wb') as f:
img.save(f)
with open(os.path.join(os.path.abspath(os.path.expanduser(
folder_to_store_in)), d.get('_id') + '.txt'), 'w') as f:
f.writelines([d.get('parsed_sudoku')[i:i+9] + '\n' for i in range(0, len(d.get('parsed_sudoku')), 9)])
|
|
INV_URL = "https://inventory.mozilla.org/en-US/"
import pdb
import re
INFO = 0
WARNING = 1
ERROR = 2
DEBUG = 3
BUILD = 4
def log(msg, level=0):
"""
0 - Info
1 - Warning
2 - Error
3 - Debug
4 - Build
"""
do_info = True
do_warning = True
do_error = True
do_debug = True
do_build = True
if do_info and level == 0:
print "[INFO] {0}\n".format(msg),
return
elif do_warning and level == 1:
print "[WARNING] {0}\n".format(msg),
return
elif do_error and level == 2:
print "[ERROR] {0}\n".format(msg),
return
elif do_debug and level == 3:
print "[DEBUG] {0}\n".format(msg),
return
elif do_build and level == 4:
print "[BUILD] {0}".format(msg),
return
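# Example: log("zone rebuilt") prints "[INFO] zone rebuilt" and
# log("zone rebuild failed", ERROR) prints "[ERROR] zone rebuild failed".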
def print_system(system):
return "{0} ({1}systems/edit/{2}/)".format(system, INV_URL, system.pk)
"""
>>> ip_to_domain_name('10.20.30.40')
'40.30.20.10.IN-ADDR.ARPA'
>>> ip_to_domain_name('10.20.30.40', lowercase=True)
'40.30.20.10.in-addr.arpa'
"""
def _ip_to_domain_name(ip, lowercase=False):
"""Convert an ip to dns zone form. The ip is assumed to be in valid dotted
decimal format."""
octets = ip.split('.')
name = '.IN-ADDR.ARPA.'
if lowercase:
name = name.lowercase
name = '.'.join(list(reversed(octets))) + name
return name
def dns2ip_form(dnsip):
dnsip = dnsip.upper()
dnsip = dnsip.replace('.IN-ADDR.ARPA.', '')
return '.'.join(list(reversed(dnsip.split('.'))))
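# Round-trip example:
#     _ip_to_domain_name('10.20.30.40')        -> '40.30.20.10.IN-ADDR.ARPA.'
#     dns2ip_form('40.30.20.10.IN-ADDR.ARPA.') -> '10.20.30.40'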
def ensure_include(file_, file_type, include_file):
"""This function is magical. It will make sure that the 'include_file' has
an $INCLUDE statement that includes it. See :function:`_ensure_include` for
more info.
:param include_file: the file to be included
:type include_file: str
:param file_type: The type of DNS zone file. Either 'forward' or 'reverse'
:type file_type: str
:param file_: The file with the SOA in it.
:type file_: file
"""
fd = open(file_, 'r+')
try:
new_content = _ensure_include(fd, file_type, include_file)
fd.close()
fd = open(file_, 'w+')
fd.write(new_content)
    except Exception:
        raise
finally:
fd.close()
def _ensure_include(text, file_type, include_file):
"""Read in a zone file and ensure that the string::
$INCLUDE <include_file>
exists somewhere in the file. If it does exist return None. If it doesn't
exist insert the statment above the _first_ A/PTR record found in the file.
:param text: the zone file.
:type text: A file-ish object (StringIO or actual file)
:param file_type: The type of DNS zone file. Either 'forward' or 'reverse'
:type file_type: str
:param include_file: the file to be included
:type include_file: str
"""
if _has_include(text, include_file):
text.seek(0)
return text.read()
text.seek(0) # Reset fp
done = False
return_text = ""
comment = "This include preserves $ORIGIN"
if file_type == 'forward':
matches = [re.compile("^\s*\S*\s*IN\s*A\s*.*"),
re.compile("^\s*\S*\s*IN\s*AAAA\s*.*")] # Match A and AAAA
else:
# Must be 'reverse'
matches = [re.compile("^\s*\S*\s*IN\s*PTR\s*.*")] # Match PTR
for raw_line in text.readlines():
        if done:
return_text += raw_line
continue
line = raw_line.strip()
for regex in matches:
if regex.match(line):
log("Inventory include not found. Adding $INCLUDE "
"{0}".format(include_file), INFO)
return_text += "\n"
return_text += "$INCLUDE {0} ; {1}\n".format(include_file, comment)
return_text += "\n"
done = True
return_text += raw_line
return return_text
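def _example_ensure_include():
    """Illustrative sketch only: feed a tiny, made-up forward zone to
    _ensure_include and print the result, which gains an $INCLUDE statement
    above the first A record. The zone content and include path are
    hypothetical."""
    from StringIO import StringIO
    zone = StringIO("$TTL 3600\n"
                    "www    IN  A     10.0.0.1\n")
    print _ensure_include(zone, 'forward', 'inventory.example.include'),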
def _has_include(text, include_file=None):
"""Sanity check."""
is_file_include = re.compile("^\s*\$INCLUDE\s*([^;\s]*)\s*")
done = False
for raw_line in text.readlines():
file_include = is_file_include.match(raw_line)
if file_include:
include_str = file_include.groups(0)[0]
include_str = include_str.strip("'").strip('"')
if include_str == include_file:
log("Found existing include str: {0}".format(include_str), DEBUG)
return True
return False
def get_serial(file_):
"""
Retrieve the serial number of a zone.
:param file_: The file with the SOA in it.
:type file_: file
"""
with open(file_, 'r') as fd:
        return _str_get_serial(fd)
def _str_get_serial(text):
"""Read in a zone file and find the serial number.
:param text: the zone file.
:type text: A file-ish object (StringIO or actual file descriptor)
:returns serial: The serial number
:serial: str
"""
# We already know it's in valid format.
isSOA = False
done = False
for raw_line in text.readlines():
if done:
break
line = raw_line.strip()
ll = LexLine(line)
if isSOA:
# If we made it here, this should be the serial.
serial = _lex_word(ll)
if serial.isdigit():
return serial
else:
return None
if not line or line[0] == '$' or line[0] == ';':
continue
# name ttl class rr name-server email-addr (sn ref ret ex min)
# 1 2 3 4 5 6 7 8 9 10 11
# Everything up through 6 needs to be on the same line.
_lex_word(ll) # name
_lex_ws(ll)
c = ll.pop()
if c.isdigit():
_lex_word(ll) # ttl
_lex_ws(ll)
else:
ll.unpop()
_lex_word(ll) # class
_lex_ws(ll)
rr = _lex_word(ll)
if rr.upper() != 'SOA':
continue # It's not an soa, keep going.
isSOA = True
_lex_ws(ll)
_lex_word(ll) # ns
_lex_ws(ll)
email = _lex_word(ll) # email
if email[-1:] == '(':
_lex_ws(ll)
else:
_lex_ws(ll)
next = ll.peek()
if next == '(':
ll.pop()
# We are into the numbers.
_lex_ws(ll)
serial = _lex_word(ll)
if not serial:
# The serial must be on the next line
continue
if serial.isdigit():
return serial
else:
return None
def _lex_word(ll):
word = ''
while True:
# Read in name
c = ll.pop()
if c is None:
if word:
return word
else:
return None
if re.match('\s', c):
ll.unpop()
break
else:
word = word + c
return word
def _lex_ws(ll):
while True:
# Read in name
c = ll.pop()
if c is None:
return
if re.match('\s', c):
continue
else:
ll.unpop()
break
return
class LexLine(object):
def __init__(self, line):
self.line = line
self.length = len(line)
self.pos = 0
def pop(self):
if self.pos == self.length:
return None
else:
c = self.line[self.pos]
self.pos += 1
return c
def unpop(self):
if self.pos > 0:
self.pos -= 1
def peek(self):
return self.line[self.pos]
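def _example_get_serial():
    """Illustrative sketch only: pull the serial out of a minimal, made-up
    SOA record with _str_get_serial; the numbers are arbitrary."""
    from StringIO import StringIO
    soa = StringIO("@ 3600 IN SOA ns1.example.com. hostmaster.example.com. "
                   "( 2013100800 3600 900 604800 86400 )\n")
    print _str_get_serial(soa),  # -> 2013100800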
|
|
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy import array
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
run_module_suite, assert_raises, assert_allclose)
from scipy import signal
window_funcs = [
('boxcar', ()),
('triang', ()),
('parzen', ()),
('bohman', ()),
('blackman', ()),
('nuttall', ()),
('blackmanharris', ()),
('flattop', ()),
('bartlett', ()),
('hanning', ()),
('barthann', ()),
('hamming', ()),
('kaiser', (1,)),
('gaussian', (0.5,)),
('general_gaussian', (1.5, 2)),
('chebwin', (1,)),
('slepian', (2,)),
('cosine', ()),
('hann', ()),
('exponential', ()),
('tukey', (0.5,)),
]
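# Each entry pairs a window name with the extra shape parameters it needs;
# test_windowfunc_basics at the bottom of this file iterates over this list.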
cheb_odd_true = array([0.200938, 0.107729, 0.134941, 0.165348,
0.198891, 0.235450, 0.274846, 0.316836,
0.361119, 0.407338, 0.455079, 0.503883,
0.553248, 0.602637, 0.651489, 0.699227,
0.745266, 0.789028, 0.829947, 0.867485,
0.901138, 0.930448, 0.955010, 0.974482,
0.988591, 0.997138, 1.000000, 0.997138,
0.988591, 0.974482, 0.955010, 0.930448,
0.901138, 0.867485, 0.829947, 0.789028,
0.745266, 0.699227, 0.651489, 0.602637,
0.553248, 0.503883, 0.455079, 0.407338,
0.361119, 0.316836, 0.274846, 0.235450,
0.198891, 0.165348, 0.134941, 0.107729,
0.200938])
cheb_even_true = array([0.203894, 0.107279, 0.133904,
0.163608, 0.196338, 0.231986,
0.270385, 0.311313, 0.354493,
0.399594, 0.446233, 0.493983,
0.542378, 0.590916, 0.639071,
0.686302, 0.732055, 0.775783,
0.816944, 0.855021, 0.889525,
0.920006, 0.946060, 0.967339,
0.983557, 0.994494, 1.000000,
1.000000, 0.994494, 0.983557,
0.967339, 0.946060, 0.920006,
0.889525, 0.855021, 0.816944,
0.775783, 0.732055, 0.686302,
0.639071, 0.590916, 0.542378,
0.493983, 0.446233, 0.399594,
0.354493, 0.311313, 0.270385,
0.231986, 0.196338, 0.163608,
0.133904, 0.107279, 0.203894])
class TestChebWin(object):
def test_cheb_odd_high_attenuation(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
cheb_odd = signal.chebwin(53, at=-40)
assert_array_almost_equal(cheb_odd, cheb_odd_true, decimal=4)
def test_cheb_even_high_attenuation(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
cheb_even = signal.chebwin(54, at=-40)
assert_array_almost_equal(cheb_even, cheb_even_true, decimal=4)
def test_cheb_odd_low_attenuation(self):
cheb_odd_low_at_true = array([1.000000, 0.519052, 0.586405,
0.610151, 0.586405, 0.519052,
1.000000])
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
cheb_odd = signal.chebwin(7, at=-10)
assert_array_almost_equal(cheb_odd, cheb_odd_low_at_true, decimal=4)
def test_cheb_even_low_attenuation(self):
cheb_even_low_at_true = array([1.000000, 0.451924, 0.51027,
0.541338, 0.541338, 0.51027,
0.451924, 1.000000])
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
cheb_even = signal.chebwin(8, at=-10)
assert_array_almost_equal(cheb_even, cheb_even_low_at_true, decimal=4)
exponential_data = {
(4, None, 0.2, False): array([4.53999297624848542e-05,
6.73794699908546700e-03, 1.00000000000000000e+00,
6.73794699908546700e-03]),
(4, None, 0.2, True): array([0.00055308437014783, 0.0820849986238988,
0.0820849986238988, 0.00055308437014783]),
(4, None, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1.,
0.36787944117144233]),
(4, None, 1.0, True): array([0.22313016014842982, 0.60653065971263342,
0.60653065971263342, 0.22313016014842982]),
(4, 2, 0.2, False): array([4.53999297624848542e-05, 6.73794699908546700e-03,
1.00000000000000000e+00, 6.73794699908546700e-03]),
(4, 2, 0.2, True): None,
(4, 2, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1.,
0.36787944117144233]),
(4, 2, 1.0, True): None,
(5, None, 0.2, False): array([4.53999297624848542e-05,
6.73794699908546700e-03, 1.00000000000000000e+00,
6.73794699908546700e-03, 4.53999297624848542e-05]),
(5, None, 0.2, True): array([4.53999297624848542e-05,
6.73794699908546700e-03, 1.00000000000000000e+00,
6.73794699908546700e-03, 4.53999297624848542e-05]),
(5, None, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1.,
0.36787944117144233, 0.1353352832366127]),
(5, None, 1.0, True): array([0.1353352832366127, 0.36787944117144233, 1.,
0.36787944117144233, 0.1353352832366127]),
(5, 2, 0.2, False): array([4.53999297624848542e-05, 6.73794699908546700e-03,
1.00000000000000000e+00, 6.73794699908546700e-03,
4.53999297624848542e-05]),
(5, 2, 0.2, True): None,
(5, 2, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1.,
0.36787944117144233, 0.1353352832366127]),
(5, 2, 1.0, True): None
}
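# Keys are the positional arguments (M, center, tau, sym) passed to
# signal.exponential in test_exponential; a value of None marks a combination
# expected to raise ValueError (an explicit center with sym=True).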
def test_exponential():
for k, v in exponential_data.items():
if v is None:
assert_raises(ValueError, signal.exponential, *k)
else:
win = signal.exponential(*k)
assert_allclose(win, v, rtol=1e-14)
tukey_data = {
(4, 0.5, True): array([0.0, 1.0, 1.0, 0.0]),
(4, 0.9, True): array([0.0, 0.84312081893436686, 0.84312081893436686, 0.0]),
(4, 1.0, True): array([0.0, 0.75, 0.75, 0.0]),
(4, 0.5, False): array([0.0, 1.0, 1.0, 1.0]),
(4, 0.9, False): array([0.0, 0.58682408883346526, 1.0, 0.58682408883346526]),
(4, 1.0, False): array([0.0, 0.5, 1.0, 0.5]),
(5, 0.0, True): array([1.0, 1.0, 1.0, 1.0, 1.0]),
(5, 0.8, True): array([0.0, 0.69134171618254492, 1.0, 0.69134171618254492, 0.0]),
(5, 1.0, True): array([0.0, 0.5, 1.0, 0.5, 0.0]),
}
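# Keys are the positional arguments (M, alpha, sym) passed to signal.tukey
# in test_tukey below.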
def test_tukey():
# Test against hardcoded data
for k, v in tukey_data.items():
if v is None:
assert_raises(ValueError, signal.tukey, *k)
else:
win = signal.tukey(*k)
assert_allclose(win, v, rtol=1e-14)
# Test extremes of alpha correspond to boxcar and hann
tuk0 = signal.tukey(100,0)
tuk1 = signal.tukey(100,1)
box0 = signal.boxcar(100)
han1 = signal.hann(100)
assert_array_almost_equal(tuk0, box0)
assert_array_almost_equal(tuk1, han1)
class TestGetWindow(object):
def test_boxcar(self):
w = signal.get_window('boxcar', 12)
assert_array_equal(w, np.ones_like(w))
def test_cheb_odd(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
w = signal.get_window(('chebwin', -40), 53, fftbins=False)
assert_array_almost_equal(w, cheb_odd_true, decimal=4)
def test_cheb_even(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
w = signal.get_window(('chebwin', -40), 54, fftbins=False)
assert_array_almost_equal(w, cheb_even_true, decimal=4)
def test_array_as_window(self):
# github issue 3603
osfactor = 128
sig = np.arange(128)
win = signal.get_window(('kaiser', 8.0), osfactor // 2)
assert_raises(ValueError, signal.resample, (sig, len(sig) * osfactor), {'window': win})
def test_windowfunc_basics():
for window_name, params in window_funcs:
window = getattr(signal, window_name)
w1 = window(7, *params, sym=True)
w2 = window(7, *params, sym=False)
assert_array_almost_equal(w1, w2)
# just check the below runs
window(6, *params, sym=True)
window(6, *params, sym=False)
if __name__ == "__main__":
run_module_suite()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 vArmour Networks Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Gary Duan, vArmour Networks Inc.
#
import mock
from oslo.config import cfg
from neutron.agent.common import config as agent_config
from neutron.agent import l3_agent
from neutron.agent.linux import interface
from neutron.common import config as base_config
from neutron.common import constants as l3_constants
from neutron.openstack.common import uuidutils
from neutron.services.firewall.agents.varmour import varmour_router
from neutron.services.firewall.agents.varmour import varmour_utils
from neutron.tests import base
_uuid = uuidutils.generate_uuid
HOSTNAME = 'myhost'
FAKE_DIRECTOR = '1.1.1.1'
class TestVarmourRouter(base.BaseTestCase):
def setUp(self):
super(TestVarmourRouter, self).setUp()
self.conf = cfg.ConfigOpts()
self.conf.register_opts(base_config.core_opts)
self.conf.register_opts(varmour_router.vArmourL3NATAgent.OPTS)
agent_config.register_interface_driver_opts_helper(self.conf)
agent_config.register_use_namespaces_opts_helper(self.conf)
agent_config.register_root_helper(self.conf)
self.conf.register_opts(interface.OPTS)
self.conf.set_override('interface_driver',
'neutron.agent.linux.interface.NullDriver')
self.conf.root_helper = 'sudo'
self.device_exists_p = mock.patch(
'neutron.agent.linux.ip_lib.device_exists')
self.device_exists = self.device_exists_p.start()
self.utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
self.utils_exec = self.utils_exec_p.start()
self.external_process_p = mock.patch(
'neutron.agent.linux.external_process.ProcessManager')
self.external_process = self.external_process_p.start()
self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
driver_cls = self.dvr_cls_p.start()
self.mock_driver = mock.MagicMock()
self.mock_driver.DEV_NAME_LEN = (
interface.LinuxInterfaceDriver.DEV_NAME_LEN)
driver_cls.return_value = self.mock_driver
self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
ip_cls = self.ip_cls_p.start()
self.mock_ip = mock.MagicMock()
ip_cls.return_value = self.mock_ip
self.looping_call_p = mock.patch(
'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall')
self.looping_call_p.start()
def _create_router(self):
router = varmour_router.vArmourL3NATAgent(HOSTNAME, self.conf)
router.rest.server = FAKE_DIRECTOR
router.rest.user = 'varmour'
router.rest.passwd = 'varmour'
return router
def _del_all_internal_ports(self, router):
router[l3_constants.INTERFACE_KEY] = []
def _del_internal_ports(self, router, port_idx):
del router[l3_constants.INTERFACE_KEY][port_idx]
def _add_internal_ports(self, router, port_count=1):
self._del_all_internal_ports(router)
for i in range(port_count):
port = {'id': _uuid(),
'network_id': _uuid(),
'admin_state_up': True,
'fixed_ips': [{'ip_address': '10.0.%s.4' % i,
'subnet_id': _uuid()}],
'mac_address': 'ca:fe:de:ad:be:ef',
'subnet': {'cidr': '10.0.%s.0/24' % i,
'gateway_ip': '10.0.%s.1' % i}}
router[l3_constants.INTERFACE_KEY].append(port)
def _del_all_floating_ips(self, router):
router[l3_constants.FLOATINGIP_KEY] = []
def _del_floating_ips(self, router, port_idx):
del router[l3_constants.FLOATINGIP_KEY][port_idx]
def _add_floating_ips(self, router, port_count=1):
self._del_all_floating_ips(router)
for i in range(port_count):
fip = {'id': _uuid(),
'port_id': router['gw_port']['id'],
'floating_ip_address': '172.24.4.%s' % (100 + i),
'fixed_ip_address': '10.0.0.%s' % (100 + i)}
router[l3_constants.FLOATINGIP_KEY].append(fip)
def _prepare_router_data(self, enable_snat=None):
router_id = _uuid()
ex_gw_port = {'id': _uuid(),
'network_id': _uuid(),
'fixed_ips': [{'ip_address': '172.24.4.2',
'subnet_id': _uuid()}],
'subnet': {'cidr': '172.24.4.0/24',
'gateway_ip': '172.24.4.1'},
'ip_cidr': '172.24.4.226/28'}
int_ports = []
router = {
'id': router_id,
l3_constants.INTERFACE_KEY: int_ports,
'routes': [],
'gw_port': ex_gw_port}
if enable_snat is not None:
router['enable_snat'] = enable_snat
ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper,
self.conf.use_namespaces, router=router)
return ri
def test_agent_add_internal_network(self):
router = self._create_router()
try:
router.rest.auth()
except Exception:
# skip the test, firewall is not deployed
return
ri = self._prepare_router_data(enable_snat=True)
router._router_added(ri.router['id'], ri.router)
url = varmour_utils.REST_URL_CONF_NAT_RULE
prefix = varmour_utils.get_snat_rule_name(ri)
router.process_router(ri)
n = router.rest.count_cfg_objs(url, prefix)
self.assertEqual(n, 0, 'prefix %s' % prefix)
self._add_internal_ports(ri.router, port_count=1)
router.process_router(ri)
n = router.rest.count_cfg_objs(url, prefix)
self.assertEqual(n, 1, 'prefix %s' % prefix)
router._router_removed(ri.router['id'])
n = router.rest.count_cfg_objs(url, prefix)
self.assertEqual(n, 0, 'prefix %s' % prefix)
def test_agent_remove_internal_network(self):
router = self._create_router()
try:
router.rest.auth()
except Exception:
# skip the test, firewall is not deployed
return
ri = self._prepare_router_data(enable_snat=True)
router._router_added(ri.router['id'], ri.router)
url = varmour_utils.REST_URL_CONF_NAT_RULE
prefix = varmour_utils.get_snat_rule_name(ri)
self._add_internal_ports(ri.router, port_count=2)
router.process_router(ri)
n = router.rest.count_cfg_objs(url, prefix)
self.assertEqual(n, 2, 'prefix %s' % prefix)
self._del_internal_ports(ri.router, 0)
router.process_router(ri)
n = router.rest.count_cfg_objs(url, prefix)
self.assertEqual(n, 1, 'prefix %s' % prefix)
self._del_all_internal_ports(ri.router)
router.process_router(ri)
n = router.rest.count_cfg_objs(url, prefix)
self.assertEqual(n, 0, 'prefix %s' % prefix)
router._router_removed(ri.router['id'])
n = router.rest.count_cfg_objs(url, prefix)
self.assertEqual(n, 0, 'prefix %s' % prefix)
def test_agent_add_floating_ips(self):
router = self._create_router()
try:
router.rest.auth()
except Exception:
# skip the test, firewall is not deployed
return
ri = self._prepare_router_data(enable_snat=True)
self._add_internal_ports(ri.router, port_count=1)
router._router_added(ri.router['id'], ri.router)
url = varmour_utils.REST_URL_CONF_NAT_RULE
prefix = varmour_utils.get_dnat_rule_name(ri)
self._add_floating_ips(ri.router, port_count=1)
router.process_router(ri)
n = router.rest.count_cfg_objs(url, prefix)
self.assertEqual(n, 1, 'prefix %s' % prefix)
self._add_floating_ips(ri.router, port_count=2)
router.process_router(ri)
n = router.rest.count_cfg_objs(url, prefix)
self.assertEqual(n, 2, 'prefix %s' % prefix)
router._router_removed(ri.router['id'])
n = router.rest.count_cfg_objs(url, prefix)
self.assertEqual(n, 0, 'prefix %s' % prefix)
def test_agent_remove_floating_ips(self):
router = self._create_router()
try:
router.rest.auth()
except Exception:
# skip the test, firewall is not deployed
return
ri = self._prepare_router_data(enable_snat=True)
self._add_internal_ports(ri.router, port_count=1)
self._add_floating_ips(ri.router, port_count=2)
router._router_added(ri.router['id'], ri.router)
url = varmour_utils.REST_URL_CONF_NAT_RULE
prefix = varmour_utils.get_dnat_rule_name(ri)
router.process_router(ri)
n = router.rest.count_cfg_objs(url, prefix)
self.assertEqual(n, 2, 'prefix %s' % prefix)
self._del_floating_ips(ri.router, 0)
router.process_router(ri)
n = router.rest.count_cfg_objs(url, prefix)
self.assertEqual(n, 1, 'prefix %s' % prefix)
self._del_all_floating_ips(ri.router)
router.process_router(ri)
n = router.rest.count_cfg_objs(url, prefix)
self.assertEqual(n, 0, 'prefix %s' % prefix)
router._router_removed(ri.router['id'])
n = router.rest.count_cfg_objs(url, prefix)
self.assertEqual(n, 0, 'prefix %s' % prefix)
def test_agent_external_gateway(self):
router = self._create_router()
try:
router.rest.auth()
except Exception:
# skip the test, firewall is not deployed
return
ri = self._prepare_router_data(enable_snat=True)
router._router_added(ri.router['id'], ri.router)
url = varmour_utils.REST_URL_CONF_ZONE
prefix = varmour_utils.get_untrusted_zone_name(ri)
router.process_router(ri)
n = router.rest.count_cfg_objs(url, prefix)
self.assertEqual(n, 1, 'prefix %s' % prefix)
del ri.router['gw_port']
router.process_router(ri)
n = router.rest.count_cfg_objs(url, prefix)
self.assertEqual(n, 1, 'prefix %s' % prefix)
router._router_removed(ri.router['id'])
n = router.rest.count_cfg_objs(url, prefix)
self.assertEqual(n, 0, 'prefix %s' % prefix)
def test_agent_snat_enable(self):
router = self._create_router()
try:
router.rest.auth()
except Exception:
# skip the test, firewall is not deployed
return
ri = self._prepare_router_data(enable_snat=True)
router._router_added(ri.router['id'], ri.router)
url = varmour_utils.REST_URL_CONF_NAT_RULE
prefix = varmour_utils.get_snat_rule_name(ri)
router.process_router(ri)
n = router.rest.count_cfg_objs(url, prefix)
self.assertEqual(n, 0, 'prefix %s' % prefix)
ri.router['enable_snat'] = False
router.process_router(ri)
n = router.rest.count_cfg_objs(url, prefix)
self.assertEqual(n, 0, 'prefix %s' % prefix)
router._router_removed(ri.router['id'])
n = router.rest.count_cfg_objs(url, prefix)
self.assertEqual(n, 0, 'prefix %s' % prefix)
|
|
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
date_range,
)
import pandas._testing as tm
def test_centered_axis_validation():
# ok
Series(np.ones(10)).rolling(window=3, center=True, axis=0).mean()
# bad axis
msg = "No axis named 1 for object type Series"
with pytest.raises(ValueError, match=msg):
Series(np.ones(10)).rolling(window=3, center=True, axis=1).mean()
# ok ok
DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=0).mean()
DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=1).mean()
# bad axis
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
(DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=2).mean())
@td.skip_if_no_scipy
def test_cmov_mean():
# GH 8238
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
result = Series(vals).rolling(5, center=True).mean()
expected_values = [
np.nan,
np.nan,
9.962,
11.27,
11.564,
12.516,
12.818,
12.952,
np.nan,
np.nan,
]
expected = Series(expected_values)
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window():
# GH 8238
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
result = Series(vals).rolling(5, win_type="boxcar", center=True).mean()
expected_values = [
np.nan,
np.nan,
9.962,
11.27,
11.564,
12.516,
12.818,
12.952,
np.nan,
np.nan,
]
expected = Series(expected_values)
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window_corner():
# GH 8238
# all nan
vals = Series([np.nan] * 10)
result = vals.rolling(5, center=True, win_type="boxcar").mean()
assert np.isnan(result).all()
# empty
vals = Series([], dtype=object)
result = vals.rolling(5, center=True, win_type="boxcar").mean()
assert len(result) == 0
# shorter than window
vals = Series(np.random.randn(5))
result = vals.rolling(10, win_type="boxcar").mean()
assert np.isnan(result).all()
assert len(result) == 5
@td.skip_if_no_scipy
@pytest.mark.parametrize(
"f,xp",
[
(
"mean",
[
[np.nan, np.nan],
[np.nan, np.nan],
[9.252, 9.392],
[8.644, 9.906],
[8.87, 10.208],
[6.81, 8.588],
[7.792, 8.644],
[9.05, 7.824],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"std",
[
[np.nan, np.nan],
[np.nan, np.nan],
[3.789706, 4.068313],
[3.429232, 3.237411],
[3.589269, 3.220810],
[3.405195, 2.380655],
[3.281839, 2.369869],
[3.676846, 1.801799],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"var",
[
[np.nan, np.nan],
[np.nan, np.nan],
[14.36187, 16.55117],
[11.75963, 10.48083],
[12.88285, 10.37362],
[11.59535, 5.66752],
[10.77047, 5.61628],
[13.51920, 3.24648],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"sum",
[
[np.nan, np.nan],
[np.nan, np.nan],
[46.26, 46.96],
[43.22, 49.53],
[44.35, 51.04],
[34.05, 42.94],
[38.96, 43.22],
[45.25, 39.12],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
],
)
def test_cmov_window_frame(f, xp):
# Gh 8238
df = DataFrame(
np.array(
[
[12.18, 3.64],
[10.18, 9.16],
[13.24, 14.61],
[4.51, 8.11],
[6.15, 11.44],
[9.14, 6.21],
[11.31, 10.67],
[2.94, 6.51],
[9.42, 8.39],
[12.44, 7.34],
]
)
)
xp = DataFrame(np.array(xp))
roll = df.rolling(5, win_type="boxcar", center=True)
rs = getattr(roll, f)()
tm.assert_frame_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_na_min_periods():
# min_periods
vals = Series(np.random.randn(10))
vals[4] = np.nan
vals[8] = np.nan
xp = vals.rolling(5, min_periods=4, center=True).mean()
rs = vals.rolling(5, win_type="boxcar", min_periods=4, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular(win_types):
# GH 8238
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
xps = {
"hamming": [
np.nan,
np.nan,
8.71384,
9.56348,
12.38009,
14.03687,
13.8567,
11.81473,
np.nan,
np.nan,
],
"triang": [
np.nan,
np.nan,
9.28667,
10.34667,
12.00556,
13.33889,
13.38,
12.33667,
np.nan,
np.nan,
],
"barthann": [
np.nan,
np.nan,
8.4425,
9.1925,
12.5575,
14.3675,
14.0825,
11.5675,
np.nan,
np.nan,
],
"bohman": [
np.nan,
np.nan,
7.61599,
9.1764,
12.83559,
14.17267,
14.65923,
11.10401,
np.nan,
np.nan,
],
"blackmanharris": [
np.nan,
np.nan,
6.97691,
9.16438,
13.05052,
14.02156,
15.10512,
10.74574,
np.nan,
np.nan,
],
"nuttall": [
np.nan,
np.nan,
7.04618,
9.16786,
13.02671,
14.03559,
15.05657,
10.78514,
np.nan,
np.nan,
],
"blackman": [
np.nan,
np.nan,
7.73345,
9.17869,
12.79607,
14.20036,
14.57726,
11.16988,
np.nan,
np.nan,
],
"bartlett": [
np.nan,
np.nan,
8.4425,
9.1925,
12.5575,
14.3675,
14.0825,
11.5675,
np.nan,
np.nan,
],
}
xp = Series(xps[win_types])
rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular_linear_range(win_types):
# GH 8238
vals = np.array(range(10), dtype=float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular_missing_data(win_types):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan, 10.63, 14.48]
)
xps = {
"bartlett": [
np.nan,
np.nan,
9.70333,
10.5225,
8.4425,
9.1925,
12.5575,
14.3675,
15.61667,
13.655,
],
"blackman": [
np.nan,
np.nan,
9.04582,
11.41536,
7.73345,
9.17869,
12.79607,
14.20036,
15.8706,
13.655,
],
"barthann": [
np.nan,
np.nan,
9.70333,
10.5225,
8.4425,
9.1925,
12.5575,
14.3675,
15.61667,
13.655,
],
"bohman": [
np.nan,
np.nan,
8.9444,
11.56327,
7.61599,
9.1764,
12.83559,
14.17267,
15.90976,
13.655,
],
"hamming": [
np.nan,
np.nan,
9.59321,
10.29694,
8.71384,
9.56348,
12.38009,
14.20565,
15.24694,
13.69758,
],
"nuttall": [
np.nan,
np.nan,
8.47693,
12.2821,
7.04618,
9.16786,
13.02671,
14.03673,
16.08759,
13.65553,
],
"triang": [
np.nan,
np.nan,
9.33167,
9.76125,
9.28667,
10.34667,
12.00556,
13.82125,
14.49429,
13.765,
],
"blackmanharris": [
np.nan,
np.nan,
8.42526,
12.36824,
6.97691,
9.16438,
13.05052,
14.02175,
16.1098,
13.65509,
],
}
xp = Series(xps[win_types])
rs = Series(vals).rolling(5, win_type=win_types, min_periods=3).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_special(win_types_special):
# GH 8238
kwds = {
"kaiser": {"beta": 1.0},
"gaussian": {"std": 1.0},
"general_gaussian": {"p": 2.0, "sig": 2.0},
"exponential": {"tau": 10},
}
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
xps = {
"gaussian": [
np.nan,
np.nan,
8.97297,
9.76077,
12.24763,
13.89053,
13.65671,
12.01002,
np.nan,
np.nan,
],
"general_gaussian": [
np.nan,
np.nan,
9.85011,
10.71589,
11.73161,
13.08516,
12.95111,
12.74577,
np.nan,
np.nan,
],
"kaiser": [
np.nan,
np.nan,
9.86851,
11.02969,
11.65161,
12.75129,
12.90702,
12.83757,
np.nan,
np.nan,
],
"exponential": [
np.nan,
np.nan,
9.83364,
11.10472,
11.64551,
12.66138,
12.92379,
12.83770,
np.nan,
np.nan,
],
}
xp = Series(xps[win_types_special])
rs = (
Series(vals)
.rolling(5, win_type=win_types_special, center=True)
.mean(**kwds[win_types_special])
)
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_special_linear_range(win_types_special):
# GH 8238
kwds = {
"kaiser": {"beta": 1.0},
"gaussian": {"std": 1.0},
"general_gaussian": {"p": 2.0, "sig": 2.0},
"slepian": {"width": 0.5},
"exponential": {"tau": 10},
}
vals = np.array(range(10), dtype=float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
rs = (
Series(vals)
.rolling(5, win_type=win_types_special, center=True)
.mean(**kwds[win_types_special])
)
tm.assert_series_equal(xp, rs)
def test_rolling_min_min_periods():
a = Series([1, 2, 3, 4, 5])
result = a.rolling(window=100, min_periods=1).min()
expected = Series(np.ones(len(a)))
tm.assert_series_equal(result, expected)
msg = "min_periods 5 must be <= window 3"
with pytest.raises(ValueError, match=msg):
Series([1, 2, 3]).rolling(window=3, min_periods=5).min()
def test_rolling_max_min_periods():
a = Series([1, 2, 3, 4, 5], dtype=np.float64)
b = a.rolling(window=100, min_periods=1).max()
tm.assert_almost_equal(a, b)
msg = "min_periods 5 must be <= window 3"
with pytest.raises(ValueError, match=msg):
Series([1, 2, 3]).rolling(window=3, min_periods=5).max()
def test_rolling_quantile_np_percentile():
# #9413: Tests that rolling window's quantile default behavior
# is analogous to Numpy's percentile
row = 10
col = 5
idx = date_range("20100101", periods=row, freq="B")
df = DataFrame(np.random.rand(row * col).reshape((row, -1)), index=idx)
df_quantile = df.quantile([0.25, 0.5, 0.75], axis=0)
np_percentile = np.percentile(df, [25, 50, 75], axis=0)
tm.assert_almost_equal(df_quantile.values, np.array(np_percentile))
@pytest.mark.parametrize("quantile", [0.0, 0.1, 0.45, 0.5, 1])
@pytest.mark.parametrize(
"interpolation", ["linear", "lower", "higher", "nearest", "midpoint"]
)
@pytest.mark.parametrize(
"data",
[
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[8.0, 1.0, 3.0, 4.0, 5.0, 2.0, 6.0, 7.0],
[0.0, np.nan, 0.2, np.nan, 0.4],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 0.1, np.nan, 0.3, 0.4, 0.5],
[0.5],
[np.nan, 0.7, 0.6],
],
)
def test_rolling_quantile_interpolation_options(quantile, interpolation, data):
# Tests that rolling window's quantile behavior is analogous to
# Series' quantile for each interpolation option
s = Series(data)
q1 = s.quantile(quantile, interpolation)
q2 = s.expanding(min_periods=1).quantile(quantile, interpolation).iloc[-1]
if np.isnan(q1):
assert np.isnan(q2)
else:
assert q1 == q2
def test_invalid_quantile_value():
data = np.arange(5)
s = Series(data)
msg = "Interpolation 'invalid' is not supported"
with pytest.raises(ValueError, match=msg):
s.rolling(len(data), min_periods=1).quantile(0.5, interpolation="invalid")
def test_rolling_quantile_param():
ser = Series([0.0, 0.1, 0.5, 0.9, 1.0])
msg = "quantile value -0.1 not in \\[0, 1\\]"
with pytest.raises(ValueError, match=msg):
ser.rolling(3).quantile(-0.1)
msg = "quantile value 10.0 not in \\[0, 1\\]"
with pytest.raises(ValueError, match=msg):
ser.rolling(3).quantile(10.0)
msg = "must be real number, not str"
with pytest.raises(TypeError, match=msg):
ser.rolling(3).quantile("foo")
def test_rolling_std_1obs():
vals = Series([1.0, 2.0, 3.0, 4.0, 5.0])
result = vals.rolling(1, min_periods=1).std()
expected = Series([np.nan] * 5)
tm.assert_series_equal(result, expected)
result = vals.rolling(1, min_periods=1).std(ddof=0)
expected = Series([0.0] * 5)
tm.assert_series_equal(result, expected)
result = Series([np.nan, np.nan, 3, 4, 5]).rolling(3, min_periods=2).std()
assert np.isnan(result[2])
def test_rolling_std_neg_sqrt():
# unit test from Bottleneck
# Test move_nanstd for neg sqrt.
a = Series(
[
0.0011448196318903589,
0.00028718669878572767,
0.00028718669878572767,
0.00028718669878572767,
0.00028718669878572767,
]
)
b = a.rolling(window=3).std()
assert np.isfinite(b[2:]).all()
b = a.ewm(span=3).std()
assert np.isfinite(b[2:]).all()
|
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 23 10:45:10 2016
@author: Mathias Aschwanden (mathias.aschwanden@gmail.com)
"""
import unittest
from unittest import TestCase
import os
import math
import sys
import copy
import numpy as np
import datetime
from matplotlib import pyplot as plt
if os.path.abspath(__file__ + "/../../../") not in sys.path:
sys.path.append(os.path.abspath(__file__ + "/../../../"))
from boxsimu.entities import Fluid, Variable
from boxsimu.box import Box
from boxsimu.transport import Flow, Flux
from boxsimu.condition import Condition
from boxsimu.system import BoxModelSystem
from boxsimu.process import Process, Reaction
from boxsimu.solver import Solver
from boxsimu import utils
from boxsimu.simulations import boxmodelsystem2
from boxsimu import ur
class BoxModelSystem2Test(TestCase):
"""Test boxsimu framework using an intermediate complex box model."""
def setUp(self, *args, **kwargs):
self.system = boxmodelsystem2.get_system()
self.solver = Solver(self.system)
self.la = self.system.boxes.lake
self.uo = self.system.boxes.upper_ocean
self.do = self.system.boxes.deep_ocean
self.se = self.system.boxes.sediment
self.po4 = self.system.variables.po4
self.no3 = self.system.variables.no3
self.phyto = self.system.variables.phyto
def tearDown(self, *args, **kwargs):
del(self.system)
del(self.solver)
del(self.la)
del(self.uo)
del(self.do)
del(self.se)
del(self.po4)
del(self.no3)
del(self.phyto)
def assertPintQuantityAlmostEqual(self, q1, q2, rel_tol=1e-7):
q1 = q1.to_base_units()
q2 = q2.to_base_units()
try:
self.assertTrue(math.isclose(q1.magnitude, q2.magnitude,
rel_tol=rel_tol))
except AssertionError:
raise AssertionError(
'{} != {} with relative tolerance of {}'.format(
q1, q2, rel_tol
)
)
self.assertEqual(q1.units, q2.units)
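    # Example: assertPintQuantityAlmostEqual(1 * ur.kg, (1 + 1e-9) * ur.kg)
    # passes, while a relative difference larger than rel_tol raises
    # AssertionError.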
#####################################################
# Box Functions
#####################################################
def test_mass(self):
self.assertEqual(self.la.mass, 1e16*ur.kg + 6*ur.kg)
self.assertEqual(self.uo.mass, 3e19*ur.kg + 15*ur.kg)
self.assertEqual(self.do.mass, 1e21*ur.kg + 24*ur.kg)
self.assertEqual(self.se.mass, 1e10*ur.kg + 33*ur.kg)
def test_volume(self):
la_context = self.system.get_box_context(self.la)
uo_context = self.system.get_box_context(self.uo)
do_context = self.system.get_box_context(self.do)
se_context = self.system.get_box_context(self.se)
self.assertEqual(self.la.get_volume(la_context), 1e16/1020 * ur.meter**3)
self.assertEqual(self.uo.get_volume(uo_context), 3e19/1020 * ur.meter**3)
self.assertEqual(self.do.get_volume(do_context), 1e21/1020 * ur.meter**3)
self.assertEqual(self.se.get_volume(se_context), 1e10/2720 * ur.meter**3)
def test_concentration(self):
pass
#####################################################
# Base Functions
#####################################################
def test_box_id(self):
self.assertEqual(self.la.id, 1)
self.assertEqual(self.uo.id, 3)
self.assertEqual(self.do.id, 0)
self.assertEqual(self.se.id, 2)
def test_variable_id(self):
self.assertEqual(self.no3.id, 0)
self.assertEqual(self.phyto.id, 1)
self.assertEqual(self.po4.id, 2)
def test_N_boxes(self):
self.assertEqual(self.system.N_boxes, 4)
def test_N_variables(self):
self.assertEqual(self.system.N_variables, 3)
def test_context_of_box(self):
global_context = self.system.get_box_context()
lake_context = self.system.get_box_context(self.la)
upper_ocean_context = self.system.get_box_context(self.uo)
deep_ocean_context = self.system.get_box_context(self.do)
sediment_context = self.system.get_box_context(self.se)
# Test accessability of the condition attributes
self.assertEqual(global_context.T, 288 * ur.kelvin)
self.assertEqual(global_context.pH, 7.3)
self.assertEqual(lake_context.T, 290 * ur.kelvin)
self.assertEqual(lake_context.pH, 7.0)
self.assertEqual(upper_ocean_context.T, 280 * ur.kelvin)
self.assertEqual(upper_ocean_context.pH, 8.3)
self.assertEqual(deep_ocean_context.T, 275 * ur.kelvin)
self.assertEqual(deep_ocean_context.pH, 8.1)
self.assertEqual(sediment_context.T, 275 * ur.kelvin)
self.assertEqual(sediment_context.pH, 7.7)
# Test the accessability of the condition attributes of other boxes:
self.assertEqual(global_context.upper_ocean.condition.T, 280 * ur.kelvin)
self.assertEqual(global_context.deep_ocean.condition.pH, 8.1)
self.assertEqual(lake_context.upper_ocean.condition.T, 280 * ur.kelvin)
self.assertEqual(lake_context.deep_ocean.condition.pH, 8.1)
self.assertEqual(upper_ocean_context.lake.condition.T, 290 * ur.kelvin)
self.assertEqual(upper_ocean_context.deep_ocean.condition.pH, 8.1)
self.assertEqual(deep_ocean_context.upper_ocean.condition.T, 280 * ur.kelvin)
self.assertEqual(deep_ocean_context.lake.condition.pH, 7.0)
self.assertEqual(sediment_context.upper_ocean.condition.T, 280 * ur.kelvin)
self.assertEqual(sediment_context.lake.condition.pH, 7.0)
def test_context_evaluation_lambda_func(self):
system_copy = copy.deepcopy(self.system)
global_context = system_copy.get_box_context()
lake_context = system_copy.get_box_context(self.la)
upper_ocean_context = system_copy.get_box_context(self.uo)
deep_ocean_context = system_copy.get_box_context(self.do)
sediment_context = system_copy.get_box_context(self.se)
lambda1 = lambda t, c: c.T / (100*ur.kelvin) + c.pH
self.assertEqual(lambda1(0*ur.second, global_context), 2.88+7.3)
self.assertEqual(lambda1(0*ur.second, lake_context), 2.90+7.0)
self.assertEqual(lambda1(0*ur.second, upper_ocean_context), 2.80+8.3)
self.assertEqual(lambda1(0*ur.second, deep_ocean_context), 2.75+8.1)
self.assertEqual(lambda1(0*ur.second, sediment_context), 2.75+7.7)
#####################################################
# Fluid and Variable Mass/Concentration Vectors/Matrices
#####################################################
def test_fluid_mass_1Dlist_1Darray(self):
m = self.system.get_fluid_mass_1Darray()
self.assertEqual(m[self.la.id], 1e16 * ur.kg)
self.assertEqual(m[self.uo.id], 3e19 * ur.kg)
self.assertEqual(m[self.do.id], 1e21 * ur.kg)
self.assertEqual(m[self.se.id], 1e10 * ur.kg)
def test_variable_mass_1Darray(self):
m = self.system.get_variable_mass_1Darray(self.po4)
self.assertEqual(m[self.la.id], 1 * ur.kg)
self.assertEqual(m[self.uo.id], 4 * ur.kg)
self.assertEqual(m[self.do.id], 7 * ur.kg)
self.assertEqual(m[self.se.id], 10 * ur.kg)
m = self.system.get_variable_mass_1Darray(self.no3)
self.assertEqual(m[self.la.id], 2 * ur.kg)
self.assertEqual(m[self.uo.id], 5 * ur.kg)
self.assertEqual(m[self.do.id], 8 * ur.kg)
self.assertEqual(m[self.se.id], 11 * ur.kg)
m = self.system.get_variable_mass_1Darray(self.phyto)
self.assertEqual(m[self.la.id], 3 * ur.kg)
self.assertEqual(m[self.uo.id], 6 * ur.kg)
self.assertEqual(m[self.do.id], 9 * ur.kg)
self.assertEqual(m[self.se.id], 12 * ur.kg)
def test_variable_concentration_1Darray(self):
def _c(var_mass, fluid_mass):
return var_mass / (fluid_mass + var_mass) * ur.dimensionless
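# Note: in these tests, "concentration" is the tracer mass relative to the
# total box mass (fluid mass + tracer mass), expressed as a dimensionless
# ratio; the expected values below follow that convention.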
c = self.system.get_variable_concentration_1Darray(self.po4)
self.assertAlmostEqual(c[self.la.id], _c(3, 1e16))
self.assertAlmostEqual(c[self.uo.id], _c(3.3123, 3e19))
self.assertAlmostEqual(c[self.do.id], _c(3.492, 1e21))
self.assertAlmostEqual(c[self.se.id], _c(2.3484, 1e10))
c = self.system.get_variable_concentration_1Darray(self.no3)
self.assertAlmostEqual(c[self.la.id], _c(1, 1e16))
self.assertAlmostEqual(c[self.uo.id], _c(0.237, 3e19))
self.assertAlmostEqual(c[self.do.id], _c(1.12437, 1e21))
self.assertAlmostEqual(c[self.se.id], _c(9.23, 1e10))
c = self.system.get_variable_concentration_1Darray(self.phyto)
self.assertAlmostEqual(c[self.la.id], _c(0.324, 1e16))
self.assertAlmostEqual(c[self.uo.id], _c(0.7429, 3e19))
self.assertAlmostEqual(c[self.do.id], _c(4.324, 1e21))
self.assertAlmostEqual(c[self.se.id], _c(2.824, 1e10))
#####################################################
# Mass Flow Vectors/Matrices
#####################################################
def test_fluid_mass_internal_flow_2Darray(self):
A = self.system.get_fluid_mass_internal_flow_2Darray(0*ur.second)
# Check that diagonal elements are zero
for i in range(self.system.N_boxes):
self.assertEqual(A[i,i], 0 * ur.kg / ur.year)
# Check that the other values are set correctly
# Deep Ocean id=0 ; Lake id=1 ; Sediment id=2 ; Upper Ocean id=3
self.assertEqual(A[self.do.id, self.la.id], 0*ur.kg/ur.year)
self.assertEqual(A[self.do.id, self.se.id], 0*ur.kg/ur.year)
self.assertEqual(A[self.do.id, self.uo.id], 6e17*ur.kg/ur.year)
self.assertEqual(A[self.la.id, self.do.id], 0*ur.kg/ur.year)
self.assertEqual(A[self.la.id, self.se.id], 0*ur.kg/ur.year)
self.assertEqual(A[self.la.id, self.uo.id], 2e15*ur.kg/ur.year)
self.assertEqual(A[self.se.id, self.do.id], 0*ur.kg/ur.year)
self.assertEqual(A[self.se.id, self.la.id], 0*ur.kg/ur.year)
self.assertEqual(A[self.se.id, self.uo.id], 0*ur.kg/ur.year)
self.assertEqual(A[self.uo.id, self.do.id], 6e17*ur.kg/ur.year)
self.assertEqual(A[self.uo.id, self.la.id], 0*ur.kg/ur.year)
self.assertEqual(A[self.uo.id, self.se.id], 0*ur.kg/ur.year)
def test_fluid_mass_flow_sink_1Darray(self):
s = self.system.get_fluid_mass_flow_sink_1Darray(0*ur.second)
self.assertEqual(s[self.la.id], 1e15*ur.kg/ur.year)
self.assertEqual(s[self.uo.id], 2e15*ur.kg/ur.year)
self.assertEqual(s[self.do.id], 1e11*ur.kg/ur.year)
self.assertEqual(s[self.se.id], 0*ur.kg/ur.year)
def test_fluid_mass_flow_source_1Darray(self):
q = self.system.get_fluid_mass_flow_source_1Darray(0*ur.second)
self.assertEqual(q[self.la.id], 3e15*ur.kg/ur.year)
self.assertEqual(q[self.uo.id], 0*ur.kg/ur.year)
self.assertEqual(q[self.do.id], 1e11*ur.kg/ur.year)
self.assertEqual(q[self.se.id], 0*ur.kg/ur.year)
#####################################################
# Variable Sink/Source Vectors
#####################################################
def test_variable_internal_flow_2Darray(self):
f_flow = np.ones(self.system.N_boxes)
flow_la_uo = 2e15*ur.kg/ur.year
flow_uo_do = 6e17*ur.kg/ur.year
flow_do_uo = 6e17*ur.kg/ur.year
for var in [self.po4, self.no3, self.phyto]:
A = self.system.get_variable_internal_flow_2Darray(
var, 0*ur.second, f_flow)
# Check that diagonal elements are zero
for i in range(self.system.N_boxes):
self.assertEqual(A[i,i], 0*ur.kg/ur.year)
# Deep Ocean id=0 ; Lake id=1 ; Sediment id=2 ; Upper Ocean id=3
c = self.system.get_variable_concentration_1Darray(var)
self.assertEqual(A[self.do.id, self.la.id], 0*ur.kg/ur.year)
self.assertEqual(A[self.do.id, self.se.id], 0*ur.kg/ur.year)
self.assertPintQuantityAlmostEqual(A[self.do.id, self.uo.id],
flow_do_uo * c[self.do.id])
self.assertEqual(A[self.la.id, self.do.id], 0*ur.kg/ur.year)
self.assertEqual(A[self.la.id, self.se.id], 0*ur.kg/ur.year)
self.assertPintQuantityAlmostEqual(A[self.la.id, self.uo.id],
flow_la_uo * c[self.la.id])
self.assertEqual(A[self.se.id, self.do.id], 0*ur.kg/ur.year)
self.assertEqual(A[self.se.id, self.la.id], 0*ur.kg/ur.year)
self.assertEqual(A[self.se.id, self.uo.id], 0*ur.kg/ur.year)
self.assertPintQuantityAlmostEqual(A[self.uo.id, self.do.id],
flow_uo_do * c[self.uo.id])
self.assertEqual(A[self.uo.id, self.la.id], 0*ur.kg/ur.year)
self.assertEqual(A[self.uo.id, self.se.id], 0*ur.kg/ur.year)
def test_variable_flow_sink_1Darray(self):
f_flow = np.ones(self.system.N_boxes)
flow_do_none = 1e11*ur.kg/ur.year
for var in [self.po4, self.no3, self.phyto]:
s = self.system.get_variable_flow_sink_1Darray(var, 0*ur.second,
f_flow)
c = self.system.get_variable_concentration_1Darray(var)
# Lake Evaporation does not transport tracers!
self.assertEqual(s[self.la.id], 0*ur.kg/ur.year)
# Upper Ocean Evaporation does not transport tracers!
self.assertEqual(s[self.uo.id], 0*ur.kg/ur.year)
self.assertPintQuantityAlmostEqual(s[self.do.id],
flow_do_none * c[self.do.id])
self.assertEqual(s[self.se.id], 0*ur.kg/ur.year)
def test_variable_flow_source_1Darray(self):
la_input_concentration = {self.po4: 4.6455e-8*ur.kg/ur.kg,
self.no3: 7*4.6455e-8*ur.kg/ur.kg}
do_input_concentration = la_input_concentration
for var in [self.po4, self.no3, self.phyto]:
q = self.system.get_variable_flow_source_1Darray(var, 0*ur.second)
la_input_c = la_input_concentration.get(var, 0*ur.kg/ur.kg)
la_inflow = 3e15*ur.kg/ur.year
do_input_c = do_input_concentration.get(var, 0*ur.kg/ur.kg)
do_inflow = 1e11*ur.kg/ur.year
self.assertPintQuantityAlmostEqual(q[self.la.id],
la_input_c * la_inflow)
self.assertEqual(q[self.uo.id], 0*ur.kg/ur.year)
self.assertPintQuantityAlmostEqual(q[self.do.id],
do_input_c * do_inflow)
self.assertEqual(q[self.se.id], 0*ur.kg/ur.year)
def test_variable_process_sink_1Darray(self):
s = self.system.get_variable_process_sink_1Darray(
self.po4, 0*ur.second)
m = self.system.get_variable_mass_1Darray(self.po4)
self.assertEqual(s[self.la.id], 0*ur.kg/ur.year)
self.assertPintQuantityAlmostEqual(s[self.uo.id],
m[self.uo.id]*0.01/ur.year)
self.assertEqual(s[self.do.id], 0*ur.kg/ur.year)
self.assertEqual(s[self.se.id], 0*ur.kg/ur.year)
s = self.system.get_variable_process_sink_1Darray(
self.no3, 0*ur.second)
m = self.system.get_variable_mass_1Darray(self.no3)
self.assertEqual(s[self.la.id], 0*ur.kg/ur.year)
self.assertPintQuantityAlmostEqual(s[self.uo.id],
m[self.uo.id]*0.01/ur.year)
self.assertEqual(s[self.do.id], 0*ur.kg/ur.year)
self.assertEqual(s[self.se.id], 0*ur.kg/ur.year)
s = self.system.get_variable_process_sink_1Darray(
self.phyto, 0*ur.second)
m = self.system.get_variable_mass_1Darray(self.phyto)
self.assertEqual(s[self.la.id], 0*ur.kg/ur.year)
self.assertEqual(s[self.uo.id], 0*ur.kg/ur.year)
self.assertEqual(s[self.do.id], 0*ur.kg/ur.year)
self.assertEqual(s[self.se.id], 0*ur.kg/ur.year)
def test_variable_process_source_1Darray(self):
q = self.system.get_variable_process_source_1Darray(
self.po4, 0*ur.second)
self.assertEqual(q[self.la.id], 0*ur.kg/ur.year)
self.assertEqual(q[self.uo.id], 12345 * ur.kg / ur.year)
self.assertEqual(q[self.do.id], 0*ur.kg/ur.year)
self.assertEqual(q[self.se.id], 0*ur.kg/ur.year)
q = self.system.get_variable_process_source_1Darray(
self.no3, 0*ur.second)
self.assertEqual(q[self.la.id], 0*ur.kg/ur.year)
self.assertEqual(q[self.uo.id], 123456 * ur.kg / ur.year)
self.assertEqual(q[self.do.id], 0*ur.kg/ur.year)
self.assertEqual(q[self.se.id], 0*ur.kg/ur.year)
q = self.system.get_variable_process_source_1Darray(
self.phyto, 0*ur.second)
self.assertEqual(q[self.la.id], 0*ur.kg/ur.year)
self.assertEqual(q[self.uo.id], 0*ur.kg/ur.year)
self.assertEqual(q[self.do.id], 0*ur.kg/ur.year)
self.assertEqual(q[self.se.id], 0*ur.kg/ur.year)
def test_variable_internal_flux_2Darray(self):
A = self.system.get_variable_internal_flux_2Darray(
self.po4, 0*ur.second)
for i in range(self.system.N_boxes):
self.assertEqual(A[i,i], 0 * ur.kg / ur.year)
self.assertEqual(A[self.do.id, self.la.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.do.id, self.se.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.do.id, self.uo.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.la.id, self.do.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.la.id, self.se.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.la.id, self.uo.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.se.id, self.do.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.se.id, self.la.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.se.id, self.uo.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.uo.id, self.do.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.uo.id, self.la.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.uo.id, self.se.id], 0 * ur.kg / ur.year)
A = self.system.get_variable_internal_flux_2Darray(
self.no3, 0*ur.second)
for i in range(self.system.N_boxes):
self.assertEqual(A[i,i], 0 * ur.kg / ur.year)
self.assertEqual(A[self.do.id, self.la.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.do.id, self.se.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.do.id, self.uo.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.la.id, self.do.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.la.id, self.se.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.la.id, self.uo.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.se.id, self.do.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.se.id, self.la.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.se.id, self.uo.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.uo.id, self.do.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.uo.id, self.la.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.uo.id, self.se.id], 0 * ur.kg / ur.year)
A = self.system.get_variable_internal_flux_2Darray(
self.phyto, 0*ur.second)
for i in range(self.system.N_boxes):
self.assertEqual(A[i,i], 0 * ur.kg / ur.year)
self.assertEqual(A[self.do.id, self.la.id], 0 * ur.kg / ur.year)
self.assertPintQuantityAlmostEqual(A[self.do.id, self.se.id],
self.uo.variables.phyto.mass * 0.01 / ur.year)
self.assertEqual(A[self.do.id, self.uo.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.la.id, self.do.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.la.id, self.se.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.la.id, self.uo.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.se.id, self.do.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.se.id, self.la.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.se.id, self.uo.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.uo.id, self.do.id],
self.uo.variables.phyto.mass * 0.1 / ur.year)
self.assertEqual(A[self.uo.id, self.la.id], 0 * ur.kg / ur.year)
self.assertEqual(A[self.uo.id, self.se.id], 0 * ur.kg / ur.year)
def test_variable_flux_sink_1Darray(self):
s = self.system.get_variable_flux_sink_1Darray(
self.po4, 0*ur.second)
self.assertEqual(s[self.la.id], 0 * ur.kg / ur.year)
self.assertEqual(s[self.uo.id], 0 * ur.kg / ur.year)
self.assertEqual(s[self.do.id], 0 * ur.kg / ur.year)
self.assertEqual(s[self.se.id], 0 * ur.kg / ur.year)
s = self.system.get_variable_flux_sink_1Darray(
self.no3, 0*ur.second)
self.assertEqual(s[self.la.id], 0 * ur.kg / ur.year)
self.assertEqual(s[self.uo.id], 0 * ur.kg / ur.year)
self.assertEqual(s[self.do.id], 0 * ur.kg / ur.year)
self.assertEqual(s[self.se.id], 0 * ur.kg / ur.year)
s = self.system.get_variable_flux_sink_1Darray(
self.phyto, 0*ur.second)
self.assertEqual(s[self.la.id], 0 * ur.kg / ur.year)
self.assertEqual(s[self.uo.id], 0 * ur.kg / ur.year)
self.assertEqual(s[self.do.id], 0 * ur.kg / ur.year)
self.assertEqual(s[self.se.id],
self.se.variables.phyto.mass * 0.1 / ur.year)
def test_variable_flux_source_1Darray(self):
q = self.system.get_variable_flux_source_1Darray(
self.po4, 0*ur.second)
self.assertEqual(q[self.la.id], 0 * ur.kg / ur.year)
self.assertEqual(q[self.uo.id], 1e5 * ur.kg / ur.year)
self.assertEqual(q[self.do.id], 0 * ur.kg / ur.year)
self.assertEqual(q[self.se.id], 0 * ur.kg / ur.year)
q = self.system.get_variable_flux_source_1Darray(
self.no3, 0*ur.second)
self.assertEqual(q[self.la.id], 0 * ur.kg / ur.year)
self.assertEqual(q[self.uo.id], 2e4 * ur.kg / ur.year)
self.assertEqual(q[self.do.id], 0 * ur.kg / ur.year)
self.assertEqual(q[self.se.id], 0 * ur.kg / ur.year)
q = self.system.get_variable_flux_source_1Darray(
self.phyto, 0*ur.second)
self.assertEqual(q[self.la.id], 0 * ur.kg / ur.year)
self.assertEqual(q[self.uo.id], 0 * ur.kg / ur.year)
self.assertEqual(q[self.do.id], 0 * ur.kg / ur.year)
self.assertEqual(q[self.se.id], 0 * ur.kg / ur.year)
def test_reaction_rate_cube(self):
C = self.system.get_reaction_rate_3Darray(
0*ur.second)
m_no3 = self.system.get_variable_mass_1Darray(self.no3)
m_phyto = self.system.get_variable_mass_1Darray(self.phyto)
rr_photosynthesis_la = 0.8 * m_no3[self.la.id] / (7.0 * ur.year)
rr_photosynthesis_uo = 0.8 * m_no3[self.uo.id] / (7.0 * ur.year)
rr_remineralization_la = 0.4 * m_phyto[self.la.id] / (114 * ur.year)
rr_remineralization_uo = 0.4 * m_phyto[self.uo.id] / (114 * ur.year)
rr_remineralization_do = 0.4 * m_phyto[self.do.id] / (114 * ur.year)
photo_id = 0
remin_id = 1
# Lake photosynthesis
self.assertPintQuantityAlmostEqual(C[self.la.id, self.po4.id, photo_id],
-rr_photosynthesis_la * 1)
self.assertPintQuantityAlmostEqual(C[self.la.id, self.no3.id, photo_id],
-rr_photosynthesis_la * 7)
self.assertPintQuantityAlmostEqual(C[self.la.id, self.phyto.id, photo_id],
rr_photosynthesis_la * 114)
# Upper Ocean photosynthesis
self.assertPintQuantityAlmostEqual(C[self.uo.id, self.po4.id, photo_id],
-rr_photosynthesis_uo * 1)
self.assertPintQuantityAlmostEqual(C[self.uo.id, self.no3.id, photo_id],
-rr_photosynthesis_uo * 7)
self.assertPintQuantityAlmostEqual(C[self.uo.id, self.phyto.id, photo_id],
rr_photosynthesis_uo * 114)
# Lake remineralization
self.assertPintQuantityAlmostEqual(C[self.la.id, self.po4.id, remin_id],
rr_remineralization_la * 1)
self.assertPintQuantityAlmostEqual(C[self.la.id, self.no3.id, remin_id],
rr_remineralization_la * 7)
self.assertPintQuantityAlmostEqual(C[self.la.id, self.phyto.id, remin_id],
-rr_remineralization_la * 114)
# Upper Ocean remineralization
self.assertPintQuantityAlmostEqual(C[self.uo.id, self.po4.id, remin_id],
rr_remineralization_uo * 1)
self.assertPintQuantityAlmostEqual(C[self.uo.id, self.no3.id, remin_id],
rr_remineralization_uo * 7)
self.assertPintQuantityAlmostEqual(C[self.uo.id, self.phyto.id, remin_id],
-rr_remineralization_uo * 114)
self.assertPintQuantityAlmostEqual(C[self.do.id, self.po4.id, remin_id],
rr_remineralization_do * 1)
self.assertPintQuantityAlmostEqual(C[self.do.id, self.no3.id, remin_id],
rr_remineralization_do * 7)
self.assertPintQuantityAlmostEqual(C[self.do.id, self.phyto.id, remin_id],
-rr_remineralization_do * 114)
# The rest of the reaction rate cube must be zero
self.assertEqual(C[self.do.id, self.po4.id, photo_id], 0 * ur.kg / ur.year)
self.assertEqual(C[self.do.id, self.no3.id, photo_id], 0 * ur.kg / ur.year)
self.assertEqual(C[self.do.id, self.phyto.id, photo_id], 0 * ur.kg / ur.year)
self.assertEqual(C[self.se.id, self.po4.id, photo_id], 0 * ur.kg / ur.year)
self.assertEqual(C[self.se.id, self.no3.id, photo_id], 0 * ur.kg / ur.year)
self.assertEqual(C[self.se.id, self.phyto.id, photo_id], 0 * ur.kg / ur.year)
self.assertEqual(C[self.se.id, self.po4.id, remin_id], 0 * ur.kg / ur.year)
self.assertEqual(C[self.se.id, self.no3.id, remin_id], 0 * ur.kg / ur.year)
self.assertEqual(C[self.se.id, self.phyto.id, remin_id], 0 * ur.kg / ur.year)
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python
# Copyright (c) 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script that uploads the specified Skia Gerrit change to Android.
This script does the following:
* Downloads the repo tool.
* Inits and checks out the bare-minimum required Android checkout.
* Sets the required git config options in external/skia.
* Cherry-picks the specified Skia patch.
* Modifies the change subject to append a "Test:" line required for presubmits.
* Uploads the Skia change to Android's Gerrit instance.
After the change is uploaded to Android, developers can trigger TH and download
binaries (if required) after runs complete.
The script re-uses the workdir when it is run again. To start from a clean slate
delete the workdir.
Timings:
* ~1m15s when using an empty/non-existent workdir for the first time.
* ~15s when using a workdir previously populated by the script.
Example usage:
$ python upload_to_android.py -w /repos/testing -c 44200
"""
import argparse
import getpass
import json
import os
import subprocess
import stat
import urllib2
REPO_TOOL_URL = 'https://storage.googleapis.com/git-repo-downloads/repo'
SKIA_PATH_IN_ANDROID = os.path.join('external', 'skia')
ANDROID_REPO_URL = 'https://googleplex-android.googlesource.com'
REPO_BRANCH_NAME = 'experiment'
SKIA_GERRIT_INSTANCE = 'https://skia-review.googlesource.com'
SK_USER_CONFIG_PATH = os.path.join('include', 'config', 'SkUserConfig.h')
def get_change_details(change_num):
response = urllib2.urlopen('%s/changes/%s/detail?o=ALL_REVISIONS' % (
SKIA_GERRIT_INSTANCE, change_num), timeout=5)
content = response.read()
# Remove the first line which contains ")]}'\n".
return json.loads(content[5:])
def init_work_dir(work_dir):
if not os.path.isdir(work_dir):
print 'Creating %s' % work_dir
os.makedirs(work_dir)
# Ensure the repo tool exists in the work_dir.
repo_dir = os.path.join(work_dir, 'bin')
repo_binary = os.path.join(repo_dir, 'repo')
if not os.path.isdir(repo_dir):
print 'Creating %s' % repo_dir
os.makedirs(repo_dir)
if not os.path.exists(repo_binary):
print 'Downloading %s from %s' % (repo_binary, REPO_TOOL_URL)
response = urllib2.urlopen(REPO_TOOL_URL, timeout=5)
content = response.read()
with open(repo_binary, 'w') as f:
f.write(content)
# Set executable bit.
st = os.stat(repo_binary)
os.chmod(repo_binary, st.st_mode | stat.S_IEXEC)
# Create android-repo directory in the work_dir.
android_dir = os.path.join(work_dir, 'android-repo')
if not os.path.isdir(android_dir):
print 'Creating %s' % android_dir
os.makedirs(android_dir)
print """
About to run repo init. If it hangs asking you to run glogin then please:
* Exit the script (ctrl-c).
* Run 'glogin'.
* Re-run the script.
"""
os.chdir(android_dir)
subprocess.check_call(
'%s init -u %s/a/platform/manifest -g "all,-notdefault,-darwin" '
'-b master --depth=1'
% (repo_binary, ANDROID_REPO_URL), shell=True)
print 'Syncing the Android checkout at %s' % android_dir
subprocess.check_call('%s sync %s tools/repohooks -j 32 -c' % (
repo_binary, SKIA_PATH_IN_ANDROID), shell=True)
# Set the necessary git config options.
os.chdir(SKIA_PATH_IN_ANDROID)
subprocess.check_call(
'git config remote.goog.review %s/' % ANDROID_REPO_URL, shell=True)
subprocess.check_call(
'git config review.%s/.autoupload true' % ANDROID_REPO_URL, shell=True)
subprocess.check_call(
'git config user.email %s@google.com' % getpass.getuser(), shell=True)
return repo_binary
class Modifier:
def modify(self):
raise NotImplementedError
def get_user_msg(self):
raise NotImplementedError
class FetchModifier(Modifier):
def __init__(self, change_num, debug):
self.change_num = change_num
self.debug = debug
def modify(self):
# Download and cherry-pick the patch.
change_details = get_change_details(self.change_num)
latest_patchset = len(change_details['revisions'])
mod = int(self.change_num) % 100
download_ref = 'refs/changes/%s/%s/%s' % (
str(mod).zfill(2), self.change_num, latest_patchset)
subprocess.check_call(
'git fetch https://skia.googlesource.com/skia %s' % download_ref,
shell=True)
subprocess.check_call('git cherry-pick FETCH_HEAD', shell=True)
if self.debug:
# Add SK_DEBUG to SkUserConfig.h.
with open(SK_USER_CONFIG_PATH, 'a') as f:
f.write('#ifndef SK_DEBUG\n')
f.write('#define SK_DEBUG\n')
f.write('#endif//SK_DEBUG\n')
subprocess.check_call('git add %s' % SK_USER_CONFIG_PATH, shell=True)
# Amend the commit message to add a prefix that makes it clear that the
# change should not be submitted and a "Test:" line which is required by
# Android presubmit checks.
original_commit_message = change_details['subject']
new_commit_message = (
# Intentionally breaking up the below string because some presubmits
# complain about it.
'[DO ' + 'NOT ' + 'SUBMIT] %s\n\n'
'Test: Presubmit checks will test this change.' % (
original_commit_message))
subprocess.check_call('git commit --amend -m "%s"' % new_commit_message,
shell=True)
def get_user_msg(self):
return """
Open the above URL and trigger TH by checking 'Presubmit-Ready'.
You can download binaries (if required) from the TH link after it completes.
"""
# Add a legacy flag if it doesn't exist, or remove it if it exists.
class AndroidLegacyFlagModifier(Modifier):
def __init__(self, flag):
self.flag = flag
self.verb = "Unknown"
def modify(self):
flag_line = " #define %s\n" % self.flag
config_file = os.path.join('include', 'config', 'SkUserConfigManual.h')
with open(config_file) as f:
lines = f.readlines()
if flag_line not in lines:
lines.insert(
lines.index("#endif // SkUserConfigManual_DEFINED\n"), flag_line)
verb = "Add"
else:
lines.remove(flag_line)
verb = "Remove"
with open(config_file, 'w') as f:
for line in lines:
f.write(line)
subprocess.check_call('git add %s' % config_file, shell=True)
message = '%s %s\n\nTest: Presubmit checks will test this change.' % (
verb, self.flag)
subprocess.check_call('git commit -m "%s"' % message, shell=True)
def get_user_msg(self):
return """
Please open the above URL to review and land the change.
"""
def upload_to_android(work_dir, modifier):
repo_binary = init_work_dir(work_dir)
# Create repo branch.
subprocess.check_call('%s start %s .' % (repo_binary, REPO_BRANCH_NAME),
shell=True)
try:
modifier.modify()
# Upload to Android Gerrit.
subprocess.check_call('%s upload --verify' % repo_binary, shell=True)
print modifier.get_user_msg()
finally:
# Abandon repo branch.
subprocess.call('%s abandon %s' % (repo_binary, REPO_BRANCH_NAME),
shell=True)
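# A minimal, hypothetical usage sketch (not part of the original tool): the
# same upload flow can be reused with AndroidLegacyFlagModifier to add or
# remove a legacy define instead of cherry-picking a Gerrit change. The
# workdir path and flag name below are illustrative only.
#
#   upload_to_android('/repos/testing',
#                     AndroidLegacyFlagModifier('SK_SUPPORT_LEGACY_SOMETHING'))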
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--work-dir', '-w', required=True,
help='Directory where an Android checkout will be created (if it does '
'not already exist). Note: ~1GB space will be used.')
parser.add_argument(
'--change-num', '-c', required=True,
help='The skia-rev Gerrit change number that should be patched into '
'Android.')
parser.add_argument(
'--debug', '-d', action='store_true', default=False,
help='Adds SK_DEBUG to SkUserConfig.h.')
args = parser.parse_args()
upload_to_android(args.work_dir, FetchModifier(args.change_num, args.debug))
if __name__ == '__main__':
main()
|
|
import copy
from typing import List
import dataproperty
import typepy
from mbstrdecoder import MultiByteStrDecoder
from ...error import EmptyTableDataError
from ...style import ReStructuredTextStyler, StylerInterface
from .._table_writer import AbstractTableWriter
from ._text_writer import IndentationTextTableWriter
class RstTableWriter(IndentationTextTableWriter):
"""
A base class for reStructuredText table writers.
"""
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
self.char_header_row_separator = "="
self.char_cross_point = "+"
self.char_left_cross_point = "+"
self.char_right_cross_point = "+"
self.char_top_left_cross_point = "+"
self.char_top_right_cross_point = "+"
self.char_bottom_left_cross_point = "+"
self.char_bottom_right_cross_point = "+"
self.char_header_row_cross_point = "+"
self.char_header_row_left_cross_point = "+"
self.char_header_row_right_cross_point = "+"
self.char_opening_row_cross_point = "+"
self.char_closing_row_cross_point = "+"
self.indent_string = kwargs.get("indent_string", " ")
self.is_write_header_separator_row = True
self.is_write_value_separator_row = True
self.is_write_opening_row = True
self.is_write_closing_row = True
self._quoting_flags = copy.deepcopy(dataproperty.NOT_QUOTING_FLAGS)
self._init_cross_point_maps()
def write_table(self, **kwargs) -> None:
with self._logger:
self._write_line(self._get_table_directive())
try:
self._verify_property()
except EmptyTableDataError:
self._logger.logger.debug("no tabular data found")
return
self._write_table(**kwargs)
if self.is_write_null_line_after_table:
self.write_null_line()
def _get_table_directive(self) -> str:
if typepy.is_null_string(self.table_name):
return ".. table::\n"
return f".. table:: {MultiByteStrDecoder(self.table_name).unicode_str}\n"
def _write_table(self, **kwargs) -> None:
self.inc_indent_level()
super()._write_table(**kwargs)
self.dec_indent_level()
def _create_styler(self, writer: AbstractTableWriter) -> StylerInterface:
return ReStructuredTextStyler(writer)
class RstCsvTableWriter(RstTableWriter):
"""
A table writer class for reStructuredText
`CSV table <http://docutils.sourceforge.net/docs/ref/rst/directives.html#id4>`__
format.
:Example:
:ref:`example-rst-csv-table-writer`
"""
FORMAT_NAME = "rst_csv_table"
@property
def format_name(self) -> str:
return self.FORMAT_NAME
@property
def support_split_write(self) -> bool:
return True
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
self.column_delimiter = ", "
self.char_cross_point = ""
self.is_padding = False
self.is_write_header_separator_row = False
self.is_write_value_separator_row = False
self.is_write_closing_row = False
self._quoting_flags[typepy.Typecode.STRING] = True
def write_table(self, **kwargs) -> None:
"""
|write_table| with reStructuredText CSV table format.
:Example:
:ref:`example-rst-csv-table-writer`
.. note::
- |None| values are written as an empty string
"""
IndentationTextTableWriter.write_table(self, **kwargs)
def _get_opening_row_items(self) -> List[str]:
directive = ".. csv-table::"
if typepy.is_null_string(self.table_name):
return [directive]
return [f"{directive} {MultiByteStrDecoder(self.table_name).unicode_str}"]
def _write_opening_row(self) -> None:
self.dec_indent_level()
super()._write_opening_row()
self.inc_indent_level()
def _write_header(self) -> None:
if not self.is_write_header:
return
if typepy.is_not_empty_sequence(self.headers):
self._write_line(
':header: "{:s}"'.format(
'", "'.join(MultiByteStrDecoder(header).unicode_str for header in self.headers)
)
)
self._write_line(
":widths: " + ", ".join(str(col_dp.ascii_char_width) for col_dp in self._column_dp_list)
)
self._write_line()
def _get_value_row_separator_items(self) -> List[str]:
return []
def _get_closing_row_items(self) -> List[str]:
return []
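# Illustrative output shape of RstCsvTableWriter (table name, headers and
# values are assumed for this sketch; the :widths: values come from the
# measured column widths):
#
#   .. csv-table:: example
#       :header: "a", "b"
#       :widths: 1, 1
#
#       1, 2
#       3, 4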
class RstGridTableWriter(RstTableWriter):
"""
A table writer class for reStructuredText
`Grid Tables <http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html#grid-tables>`__
format.
:Example:
:ref:`example-rst-grid-table-writer`
.. py:method:: write_table
|write_table| with reStructuredText grid tables format.
:Example:
:ref:`example-rst-grid-table-writer`
.. note::
- |None| values are written as an empty string
"""
FORMAT_NAME = "rst_grid_table"
@property
def format_name(self) -> str:
return self.FORMAT_NAME
@property
def support_split_write(self) -> bool:
return False
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
self.char_left_side_row = "|"
self.char_right_side_row = "|"
class RstSimpleTableWriter(RstTableWriter):
"""
A table writer class for reStructuredText
`Simple Tables
<http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html#simple-tables>`__
format.
:Example:
:ref:`example-rst-simple-table-writer`
.. py:method:: write_table
|write_table| with reStructuredText simple tables format.
:Example:
:ref:`example-rst-simple-table-writer`
.. note::
- |None| values are written as an empty string
"""
FORMAT_NAME = "rst_simple_table"
@property
def format_name(self) -> str:
return self.FORMAT_NAME
@property
def support_split_write(self) -> bool:
return False
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
self.column_delimiter = " "
self.char_cross_point = " "
self.char_opening_row_cross_point = " "
self.char_closing_row_cross_point = " "
self.char_header_row_cross_point = " "
self.char_header_row_left_cross_point = " "
self.char_header_row_right_cross_point = " "
self.char_opening_row = "="
self.char_closing_row = "="
self.is_write_value_separator_row = False
self._init_cross_point_maps()
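# Minimal usage sketch (not part of the library source): all of the writers in
# this module share the interface inherited from AbstractTableWriter. The
# attribute values are illustrative, and `headers`/`value_matrix` are assumed
# from the parent writer API used elsewhere in this module.
#
#   writer = RstGridTableWriter()
#   writer.table_name = "example"
#   writer.headers = ["a", "b"]
#   writer.value_matrix = [[1, 2], [3, 4]]
#   writer.write_table()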
|
|
from direct.gui.DirectGui import *
from panda3d.core import *
from panda3d.direct import *
from panda3d.core import *
from panda3d.direct import *
from direct.interval.IntervalGlobal import *
from toontown.toonbase.ToontownGlobals import *
from toontown.toonbase import ToontownTimer
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import TTLocalizer
class DistributedTarget(DistributedObject.DistributedObject):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedTarget')
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
self.geom = None
self.numConsecutiveHits = 0
self.enabled = 0
self.score = 0
self.hitTime = 0
self.targetBounceTrack = None
self.pinballInfo = {}
self.pinballHiScore = 0
self.pinballHiScorer = ''
self.onscreenMessage = None
self.fadeTrack = None
return
def disable(self):
self.ignoreAll()
DistributedObject.DistributedObject.disable(self)
if self.targetBounceTrack:
self.targetBounceTrack.finish()
self.targetBounceTrack = None
if self.fadeTrack:
self.fadeTrack.pause()
self.fadeTrack = None
self.__clearOnscreenMessage()
return
def generateInit(self):
DistributedObject.DistributedObject.generateInit(self)
self.load()
def load(self):
self.timer = ToontownTimer.ToontownTimer()
self.timer.setPos(1.1, 0, -0.15)
self.timer.hide()
self.geom = loader.loadModel('phase_5.5/models/estate/target')
self.geom.reparentTo(base.cr.playGame.hood.loader.geom)
self.geom.setPos(0, 0, 40)
self.geom.setScale(3)
self.geom.stash()
self.hitSound = base.loadSfx('phase_4/audio/sfx/MG_Tag_A.ogg')
self.rewardSound = base.loadSfx('phase_4/audio/sfx/MG_pos_buzzer.ogg')
self.scoreText = TextNode('scoreText')
self.scoreText.setTextColor(1, 0, 0, 1)
self.scoreText.setAlign(self.scoreText.ACenter)
self.scoreText.setFont(getSignFont())
self.scoreText.setText('0')
self.scoreNode = self.timer.attachNewNode(self.scoreText)
self.scoreNode.setPos(0, 0, 0.35)
self.scoreNode.setScale(0.25)
self.curPinballScoreText = TextNode('pinballScoreText')
self.curPinballScoreText.setTextColor(1, 0, 0, 1)
self.curPinballScoreText.setAlign(self.scoreText.ACenter)
self.curPinballScoreText.setFont(getSignFont())
self.curPinballScoreText.setText('')
self.curPinballScoreNode = render.attachNewNode(self.curPinballScoreText)
self.curPinballScoreNode.setPos(0.5, 0.5, 0.3)
self.curPinballScoreNode.setScale(0.25)
colSphere = CollisionSphere(0, 0, 0, 3.5)
colSphere.setTangible(0)
colNode = CollisionNode('targetSphere')
colNode.addSolid(colSphere)
colSphereNode = self.geom.attachNewNode(colNode)
self.accept('hitTarget', self.handleHitTarget)
self.accept('missedTarget', self.handleMissedTarget)
self.accept('entertargetSphere', self.handleEnterTarget)
def delete(self):
self.ignoreAll()
self.scoreNode.removeNode()
del self.scoreNode
self.curPinballScoreNode.removeNode()
del self.curPinballScoreNode
self.geom.removeNode()
del self.geom
self.timer.destroy()
del self.timer
del self.rewardSound
del self.hitSound
DistributedObject.DistributedObject.delete(self)
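# setState drives the target's lifecycle: when `enabled` turns on, the geom is
# unstashed and faded in while start/instruction messages are shown; when it
# turns off, the geom is faded out, stashed, and the timer hidden. Score and
# time changes are forwarded to setLevel/setTimer below.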
def setState(self, enabled, score, time):
if self.enabled != enabled:
if self.fadeTrack:
self.fadeTrack.pause()
if enabled:
self.fadeTrack = Sequence(Func(base.localAvatar.setSystemMessage, 0, TTLocalizer.EstateTargetGameStart), Func(self.geom.unstash), self.geom.colorScaleInterval(1.0, Vec4(1.0, 1.0, 1.0, 1.0)), Wait(1), Func(base.localAvatar.setSystemMessage, 0, TTLocalizer.EstateTargetGameInst))
else:
self.fadeTrack = Sequence(self.geom.colorScaleInterval(1.0, Vec4(1.0, 1.0, 1.0, 0.0)), Func(self.geom.stash), Func(self.hideTimer), Func(base.localAvatar.setSystemMessage, 0, TTLocalizer.EstateTargetGameEnd))
self.fadeTrack.start()
self.enabled = enabled
if score != self.score:
self.setLevel(score)
if time != self.hitTime:
self.setTimer(time)
def setReward(self, reward):
base.playSfx(self.rewardSound)
def handleEnterTarget(self, collEntry):
self.handleHitTarget()
def handleHitTarget(self, avId = None, vel = None):
if not avId:
avId = base.localAvatar.doId
if self.enabled:
self.sendUpdate('setResult', [avId])
if vel:
if self.targetBounceTrack:
self.targetBounceTrack.finish()
pos = self.geom.getPos()
dist = Vec3(vel)
dist.normalize()
newPos = pos - dist * 1.5
springPos = pos + dist
self.notify.debug('reaction distance = %s,%s,%s' % (vel[0], vel[1], vel[2]))
self.targetBounceTrack = Sequence(LerpPosInterval(self.geom, duration=0.1, pos=newPos, blendType='easeOut'), LerpPosInterval(self.geom, duration=0.25, pos=springPos, blendType='easeOut'), LerpPosInterval(self.geom, duration=0.2, pos=pos, blendType='easeOut'))
self.targetBounceTrack.start()
def handleMissedTarget(self):
if self.enabled:
self.sendUpdate('setResult', [0])
def handleHitCloud(self):
if self.enabled:
self.sendUpdate('setBonus', [0.5])
def setLevel(self, level):
self.notify.debug('setLevel(%s)' % level)
self.score = level
base.playSfx(self.hitSound)
self.scoreText.setText('+' + str(int(self.score)))
def setTimer(self, time):
self.hitTime = time
self.notify.debug('updateTimer(%s)' % self.enabled)
if self.enabled:
self.showTimer()
self.notify.debug('hitTime = %s' % self.hitTime)
self.timer.setTime(self.hitTime)
self.timer.countdown(self.hitTime)
def showTimer(self):
if base.localAvatar.animFSM.getCurrentState().getName() != 'ReadBook':
base.setCellsAvailable([base.rightCells[0]], 0)
self.timer.show()
def hideTimer(self):
self.timer.hide()
base.setCellsAvailable([base.rightCells[0]], 1)
def setPosition(self, x, y, z):
self.geom.setPos(x, y, z)
def showScore(self):
scoreName = self.pinballHiScorer[0:12]
if scoreName:
if len(self.pinballHiScorer) > 12:
scoreName += TTLocalizer.PinballHiScoreAbbrev
titleText = TTLocalizer.PinballHiScore % scoreName
scoreText = TTLocalizer.PinballScoreHolder % self.pinballHiScore
pinballEntry = self.pinballInfo.get(base.localAvatar.doId)
if pinballEntry:
titleText += TTLocalizer.PinballYourBestScore
scoreText += TTLocalizer.PinballScoreHolder % pinballEntry[0]
titleText += TTLocalizer.PinballScore % (pinballEntry[1], pinballEntry[2])
scoreText += TTLocalizer.PinballScoreHolder % (pinballEntry[1] * pinballEntry[2])
self.__showOnscreenMessage(titleText, scoreText)
def setCurPinballScore(self, avId, score, multiplier):
self.notify.debug('setCurPinballScore %d %d %d' % (avId, score, multiplier))
if self.pinballInfo.get(avId) == None:
self.pinballInfo[avId] = [0, 0, 0]
pinballEntry = self.pinballInfo[avId]
pinballEntry[1] = score
pinballEntry[2] = multiplier
curScore = score * multiplier
if curScore > pinballEntry[0]:
pinballEntry[0] = curScore
if curScore > self.pinballHiScore:
self.pinballHiScore = pinballEntry[0]
toon = base.cr.doId2do.get(avId)
if toon:
self.pinballHiScorer = toon.getName()
self.showScore()
return
def b_setCurPinballScore(self, avId, score, multiplier):
self.setCurPinballScore(avId, score, multiplier)
self.sendUpdate('setCurPinballScore', [avId, score, multiplier])
def __showOnscreenMessage(self, titleText, scoreText):
self.notify.debug('----- __showOnscreenmessage')
if not self.onscreenMessage:
self.onscreenMessage = DirectFrame(relief=None, geom=DGG.getDefaultDialogGeom(), geom_color=GlobalDialogColor, geom_scale=(12, 1, 3), pos=(0, 0, 0.8), scale=0.1)
titles = DirectLabel(parent=self.onscreenMessage, relief=None, text=titleText, text_fg=VBase4(0, 0, 0, 1), text_align=TextNode.ALeft, text_scale=0.7, pos=(-5.75, 0, 0.5))
scores = DirectLabel(parent=self.onscreenMessage, relief=None, text=scoreText, text_fg=VBase4(1, 0, 0, 1), text_align=TextNode.ARight, text_scale=0.7, pos=(5.75, 0, 0.5))
self.onscreenMessage.titles = titles
self.onscreenMessage.scores = scores
else:
self.onscreenMessage.titles['text'] = titleText
self.onscreenMessage.scores['text'] = scoreText
base.foobar = self.onscreenMessage
return
def __clearOnscreenMessage(self):
self.notify.debug('----- __clearOnscreenMessage')
if self.onscreenMessage:
self.onscreenMessage.destroy()
self.onscreenMessage = None
return
def setPinballHiScore(self, score):
self.pinballHiScore = score
self.showScore()
def setPinballHiScorer(self, name):
self.pinballHiScorer = name
self.showScore()
def hideGui(self):
if self.timer:
self.hideTimer()
if self.onscreenMessage:
self.onscreenMessage.hide()
def showGui(self):
if self.timer:
if self.enabled:
self.showTimer()
if self.onscreenMessage:
self.onscreenMessage.show()
|
|
#!/usr/bin/env python
from __future__ import print_function
class FreelanCFGserver(object):
""""""
def __init__(self):
self.defaults = { 'enabled': False,
'listen_on': '0.0.0.0:443',
'protocol': 'https',
'server_certificate_file': None,
'server_private_key_file': None,
'certification_authority_certificate_file': None,
'certification_authority_private_key_file': None,
'authentication_script': None }
self.enabled=False
self.listen_on= '0.0.0.0:443'
self.protocol='https'
self.server_certificate_file=None
self.server_private_key_file=None
self.certification_authority_certificate_file=None
self.certification_authority_private_key_file=None
self.authentication_script=None
class FreelanCFGclient(object):
""""""
def __init__(self):
self.defaults = { 'enabled': False,
'server_endpoint': '127.0.0.1:443',
'protocol': 'https',
'disable_peer_verification': False,
'disable_host_verification': False,
'username': None,
'password': None,
'public_endpoint': '0.0.0.0' }
self.enabled=False
self.server_endpoint= '127.0.0.1:443'
self.protocol='https'
self.disable_peer_verification=False
self.disable_host_verification=False
self.username=None
self.password=None
self.public_endpoint='0.0.0.0'
class FreelanCFGfscp(object):
""""""
def __init__(self):
self.defaults = { 'hostname_resolution_protocol': 'ipv4',
'listen_on': '0.0.0.0:12000',
'listen_on_device': None,
'hello_timeout': '3000',
'contact': None,
'accept_contact_requests': True,
'accept_contacts': True,
'dynamic_contact_file': None,
'never_contact': None,
'cipher_capability': ['ecdhe_rsa_aes256_gcm_sha384', 'ecdhe_rsa_aes128_gcm_sha256'],
'elliptic_curve_capability': ['sect571k1', 'secp384r1'] }
self.hostname_resolution_protocol='ipv4'
self.listen_on='0.0.0.0:12000'
self.listen_on_device=None
self.hello_timeout='3000'
self.contact=None
self.accept_contact_requests=True
self.accept_contacts=True
self.dynamic_contact_file=None
self.never_contact=None
self.cipher_capability=['ecdhe_rsa_aes256_gcm_sha384', 'ecdhe_rsa_aes128_gcm_sha256']
self.elliptic_curve_capability=['sect571k1', 'secp384r1']
class FreelanCFGtap(object):
""""""
def __init__(self):
self.defaults = { 'type': 'tap',
'enabled': True,
'name': None,
'mtu': 'auto',
'mss_override': 'auto',
'metric': 'auto',
'ipv4_address_prefix_length': '9.0.0.1/24',
'ipv6_address_prefix_length': '2aa1::1/8',
'remote_ipv4_address': '9.0.0.0',
'arp_proxy_enabled': False,
'arp_proxy_fake_ethernet_address': '00:aa:bb:cc:dd:ee',
'dhcp_proxy_enabled': True,
'dhcp_server_ipv4_address_prefix_length': '9.0.0.0/24',
'dhcp_server_ipv6_address_prefix_length': '2aa1::/8',
'up_script': None,
'down_script': None }
self.type='tap'
self.enabled=True
self.name=None
self.mtu='auto'
self.mss_override='auto'
self.metric='auto'
self.ipv4_address_prefix_length='9.0.0.1/24'
self.ipv6_address_prefix_length='2aa1::1/8'
self.remote_ipv4_address='9.0.0.0'
self.arp_proxy_enabled=False
self.arp_proxy_fake_ethernet_address='00:aa:bb:cc:dd:ee'
self.dhcp_proxy_enabled=True
self.dhcp_server_ipv4_address_prefix_length='9.0.0.0/24'
self.dhcp_server_ipv6_address_prefix_length='2aa1::/8'
self.up_script=None
self.down_script=None
class FreelanCFGswitch(object):
""""""
def __init__(self):
self.defaults = { 'routing_method': 'switch',
'relay_mode_enabled': False }
self.routing_method='switch'
self.relay_mode_enabled=False
class FreelanCFGrouter(object):
""""""
def __init__(self):
self.defaults = { 'local_ip_route': None,
'local_dns_server': None,
'client_routing_enabled': True,
'accept_routes_requests': True,
'internal_route_acceptance_policy': 'unicast_in_network',
'system_route_acceptance_policy': None,
'maximum_routes_limit': '1',
'dns_servers_acceptance_policy': 'in_network',
'dns_script': None }
self.local_ip_route=None
self.local_dns_server=None
self.client_routing_enabled=True
self.accept_routes_requests=True
self.internal_route_acceptance_policy='unicast_in_network'
self.system_route_acceptance_policy=None
self.maximum_routes_limit='1'
self.dns_servers_acceptance_policy='in_network'
self.dns_script=None
class FreelanCFGsecurity(object):
""""""
def __init__(self):
self.defaults = { 'passphrase': None,
'passphrase_salt': 'freelan',
'passphrase_iterations_count': '2000',
'signature_certificate_file': None,
'signature_private_key_file': None,
'certificate_validation_method': 'default',
'certificate_validation_script': None,
'authority_certificate_file': None,
'certificate_revocation_validation_method': None,
'certificate_revocation_list_file': None }
self.passphrase=None
self.passphrase_salt='freelan'
self.passphrase_iterations_count='2000'
self.signature_certificate_file=None
self.signature_private_key_file=None
self.certificate_validation_method='default'
self.certificate_validation_script=None
self.authority_certificate_file=None
self.certificate_revocation_validation_method=None
self.certificate_revocation_list_file=None
class FreelanCFG(object):
"""holds freelan config info"""
def __init__(self):
self.server = FreelanCFGserver()
self.client = FreelanCFGclient()
self.fscp = FreelanCFGfscp()
self.tap_adapter = FreelanCFGtap()
self.switch = FreelanCFGswitch()
self.router = FreelanCFGrouter()
self.security = FreelanCFGsecurity()
def print(self, defaults=False):
cfg = self.build(defaults=defaults)
for cfg_line in cfg:
print(cfg_line)
def validate(self):
"""Validation of configuration to check for required values"""
if not self.server.enabled:
if self.security.signature_certificate_file is self.security.defaults['signature_certificate_file']:
print("ISSUE: If you are not configuring a server, you need to set 'signature_certificate_file'")
if self.security.signature_private_key_file is self.security.defaults['signature_private_key_file']:
print("ISSUE: If you are not configuring a server, you need to set 'signature_private_key_file'")
else:
if self.client.enabled:
print("ISSUE: Client and server enabled at the same time?")
if self.server.protocol is self.server.defaults['protocol']:
if self.server.server_certificate_file is self.server.defaults['server_certificate_file'] or \
self.server.server_private_key_file is self.server.defaults['server_private_key_file']:
print("ISSUE: 'server_certificate_file' and/or 'server_private_key_file' are not configured and will be auto-generated.")
if self.server.certification_authority_certificate_file is self.server.defaults['certification_authority_certificate_file'] or \
self.server.certification_authority_private_key_file is self.server.defaults['certification_authority_private_key_file']:
print("ISSUE: 'certification_authority_certificate_file' and/or 'certification_authority_private_key_file' are not configured and will be auto-generated - this is NOT recommended.")
if self.server.authentication_script is self.server.defaults['authentication_script']:
print("ISSUE: No 'authentication_script' has been provided and all authentication requests will be rejected!")
if self.client.enabled:
if self.client.server_endpoint is self.client.defaults['server_endpoint']:
print("ISSUE: You are running in client mode, but you are using a default server address.")
if not self.client.disable_peer_verification is self.client.defaults['disable_peer_verification'] or \
not self.client.disable_host_verification is self.client.defaults['disable_host_verification']:
print("ISSUE: Disabling peer/host verification is NOT recommended - AT ALL.")
if self.client.username is self.client.defaults['username'] or \
self.client.password is self.client.defaults['password']:
print("ISSUE: No username and/or password has been configured for a client.")
if self.fscp.contact is self.fscp.defaults['contact']:
if not self.server.enabled and not self.client.enabled:
print("ISSUE: You have not defined any contact points while you are neither running as server nor client.")
## hostname_resolution_protocol=ipv4/ipv6
## ipv4_address_prefix_length=9.0.0.1/24
## ipv6_address_prefix_length=2aa1::1/8
if self.security.authority_certificate_file is self.security.defaults['authority_certificate_file']:
print("ISSUE: You need to set 'authority_certificate_file'")
if self.tap_adapter.ipv4_address_prefix_length is self.tap_adapter.defaults['ipv4_address_prefix_length']:
print("ISSUE: You are using the default network address - make sure you set a different ip for every machine 'ipv4_address_prefix_length'")
def build(self, defaults=False):
cfg = []
cfg.append("[server]")
cfg_sec = self.build_section(self.server, defaults=defaults)
cfg.extend(cfg_sec)
cfg.append("[client]")
cfg_sec = self.build_section(self.client, defaults=defaults)
cfg.extend(cfg_sec)
cfg.append("[fscp]")
cfg_sec = self.build_section(self.fscp, defaults=defaults)
cfg.extend(cfg_sec)
cfg.append("[tap]")
cfg_sec = self.build_section(self.tap_adapter, defaults=defaults)
cfg.extend(cfg_sec)
cfg.append("[switch]")
cfg_sec = self.build_section(self.switch, defaults=defaults)
cfg.extend(cfg_sec)
cfg.append("[router]")
cfg_sec = self.build_section(self.router, defaults=defaults)
cfg.extend(cfg_sec)
# NOT recommended to do this!
#self.security.certificate_validation_method = None
cfg.append("[security]")
cfg_sec = self.build_section(self.security, defaults=defaults)
cfg.extend(cfg_sec)
return cfg
def build_section(self, section, defaults=False):
cfg = []
for k,default_v in section.defaults.iteritems():
self_kv = getattr(section, k)
#print ("Key: " + str(k) + " || Value: " + str(self_kv))
if self_kv is None:
if (not (self_kv is default_v) or defaults):
cfg.append(k+'=')
continue
if isinstance(self_kv, basestring) or isinstance(self_kv, bool) or self_kv is None:
self_kv = [self_kv]
if isinstance(default_v, basestring) or isinstance(default_v, bool) or default_v is None:
default_v = [default_v]
for kv in self_kv:
if (not kv in default_v) or defaults:
if isinstance(kv, bool):
if kv:
cfg.append(k+'='+'yes')
else:
cfg.append(k+'='+'no')
else:
cfg.append(k+'='+str(kv))
return cfg
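# Minimal usage sketch (not part of the original module): build a
# configuration, override a few defaults and print only the lines that differ
# from the defaults. All values below are illustrative.
if __name__ == '__main__':
    freelan_cfg = FreelanCFG()
    freelan_cfg.client.enabled = True
    freelan_cfg.client.server_endpoint = '192.0.2.10:443'
    freelan_cfg.security.authority_certificate_file = '/etc/freelan/ca.crt'
    # Report anything missing or questionable, then emit the config lines.
    freelan_cfg.validate()
    freelan_cfg.print()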
|
|
"""
Fenced Code Extension for Python Markdown
=========================================
This extension adds Fenced Code Blocks to Python-Markdown.
>>> import markdown
>>> text = '''
... A paragraph before a fenced code block:
...
... ~~~
... Fenced code block
... ~~~
... '''
>>> html = markdown.markdown(text, extensions=['fenced_code'])
>>> print(html)
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Works with safe_mode also (we check this because we are using the HtmlStash):
>>> print(markdown.markdown(text, extensions=['fenced_code'], safe_mode='replace'))
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Include tildes in a code block and wrap with blank lines:
>>> text = '''
... ~~~~~~~~
...
... ~~~~
... ~~~~~~~~'''
>>> print(markdown.markdown(text, extensions=['fenced_code']))
<pre><code>
~~~~
</code></pre>
Removes trailing whitespace from code blocks that cause horizontal scrolling
>>> import markdown
>>> text = '''
... A paragraph before a fenced code block:
...
... ~~~
... Fenced code block \t\t\t\t\t\t\t
... ~~~
... '''
>>> html = markdown.markdown(text, extensions=['fenced_code'])
>>> print(html)
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Language tags:
>>> text = '''
... ~~~~{.python}
... # Some python code
... ~~~~'''
>>> print(markdown.markdown(text, extensions=['fenced_code']))
<pre><code class="python"># Some python code
</code></pre>
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://packages.python.org/Markdown/extensions/fenced_code_blocks.html>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.4+](http://python.org)
* [Markdown 2.0+](http://packages.python.org/Markdown/)
* [Pygments (optional)](http://pygments.org)
"""
import re
from typing import Any, Dict, Iterable, List, Mapping, MutableSequence, Optional
import markdown
from django.utils.html import escape
from markdown.extensions.codehilite import CodeHilite, CodeHiliteExtension
from zerver.lib.exceptions import BugdownRenderingException
from zerver.lib.tex import render_tex
# Global vars
FENCE_RE = re.compile("""
# ~~~ or ```
(?P<fence>
^(?:~{3,}|`{3,})
)
[ ]* # spaces
(
\\{?\\.?
(?P<lang>
[a-zA-Z0-9_+-./#]*
) # "py" or "javascript"
\\}?
) # language, like ".py" or "{javascript}"
[ ]* # spaces
(
\\{?\\.?
(?P<header>
[^~`]*
)
\\}?
) # header for features that use fenced block header syntax (like spoilers)
$
""", re.VERBOSE)
CODE_WRAP = '<pre><code%s>%s\n</code></pre>'
LANG_TAG = ' class="%s"'
def validate_curl_content(lines: List[str]) -> None:
error_msg = """
Missing required -X argument in curl command:
{command}
""".strip()
for line in lines:
regex = r'curl [-](sS)?X "?(GET|DELETE|PATCH|POST)"?'
if line.startswith('curl'):
if re.search(regex, line) is None:
raise BugdownRenderingException(error_msg.format(command=line.strip()))
CODE_VALIDATORS = {
'curl': validate_curl_content,
}
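# When content validation is enabled, a block fenced as "curl" is rejected
# unless every curl command names an explicit method, e.g.
# `curl -X GET https://example.com` passes while a bare
# `curl https://example.com` raises BugdownRenderingException.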
class FencedCodeExtension(markdown.Extension):
def __init__(self, config: Mapping[str, Any] = {}) -> None:
self.config = {
'run_content_validators': [
config.get('run_content_validators', False),
'Boolean specifying whether to run content validation code in CodeHandler',
],
}
for key, value in config.items():
self.setConfig(key, value)
def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
""" Add FencedBlockPreprocessor to the Markdown instance. """
md.registerExtension(self)
processor = FencedBlockPreprocessor(
md, run_content_validators=self.config['run_content_validators'][0])
md.preprocessors.register(processor, 'fenced_code_block', 25)
class BaseHandler:
def handle_line(self, line: str) -> None:
raise NotImplementedError()
def done(self) -> None:
raise NotImplementedError()
def generic_handler(processor: Any, output: MutableSequence[str],
fence: str, lang: str, header: str,
run_content_validators: bool=False,
default_language: Optional[str]=None) -> BaseHandler:
lang = lang.lower()
if lang in ('quote', 'quoted'):
return QuoteHandler(processor, output, fence, default_language)
elif lang == 'math':
return TexHandler(processor, output, fence)
elif lang == 'spoiler':
return SpoilerHandler(processor, output, fence, header)
else:
return CodeHandler(processor, output, fence, lang, run_content_validators)
def check_for_new_fence(processor: Any, output: MutableSequence[str], line: str,
run_content_validators: bool=False,
default_language: Optional[str]=None) -> None:
m = FENCE_RE.match(line)
if m:
fence = m.group('fence')
lang = m.group('lang')
header = m.group('header')
if not lang and default_language:
lang = default_language
handler = generic_handler(processor, output, fence, lang, header,
run_content_validators, default_language)
processor.push(handler)
else:
output.append(line)
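# The preprocessor keeps a stack of handlers: OuterHandler processes top-level
# lines and, via check_for_new_fence(), pushes a nested handler (Code, Quote,
# Spoiler or Tex) when an opening fence is seen. Each nested handler pops
# itself off the stack once it reaches its own closing fence.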
class OuterHandler(BaseHandler):
def __init__(self, processor: Any, output: MutableSequence[str],
run_content_validators: bool=False,
default_language: Optional[str]=None) -> None:
self.output = output
self.processor = processor
self.run_content_validators = run_content_validators
self.default_language = default_language
def handle_line(self, line: str) -> None:
check_for_new_fence(self.processor, self.output, line,
self.run_content_validators, self.default_language)
def done(self) -> None:
self.processor.pop()
class CodeHandler(BaseHandler):
def __init__(self, processor: Any, output: MutableSequence[str],
fence: str, lang: str, run_content_validators: bool=False) -> None:
self.processor = processor
self.output = output
self.fence = fence
self.lang = lang
self.lines: List[str] = []
self.run_content_validators = run_content_validators
def handle_line(self, line: str) -> None:
if line.rstrip() == self.fence:
self.done()
else:
self.lines.append(line.rstrip())
def done(self) -> None:
text = '\n'.join(self.lines)
# run content validators (if any)
if self.run_content_validators:
validator = CODE_VALIDATORS.get(self.lang, lambda text: None)
validator(self.lines)
text = self.processor.format_code(self.lang, text)
text = self.processor.placeholder(text)
processed_lines = text.split('\n')
self.output.append('')
self.output.extend(processed_lines)
self.output.append('')
self.processor.pop()
class QuoteHandler(BaseHandler):
def __init__(self, processor: Any, output: MutableSequence[str],
fence: str, default_language: Optional[str]=None) -> None:
self.processor = processor
self.output = output
self.fence = fence
self.lines: List[str] = []
self.default_language = default_language
def handle_line(self, line: str) -> None:
if line.rstrip() == self.fence:
self.done()
else:
check_for_new_fence(self.processor, self.lines, line, default_language=self.default_language)
def done(self) -> None:
text = '\n'.join(self.lines)
text = self.processor.format_quote(text)
processed_lines = text.split('\n')
self.output.append('')
self.output.extend(processed_lines)
self.output.append('')
self.processor.pop()
class SpoilerHandler(BaseHandler):
def __init__(self, processor: Any, output: MutableSequence[str],
fence: str, spoiler_header: str) -> None:
self.processor = processor
self.output = output
self.fence = fence
self.spoiler_header = spoiler_header
self.lines: List[str] = []
def handle_line(self, line: str) -> None:
if line.rstrip() == self.fence:
self.done()
else:
check_for_new_fence(self.processor, self.lines, line)
def done(self) -> None:
if len(self.lines) == 0:
# No content, do nothing
return
else:
header = self.spoiler_header
text = '\n'.join(self.lines)
text = self.processor.format_spoiler(header, text)
processed_lines = text.split('\n')
self.output.append('')
self.output.extend(processed_lines)
self.output.append('')
self.processor.pop()
class TexHandler(BaseHandler):
def __init__(self, processor: Any, output: MutableSequence[str], fence: str) -> None:
self.processor = processor
self.output = output
self.fence = fence
self.lines: List[str] = []
def handle_line(self, line: str) -> None:
if line.rstrip() == self.fence:
self.done()
else:
self.lines.append(line)
def done(self) -> None:
text = '\n'.join(self.lines)
text = self.processor.format_tex(text)
text = self.processor.placeholder(text)
processed_lines = text.split('\n')
self.output.append('')
self.output.extend(processed_lines)
self.output.append('')
self.processor.pop()
class FencedBlockPreprocessor(markdown.preprocessors.Preprocessor):
def __init__(self, md: markdown.Markdown, run_content_validators: bool=False) -> None:
markdown.preprocessors.Preprocessor.__init__(self, md)
self.checked_for_codehilite = False
self.run_content_validators = run_content_validators
self.codehilite_conf: Dict[str, List[Any]] = {}
def push(self, handler: BaseHandler) -> None:
self.handlers.append(handler)
def pop(self) -> None:
self.handlers.pop()
def run(self, lines: Iterable[str]) -> List[str]:
""" Match and store Fenced Code Blocks in the HtmlStash. """
output: List[str] = []
processor = self
self.handlers: List[BaseHandler] = []
default_language = None
try:
default_language = self.md.zulip_realm.default_code_block_language
except AttributeError:
pass
handler = OuterHandler(processor, output, self.run_content_validators, default_language)
self.push(handler)
for line in lines:
self.handlers[-1].handle_line(line)
while self.handlers:
self.handlers[-1].done()
# This fiddly handling of new lines at the end of our output was done to make
# existing tests pass. Bugdown is just kind of funny when it comes to new lines,
# but we could probably remove this hack.
if len(output) > 2 and output[-2] != '':
output.append('')
return output
def format_code(self, lang: str, text: str) -> str:
if lang:
langclass = LANG_TAG % (lang,)
else:
langclass = ''
# Check for code hilite extension
if not self.checked_for_codehilite:
for ext in self.md.registeredExtensions:
if isinstance(ext, CodeHiliteExtension):
self.codehilite_conf = ext.config
break
self.checked_for_codehilite = True
# If config is not empty, then the codehilite extension
# is enabled, so we call it to highlight the code
if self.codehilite_conf:
highliter = CodeHilite(text,
linenums=self.codehilite_conf['linenums'][0],
guess_lang=self.codehilite_conf['guess_lang'][0],
css_class=self.codehilite_conf['css_class'][0],
style=self.codehilite_conf['pygments_style'][0],
use_pygments=self.codehilite_conf['use_pygments'][0],
lang=(lang or None),
noclasses=self.codehilite_conf['noclasses'][0])
code = highliter.hilite()
else:
code = CODE_WRAP % (langclass, self._escape(text))
return code
def format_quote(self, text: str) -> str:
paragraphs = text.split("\n\n")
quoted_paragraphs = []
for paragraph in paragraphs:
lines = paragraph.split("\n")
quoted_paragraphs.append("\n".join("> " + line for line in lines if line != ''))
return "\n\n".join(quoted_paragraphs)
def format_spoiler(self, header: str, text: str) -> str:
output = []
header_div_open_html = '<div class="spoiler-block"><div class="spoiler-header">'
end_header_start_content_html = '</div><div class="spoiler-content"' \
' aria-hidden="true">'
footer_html = '</div></div>'
output.append(self.placeholder(header_div_open_html))
output.append(header)
output.append(self.placeholder(end_header_start_content_html))
output.append(text)
output.append(self.placeholder(footer_html))
return "\n\n".join(output)
def format_tex(self, text: str) -> str:
paragraphs = text.split("\n\n")
tex_paragraphs = []
for paragraph in paragraphs:
html = render_tex(paragraph, is_inline=False)
if html is not None:
tex_paragraphs.append(html)
else:
tex_paragraphs.append('<span class="tex-error">' +
escape(paragraph) + '</span>')
return "\n\n".join(tex_paragraphs)
def placeholder(self, code: str) -> str:
return self.md.htmlStash.store(code)
def _escape(self, txt: str) -> str:
""" basic html escaping """
txt = txt.replace('&', '&amp;')
txt = txt.replace('<', '&lt;')
txt = txt.replace('>', '&gt;')
txt = txt.replace('"', '&quot;')
return txt
def makeExtension(*args: Any, **kwargs: Any) -> FencedCodeExtension:
return FencedCodeExtension(kwargs)
if __name__ == "__main__":
import doctest
doctest.testmod()
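# Illustrative usage sketch (an assumption, not part of the original file):
# python-markdown accepts Extension instances directly, so something like
#
#     md = markdown.Markdown(extensions=[makeExtension()])
#     html = md.convert("~~~ python\nprint('hi')\n~~~")
#
# would exercise FencedBlockPreprocessor, assuming FencedCodeExtension's
# extendMarkdown() registers the preprocessor the way the upstream
# fenced_code extension does.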
|
|
import unittest
from copy import deepcopy
from mock import Mock
import random
from cellardoor.model import Model, Entity, Link, InverseLink, Text, ListOf, Integer, Float, Enum
from cellardoor.api import API
from cellardoor.api.methods import ALL, LIST, GET, CREATE
from cellardoor.storage import Storage
from cellardoor import errors
from cellardoor.authorization import ObjectProxy
identity = ObjectProxy('identity')
item = ObjectProxy('item')
class CopyingMock(Mock):
def __call__(self, *args, **kwargs):
args = deepcopy(args)
kwargs = deepcopy(kwargs)
return super(CopyingMock, self).__call__(*args, **kwargs)
storage = Storage()
model = Model(storage=storage)
api = API(model)
class Foo(model.Entity):
stuff = Text(required=True)
optional_stuff = Text()
bars = InverseLink('Bar', 'foo')
bazes = ListOf(Link('Baz'))
embedded_bazes = ListOf(Link('Baz', embeddable=True))
embedded_foos = ListOf(Link('Foo', embeddable=True, embed_by_default=False, embedded_fields=('stuff',)))
secret = Text(hidden=True)
class Bar(model.Entity):
foo = Link(Foo)
embedded_foo = Link(Foo, embeddable=True)
bazes = ListOf(Link('Baz', ondelete=Link.CASCADE))
number = Integer()
name = Text()
class Baz(model.Entity):
name = Text(required=True)
foo = InverseLink(Foo, 'bazes', multiple=False)
embedded_foo = InverseLink(Foo, 'bazes', multiple=False, embeddable=True, embed_by_default=False)
class Hidden(model.Entity):
name = Text(hidden=True)
foo = Integer()
class Littorina(model.Entity):
size = Float()
class Shell(model.Entity):
color = Enum('Brown', 'Gray', 'Really brown')
class LittorinaLittorea(Littorina):
shell = Link('Shell', embeddable=True)
class Planet(model.Entity):
pass
class NullSingleTarget(model.Entity):
pass
class NullSingleReferrer(model.Entity):
target = Link(NullSingleTarget)
class NullMultiTarget(model.Entity):
pass
class NullMultiReferrer(model.Entity):
targets = ListOf(Link(NullMultiTarget))
class CascadeTarget(model.Entity):
pass
class CascadeReferrer(model.Entity):
target = Link(CascadeTarget, ondelete=Link.CASCADE)
class AnyFunctionAuthModel(model.Entity):
pass
class Foos(api.Interface):
entity = Foo
method_authorization = {
ALL: None
}
enabled_filters = ('stuff',)
enabled_sort = ('stuff',)
hidden_field_authorization = identity.role == 'admin'
class ReadOnlyFoos(api.Interface):
entity = Foo
singular_name = 'readonly_foo'
method_authorization = {
(LIST, GET): None
}
class Bars(api.Interface):
entity = Bar
method_authorization = {
ALL: None
}
enabled_filters = ('number',)
enabled_sort = ('number', 'name')
default_sort = ('+name',)
class Bazes(api.Interface):
entity = Baz
plural_name = 'bazes'
method_authorization = {
ALL: None
}
enabled_filters = ('name',)
enabled_sort = ('name',)
default_limit = 10
max_limit = 20
class Hiddens(api.Interface):
entity = Hidden
enabled_filters = ('name',)
enabled_sort = ('name',)
method_authorization = {
LIST: identity.exists(),
CREATE: identity.role == 'admin',
GET: item.foo == 23
}
hidden_field_authorization = identity.foo == 'bar'
class Littorinas(api.Interface):
entity = Littorina
method_authorization = {
ALL: None
}
class Shells(api.Interface):
entity = Shell
method_authorization = {
ALL: None
}
class Planets(api.Interface):
entity = Planet
method_authorization = {
LIST: item.foo == 23
}
class NullSingleTargets(api.Interface):
entity = NullSingleTarget
method_authorization = {
ALL: None
}
class NullSingleReferrers(api.Interface):
entity = NullSingleReferrer
method_authorization = {
ALL: None
}
class NullMultiTargets(api.Interface):
entity = NullMultiTarget
method_authorization = {
ALL: None
}
class NullMultiReferrers(api.Interface):
entity = NullMultiReferrer
method_authorization = {
ALL: None
}
class CascadeTargets(api.Interface):
entity = CascadeTarget
method_authorization = {
ALL: None
}
class CascadeReferrers(api.Interface):
entity = CascadeReferrer
method_authorization = {
ALL: None
}
auth_fn_get = Mock(return_value=False)
auth_fn_list = Mock(return_value=True)
class AnyFunctionAuthModels(api.Interface):
entity = AnyFunctionAuthModel
method_authorization = {
GET: auth_fn_get,
LIST: auth_fn_list
}
class InterfaceTest(unittest.TestCase):
def setUp(self):
storage = Storage()
model.storage = storage
for interface in api.interfaces.values():
interface.set_storage(storage)
def get_interface(self, name, storage=None):
if storage is None:
storage = Storage()
interface = api.interfaces[name]
interface.set_storage(storage)
return interface
def test_create_fail_validation(self):
"""
Fails if the request fields don't pass validation.
"""
with self.assertRaises(errors.CompoundValidationError):
api.interfaces['foos'].create({})
def test_create_succeed(self):
"""
Creates a new item in persistent storage if we pass validation.
"""
foos = self.get_interface('foos')
foos.storage.create = CopyingMock(return_value='123')
foo = foos.create({'stuff':'foo'})
foos.storage.create.assert_called_once_with(Foo, {'stuff':'foo'})
self.assertEquals(foo, {'_id':'123', 'stuff':'foo'})
def test_list(self):
"""
Returns a list of created items
"""
saved_foos = []
for i in range(0,3):
saved_foos.append(
{'stuff':'foo#%d' % i, '_id':i}
)
foos = self.get_interface('foos')
foos.storage.get = CopyingMock(return_value=saved_foos)
fetched_foos = foos.list()
foos.storage.get.assert_called_once_with(Foo, sort=(), filter=None, limit=0, offset=0, count=False)
self.assertEquals(fetched_foos, saved_foos)
def test_get(self):
"""
Can get a single item
"""
foos = self.get_interface('foos')
foo = {'_id':123, 'stuff':'foo'}
foos.storage.get_by_id = CopyingMock(return_value=foo)
fetched_foo = foos.get(foo['_id'])
foos.storage.get_by_id.assert_called_once_with(Foo, foo['_id'])
self.assertEquals(fetched_foo, foo)
def test_get_nonexistent(self):
"""
Trying to fetch a nonexistent item raises an error.
"""
foos = self.get_interface('foos')
foos.storage.get_by_id = Mock(return_value=None)
with self.assertRaises(errors.NotFoundError):
foos.get(123)
def test_update(self):
"""
Can update a subset of fields
"""
foos = self.get_interface('foos')
foo = {'_id':123, 'stuff':'baz'}
foos.storage.update = Mock(return_value=foo)
foos.storage.get_by_id = Mock(return_value=foo)
updated_foo = foos.update(123, {'stuff':'baz'})
foos.storage.update.assert_called_once_with(Foo, 123, {'stuff':'baz'}, replace=False)
self.assertEquals(updated_foo, foo)
def test_update_nonexistent(self):
"""
Trying to update a nonexistent item raises an error.
"""
foos = self.get_interface('foos')
foos.storage.update = Mock(return_value=None)
foos.storage.get_by_id = Mock(return_value={})
with self.assertRaises(errors.NotFoundError):
foos.update(123, {})
def test_replace(self):
"""
Can replace a whole existing item
"""
foos = self.get_interface('foos')
foo = {'_id':123, 'stuff':'baz'}
foos.storage.update = Mock(return_value=foo)
foos.storage.get_by_id = Mock(return_value={})
updated_foo = foos.replace(123, {'stuff':'baz'})
foos.storage.update.assert_called_once_with(Foo, 123, {'stuff':'baz'}, replace=True)
self.assertEquals(updated_foo, foo)
def test_replace_nonexistent(self):
"""
Trying to replace a nonexistent item raises an error.
"""
foos = self.get_interface('foos')
foos.storage.update = Mock(return_value=None)
foos.storage.get_by_id = Mock(return_value={})
with self.assertRaises(errors.NotFoundError):
foos.replace(123, {'stuff':'foo'})
def test_delete_nonexistent(self):
"""
Raise an error when trying to delete an item that doesn't exist
"""
foos = self.get_interface('foos')
foos.storage.get_by_id = Mock(return_value=None)
with self.assertRaises(errors.NotFoundError):
foos.delete(123, inverse_delete=False)
def test_delete(self):
"""
Can remove an existing item
"""
foos = self.get_interface('foos')
foos.storage.get_by_id = Mock(return_value={'_id':123, 'stuff':'foo'})
foos.storage.delete = Mock(return_value=None)
foos.delete(123, inverse_delete=False)
foos.storage.get_by_id.assert_called_once_with(Foo, 123)
foos.storage.delete.assert_called_once_with(Foo, 123)
def test_single_link_validation_fail(self):
"""
Fails validation if setting a link to a non-existent ID.
"""
foos = api.interfaces['foos']
bars = api.interfaces['bars']
foos.storage.get_by_id = Mock(return_value=None)
with self.assertRaises(errors.CompoundValidationError):
bars.create({'foo':'123'})
def test_single_link(self):
"""
Can get a linked item through a single link.
"""
foo = {'_id':'123', 'stuff':'foo'}
bar = {'_id':'321', 'foo':'123'}
foos = self.get_interface('foos')
bars = self.get_interface('bars')
foos.storage.get_by_id = Mock(return_value=foo)
bars.storage.get_by_id = Mock(return_value=bar)
linked_foo = bars.link('321', 'foo')
self.assertEquals(linked_foo, foo)
bars.storage.get_by_id.assert_called_once_with(Bar, '321')
foos.storage.get_by_id.assert_called_once_with(Foo, '123')
def test_single_link_get_embedded(self):
"""
Embedded links are included when fetching the referencing item.
"""
foo = {'_id':'123', 'stuff':'foo'}
bar = {'_id':'321', 'embedded_foo':'123'}
foos = self.get_interface('foos')
bars = self.get_interface('bars')
foos.storage.get_by_id = Mock(return_value=foo)
bars.storage.get_by_id = Mock(return_value=bar)
bar = bars.get('321')
self.assertEquals(bar['embedded_foo'], foo)
def test_multiple_link(self):
"""
Can set a list of links when creating an item
"""
created_bazes = []
baz_ids = []
for i in range(0,3):
baz = { 'name':'Baz#%d' % i, '_id':'%d' % i }
created_bazes.append(baz)
baz_ids.append(baz['_id'])
foo = {'_id':'123', 'bazes':baz_ids}
foos = self.get_interface('foos')
bazes = self.get_interface('bazes')
foos.storage.get_by_id = Mock(return_value=foo)
foos.storage.check_filter = Mock(return_value=None)
bazes.storage.get_by_ids = Mock(return_value=created_bazes)
bazes.storage.check_filter = Mock(return_value=None)
linked_bazes = foos.link(foo['_id'], 'bazes', sort=('+name',), filter={'name':'foo'}, offset=10, limit=20)
self.assertEquals(linked_bazes, created_bazes)
bazes.storage.get_by_ids.assert_called_once_with(Baz, baz_ids, sort=('+name',), filter={'name':'foo'}, offset=10, limit=20, count=False)
def test_multiple_link_default_order(self):
"""
Linked items are always in field order unless a sort option is set
"""
ordered_bazes = []
baz_ids = []
for i in range(0,3):
baz = { 'name':'Baz#%d' % i, '_id':'%d' % i }
ordered_bazes.append(baz)
baz_ids.append(baz['_id'])
random_bazes = [b for b in ordered_bazes]
random.shuffle(random_bazes)
foo = {'_id':'123', 'bazes':baz_ids}
foos = api.interfaces['foos']
bazes = api.interfaces['bazes']
foos.storage.get_by_id = Mock(return_value=foo)
bazes.storage.check_filter = Mock(return_value=None)
bazes.storage.get_by_ids = Mock(return_value=random_bazes)
linked_bazes = foos.link(foo['_id'], 'bazes')
self.assertEquals(linked_bazes, ordered_bazes)
linked_bazes = foos.link(foo['_id'], 'bazes', sort=('+name',))
self.assertEquals(linked_bazes, random_bazes)
def test_multiple_link_get_embedded(self):
"""
Embedded link list is included when fetching the referencing item.
"""
created_bazes = []
baz_ids = []
for i in range(0,3):
baz = { 'name':'Baz#%d' % i, '_id':'%d' % i }
created_bazes.append(baz)
baz_ids.append(baz['_id'])
foo = {'_id':'123', 'embedded_bazes':baz_ids}
foos = self.get_interface('foos')
bazes = self.get_interface('bazes')
foos.storage.get_by_id = Mock(return_value=foo)
bazes.storage.get_by_ids = Mock(return_value=created_bazes)
fetched_foo = foos.get(foo['_id'])
self.assertEquals(fetched_foo['embedded_bazes'], created_bazes)
def test_single_inverse_link(self):
"""
Can resolve a single inverse link.
"""
created_bazes = []
baz_ids = []
for i in range(0,3):
baz = { 'name':'Baz#%d' % i, '_id':'%d' % i }
created_bazes.append(baz)
baz_ids.append(baz['_id'])
foo = {'_id':'123', 'bazes':baz_ids}
foos = self.get_interface('foos')
bazes = self.get_interface('bazes')
foos.storage.get = Mock(return_value=[foo])
bazes.storage.get_by_id = Mock(return_value=created_bazes[0])
linked_foo = bazes.link(baz_ids[0], 'foo')
bazes.storage.get_by_id.assert_called_once_with(Baz, baz_ids[0])
foos.storage.get.assert_called_once_with(Foo, filter={'bazes':baz_ids[0]}, limit=1)
self.assertEquals(linked_foo, foo)
def test_single_inverse_link_embedded(self):
"""
Single embedded links are automatically resolved.
"""
created_bazes = []
baz_ids = []
for i in range(0,3):
baz = { 'name':'Baz#%d' % i, '_id':'%d' % i }
created_bazes.append(baz)
baz_ids.append(baz['_id'])
foo = {'_id':'123', 'bazes':baz_ids}
foos = self.get_interface('foos')
foos.storage.get = Mock(return_value=[foo])
bazes = self.get_interface('bazes')
bazes.storage.get_by_id = Mock(return_value=created_bazes[0])
baz = bazes.get(baz_ids[0], embed=('embedded_foo',))
self.assertEquals(baz['embedded_foo'], foo)
def test_multiple_inverse_link(self):
"""
Can resolve a multiple inverse link the same way as a forward link.
"""
foo = {'stuff':'foo', '_id':'123'}
bar_items = []
bar_ids = []
for i in range(0,3):
bar = {'foo':foo['_id'], '_id':'%s' % i}
bar_items.append(bar)
bar_ids.append(bar['_id'])
foos = self.get_interface('foos')
foos.storage.get_by_id = Mock(return_value=foo)
foos.storage.check_filter = Mock(return_value=None)
bars = self.get_interface('bars')
bars.storage.get = Mock(return_value=bar_items)
bars.storage.check_filter = Mock(return_value=None)
linked_bars = api.interfaces['foos'].link(foo['_id'], 'bars', sort=('-name',), filter={'number':'7'}, limit=10, offset=20)
foos.storage.get_by_id.assert_called_once_with(Foo, foo['_id'])
bars.storage.get.assert_called_once_with(Bar, sort=('-name',), filter={'foo': '123', 'number':'7'}, limit=10, offset=20, count=False)
self.assertEquals(linked_bars, bar_items)
def test_embed_polymorphic(self):
"""Interfaces properly embed links when fetching descendants of the interface's entity"""
littorinas = self.get_interface('littorinas')
shells = self.get_interface('shells')
littorinas.storage.get = Mock(return_value=[
{'_id': '1', '_type':'Littorina.LittorinaLittorea', 'shell':'2'}])
shells.storage.get_by_id = Mock(return_value={'_id':'2', 'color': 'Really brown'})
result = littorinas.list()
shells.storage.get_by_id.assert_called_once_with(Shell, '2')
self.assertEquals(result, [{'_id': '1', '_type':'Littorina.LittorinaLittorea', 'shell':{'_id':'2', 'color': 'Really brown'}}])
def test_sort_fail(self):
"""
Trying to sort by a sort-disabled field raises an error.
"""
with self.assertRaises(errors.DisabledFieldError) as cm:
api.interfaces['foos'].list(sort=('+optional_stuff',))
def test_sort_default(self):
"""
If no sort is set, the default is used.
"""
bars = self.get_interface('bars')
bars.storage.get = Mock(return_value=[])
bars.list()
bars.storage.get.assert_called_once_with(Bar, sort=('+name',), filter=None, limit=0, offset=0, count=False)
def test_auth_required_not_present(self):
"""Raise NotAuthenticatedError if authorization requires authentication and it is not present."""
with self.assertRaises(errors.NotAuthenticatedError):
api.interfaces['hiddens'].list()
def test_auth_required_present(self):
"""Don't raise NotAuthenticatedError if authentication is required and present."""
hiddens = self.get_interface('hiddens')
hiddens.storage.get = Mock(return_value=[])
hiddens.list(context={'identity':{'foo':'bar'}})
def test_auth_failed(self):
"""Raises NotAuthorizedError if the authorization rule fails"""
with self.assertRaises(errors.NotAuthorizedError):
api.interfaces['hiddens'].create({}, context={'identity':{}})
with self.assertRaises(errors.NotAuthorizedError):
api.interfaces['hiddens'].create({}, context={'identity':{'role':'foo'}})
def test_auth_pass(self):
"""Does not raise NotAuthorizedError if the authorization rule passes"""
hiddens = self.get_interface('hiddens')
hiddens.storage.create = Mock(return_value={})
hiddens.create({}, context={'identity':{'role':'admin'}})
def test_auth_result_fail(self):
"""Raises NotAuthorizedError if a result rule doesn't pass."""
hiddens = self.get_interface('hiddens')
hiddens.storage.get_by_id = Mock(return_value={'foo':700})
with self.assertRaises(errors.NotAuthorizedError):
hiddens.get(123)
def test_auth_result_fail_list(self):
"""Raises NotAuthorizedError if a member of a result list doesn't pass a rule."""
planets = self.get_interface('planets')
planets.storage.get = Mock(return_value=[{'foo':700}])
with self.assertRaises(errors.NotAuthorizedError):
planets.list()
def test_auth_result_pass(self):
"""Does not raise NotAuthorizedError if a result rule passes."""
hiddens = self.get_interface('hiddens')
hiddens.storage.get_by_id = Mock(return_value={'foo':23})
hiddens.get(123)
def test_auth_arbitrary_function(self):
"""Will use an arbitrary function to check authorization"""
anyfunctionauthmodels = self.get_interface('anyfunctionauthmodels')
anyfunctionauthmodels.storage.get_by_id = Mock(return_value={'foo':'123'})
anyfunctionauthmodels.storage.get = Mock(return_value=[{'foo':'a'}])
with self.assertRaises(errors.NotAuthorizedError):
anyfunctionauthmodels.get('666')
auth_fn_get.assert_called_once_with({'item':{'foo':'123'}})
anyfunctionauthmodels.list()
auth_fn_list.assert_called_once_with({'item':{'foo':'a'}})
def test_hidden_result(self):
"""Hidden fields aren't shown in results."""
hiddens = self.get_interface('hiddens')
hiddens.storage.create = Mock(return_value={'_id':'123', 'name':'foo'})
obj = hiddens.create({'name':'foo'}, context={'identity':{'role':'admin'}})
self.assertNotIn('name', obj)
def test_hidden_show_fail(self):
"""Hidden fields aren't shown in results even when show_hidden=True if the user is not authorized."""
hiddens = self.get_interface('hiddens')
hiddens.storage.get_by_id = Mock(return_value={'_id':'123', 'name':'foo', 'foo':23})
obj = hiddens.get('123', show_hidden=True)
self.assertNotIn('name', obj)
def test_hidden_succeed(self):
"""Hidden fields are shown when show_hidden=True and the user is authorized."""
hiddens = self.get_interface('hiddens')
hiddens.storage.get_by_id = Mock(return_value={'_id':'123', 'name':'foo', 'foo':23})
obj = hiddens.get('123', show_hidden=True, context={'identity':{'foo':'bar'}})
self.assertIn('name', obj)
def test_hidden_filter(self):
"""Can't filter by a hidden field without authorization."""
hiddens = self.get_interface('hiddens')
hiddens.storage.check_filter = Mock(side_effect=errors.DisabledFieldError)
with self.assertRaises(errors.DisabledFieldError):
hiddens.list(filter={'name':'zoomy'}, context={'identity':{}})
hiddens.storage.check_filter.assert_called_once_with({'name':'zoomy'}, set(['_type', '_id']), {'identity': {}})
def test_hidden_filter_authorized(self):
"""Can filter by a hidden field when authorized."""
hiddens = self.get_interface('hiddens')
hiddens.storage.check_filter = Mock(return_value=None)
hiddens.storage.get = Mock(return_value=[])
hiddens.list(filter={'name':'zoomy'}, context={'identity':{'foo':'bar'}})
hiddens.storage.check_filter.assert_called_once_with({'name':'zoomy'}, set(['name', '_type', '_id']), {'item': [], 'identity': {'foo': 'bar'}})
def test_hidden_sort_fail(self):
"""Can't sort by a hidden field without authorization."""
with self.assertRaises(errors.DisabledFieldError) as cm:
api.interfaces['hiddens'].list(sort=('+name',), context={'identity':{}})
self.assertEquals(cm.exception.message, 'The "name" field cannot be used for sorting.')
def test_authorization_bypass(self):
"""Can bypass authorization for methods, filters and sort."""
hiddens = self.get_interface('hiddens')
hiddens.storage.get = Mock(return_value=[{'name':'zoomy', 'foo':23}])
results = hiddens.list(filter={'name':'zoomy'}, sort=('+name',), bypass_authorization=True, show_hidden=True)
hiddens.storage.get.assert_called_once_with(Hidden, sort=('+name',), filter={'name':'zoomy'}, limit=0, offset=0, count=False)
self.assertEquals(results, [{'name':'zoomy', 'foo':23}])
def test_hooks(self):
foos = self.get_interface('foos')
foos.storage.get_by_id = Mock(return_value={'foo':23})
foos.storage.get = Mock(return_value=[{'foo':23}])
foos.storage.create = Mock(return_value=123)
foos.storage.update = Mock(return_value={'foo':23})
foos.storage.delete = Mock()
foos.inverse_delete = Mock()
foos.storage.check_filter = Mock()
foos.before_get = Mock()
foos.after_get = Mock()
foos.before_list = Mock()
foos.after_list = Mock()
foos.before_create = Mock()
foos.after_create = Mock()
foos.before_update = Mock()
foos.after_update = Mock()
foos.before_delete = Mock()
foos.after_delete = Mock()
context = {'identity':{'foo':'bar'}}
foos.get(123, context=context)
foos.before_get.assert_called_once_with(context['identity'], 123)
foos.after_get.assert_called_once_with(context['identity'], {'foo':23})
foos.list(filter={'stuff':'things'}, context=context)
foos.before_list.assert_called_once_with(context['identity'], {'stuff':'things'})
foos.after_list.assert_called_once_with(context['identity'], [{'foo':23}])
foos.create({'stuff':'things'}, context=context)
foos.before_create.assert_called_once_with(context['identity'], {'stuff':'things'})
foos.after_create.assert_called_once_with(context['identity'], {'stuff':'things', '_id':123})
foos.update(123, {'things':'stuff'}, context=context)
foos.before_update.assert_called_once_with(context['identity'], {'foo':23}, {'things':'stuff'})
foos.after_update.assert_called_once_with(context['identity'], {'foo':23})
foos.delete(123, context=context)
foos.before_delete.assert_called_once_with(context['identity'], {'foo':23})
foos.after_delete.assert_called_once_with(context['identity'], {'foo':23})
def test_disabled_method(self):
"""An error is raised when attempting to call a disabled method."""
with self.assertRaises(errors.DisabledMethodError):
api.interfaces['readonly_foos'].create({})
def test_default_limit(self):
"""A default limit is used when limit is not passed"""
bazes = self.get_interface('bazes')
bazes.storage.get = Mock(return_value=[])
bazes.list()
bazes.storage.get.assert_called_once_with(Baz, sort=(), filter=None, offset=0, limit=10, count=False)
def test_max_limit(self):
"""Limit can't exceed max_limit"""
bazes = self.get_interface('bazes')
bazes.storage.get = Mock(return_value=[])
bazes.list(limit=50)
bazes.storage.get.assert_called_once_with(Baz, sort=(), filter=None, offset=0, limit=20, count=False)
def test_default_embedded_not_default(self):
"""A link can be embeddable but not embedded"""
foos = self.get_interface('foos')
foos.storage.get = Mock(return_value=[{'_id':'123', 'embedded_foos':['1','2','3']}])
foos.storage.get_by_ids = Mock(return_value=[])
foos.list()
self.assertFalse(foos.storage.get_by_ids.called)
def test_default_not_embedded_not_default_included(self):
"""A link that is not embedded by default can still be embedded"""
foos = self.get_interface('foos')
foos.storage.get = Mock(return_value=[{'_id':'123', 'embedded_foos':['1','2','3']}])
foos.storage.get_by_ids = Mock(return_value=[])
foos.list(embed=['embedded_foos'])
foos.storage.get_by_ids.assert_called_once_with(Foo, ['1','2','3'], sort=(), filter=None, limit=0, offset=0, count=False)
def test_embeddable_included_if_fields_set(self):
"""An embeddable field is included if it is in the fields argument"""
foos = self.get_interface('foos')
foos.storage.get = Mock(return_value=[{'_id':'123', 'embedded_foos':['1','2','3']}])
foos.storage.get_by_ids = Mock(return_value=[])
foos.list(fields=['embedded_foos'])
foos.storage.get_by_ids.assert_called_once_with(Foo, ['1','2','3'], sort=(), filter=None, limit=0, offset=0, count=False)
def test_embeddable_fields(self):
"""Only fields in an entity's embedded_fields list are included"""
foos = self.get_interface('foos')
foos.storage.get = Mock(return_value=[{'_id':'123', 'embedded_foos':['1','2','3']}])
foos.storage.get_by_ids = Mock(return_value=[{'_id':'2', 'stuff':123, 'optional_stuff':456}])
result = foos.list(embed=('embedded_foos',))
self.assertEquals(result, [{'_id':'123', 'embedded_foos':[{'_id':'2', 'stuff':123}]}])
def test_field_subset(self):
"""Can fetch only a subset of fields"""
foos = self.get_interface('foos')
foos.storage.get_by_id = CopyingMock(return_value={'_id':'123', 'stuff':123, 'optional_stuff':456})
result = foos.get('123', fields=('optional_stuff',))
self.assertEquals(result, {'_id':'123', 'optional_stuff':456})
def test_no_fields(self):
"""Only an item's ID is included if fields is an empty list"""
foos = self.get_interface('foos')
foos.storage.get_by_id = CopyingMock(return_value={'_id':'123', 'stuff':123, 'optional_stuff':456})
result = foos.get('123', fields=())
self.assertEquals(result, {'_id':'123'})
def test_fields_empty(self):
"""All of an item's visible fields are returned if the fields list is omitted"""
foo = {'_id':'123', 'stuff':123, 'optional_stuff':456}
foos = self.get_interface('foos')
foos.storage.get_by_id = CopyingMock(return_value=foo)
result = foos.get('123')
self.assertEquals(result, foo)
def test_fields_empty_hidden_field(self):
"""All of an item's visible fields are returned if the fields list is omitted when an entity has hidden fields"""
hiddens = self.get_interface('hiddens')
hiddens.storage.get_by_id = CopyingMock(return_value={'_id':'123', 'name':'hidden', 'foo':23})
result = hiddens.get('123')
self.assertEquals(result, {'_id':'123', 'foo':23})
def test_fields_empty_hidden_list(self):
"""All of an item's visible fields are returned when listing items"""
foos = self.get_interface('foos')
foos.storage.get = CopyingMock(return_value=[{'_id':'123', 'stuff':'foo', 'secret':'i like valuer'}])
result = foos.list()
self.assertEquals(result, [{'_id':'123', 'stuff':'foo'}])
def test_count(self):
"""Can get a count instead of a list of items"""
foos = self.get_interface('foos')
foos.storage.get = Mock(return_value=42)
result = foos.list(count=True)
self.assertEquals(result, 42)
foos.storage.get.assert_called_once_with(Foo, filter=None, sort=(), offset=0, limit=0, count=True)
def test_count_link(self):
"""Can count a list link instead of getting the items"""
foos = self.get_interface('foos')
bazes = self.get_interface('bazes')
foos.storage.get_by_id = Mock(return_value={'_id':'123', 'bazes':['1','2','3']})
bazes.storage.get_by_ids = Mock(return_value=42)
result = foos.link('123', 'bazes', count=True)
self.assertEquals(result, 42)
bazes.storage.get_by_ids.assert_called_with(Baz, ['1','2','3'], filter=None, sort=(), offset=0, limit=10, count=True)
def test_count_inverse_link(self):
"""Can count a multiple link instead of getting the items"""
foo = {'stuff':'foo', '_id':'123'}
bars = []
for i in range(0,3):
bar = {'foo':foo['_id'], '_id':'%s' % i}
bars.append(bar)
foos = api.interfaces['foos']
bars = api.interfaces['bars']
foos.storage.get_by_id = Mock(return_value=foo)
bars.storage.get = Mock(return_value=3)
bars.storage.check_filter = Mock(return_value=None)
result = foos.link(foo['_id'], 'bars', count=True)
self.assertEquals(result, 3)
bars.storage.get.assert_called_once_with(Bar, filter={'foo':'123'}, sort=('+name',), offset=0, limit=0, count=True)
def test_reverse_delete_null_single(self):
"""Removing a single linked item with a NULL rule, nulls the referencing item's link field"""
targets = self.get_interface('nullsingletargets')
targets.storage.get_by_id = Mock(return_value={'_id':'123'})
targets.storage.delete = Mock()
referrers = self.get_interface('nullsinglereferrers')
referrers.storage.get = Mock(return_value=[{'_id':'666'}])
referrers.storage.check_filter = Mock(return_value=None)
referrers.storage.update = Mock(return_value={})
referrers.storage.get_by_id = Mock(return_value={})
targets.delete('123')
targets.storage.get_by_id.assert_called_once_with(NullSingleTarget, '123')
targets.storage.delete.assert_called_once_with(NullSingleTarget, '123')
referrers.storage.get.assert_called_once_with(NullSingleReferrer, filter={'target':'123'}, count=False, sort=(), offset=0, limit=0)
referrers.storage.update.assert_called_once_with(NullSingleReferrer, '666', {'target':None}, replace=False)
def test_reverse_delete_null_multi(self):
"""Removing a multi-linked item with a NULL rule, removes the links in the referencing item's field"""
targets = api.interfaces['nullmultitargets']
targets.storage.get_by_id = Mock(return_value={'_id':'123'})
targets.storage.delete = Mock()
referrers = api.interfaces['nullmultireferrers']
referrers.storage.get = Mock(return_value=[{'_id':'666', 'targets':['555', '123', '888']}])
referrers.storage.check_filter = Mock(return_value=None)
referrers.storage.update = Mock(return_value={})
targets.delete('123')
targets.storage.get_by_id.assert_any_call(NullMultiTarget, '123')
targets.storage.get_by_id.assert_any_call(NullMultiTarget, '555', fields={})
targets.storage.get_by_id.assert_any_call(NullMultiTarget, '888', fields={})
targets.storage.delete.assert_called_once_with(NullMultiTarget, '123')
referrers.storage.get.assert_called_once_with(NullMultiReferrer, filter={'targets':'123'}, count=False, sort=(), offset=0, limit=0)
referrers.storage.update.assert_called_once_with(NullMultiReferrer, '666', {'targets':['555', '888']}, replace=False)
def test_reverse_delete_cascade(self):
"""Removing a single linked item with a CASCADE rule, deletes the referencing item"""
targets = self.get_interface('cascadetargets')
targets.storage.get_by_id = Mock(return_value={'_id':'123'})
targets.storage.delete = Mock()
referrers = self.get_interface('cascadereferrers')
referrers.storage.get = Mock(return_value=[{'_id':'666'}])
referrers.storage.get_by_id = Mock(return_value={'_id':'666'})
referrers.storage.check_filter = Mock(return_value=None)
referrers.storage.delete = Mock()
targets.delete('123')
targets.storage.get_by_id.assert_called_once_with(CascadeTarget, '123')
targets.storage.delete.assert_called_once_with(CascadeTarget, '123')
referrers.storage.get.assert_called_once_with(CascadeReferrer, filter={'target':'123'}, count=False, sort=(), offset=0, limit=0)
referrers.storage.delete.assert_called_once_with(CascadeReferrer, '666')
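# Not part of the original module: a minimal, conventional entry point so the
# tests can also be run directly with `python <this_file>.py`.
if __name__ == '__main__':
    unittest.main()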
|
|
#---------------------------------------------------------------------------
# Copyright 2013 The Open Source Electronic Health Record Agent
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
import sys
import os
import subprocess
import argparse
import re
import json
# add the scripts directory to sys.path
SCRIPTS_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(SCRIPTS_DIR)
from string import Template
from LoggerManager import getTempLogFile, logger, initConsoleLogging
from MCompReposCommitter import MCompReposCommitter
from MCompReposCommitter import getDefaultCommitMsgFileByPatchInfo
from MCompReposCommitter import generateCommitMsgFileByPatchInfo
from GitUtils import getGitRepoRevisionHash
from PatchSequenceApply import PatchSequenceApply
from VistATestClient import VistATestClientFactory
from VistATestClient import DEFAULT_NAMESPACE, DEFAULT_INSTANCE
from VistAMComponentExtractor import VistADataExtractor
from PatchInfoParser import PatchInfo
from VistAPackageInfoFetcher import VistAPackageInfoFetcher
from IntersysCacheUtils import backupCacheDataByGitHash, startCache
from IntersysCacheUtils import getCacheBackupNameByHash, restoreCacheData
"""
constants
"""
"""
class
"""
class PatchIncrInstallExtractCommit(object):
def __init__(self, config):
self._parseJSONConfig(config)
def _parseJSONConfig(self, config):
with open(config, 'r') as configFile:
self._config = json.load(configFile)
testClientConfig = self._config['VistA_Connection']
self._instance = testClientConfig.get('instance',DEFAULT_INSTANCE)
self._useSudo = testClientConfig.get('useSudo', False)
def _createTestClient(self):
testClientConfig = self._config['VistA_Connection']
system = testClientConfig['system']
namespace = testClientConfig.get('namespace', DEFAULT_NAMESPACE)
username = testClientConfig.get('username', None)
password = testClientConfig.get('password', None)
prompt = testClientConfig.get('prompt', None)
return VistATestClientFactory.createVistATestClient(system,
prompt=prompt,
namespace=namespace,
instance=self._instance,
username=username,
password=password)
def _autoRecover(self):
"""
private method to recover the right cache.dat based on the patch
installed in the running VistA instance.
"""
from GitUtils import getCommitInfo
mExtractConfig = self._config['M_Extract']
mRepo = mExtractConfig['M_repo']
mRepoBranch = mExtractConfig.get('M_repo_branch', None)
if not mRepoBranch:
mRepoBranch = 'master' # default is the master branch
commitInfo = getCommitInfo(mRepo, mRepoBranch)
if not commitInfo:
logger.error("Can not read commit info from %s branch %s" % (mRepo,
mRepoBranch))
return -1
logger.debug(commitInfo)
""" convert datetime to VistA T- format """
from datetime import datetime
commitDate = datetime.fromtimestamp(int(commitInfo['%ct']))
timeDiff = datetime.now() - commitDate
days = timeDiff.days + 30 # extra 30 days
logger.debug("Totol dates to query is %s" % days)
installNames = None
idx = commitInfo['%s'].find('Install: ')
if idx >= 0:
installNames = commitInfo['%s'][len('Install: '):].split(', ')
logger.info("installNames is %s" % installNames)
if installNames is None:
logger.error("Can not find patch installed after")
return -1
""" check to see what and when is the last patch installed """
testClient = self._createTestClient()
""" make sure cache instance is up and running """
startCache(self._instance, self._useSudo)
with testClient:
patchInfoFetch = VistAPackageInfoFetcher(testClient)
output = patchInfoFetch.getAllPatchInstalledAfterByTime("T-%s" % days)
if not output: # could not retrieve patch history, skip auto-recovery
logger.error("Can not get patch installation information from VistA")
return -1
logger.debug(output)
""" logic to check if we need to recover from cache backup data """
found = False
for idx in xrange(0,len(output)):
if output[idx][0] == installNames[-1]:
found = True
break
if found and idx == len(output) - 1:
""" last patch is the same as last in the commit """
logger.info("No need to recover.")
return 0
if not found or idx < len(output) - 1:
""" check to see if cache.dat exist in the backup dir"""
backupConfig = self._config.get('Backup')
backupDir = backupConfig['backup_dir']
if not os.path.exists(backupDir):
logger.error("%s does not exist" % backupDir)
return -4
cacheDir = backupConfig['cache_dat_dir']
origDir = os.path.join(cacheDir, "CACHE.DAT")
""" identify the exists of backup file in the right format """
commitHash = commitInfo['%H']
cacheBackupFile = os.path.join(backupDir,
getCacheBackupNameByHash(commitHash))
if not os.path.exists(cacheBackupFile):
logger.error("backup file %s does not exist" % cacheBackupFile)
return -5
logger.info("Need to restore from backup data %s" % cacheBackupFile)
restoreCacheData(self._instance, cacheBackupFile,
cacheDir, self._useSudo)
startCache(self._instance, self._useSudo)
return 0
return -1
def run(self):
patchApplyConfig = self._config['Patch_Apply']
isContinuous = patchApplyConfig.get('continuous')
patchLogDir = patchApplyConfig['log_dir']
if not os.path.exists(patchLogDir):
logger.error("%s does not exist" % patchLogDir)
return False
inputPatchDir = patchApplyConfig['input_patch_dir']
mExtractConfig = self._config['M_Extract']
mRepo = mExtractConfig['M_repo']
mRepoBranch = mExtractConfig.get('M_repo_branch', None)
outputDir = mExtractConfig['temp_output_dir']
if not os.path.exists(outputDir):
os.makedirs(outputDir)
extractLogDir = mExtractConfig['log_dir']
commitMsgDir = mExtractConfig['commit_msg_dir']
if not os.path.exists(commitMsgDir):
logger.error("%s does not exist" % commitMsgDir)
return False
backupConfig = self._config.get('Backup')
if backupConfig and backupConfig['auto_recover']:
self._autoRecover()
while True:
startCache(self._instance, self._useSudo)
testClient = self._createTestClient()
with testClient:
patchApply = PatchSequenceApply(testClient, patchLogDir)
outPatchList = patchApply.generatePatchSequence(inputPatchDir)
if not outPatchList:
logger.info("No Patch needs to apply")
return True
patchInfo = outPatchList[0]
logger.info(patchInfo)
result = patchApply.applyPatchSequenceByInstallName(
patchInfo.installName,
patchOnly=True)
if result < 0:
logger.error("Error installing patch %s" % patchInfo.installName)
return False
elif result == 0:
logger.info("%s is already installed" % patchInfo.installName)
continue
commitFile = getDefaultCommitMsgFileByPatchInfo(patchInfo,
dir=commitMsgDir)
generateCommitMsgFileByPatchInfo(patchInfo, commitFile,
reposDir=SCRIPTS_DIR)
MExtractor = VistADataExtractor(mRepo, outputDir, extractLogDir,
gitBranch=mRepoBranch)
MExtractor.extractData(testClient)
commit = MCompReposCommitter(mRepo)
commit.commit(commitFile)
if backupConfig:
backupDir = backupConfig['backup_dir']
if not os.path.exists(backupDir):
logger.error("%s does not exist" % backupDir)
return False
cacheDir = backupConfig['cache_dat_dir']
origDir = os.path.join(cacheDir, "CACHE.DAT")
backupCacheDataByGitHash(self._instance, origDir, backupDir,
mRepo, mRepoBranch, self._useSudo)
startCache(self._instance, self._useSudo)
if not isContinuous:
break
return True
if __name__ == '__main__':
initConsoleLogging()
parserDescr = 'Incremental install Patch, extract M Comp and commit'
parser = argparse.ArgumentParser(description=parserDescr)
parser.add_argument('configFile', help='Configuration file in JSON format')
result = parser.parse_args()
runTest = PatchIncrInstallExtractCommit(result.configFile)
runTest.run()
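# Illustrative configuration sketch (hypothetical values; only keys that the
# code above actually reads are shown). A JSON file along these lines could be
# passed as the configFile argument:
#
# {
#   "VistA_Connection": {
#     "system": "cache",
#     "namespace": "VISTA",
#     "instance": "CACHE",
#     "username": "vista_user",
#     "password": "vista_password",
#     "useSudo": false
#   },
#   "Patch_Apply": {
#     "continuous": true,
#     "log_dir": "/var/log/patch_apply",
#     "input_patch_dir": "/opt/patches"
#   },
#   "M_Extract": {
#     "M_repo": "/opt/VistA-M",
#     "M_repo_branch": "master",
#     "temp_output_dir": "/tmp/m_extract",
#     "log_dir": "/var/log/m_extract",
#     "commit_msg_dir": "/tmp/commit_msgs"
#   },
#   "Backup": {
#     "auto_recover": true,
#     "backup_dir": "/opt/cache_backup",
#     "cache_dat_dir": "/opt/cachesys/mgr"
#   }
# }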
|
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from os.path import join, basename
import re
import pandas as pd
from tgp.util import system_call
from .util import (get_artifact_information, split_mapping_file,
generate_demux_file, generate_artifact_info)
def generate_parameters_string(parameters):
"""Generates the parameters string from the parameters dictionary
Parameters
----------
parameters : dict
The parameter values, keyed by parameter name
Returns
-------
str
A string with the parameters to the CLI call
"""
flag_params = ['rev_comp_barcode', 'rev_comp_mapping_barcodes', 'rev_comp']
str_params = ['max_bad_run_length', 'min_per_read_length_fraction',
'sequence_max_n', 'phred_quality_threshold', 'barcode_type',
'max_barcode_errors', 'phred_offset']
result = ["--%s %s" % (sp, parameters[sp]) for sp in str_params
if parameters[sp] != ""]
for fp in flag_params:
if parameters[fp]:
result.append("--%s" % fp)
return ' '.join(result)
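# Illustrative sketch (hypothetical parameter values): for
#
#     params = {'max_bad_run_length': 3, 'min_per_read_length_fraction': 0.75,
#               'sequence_max_n': 0, 'phred_quality_threshold': 3,
#               'barcode_type': 'golay_12', 'max_barcode_errors': 1.5,
#               'phred_offset': '', 'rev_comp_barcode': False,
#               'rev_comp_mapping_barcodes': True, 'rev_comp': False}
#
# generate_parameters_string(params) returns
#
#     '--max_bad_run_length 3 --min_per_read_length_fraction 0.75 '
#     '--sequence_max_n 0 --phred_quality_threshold 3 '
#     '--barcode_type golay_12 --max_barcode_errors 1.5 '
#     '--rev_comp_mapping_barcodes'
#
# empty-string parameters (phred_offset here) are skipped and flag parameters
# only appear when truthy.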
def get_sample_names_by_run_prefix(mapping_file):
"""Generates a dictionary of run_prefix and sample names
Parameters
----------
mapping_file : str
The mapping file
Returns
-------
dict
Dict mapping run_prefix to sample id
Raises
------
ValueError
If there is more than 1 sample per run_prefix
"""
qiime_map = pd.read_csv(mapping_file, delimiter='\t', dtype=str,
encoding='utf-8')
qiime_map.set_index('#SampleID', inplace=True)
samples = {}
errors = []
for prefix, df in qiime_map.groupby('run_prefix'):
len_df = len(df)
if len_df != 1:
errors.append('%s has %d samples (%s)' % (prefix, len_df,
', '.join(df.index)))
else:
samples[prefix] = df.index.values[0]
if errors:
raise ValueError("You have run_prefix values with multiple "
"samples: %s" % ' -- '.join(errors))
return samples
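# Illustrative sketch (hypothetical mapping data): a QIIME mapping file whose
# '#SampleID' and 'run_prefix' columns contain
#
#     #SampleID        run_prefix
#     1.SKB1.640202    s1
#     1.SKB2.640194    s2
#
# would yield {'s1': '1.SKB1.640202', 's2': '1.SKB2.640194'}; two samples
# sharing the same run_prefix would instead raise the ValueError above.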
def generate_per_sample_fastq_command(forward_seqs, reverse_seqs, barcode_fps,
mapping_file, output_dir, params_str):
"""Generates the per-sample FASTQ split_libraries_fastq.py command
Parameters
----------
forward_seqs : list of str
The list of forward seqs filepaths
reverse_seqs : list of str
The list of reverse seqs filepaths
barcode_fps : list of str
The list of barcode filepaths
mapping_file : str
The path to the mapping file
output_dir : str
The path to the split libraries output directory
params_str : str
The string containing the parameters to pass to
split_libraries_fastq.py
Returns
-------
str
The CLI to execute
Raises
------
ValueError
- If barcode_fps is not an empty list
- If there are run prefixes in the mapping file that do not match
the sample names
"""
if barcode_fps:
raise ValueError('per_sample_FASTQ can not have barcodes: %s'
% (', '.join(basename(b) for b in barcode_fps)))
sn_by_rp = get_sample_names_by_run_prefix(mapping_file)
samples = []
for f in forward_seqs:
f = basename(f)
if re.match("^[0-9]+\_.*", f):
# getting just the main filename
f = basename(f).split('_', 1)[1]
# removing extensions: fastq or fastq.gz
if 'fastq' in f.lower().rsplit('.', 2):
f = f[:f.lower().rindex('.fastq')]
# this try/except block is simply to retrieve all possible errors
# and display them in the next if block
try:
samples.append(sn_by_rp[f])
del sn_by_rp[f]
except KeyError:
pass
if sn_by_rp:
raise ValueError(
'Some run_prefix values do not match your sample names: %s'
% ', '.join(sn_by_rp.keys()))
cmd = str("split_libraries_fastq.py --store_demultiplexed_fastq "
"-i %s --sample_ids %s -o %s %s"
% (','.join(forward_seqs), ','.join(samples),
output_dir, params_str))
return cmd
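# Illustrative sketch (hypothetical filenames): with forward_seqs
# ['s1.fastq.gz', 's2.fastq.gz'] and run_prefix values 's1' and 's2' mapping
# to samples 'sample-1' and 'sample-2', the returned command would be
#
#     split_libraries_fastq.py --store_demultiplexed_fastq \
#         -i s1.fastq.gz,s2.fastq.gz --sample_ids sample-1,sample-2 \
#         -o <output_dir> <params_str>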
def generate_split_libraries_fastq_cmd(filepaths, mapping_file, atype,
out_dir, parameters):
"""Generates the split_libraries_fastq.py command
Parameters
----------
filepaths : list of (str, str)
The artifact filepaths and their type
mapping_file : str
The artifact QIIME-compliant mapping file
atype : str
The artifact type
out_dir : str
The job output directory
parameters : dict
The parameter values to pass to split_libraries_fastq.py
Returns
-------
str
The CLI to execute
Raises
------
NotImplementedError
If there is a not supported filepath type
ValueError
If the number of barcode files and the number of sequence files do not
match
"""
forward_seqs = []
reverse_seqs = []
barcode_fps = []
for fp, fp_type in filepaths:
if fp_type == 'raw_forward_seqs':
forward_seqs.append(fp)
elif fp_type == 'raw_reverse_seqs':
reverse_seqs.append(fp)
elif fp_type == 'raw_barcodes':
barcode_fps.append(fp)
elif fp_type == 'html_summary':
# Ignore the HTML summary file
continue
else:
raise NotImplementedError("File type not supported %s" % fp_type)
# We need to sort the filepaths to make sure that each lane's file is in
# the same order, so they match when passed to split_libraries_fastq.py
# All files should be prefixed with run_prefix, so the ordering is
# ensured to be correct
forward_seqs = sorted(forward_seqs)
reverse_seqs = sorted(reverse_seqs)
barcode_fps = sorted(barcode_fps)
output_dir = join(out_dir, "sl_out")
params_str = generate_parameters_string(parameters)
if atype == "per_sample_FASTQ":
cmd = generate_per_sample_fastq_command(
forward_seqs, reverse_seqs, barcode_fps, mapping_file,
output_dir, params_str)
else:
if len(barcode_fps) != len(forward_seqs):
raise ValueError("The number of barcode files and the number of "
"sequence files should match: %d != %s"
% (len(barcode_fps), len(forward_seqs)))
map_out_dir = join(out_dir, 'mappings')
mapping_files = sorted(split_mapping_file(mapping_file, map_out_dir))
cmd = str("split_libraries_fastq.py --store_demultiplexed_fastq -i %s "
"-b %s -m %s -o %s %s"
% (','.join(forward_seqs), ','.join(barcode_fps),
','.join(mapping_files), output_dir, params_str))
return cmd, output_dir
def split_libraries_fastq(qclient, job_id, parameters, out_dir):
"""Run split libraries fastq with the given parameters
Parameters
----------
qclient : tgp.qiita_client.QiitaClient
The Qiita server client
job_id : str
The job id
parameters : dict
The parameter values to run split libraries
out_dir : str
The path to the job's output directory
Returns
-------
dict
The results of the job
"""
# Step 1 get the rest of the information need to run split libraries
qclient.update_job_step(job_id, "Step 1 of 4: Collecting information")
artifact_id = parameters['input_data']
filepaths, mapping_file, atype = get_artifact_information(
qclient, artifact_id)
# Step 2 generate the split libraries fastq command
qclient.update_job_step(job_id, "Step 2 of 4: Generating command")
command, sl_out = generate_split_libraries_fastq_cmd(
filepaths, mapping_file, atype, out_dir, parameters)
# Step 3 execute split libraries
qclient.update_job_step(
job_id, "Step 3 of 4: Executing demultiplexing and quality control")
std_out, std_err, return_value = system_call(command)
if return_value != 0:
raise RuntimeError(
"Error processing files:\nStd output: %s\n Std error:%s"
% (std_out, std_err))
# Step 4 generate the demux file
qclient.update_job_step(job_id, "Step 4 of 4: Generating demux file")
generate_demux_file(sl_out)
artifacts_info = generate_artifact_info(sl_out)
return True, artifacts_info, ""
|
|
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from magnum.api.controllers import link
from magnum.api.controllers.v1 import base as v1_base
from magnum.api.controllers.v1 import collection
from magnum.api.controllers.v1 import types
from magnum.api.controllers.v1 import utils as api_utils
from magnum.api import validation
from magnum.common import exception
from magnum.common import k8s_manifest
from magnum import objects
class ReplicationControllerPatchType(v1_base.K8sPatchType):
@staticmethod
def internal_attrs():
defaults = v1_base.K8sPatchType.internal_attrs()
return defaults + ['/replicas']
class ReplicationController(v1_base.K8sResourceBase):
"""API representation of a ReplicationController.
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of a
ReplicationController.
"""
uuid = types.uuid
"""Unique UUID for this ReplicationController"""
images = [wtypes.text]
"""A list of images used by containers in this ReplicationController."""
replicas = wsme.wsattr(wtypes.IntegerType(), readonly=True)
"""Replicas of this ReplicationController"""
links = wsme.wsattr([link.Link], readonly=True)
"""A list containing a self link and associated rc links"""
def __init__(self, **kwargs):
super(ReplicationController, self).__init__()
self.fields = []
for field in objects.ReplicationController.fields:
# Skip fields we do not expose.
if not hasattr(self, field):
continue
self.fields.append(field)
setattr(self, field, kwargs.get(field, wtypes.Unset))
@staticmethod
def _convert_with_links(rc, url, expand=True):
if not expand:
rc.unset_fields_except(['uuid', 'name', 'images', 'bay_uuid',
'labels', 'replicas'])
rc.links = [link.Link.make_link('self', url,
'rcs', rc.uuid),
link.Link.make_link('bookmark', url,
'rcs', rc.uuid,
bookmark=True)]
return rc
@classmethod
def convert_with_links(cls, rpc_rc, expand=True):
rc = ReplicationController(**rpc_rc.as_dict())
return cls._convert_with_links(rc, pecan.request.host_url, expand)
@classmethod
def sample(cls, expand=True):
sample = cls(uuid='f978db47-9a37-4e9f-8572-804a10abc0aa',
name='MyReplicationController',
images=['MyImage'],
bay_uuid='f978db47-9a37-4e9f-8572-804a10abc0ab',
labels={'name': 'foo'},
replicas=2,
manifest_url='file:///tmp/rc.yaml',
manifest='''{
"metadata": {
"name": "name_of_rc"
},
"spec":{
"replicas":2,
"selector":{
"name":"frontend"
},
"template":{
"metadata":{
"labels":{
"name":"frontend"
}
},
"spec":{
"containers":[
{
"name":"test-redis",
"image":"steak/for-dinner",
"ports":[
{
"containerPort":80,
"protocol":"TCP"
}
]
}
]
}
}
}
}''',
created_at=datetime.datetime.utcnow(),
updated_at=datetime.datetime.utcnow())
return cls._convert_with_links(sample, 'http://localhost:9511', expand)
def parse_manifest(self):
try:
manifest = k8s_manifest.parse(self._get_manifest())
except ValueError as e:
raise exception.InvalidParameterValue(message=str(e))
try:
self.name = manifest["metadata"]["name"]
except (KeyError, TypeError):
raise exception.InvalidParameterValue(
"Field metadata['name'] can't be empty in manifest.")
try:
self.replicas = manifest["spec"]["replicas"]
except (KeyError, TypeError):
pass
try:
self.selector = manifest["spec"]["selector"]
except (KeyError, TypeError):
raise exception.InvalidParameterValue(
"Field spec['selector'] can't be empty in manifest.")
try:
self.labels = manifest["spec"]["template"]["metadata"]["labels"]
except (KeyError, TypeError):
raise exception.InvalidParameterValue(
"Field spec['template']['metadata']['labels'] "
"can't be empty in manifest.")
try:
images = []
for cont in manifest["spec"]["template"]["spec"]["containers"]:
images.append(cont["image"])
self.images = images
except (KeyError, TypeError):
raise exception.InvalidParameterValue(
"Field spec['template']['spec']['containers'] "
"can't be empty in manifest.")
class ReplicationControllerCollection(collection.Collection):
"""API representation of a collection of ReplicationControllers."""
rcs = [ReplicationController]
"""A list containing ReplicationController objects"""
def __init__(self, **kwargs):
self._type = 'rcs'
@staticmethod
def convert_with_links(rpc_rcs, limit, url=None, expand=False, **kwargs):
collection = ReplicationControllerCollection()
collection.rcs = [ReplicationController.convert_with_links(p, expand)
for p in rpc_rcs]
collection.next = collection.get_next(limit, url=url, **kwargs)
return collection
@classmethod
def sample(cls):
sample = cls()
sample.rcs = [ReplicationController.sample(expand=False)]
return sample
class ReplicationControllersController(rest.RestController):
"""REST controller for ReplicationControllers."""
def __init__(self):
super(ReplicationControllersController, self).__init__()
_custom_actions = {
'detail': ['GET'],
}
def _get_rcs_collection(self, marker, limit,
sort_key, sort_dir, expand=False,
resource_url=None):
limit = api_utils.validate_limit(limit)
sort_dir = api_utils.validate_sort_dir(sort_dir)
marker_obj = None
if marker:
marker_obj = objects.ReplicationController.get_by_uuid(
pecan.request.context,
marker)
rcs = pecan.request.rpcapi.rc_list(
pecan.request.context, limit,
marker_obj, sort_key=sort_key,
sort_dir=sort_dir)
return ReplicationControllerCollection.convert_with_links(
rcs, limit,
url=resource_url,
expand=expand,
sort_key=sort_key,
sort_dir=sort_dir)
@wsme_pecan.wsexpose(ReplicationControllerCollection, types.uuid,
types.uuid, int, wtypes.text, wtypes.text)
def get_all(self, rc_uuid=None, marker=None, limit=None,
sort_key='id', sort_dir='asc'):
"""Retrieve a list of ReplicationControllers.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
"""
return self._get_rcs_collection(marker, limit, sort_key,
sort_dir)
@wsme_pecan.wsexpose(ReplicationControllerCollection, types.uuid,
types.uuid, int, wtypes.text, wtypes.text)
def detail(self, rc_uuid=None, marker=None, limit=None,
sort_key='id', sort_dir='asc'):
"""Retrieve a list of ReplicationControllers with detail.
:param rc_uuid: UUID of a ReplicationController, to get only
ReplicationControllers for the ReplicationController.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
"""
# NOTE(jay-lau-513): /detail should only work against collections
parent = pecan.request.path.split('/')[:-1][-1]
if parent != "rcs":
raise exception.HTTPNotFound
expand = True
resource_url = '/'.join(['rcs', 'detail'])
return self._get_rcs_collection(marker, limit,
sort_key, sort_dir, expand,
resource_url)
@wsme_pecan.wsexpose(ReplicationController, types.uuid_or_name)
def get_one(self, rc_ident):
"""Retrieve information about the given ReplicationController.
:param rc_ident: UUID or logical name of a ReplicationController.
"""
rpc_rc = api_utils.get_rpc_resource('ReplicationController', rc_ident)
return ReplicationController.convert_with_links(rpc_rc)
@wsme_pecan.wsexpose(ReplicationController, body=ReplicationController,
status_code=201)
@validation.enforce_bay_types('kubernetes')
def post(self, rc):
"""Create a new ReplicationController.
:param rc: a ReplicationController within the request body.
"""
rc.parse_manifest()
rc_dict = rc.as_dict()
context = pecan.request.context
auth_token = context.auth_token_info['token']
rc_dict['project_id'] = auth_token['project']['id']
rc_dict['user_id'] = auth_token['user']['id']
rc_obj = objects.ReplicationController(context, **rc_dict)
new_rc = pecan.request.rpcapi.rc_create(rc_obj)
if not new_rc:
raise exception.InvalidState()
# Set the HTTP Location Header
pecan.response.location = link.build_url('rcs', new_rc.uuid)
return ReplicationController.convert_with_links(new_rc)
@wsme.validate(types.uuid, [ReplicationControllerPatchType])
@wsme_pecan.wsexpose(ReplicationController, types.uuid_or_name,
body=[ReplicationControllerPatchType])
def patch(self, rc_ident, patch):
"""Update an existing rc.
:param rc_ident: UUID or logical name of a ReplicationController.
:param patch: a json PATCH document to apply to this rc.
"""
rpc_rc = api_utils.get_rpc_resource('ReplicationController', rc_ident)
# Init manifest and manifest_url field because we don't store them
# in database.
rpc_rc['manifest'] = None
rpc_rc['manifest_url'] = None
try:
rc_dict = rpc_rc.as_dict()
rc = ReplicationController(**api_utils.apply_jsonpatch(rc_dict,
patch))
if rc.manifest or rc.manifest_url:
rc.parse_manifest()
except api_utils.JSONPATCH_EXCEPTIONS as e:
raise exception.PatchError(patch=patch, reason=e)
# Update only the fields that have changed
for field in objects.ReplicationController.fields:
try:
patch_val = getattr(rc, field)
except AttributeError:
# Ignore fields that aren't exposed in the API
continue
if patch_val == wtypes.Unset:
patch_val = None
if rpc_rc[field] != patch_val:
rpc_rc[field] = patch_val
if rc.manifest or rc.manifest_url:
pecan.request.rpcapi.rc_update(rpc_rc)
else:
rpc_rc.save()
return ReplicationController.convert_with_links(rpc_rc)
@wsme_pecan.wsexpose(None, types.uuid_or_name, status_code=204)
def delete(self, rc_ident):
"""Delete a ReplicationController.
        :param rc_ident: UUID or logical name of a ReplicationController.
"""
rpc_rc = api_utils.get_rpc_resource('ReplicationController', rc_ident)
pecan.request.rpcapi.rc_delete(rpc_rc.uuid)
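# Illustrative request against the patch() handler above (the /v1 URL prefix,
# the rc name and the manifest value are hypothetical); the body is a JSON
# PATCH (RFC 6902 style) document:
#
#   PATCH /v1/rcs/my-rc
#   [
#       {"op": "replace", "path": "/manifest",
#        "value": "{\"id\": \"my-rc\"}"}
#   ]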
|
|
"""Support for the Amazon Polly text to speech service."""
import logging
import voluptuous as vol
from homeassistant.components.tts import PLATFORM_SCHEMA, Provider
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_REGION = "region_name"
CONF_ACCESS_KEY_ID = "aws_access_key_id"
CONF_SECRET_ACCESS_KEY = "aws_secret_access_key"
CONF_PROFILE_NAME = "profile_name"
ATTR_CREDENTIALS = "credentials"
DEFAULT_REGION = "us-east-1"
SUPPORTED_REGIONS = [
"us-east-1",
"us-east-2",
"us-west-1",
"us-west-2",
"ca-central-1",
"eu-west-1",
"eu-central-1",
"eu-west-2",
"eu-west-3",
"ap-southeast-1",
"ap-southeast-2",
"ap-northeast-2",
"ap-northeast-1",
"ap-south-1",
"sa-east-1",
]
CONF_VOICE = "voice"
CONF_OUTPUT_FORMAT = "output_format"
CONF_SAMPLE_RATE = "sample_rate"
CONF_TEXT_TYPE = "text_type"
SUPPORTED_VOICES = [
"Zhiyu", # Chinese
"Mads",
"Naja", # Danish
"Ruben",
"Lotte", # Dutch
"Russell",
"Nicole", # English Australian
"Brian",
"Amy",
"Emma", # English
"Aditi",
"Raveena", # English, Indian
"Joey",
"Justin",
"Matthew",
"Ivy",
"Joanna",
"Kendra",
"Kimberly",
"Salli", # English
"Geraint", # English Welsh
"Mathieu",
"Celine",
"Lea", # French
"Chantal", # French Canadian
"Hans",
"Marlene",
"Vicki", # German
"Aditi", # Hindi
"Karl",
"Dora", # Icelandic
"Giorgio",
"Carla",
"Bianca", # Italian
"Takumi",
"Mizuki", # Japanese
"Seoyeon", # Korean
"Liv", # Norwegian
"Jacek",
"Jan",
"Ewa",
"Maja", # Polish
"Ricardo",
"Vitoria", # Portuguese, Brazilian
"Cristiano",
"Ines", # Portuguese, European
"Carmen", # Romanian
"Maxim",
"Tatyana", # Russian
"Enrique",
"Conchita",
"Lucia", # Spanish European
"Mia", # Spanish Mexican
"Miguel",
"Penelope", # Spanish US
"Astrid", # Swedish
"Filiz", # Turkish
"Gwyneth", # Welsh
]
SUPPORTED_OUTPUT_FORMATS = ["mp3", "ogg_vorbis", "pcm"]
SUPPORTED_SAMPLE_RATES = ["8000", "16000", "22050"]
SUPPORTED_SAMPLE_RATES_MAP = {
"mp3": ["8000", "16000", "22050"],
"ogg_vorbis": ["8000", "16000", "22050"],
"pcm": ["8000", "16000"],
}
SUPPORTED_TEXT_TYPES = ["text", "ssml"]
CONTENT_TYPE_EXTENSIONS = {"audio/mpeg": "mp3", "audio/ogg": "ogg", "audio/pcm": "pcm"}
DEFAULT_VOICE = "Joanna"
DEFAULT_OUTPUT_FORMAT = "mp3"
DEFAULT_TEXT_TYPE = "text"
DEFAULT_SAMPLE_RATES = {"mp3": "22050", "ogg_vorbis": "22050", "pcm": "16000"}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_REGION, default=DEFAULT_REGION): vol.In(SUPPORTED_REGIONS),
vol.Inclusive(CONF_ACCESS_KEY_ID, ATTR_CREDENTIALS): cv.string,
vol.Inclusive(CONF_SECRET_ACCESS_KEY, ATTR_CREDENTIALS): cv.string,
vol.Exclusive(CONF_PROFILE_NAME, ATTR_CREDENTIALS): cv.string,
vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): vol.In(SUPPORTED_VOICES),
vol.Optional(CONF_OUTPUT_FORMAT, default=DEFAULT_OUTPUT_FORMAT): vol.In(
SUPPORTED_OUTPUT_FORMATS
),
vol.Optional(CONF_SAMPLE_RATE): vol.All(
cv.string, vol.In(SUPPORTED_SAMPLE_RATES)
),
vol.Optional(CONF_TEXT_TYPE, default=DEFAULT_TEXT_TYPE): vol.In(
SUPPORTED_TEXT_TYPES
),
}
)
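# Illustrative configuration.yaml snippet matching the schema above (assuming
# the platform is registered as "amazon_polly"; the credentials are
# placeholders, and every key besides "platform" is optional because defaults
# are supplied above or in get_engine()):
#
#   tts:
#     - platform: amazon_polly
#       region_name: eu-west-1
#       aws_access_key_id: AKIA_PLACEHOLDER
#       aws_secret_access_key: "PLACEHOLDER"
#       voice: Brian
#       output_format: mp3
#       sample_rate: "22050"
#       text_type: ssml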
def get_engine(hass, config):
"""Set up Amazon Polly speech component."""
output_format = config.get(CONF_OUTPUT_FORMAT)
sample_rate = config.get(CONF_SAMPLE_RATE, DEFAULT_SAMPLE_RATES[output_format])
if sample_rate not in SUPPORTED_SAMPLE_RATES_MAP.get(output_format):
_LOGGER.error(
"%s is not a valid sample rate for %s", sample_rate, output_format
)
return None
config[CONF_SAMPLE_RATE] = sample_rate
import boto3
profile = config.get(CONF_PROFILE_NAME)
if profile is not None:
boto3.setup_default_session(profile_name=profile)
aws_config = {
CONF_REGION: config.get(CONF_REGION),
CONF_ACCESS_KEY_ID: config.get(CONF_ACCESS_KEY_ID),
CONF_SECRET_ACCESS_KEY: config.get(CONF_SECRET_ACCESS_KEY),
}
del config[CONF_REGION]
del config[CONF_ACCESS_KEY_ID]
del config[CONF_SECRET_ACCESS_KEY]
polly_client = boto3.client("polly", **aws_config)
supported_languages = []
all_voices = {}
all_voices_req = polly_client.describe_voices()
for voice in all_voices_req.get("Voices"):
all_voices[voice.get("Id")] = voice
if voice.get("LanguageCode") not in supported_languages:
supported_languages.append(voice.get("LanguageCode"))
return AmazonPollyProvider(polly_client, config, supported_languages, all_voices)
class AmazonPollyProvider(Provider):
"""Amazon Polly speech api provider."""
def __init__(self, polly_client, config, supported_languages, all_voices):
"""Initialize Amazon Polly provider for TTS."""
self.client = polly_client
self.config = config
self.supported_langs = supported_languages
self.all_voices = all_voices
self.default_voice = self.config.get(CONF_VOICE)
self.name = "Amazon Polly"
@property
def supported_languages(self):
"""Return a list of supported languages."""
return self.supported_langs
@property
def default_language(self):
"""Return the default language."""
return self.all_voices.get(self.default_voice).get("LanguageCode")
@property
def default_options(self):
"""Return dict include default options."""
return {CONF_VOICE: self.default_voice}
@property
def supported_options(self):
"""Return a list of supported options."""
return [CONF_VOICE]
    def get_tts_audio(self, message, language=None, options=None):
        """Request TTS file from Polly."""
        # guard against callers passing options=None (the default)
        options = options or {}
        voice_id = options.get(CONF_VOICE, self.default_voice)
voice_in_dict = self.all_voices.get(voice_id)
if language != voice_in_dict.get("LanguageCode"):
_LOGGER.error("%s does not support the %s language", voice_id, language)
return None, None
resp = self.client.synthesize_speech(
OutputFormat=self.config[CONF_OUTPUT_FORMAT],
SampleRate=self.config[CONF_SAMPLE_RATE],
Text=message,
TextType=self.config[CONF_TEXT_TYPE],
VoiceId=voice_id,
)
return (
CONTENT_TYPE_EXTENSIONS[resp.get("ContentType")],
resp.get("AudioStream").read(),
)
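# Illustrative use of the provider above (assumes `provider` is an already
# initialised AmazonPollyProvider with the default mp3 output format):
#
#   extension, audio_bytes = provider.get_tts_audio(
#       "Hello world", language="en-US", options={CONF_VOICE: "Joanna"})
#   # extension == "mp3"; audio_bytes holds the raw audio returned by Polly.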
|
|
#!/usr/bin/env python
import os, sys, traceback
import getpass
from threading import Thread
from subprocess import *
if(sys.hexversion < 0x03000000):
import Queue
else:
import queue as Queue
# svmtrain and gnuplot executable
is_win32 = (sys.platform == 'win32')
if not is_win32:
svmtrain_exe = "../svm-train"
gnuplot_exe = "/usr/bin/gnuplot"
else:
# example for windows
svmtrain_exe = r"..\windows\svm-train.exe"
# svmtrain_exe = r"c:\Program Files\libsvm\windows\svm-train.exe"
gnuplot_exe = r"c:\tmp\gnuplot\binary\pgnuplot.exe"
# global parameters and their default values
fold = 5
c_begin, c_end, c_step = -5, 15, 2
g_begin, g_end, g_step = 3, -15, -2
global dataset_pathname, dataset_title, pass_through_string
global out_filename, png_filename
# experimental
telnet_workers = []
ssh_workers = []
nr_local_worker = 1
# process command line options, set global parameters
def process_options(argv=sys.argv):
global fold
global c_begin, c_end, c_step
global g_begin, g_end, g_step
global dataset_pathname, dataset_title, pass_through_string
global svmtrain_exe, gnuplot_exe, gnuplot, out_filename, png_filename
usage = """\
Usage: grid.py [-log2c begin,end,step] [-log2g begin,end,step] [-v fold]
[-svmtrain pathname] [-gnuplot pathname] [-out pathname] [-png pathname]
[additional parameters for svm-train] dataset"""
if len(argv) < 2:
print(usage)
sys.exit(1)
dataset_pathname = argv[-1]
dataset_title = os.path.split(dataset_pathname)[1]
out_filename = '{0}.out'.format(dataset_title)
png_filename = '{0}.png'.format(dataset_title)
pass_through_options = []
i = 1
while i < len(argv) - 1:
if argv[i] == "-log2c":
i = i + 1
(c_begin,c_end,c_step) = map(float,argv[i].split(","))
elif argv[i] == "-log2g":
i = i + 1
(g_begin,g_end,g_step) = map(float,argv[i].split(","))
elif argv[i] == "-v":
i = i + 1
fold = argv[i]
elif argv[i] in ('-c','-g'):
print("Option -c and -g are renamed.")
print(usage)
sys.exit(1)
elif argv[i] == '-svmtrain':
i = i + 1
svmtrain_exe = argv[i]
elif argv[i] == '-gnuplot':
i = i + 1
gnuplot_exe = argv[i]
elif argv[i] == '-out':
i = i + 1
out_filename = argv[i]
elif argv[i] == '-png':
i = i + 1
png_filename = argv[i]
else:
pass_through_options.append(argv[i])
i = i + 1
pass_through_string = " ".join(pass_through_options)
assert os.path.exists(svmtrain_exe),"svm-train executable not found"
assert os.path.exists(gnuplot_exe),"gnuplot executable not found"
assert os.path.exists(dataset_pathname),"dataset not found"
gnuplot = Popen(gnuplot_exe,stdin = PIPE).stdin
def range_f(begin,end,step):
	# like range(), but also works with non-integer steps
seq = []
while True:
if step > 0 and begin > end: break
if step < 0 and begin < end: break
seq.append(begin)
begin = begin + step
return seq
def permute_sequence(seq):
n = len(seq)
if n <= 1: return seq
mid = int(n/2)
left = permute_sequence(seq[:mid])
right = permute_sequence(seq[mid+1:])
ret = [seq[mid]]
while left or right:
if left: ret.append(left.pop(0))
if right: ret.append(right.pop(0))
return ret
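# Illustrative behaviour of the two helpers above (values hand-checked):
#   range_f(1, 2, 0.5)              -> [1, 1.5, 2.0]
#   range_f(3, -3, -2)              -> [3, 1, -1, -3]
#   permute_sequence([0,1,2,3,4])   -> [2, 1, 4, 0, 3]
# permute_sequence reorders a grid axis so that coarse, well-spread values are
# tried first and the gaps are filled in later.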
def redraw(db,best_param,tofile=False):
if len(db) == 0: return
begin_level = round(max(x[2] for x in db)) - 3
step_size = 0.5
best_log2c,best_log2g,best_rate = best_param
if tofile:
gnuplot.write(b"set term png transparent small linewidth 2 medium enhanced\n")
gnuplot.write("set output \"{0}\"\n".format(png_filename.replace('\\','\\\\')).encode())
#gnuplot.write(b"set term postscript color solid\n")
#gnuplot.write("set output \"{0}.ps\"\n".format(dataset_title).encode().encode())
elif is_win32:
gnuplot.write(b"set term windows\n")
else:
gnuplot.write( b"set term x11\n")
gnuplot.write(b"set xlabel \"log2(C)\"\n")
gnuplot.write(b"set ylabel \"log2(gamma)\"\n")
gnuplot.write("set xrange [{0}:{1}]\n".format(c_begin,c_end).encode())
gnuplot.write("set yrange [{0}:{1}]\n".format(g_begin,g_end).encode())
gnuplot.write(b"set contour\n")
gnuplot.write("set cntrparam levels incremental {0},{1},100\n".format(begin_level,step_size).encode())
gnuplot.write(b"unset surface\n")
gnuplot.write(b"unset ztics\n")
gnuplot.write(b"set view 0,0\n")
gnuplot.write("set title \"{0}\"\n".format(dataset_title).encode())
gnuplot.write(b"unset label\n")
gnuplot.write("set label \"Best log2(C) = {0} log2(gamma) = {1} accuracy = {2}%\" \
at screen 0.5,0.85 center\n". \
format(best_log2c, best_log2g, best_rate).encode())
gnuplot.write("set label \"C = {0} gamma = {1}\""
" at screen 0.5,0.8 center\n".format(2**best_log2c, 2**best_log2g).encode())
gnuplot.write(b"set key at screen 0.9,0.9\n")
gnuplot.write(b"splot \"-\" with lines\n")
db.sort(key = lambda x:(x[0], -x[1]))
prevc = db[0][0]
for line in db:
if prevc != line[0]:
gnuplot.write(b"\n")
prevc = line[0]
gnuplot.write("{0[0]} {0[1]} {0[2]}\n".format(line).encode())
gnuplot.write(b"e\n")
gnuplot.write(b"\n") # force gnuplot back to prompt when term set failure
gnuplot.flush()
def calculate_jobs():
c_seq = permute_sequence(range_f(c_begin,c_end,c_step))
g_seq = permute_sequence(range_f(g_begin,g_end,g_step))
nr_c = float(len(c_seq))
nr_g = float(len(g_seq))
i = 0
j = 0
jobs = []
while i < nr_c or j < nr_g:
if i/nr_c < j/nr_g:
# increase C resolution
line = []
for k in range(0,j):
line.append((c_seq[i],g_seq[k]))
i = i + 1
jobs.append(line)
else:
# increase g resolution
line = []
for k in range(0,i):
line.append((c_seq[k],g_seq[j]))
j = j + 1
jobs.append(line)
return jobs
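# Illustrative trace of calculate_jobs() for a tiny 2x2 grid, i.e. with
# c_seq = [c0, c1] and g_seq = [g0, g1] (hand-checked against the code above):
#   jobs == [[],                      # first pass, nothing to refine yet
#            [(c0, g0)],              # coarse corner
#            [(c0, g1)],              # refine along g
#            [(c1, g0), (c1, g1)]]    # refine along c
# In main() the results are gathered and the contour plot is redrawn once per
# inner list, so the picture is refined progressively rather than row by row.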
class WorkerStopToken: # used to notify the worker to stop
pass
class Worker(Thread):
def __init__(self,name,job_queue,result_queue):
Thread.__init__(self)
self.name = name
self.job_queue = job_queue
self.result_queue = result_queue
def run(self):
while True:
(cexp,gexp) = self.job_queue.get()
if cexp is WorkerStopToken:
self.job_queue.put((cexp,gexp))
# print('worker {0} stop.'.format(self.name))
break
try:
rate = self.run_one(2.0**cexp,2.0**gexp)
if rate is None: raise RuntimeError("get no rate")
except:
                # we failed; put the job back so another worker can take it,
                # then quit
traceback.print_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
self.job_queue.put((cexp,gexp))
print('worker {0} quit.'.format(self.name))
break
else:
self.result_queue.put((self.name,cexp,gexp,rate))
class LocalWorker(Worker):
def run_one(self,c,g):
cmdline = '{0} -c {1} -g {2} -v {3} {4} {5}'.format \
(svmtrain_exe,c,g,fold,pass_through_string,dataset_pathname)
result = Popen(cmdline,shell=True,stdout=PIPE).stdout
for line in result.readlines():
if str(line).find("Cross") != -1:
return float(line.split()[-1][0:-1])
class SSHWorker(Worker):
def __init__(self,name,job_queue,result_queue,host):
Worker.__init__(self,name,job_queue,result_queue)
self.host = host
self.cwd = os.getcwd()
def run_one(self,c,g):
cmdline = 'ssh -x {0} "cd {1}; {2} -c {3} -g {4} -v {5} {6} {7}"'.format \
(self.host,self.cwd, \
svmtrain_exe,c,g,fold,pass_through_string,dataset_pathname)
result = Popen(cmdline,shell=True,stdout=PIPE).stdout
for line in result.readlines():
if str(line).find("Cross") != -1:
return float(line.split()[-1][0:-1])
class TelnetWorker(Worker):
def __init__(self,name,job_queue,result_queue,host,username,password):
Worker.__init__(self,name,job_queue,result_queue)
self.host = host
self.username = username
self.password = password
def run(self):
import telnetlib
self.tn = tn = telnetlib.Telnet(self.host)
tn.read_until("login: ")
tn.write(self.username + "\n")
tn.read_until("Password: ")
tn.write(self.password + "\n")
# XXX: how to know whether login is successful?
tn.read_until(self.username)
#
print('login ok', self.host)
tn.write("cd "+os.getcwd()+"\n")
Worker.run(self)
tn.write("exit\n")
def run_one(self,c,g):
cmdline = '{0} -c {1} -g {2} -v {3} {4} {5}'.format \
(svmtrain_exe,c,g,fold,pass_through_string,dataset_pathname)
result = self.tn.write(cmdline+'\n')
(idx,matchm,output) = self.tn.expect(['Cross.*\n'])
for line in output.split('\n'):
if str(line).find("Cross") != -1:
return float(line.split()[-1][0:-1])
def main():
# set parameters
process_options()
# put jobs in queue
jobs = calculate_jobs()
job_queue = Queue.Queue(0)
result_queue = Queue.Queue(0)
for line in jobs:
for (c,g) in line:
job_queue.put((c,g))
# hack the queue to become a stack --
# this is important when some thread
    # failed and re-put a job. If we still
    # used FIFO, the job would be put
    # at the end of the queue, and the graph
    # would only be updated at the end
job_queue._put = job_queue.queue.appendleft
# fire telnet workers
if telnet_workers:
nr_telnet_worker = len(telnet_workers)
username = getpass.getuser()
password = getpass.getpass()
for host in telnet_workers:
TelnetWorker(host,job_queue,result_queue,
host,username,password).start()
# fire ssh workers
if ssh_workers:
for host in ssh_workers:
SSHWorker(host,job_queue,result_queue,host).start()
# fire local workers
for i in range(nr_local_worker):
LocalWorker('local',job_queue,result_queue).start()
# gather results
done_jobs = {}
result_file = open(out_filename, 'w')
db = []
best_rate = -1
best_c1,best_g1 = None,None
for line in jobs:
for (c,g) in line:
while (c, g) not in done_jobs:
(worker,c1,g1,rate) = result_queue.get()
done_jobs[(c1,g1)] = rate
result_file.write('{0} {1} {2}\n'.format(c1,g1,rate))
result_file.flush()
if (rate > best_rate) or (rate==best_rate and g1==best_g1 and c1<best_c1):
best_rate = rate
best_c1,best_g1=c1,g1
best_c = 2.0**c1
best_g = 2.0**g1
print("[{0}] {1} {2} {3} (best c={4}, g={5}, rate={6})".format \
(worker,c1,g1,rate, best_c, best_g, best_rate))
db.append((c,g,done_jobs[(c,g)]))
redraw(db,[best_c1, best_g1, best_rate])
redraw(db,[best_c1, best_g1, best_rate],True)
job_queue.put((WorkerStopToken,None))
print("{0} {1} {2}".format(best_c, best_g, best_rate))
main()
|
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests exercising the analytics internals (not individual analytics)."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import time
from webtest import app
from common import catch_and_log
from common import crypto
from common import utils as common_utils
from models import data_sources
from models import entities
from models import transforms
from models.data_sources import utils as data_sources_utils
from google.appengine.ext import db
# Data source must be registered before we import actions; actions imports
# 'main', which does all setup and registration in package scope.
class Character(entities.BaseEntity):
user_id = db.StringProperty(indexed=True)
goal = db.StringProperty(indexed=True)
name = db.StringProperty(indexed=False)
age = db.IntegerProperty(indexed=False)
rank = db.IntegerProperty(indexed=True)
_PROPERTY_EXPORT_BLACKLIST = [name]
def for_export(self, transform_fn):
model = super(Character, self).for_export(transform_fn)
model.user_id = transform_fn(self.user_id)
return model
@classmethod
def safe_key(cls, db_key, transform_fn):
return db.Key.from_path(cls.kind(), transform_fn(db_key.id_or_name()))
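# Illustrative effect of the PII handling above (the transform function and
# the ids are hypothetical stand-ins for whatever encoder the data source
# supplies):
#   c = Character(user_id='001', name='Charlie')
#   exported = c.for_export(lambda s: 'enc(' + s + ')')
#   # exported.user_id == 'enc(001)'
#   # 'name' never leaves the server because it is listed in
#   # _PROPERTY_EXPORT_BLACKLIST, and safe_key() rebuilds keys from the
#   # transformed id so raw ids are not leaked via entity keys.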
class CharacterDataSource(data_sources.AbstractDbTableRestDataSource):
@classmethod
def get_name(cls):
return 'character'
@classmethod
def get_entity_class(cls):
return Character
data_sources.Registry.register(CharacterDataSource)
from tests.functional import actions
class DataSourceTest(actions.TestBase):
def setUp(self):
super(DataSourceTest, self).setUp()
with common_utils.Namespace(self.NAMESPACE):
self.characters = [
Character(
user_id='001', goal='L', rank=4, age=8, name='Charlie'),
Character(
user_id='002', goal='L', rank=6, age=6, name='Sally'),
Character(
user_id='003', goal='L', rank=0, age=8, name='Lucy'),
Character(
user_id='004', goal='G', rank=2, age=7, name='Linus'),
Character(
user_id='005', goal='G', rank=8, age=8, name='Max'),
Character(
user_id='006', goal='G', rank=1, age=8, name='Patty'),
Character(
user_id='007', goal='R', rank=9, age=35, name='Othmar'),
Character(
user_id='008', goal='R', rank=5, age=2, name='Snoopy'),
Character(
user_id='009', goal='R', rank=7, age=8, name='Pigpen'),
Character(
user_id='010', goal='R', rank=3, age=8, name='Violet'),
]
for c in self.characters:
c.put()
def tearDown(self):
with common_utils.Namespace(self.NAMESPACE):
db.delete(Character.all(keys_only=True).run())
super(DataSourceTest, self).tearDown()
class PiiExportTest(DataSourceTest):
COURSE_NAME = 'test_course'
ADMIN_EMAIL = 'admin@foo.com'
NAMESPACE = 'ns_' + COURSE_NAME
def setUp(self):
super(PiiExportTest, self).setUp()
self.app_context = actions.simple_add_course(
self.COURSE_NAME, self.ADMIN_EMAIL, 'The Course')
self.data_source_context = (
CharacterDataSource.get_context_class().build_blank_default({}, 20))
def test_get_non_pii_data(self):
data = self._get_page_data(0)
self.assertEquals(10, len(data))
for item in data:
self.assertNotIn('name', item)
def test_get_non_pii_schema(self):
schema = self._get_schema()
self.assertNotIn('name', schema)
def test_get_pii_data(self):
self.data_source_context.send_uncensored_pii_data = True
data = self._get_page_data(0)
self.assertEquals(10, len(data))
for item in data:
self.assertIn('name', item)
def test_get_pii_schema(self):
self.data_source_context.send_uncensored_pii_data = True
schema = self._get_schema()
self.assertIn('name', schema)
def _get_schema(self):
log = catch_and_log.CatchAndLog()
schema = CharacterDataSource.get_schema(
self.app_context, log, self.data_source_context)
return schema
def _get_page_data(self, page_number):
log = catch_and_log.CatchAndLog()
schema = self._get_schema()
data, _ = CharacterDataSource.fetch_values(
self.app_context, self.data_source_context, schema, log,
page_number)
return data
class PaginatedTableTest(DataSourceTest):
"""Verify operation of paginated access to AppEngine DB tables."""
NAMESPACE = ''
def test_simple_read(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
response = transforms.loads(self.get('/rest/data/character/items').body)
self.assertIn('data', response)
self._verify_data(self.characters, response['data'])
self.assertIn('schema', response)
self.assertIn('user_id', response['schema'])
self.assertIn('age', response['schema'])
self.assertIn('rank', response['schema'])
self.assertNotIn('name', response['schema']) # blacklisted
self.assertIn('log', response)
self.assertIn('source_context', response)
self.assertIn('params', response)
self.assertEquals([], response['params']['filters'])
self.assertEquals([], response['params']['orderings'])
def test_admin_required(self):
with self.assertRaisesRegexp(app.AppError, 'Bad response: 403'):
self.get('/rest/data/character/items')
def test_filtered_read(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
# Single greater-equal filter
response = transforms.loads(self.get(
'/rest/data/character/items?filter=rank>=7').body)
self.assertEquals(3, len(response['data']))
for character in response['data']:
self.assertTrue(character['rank'] >= 7)
# Single less-than filter
response = transforms.loads(self.get(
'/rest/data/character/items?filter=rank<7').body)
self.assertEquals(7, len(response['data']))
for character in response['data']:
self.assertTrue(character['rank'] < 7)
# Multiple filters finding some rows
response = transforms.loads(self.get(
'/rest/data/character/items?filter=rank<5&filter=goal=L').body)
self.assertEquals(2, len(response['data']))
for character in response['data']:
self.assertTrue(character['rank'] < 5)
self.assertTrue(character['goal'] == 'L')
def test_ordered_read(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
# Single ordering by rank
response = transforms.loads(self.get(
'/rest/data/character/items?ordering=rank').body)
self.assertEquals(10, len(response['data']))
prev_rank = -1
for character in response['data']:
self.assertTrue(character['rank'] > prev_rank)
prev_rank = character['rank']
# Single ordering by rank, descending
response = transforms.loads(self.get(
'/rest/data/character/items?ordering=-rank').body)
self.assertEquals(10, len(response['data']))
prev_rank = 10
for character in response['data']:
self.assertTrue(character['rank'] < prev_rank)
prev_rank = character['rank']
# Order by goal then rank
response = transforms.loads(self.get(
'/rest/data/character/items?ordering=goal&ordering=rank').body)
self.assertEquals(10, len(response['data']))
prev_goal = 'A'
prev_rank = -1
for character in response['data']:
self.assertTrue(character['goal'] >= prev_goal)
if character['goal'] != prev_goal:
prev_rank = -1
prev_goal = character['goal']
else:
self.assertTrue(character['rank'] > prev_rank)
prev_rank = character['rank']
def test_filtered_and_ordered(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
response = transforms.loads(self.get(
'/rest/data/character/items?filter=rank<7&ordering=rank').body)
self.assertEquals(7, len(response['data']))
prev_rank = -1
for character in response['data']:
self.assertTrue(character['rank'] > prev_rank)
self.assertTrue(character['rank'] < 7)
def test_illegal_filters_and_orderings(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
response = transforms.loads(self.get(
'/rest/data/character/items?filter=foo').body)
self._assert_have_critical_error(
response,
'Filter specification "foo" is not of the form: <name><op><value>')
response = transforms.loads(self.get(
'/rest/data/character/items?filter=foo=9').body)
self._assert_have_critical_error(
response,
'field "foo" which is not in the schema for type "Character"')
response = transforms.loads(self.get(
'/rest/data/character/items?filter=rank=kitten').body)
self._assert_have_critical_error(
response,
'invalid literal for int() with base 10: \'kitten\'')
response = transforms.loads(self.get(
'/rest/data/character/items?filter=rank<<7').body)
self._assert_have_critical_error(
response,
'"rank<<7" uses an unsupported comparison operation "<<"')
response = transforms.loads(self.get(
'/rest/data/character/items?ordering=foo').body)
self._assert_have_critical_error(
response,
'Invalid property name \'foo\'')
response = transforms.loads(self.get(
'/rest/data/character/items?ordering=age').body)
self._assert_have_critical_error(
response,
'Property \'age\' is not indexed')
response = transforms.loads(self.get(
'/rest/data/character/items?filter=age>5').body)
self._assert_have_critical_error(
response,
'Property \'age\' is not indexed')
response = transforms.loads(self.get(
'/rest/data/character/items?filter=rank<7&ordering=goal').body)
self._assert_have_critical_error(
response,
'First ordering property must be the same as inequality filter')
def _assert_have_critical_error(self, response, expected_message):
email = 'admin@google.com'
actions.login(email, is_admin=True)
for log in response['log']:
if (log['level'] == 'critical' and
expected_message in log['message']):
return
self.fail('Expected a critical error containing "%s"' %
expected_message)
def test_pii_encoding(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
token = data_sources_utils.generate_data_source_token(
crypto.XsrfTokenManager)
response = transforms.loads(self.get('/rest/data/character/items').body)
for d in response['data']:
# Ensure that field marked as needing transformation is cleared
# when we don't pass in an XSRF token used for generating a secret
# for encrypting.
self.assertEquals('None', d['user_id'])
self.assertEquals(str(db.Key.from_path(Character.kind(), 'None')),
d['key'])
# Ensure that field marked for blacklist is suppressed.
self.assertFalse('name' in d)
response = transforms.loads(self.get(
'/rest/data/character/items?data_source_token=' + token).body)
for d in response['data']:
            # Ensure that field marked as needing transformation is present
            # (in encoded form) when we do pass in an XSRF token used for
            # generating a secret for encrypting.
self.assertIsNotNone(d['user_id'])
self.assertNotEquals('None', d['key'])
# Ensure that field marked for blacklist is still suppressed.
self.assertFalse('name' in d)
def test_pii_encoding_changes(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
token1 = data_sources_utils.generate_data_source_token(
crypto.XsrfTokenManager)
time.sleep(1) # Legit: XSRF token is time-based, so will change.
token2 = data_sources_utils.generate_data_source_token(
crypto.XsrfTokenManager)
self.assertNotEqual(token1, token2)
response1 = transforms.loads(self.get(
'/rest/data/character/items?data_source_token=' + token1).body)
response2 = transforms.loads(self.get(
'/rest/data/character/items?data_source_token=' + token2).body)
for c1, c2 in zip(response1['data'], response2['data']):
self.assertNotEquals(c1['user_id'], c2['user_id'])
self.assertNotEquals(c1['key'], c2['key'])
def test_sequential_pagination(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
response = transforms.loads(self.get(
'/rest/data/character/items?chunk_size=3&page_number=0').body)
source_context = response['source_context']
self.assertEquals(0, response['page_number'])
self._verify_data(self.characters[:3], response['data'])
self._assert_have_only_logs(response, [
'Creating new context for given parameters',
'fetch page 0 start cursor missing; end cursor missing',
'fetch page 0 using limit 3',
'fetch page 0 saving end cursor',
])
response = transforms.loads(self.get(
'/rest/data/character/items?chunk_size=3&page_number=1'
'&source_context=%s' % source_context).body)
source_context = response['source_context']
self.assertEquals(1, response['page_number'])
self._verify_data(self.characters[3:6], response['data'])
self._assert_have_only_logs(response, [
'Existing context matches parameters; using existing context',
'fetch page 1 start cursor present; end cursor missing',
'fetch page 1 using limit 3',
'fetch page 1 saving end cursor',
])
response = transforms.loads(self.get(
'/rest/data/character/items?chunk_size=3&page_number=2'
'&source_context=%s' % source_context).body)
source_context = response['source_context']
self.assertEquals(2, response['page_number'])
self._verify_data(self.characters[6:9], response['data'])
self._assert_have_only_logs(response, [
'Existing context matches parameters; using existing context',
'fetch page 2 start cursor present; end cursor missing',
'fetch page 2 using limit 3',
'fetch page 2 saving end cursor',
])
response = transforms.loads(self.get(
'/rest/data/character/items?chunk_size=3&page_number=3'
'&source_context=%s' % source_context).body)
source_context = response['source_context']
self.assertEquals(3, response['page_number'])
self._verify_data(self.characters[9:], response['data'])
self._assert_have_only_logs(response, [
'Existing context matches parameters; using existing context',
'fetch page 3 start cursor present; end cursor missing',
'fetch page 3 using limit 3',
'fetch page 3 is partial; not saving end cursor',
])
def test_non_present_page_request(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
response = transforms.loads(self.get(
'/rest/data/character/items?chunk_size=9&page_number=5').body)
self._verify_data(self.characters[9:], response['data'])
self.assertEquals(1, response['page_number'])
self._assert_have_only_logs(response, [
'Creating new context for given parameters',
'fetch page 0 start cursor missing; end cursor missing',
'fetch page 0 using limit 9',
'fetch page 0 saving end cursor',
'fetch page 1 start cursor present; end cursor missing',
'fetch page 1 using limit 9',
'fetch page 1 is partial; not saving end cursor',
'Fewer pages available than requested. Stopping at last page 1',
])
def test_empty_last_page_request(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
response = transforms.loads(self.get(
'/rest/data/character/items?chunk_size=10&page_number=3').body)
self._verify_data([], response['data'])
self.assertEquals(1, response['page_number'])
self._assert_have_only_logs(response, [
'Creating new context for given parameters',
'fetch page 0 start cursor missing; end cursor missing',
'fetch page 0 using limit 10',
'fetch page 0 saving end cursor',
'fetch page 1 start cursor present; end cursor missing',
'fetch page 1 using limit 10',
'fetch page 1 is partial; not saving end cursor',
'Fewer pages available than requested. Stopping at last page 1',
])
def test_nonsequential_pagination(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
response = transforms.loads(self.get(
'/rest/data/character/items?chunk_size=3&page_number=2').body)
source_context = response['source_context']
self.assertEquals(2, response['page_number'])
self._verify_data(self.characters[6:9], response['data'])
self._assert_have_only_logs(response, [
'Creating new context for given parameters',
'fetch page 0 start cursor missing; end cursor missing',
'fetch page 0 using limit 3',
'fetch page 0 saving end cursor',
'fetch page 1 start cursor present; end cursor missing',
'fetch page 1 using limit 3',
'fetch page 1 saving end cursor',
'fetch page 2 start cursor present; end cursor missing',
'fetch page 2 using limit 3',
'fetch page 2 saving end cursor',
])
response = transforms.loads(self.get(
'/rest/data/character/items?chunk_size=3&page_number=1'
'&source_context=%s' % source_context).body)
source_context = response['source_context']
self._verify_data(self.characters[3:6], response['data'])
self._assert_have_only_logs(response, [
'Existing context matches parameters; using existing context',
'fetch page 1 start cursor present; end cursor present',
])
def test_pagination_filtering_and_ordering(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
response = transforms.loads(self.get(
'/rest/data/character/items?filter=rank>=5&ordering=rank'
'&chunk_size=3&page_number=1').body)
source_context = response['source_context']
self.assertEquals(1, response['page_number'])
self._verify_data([self.characters[4], self.characters[6]],
response['data'])
self._assert_have_only_logs(response, [
'Creating new context for given parameters',
'fetch page 0 start cursor missing; end cursor missing',
'fetch page 0 using limit 3',
'fetch page 0 saving end cursor',
'fetch page 1 start cursor present; end cursor missing',
'fetch page 1 using limit 3',
'fetch page 1 is partial; not saving end cursor',
])
response = transforms.loads(self.get(
'/rest/data/character/items?filter=rank>=5&ordering=rank'
'&chunk_size=3&page_number=0'
'&source_context=%s' % source_context).body)
source_context = response['source_context']
self.assertEquals(0, response['page_number'])
self._verify_data([self.characters[7], self.characters[1],
self.characters[8]], response['data'])
self._assert_have_only_logs(response, [
'Existing context matches parameters; using existing context',
'fetch page 0 start cursor missing; end cursor present',
])
def test_parameters_can_be_omitted_if_using_source_context(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
response = transforms.loads(self.get(
'/rest/data/character/items?filter=rank>=5&ordering=rank'
'&chunk_size=3&page_number=1').body)
source_context = response['source_context']
self._verify_data([self.characters[4], self.characters[6]],
response['data'])
# This should load identical items, without having to respecify
# filters, ordering, chunk_size.
response = transforms.loads(self.get(
'/rest/data/character/items?page_number=1'
'&source_context=%s' % source_context).body)
self.assertEquals(1, response['page_number'])
self._verify_data([self.characters[4], self.characters[6]],
response['data'])
self._assert_have_only_logs(response, [
'Continuing use of existing context',
'fetch page 1 start cursor present; end cursor missing',
'fetch page 1 using limit 3',
'fetch page 1 is partial; not saving end cursor',
])
def test_build_default_context(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
response = transforms.loads(self.get('/rest/data/character/items').body)
self._assert_have_only_logs(response, [
'Building new default context',
'fetch page 0 start cursor missing; end cursor missing',
'fetch page 0 using limit 10000',
'fetch page 0 is partial; not saving end cursor',
])
def test_change_filtering_invalidates_context(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
response = transforms.loads(self.get(
'/rest/data/character/items?filter=rank>=5'
'&chunk_size=3&page_number=0').body)
source_context = response['source_context']
response = transforms.loads(self.get(
'/rest/data/character/items?filter=rank<5'
'&chunk_size=3&page_number=0'
'&source_context=%s' % source_context).body)
source_context = response['source_context']
self._verify_data([self.characters[2], self.characters[5],
self.characters[3]], response['data'])
self._assert_have_only_logs(response, [
'Existing context and parameters mismatch; '
'discarding existing and creating new context.',
'fetch page 0 start cursor missing; end cursor missing',
'fetch page 0 using limit 3',
'fetch page 0 saving end cursor',
])
def test_change_ordering_invalidates_context(self):
email = 'admin@google.com'
actions.login(email, is_admin=True)
response = transforms.loads(self.get(
'/rest/data/character/items?ordering=rank'
'&chunk_size=3&page_number=0').body)
source_context = response['source_context']
response = transforms.loads(self.get(
'/rest/data/character/items?ordering=-rank'
'&chunk_size=3&page_number=0'
'&source_context=%s' % source_context).body)
source_context = response['source_context']
self._verify_data([self.characters[6], self.characters[4],
self.characters[8]], response['data'])
self._assert_have_only_logs(response, [
'Existing context and parameters mismatch; '
'discarding existing and creating new context.',
'fetch page 0 start cursor missing; end cursor missing',
'fetch page 0 using limit 3',
'fetch page 0 saving end cursor',
])
def _assert_have_only_logs(self, response, messages):
for message in messages:
found_index = -1
for index, log in enumerate(response['log']):
if message in log['message']:
found_index = index
break
if found_index < 0:
self.fail('Expected to find message "%s" in logs' % message)
else:
del response['log'][found_index]
if response['log']:
self.fail('Unexpected message "%s"' % response['log'][0])
def _verify_data(self, characters, data):
for c, d in zip(characters, data):
self.assertEquals(c.rank, d['rank'])
self.assertEquals(c.age, d['age'])
|
|
#
# Copyright (c) 2013-2016 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import logging
from lib.common import compat
from lib.irma.common.utils import IrmaReturnCode, IrmaScanStatus, IrmaProbeType
from lib.irma.common.exceptions import IrmaDatabaseResultNotFound, \
IrmaValueError, IrmaTaskError, IrmaFtpError
import frontend.controllers.braintasks as celery_brain
import frontend.controllers.ftpctrl as ftp_ctrl
from frontend.helpers.sessions import session_transaction
from frontend.models.sqlobjects import Scan, File, FileWeb, ProbeResult
from lib.common.mimetypes import Magic
from lib.irma.common.utils import IrmaScanRequest
from frontend.controllers import braintasks
import ntpath
from lib.common.utils import save_to_file
from lib.common.hash import sha256sum, sha1sum, md5sum
from frontend.helpers.utils import build_sha256_path
from fasteners import interprocess_locked
from config.parser import get_lock_path
log = logging.getLogger(__name__)
interprocess_lock_path = get_lock_path()
# ===================
# Internals helpers
# ===================
def _new_file(fileobj, session):
sha256 = sha256sum(fileobj)
# split files between subdirs
path = build_sha256_path(sha256)
try:
# The file exists
log.debug("try opening file with sha256: %s", sha256)
file = File.load_from_sha256(sha256, session)
if file.path is None:
log.debug("file sample missing writing it")
save_to_file(fileobj, path)
file.path = path
except IrmaDatabaseResultNotFound:
        # It doesn't exist yet
time = compat.timestamp()
sha1 = sha1sum(fileobj)
md5 = md5sum(fileobj)
# determine file mimetype
magic = Magic()
        # magic only deals with buffers
        # feed it with a 4MB buffer
mimetype = magic.from_buffer(fileobj.read(2 ** 22))
size = save_to_file(fileobj, path)
log.debug("not present, saving, sha256 %s sha1 %s"
"md5 %s size %s mimetype: %s",
sha256, sha1, md5, size, mimetype)
file = File(sha256, sha1, md5, size, mimetype, path, time, time)
session.add(file)
return file
def _new_fileweb(scan, filename, fileobj, session):
log.debug("filename: %s", filename)
file = _new_file(fileobj, session)
(path, name) = ntpath.split(filename)
file_web = FileWeb(file, name, path, scan)
session.add(file_web)
session.commit()
return file_web
def _add_empty_result(fw, probelist, scan, session):
log.debug("fw: %s", fw.name)
scan_known_results = _fetch_known_results(fw.file, scan, session)
updated_probelist = []
for probe_name in probelist:
# Fetch the ref results for the file
ref_results = filter(lambda x: x.name == probe_name,
fw.file.ref_results)
# Fetch the already produced result in the current scan
scan_results = filter(lambda x: x.name == probe_name,
scan_known_results)
if len(ref_results) == 1 and not scan.force:
            # we asked for results already present
            # and we found one, so use it
probe_result = ref_results[0]
fw.probe_results.append(probe_result)
log.debug("link refresult for %s probe %s",
fw.name,
probe_name)
elif scan.force and len(scan_results) == 1:
# We ask for a new analysis
# but there is already one in current scan
# just link it
log.debug("link scanresult for %s probe %s",
fw.name,
probe_name)
probe_result = scan_results[0]
fw.probe_results.append(probe_result)
else:
            # result is not yet known, or a new analysis is forced
# create empty result
# TODO probe types
log.debug("creating empty result for %s probe %s",
fw.name,
probe_name)
probe_result = ProbeResult(
None,
probe_name,
None,
None,
file_web=fw
)
            # A scan job should be sent,
            # so keep the probe in scan_request
updated_probelist.append(probe_name)
session.add(probe_result)
session.commit()
return updated_probelist
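# Illustrative summary of the branches in _add_empty_result() above, for a
# single (file, probe) pair:
#   - a reference result exists and scan.force is False -> reuse it
#   - scan.force is True but the same scan already produced a result -> link it
#   - otherwise -> create an empty ProbeResult and keep the probe in the
#     returned probe list so a new job is launched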
def _fetch_known_results(file, scan, session):
scan_known_result = []
known_fw_list = FileWeb.load_by_scanid_fileid(scan.id, file.id, session)
if len(known_fw_list) > 1:
log.debug("found %d file in current scan",
len(known_fw_list))
scan_known_result = known_fw_list[0].probe_results
log.debug("%d known results",
len(scan_known_result))
return scan_known_result
def _add_empty_results(fw_list, scan_request, scan, session):
log.debug("scanid : %s scan_request: %s", scan.external_id,
scan_request.to_dict())
new_scan_request = IrmaScanRequest()
for fw in fw_list:
probelist = scan_request.get_probelist(fw.file.sha256)
updated_probe_list = _add_empty_result(fw, probelist, scan, session)
# Update scan_request according to results already known linked
# in _add_empty_result
if len(updated_probe_list) > 0:
filehash = fw.file.sha256
mimetype = scan_request.get_mimetype(filehash)
log.debug("Update scan_request for file %s"
"previously asked %s now %s",
filehash, scan_request.get_probelist(filehash),
updated_probe_list)
new_scan_request.add_file(filehash,
updated_probe_list,
mimetype)
log.debug("new scan_request %s", new_scan_request.to_dict())
return new_scan_request
def _create_scan_request(fw_list, probelist, mimetype_filtering):
# Create scan request
# dict of sha256 : probe_list
# force parameter taken into account
log.debug("probelist: %s mimetype_filtering: %s",
probelist, mimetype_filtering)
scan_request = IrmaScanRequest()
for fw in fw_list:
scan_request.add_file(fw.file.sha256,
probelist,
fw.file.mimetype)
if mimetype_filtering is True:
srdict = scan_request.to_dict()
filtered_srdict = braintasks.mimetype_filter_scan_request(srdict)
scan_request = IrmaScanRequest(filtered_srdict)
return scan_request
def _sanitize_res(d):
if isinstance(d, unicode):
# Fix for JSONB
return d.replace("\u0000", "").replace(u"\x00", "")
elif isinstance(d, list):
return [_sanitize_res(x) for x in d]
elif isinstance(d, dict):
new = {}
for k, v in d.iteritems():
newk = k.replace('.', '_').replace('$', '')
new[newk] = _sanitize_res(v)
return new
else:
return d
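# Illustrative behaviour of _sanitize_res() above (Python 2 semantics, key and
# value are hypothetical): NUL bytes are stripped from unicode values and
# '.' / '$' are rewritten in keys so the result can be stored safely:
#   _sanitize_res({u"av.sig$": u"Eicar\x00Test"}) == {u"av_sig": u"EicarTest"}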
def _append_new_files_to_scan(scan, uploaded_files, session):
new_fws = []
for (file_name, file_sha256) in uploaded_files.items():
file_obj = ftp_ctrl.download_file_data(scan.external_id, file_sha256)
fw = _new_fileweb(scan, file_name, file_obj, session)
file_obj.close()
log.debug("scan %s: new fileweb id %s for file %s",
scan.external_id, fw.external_id, fw.name)
new_fws.append(fw)
return new_fws
def _resubmit_files(scan, parent_file, resubmit_fws, hash_uploaded, session):
fws = parent_file.files_web
if len(fws) == 0:
log.error("file %s not found in scan", parent_file.sha256)
return
fws_filtered = []
for fw in resubmit_fws:
        # Either fw's file is already in the scan, so duplicate its results
if fw.file.sha256 in hash_uploaded:
# grab probelist from filewebs linked to the same file
# in current scan
probelist = [p.name for p in _fetch_known_results(fw.file,
scan, session)]
# and add link to their results
_add_empty_result(fw, probelist, scan, session)
else:
# if new to scan, build a new one
# (done later in _add_empty_results)
fws_filtered.append(fw)
log.debug("scan %s: %d new files to resubmit",
scan.external_id, len(fws_filtered))
if len(fws_filtered) != 0:
scan_request = _create_scan_request(fws_filtered,
scan.get_probelist(),
scan.mimetype_filtering)
scan_request = _add_empty_results(fws_filtered, scan_request,
scan, session)
celery_brain.scan_launch(scan.external_id, scan_request.to_dict())
return
def _fetch_probe_result(fw, probe):
pr_list = filter(lambda x: x.name == probe, fw.probe_results)
if len(pr_list) > 1:
log.error("Integrity error: multiple results for "
"file {0} probe {1}".format(fw.name, probe))
return pr_list[0]
def _update_ref_results(fw, file, pr):
rr_list = filter(lambda x: x.name == pr.name, file.ref_results)
if len(rr_list) == 0:
# current probe is not part of ref results
# just add it
file.ref_results.append(pr)
elif len(rr_list) == 1:
# a reference result already exist
# replace it
file.ref_results.remove(rr_list[0])
file.ref_results.append(pr)
else:
log.error("Integrity error: multiple refresults for "
"file {0} probe {1}".format(file.sha256, pr.name))
return
# ================
# Public methods
# ================
def add_files(scan, files, session):
""" add file(s) to the specified scan
    :param scan: scan object to add the files to
    :param files: dict of {filename: file-obj}
:rtype: int
:return: int - total number of files for the scan
:raise: IrmaDataBaseError, IrmaValueError
"""
log.debug("scanid: %s", scan.external_id)
IrmaScanStatus.filter_status(scan.status,
IrmaScanStatus.empty,
IrmaScanStatus.ready)
if scan.status == IrmaScanStatus.empty:
# on first file added update status to 'ready'
scan.set_status(IrmaScanStatus.ready)
for (filename, data) in files.items():
# Using ntpath.split as it handles
# windows path and Linux path
log.debug("filename: %s", filename)
_new_fileweb(scan, filename, data, session)
session.commit()
# The launch operation is divided in two parts:
# one is synchronous, the other, called by
# a celery task, is asynchronous (FTP transfer)
def check_probe(scan, probelist, session):
""" check_probe specified scan
:param scanid: id returned by scan_new
:rtype: dict of 'code': int, 'msg': str [, optional 'probe_list':list]
:return:
on success 'probe_list' is the list of probes used for the scan
on error 'msg' gives reason message
:raise: IrmaDataBaseError, IrmaValueError
"""
IrmaScanStatus.filter_status(scan.status,
IrmaScanStatus.ready,
IrmaScanStatus.ready)
all_probe_list = celery_brain.probe_list()
if probelist is not None:
unknown_probes = []
for p in probelist:
if p not in all_probe_list:
unknown_probes.append(p)
if len(unknown_probes) != 0:
reason = "probe {0} unknown".format(", ".join(unknown_probes))
raise IrmaValueError(reason)
else:
probelist = all_probe_list
log.debug("scanid: %s probelist: %s", scan.external_id, probelist)
scan.set_probelist(probelist)
session.commit()
def cancel(scan, session):
""" cancel all remaining jobs for specified scan
    :param scan: scan object to cancel
:rtype: dict of 'cancel_details': total':int, 'finished':int,
'cancelled':int
:return:
informations about number of cancelled jobs by irma-brain
:raise: IrmaDatabaseError, IrmaTaskError
"""
log.debug("scanid: %s", scan.external_id)
if scan.status < IrmaScanStatus.uploaded:
# If not launched answer directly
scan.set_status(IrmaScanStatus.cancelled)
session.commit()
return None
if scan.status != IrmaScanStatus.launched:
# If too late answer directly
status_str = IrmaScanStatus.label[scan.status]
if IrmaScanStatus.is_error(scan.status):
# let the cancel finish and keep the error status
return None
else:
reason = "can not cancel scan in {0} status".format(status_str)
raise IrmaValueError(reason)
# Else ask brain for job cancel
(retcode, res) = celery_brain.scan_cancel(scan.external_id)
if retcode == IrmaReturnCode.success:
s_processed = IrmaScanStatus.label[IrmaScanStatus.processed]
if 'cancel_details' in res:
scan.set_status(IrmaScanStatus.cancelled)
session.commit()
return res['cancel_details']
elif res['status'] == s_processed:
# if scan is finished for the brain
# it means we are just waiting for results
scan.set_status(IrmaScanStatus.processed)
session.commit()
reason = "can not cancel scan in {0} status".format(res['status'])
raise IrmaValueError(reason)
else:
raise IrmaTaskError(res)
# Used by tasks.py, second part of the scan launch operation
def launch_asynchronous(scanid):
log.debug("scanid: %s", scanid)
with session_transaction() as session:
scan = Scan.load_from_ext_id(scanid, session=session)
IrmaScanStatus.filter_status(scan.status,
IrmaScanStatus.ready,
IrmaScanStatus.ready)
scan_request = _create_scan_request(scan.files_web,
scan.get_probelist(),
scan.mimetype_filtering)
scan_request = _add_empty_results(scan.files_web, scan_request,
scan, session)
# Nothing to do
if scan_request.nb_files == 0:
scan.set_status(IrmaScanStatus.finished)
session.commit()
log.warning("scanid: %s finished nothing to do", scanid)
return
try:
upload_list = list()
for file in scan.files:
upload_list.append(file.path)
ftp_ctrl.upload_scan(scanid, upload_list)
except IrmaFtpError as e:
log.error("scanid: %s ftp upload error %s", scanid, str(e))
scan.set_status(IrmaScanStatus.error_ftp_upload)
session.commit()
return
# launch new celery scan task on brain
celery_brain.scan_launch(scanid, scan_request.to_dict())
scan.set_status(IrmaScanStatus.uploaded)
session.commit()
log.info("scanid: %s uploaded", scanid)
return
def set_launched(scanid, scan_report_dict):
""" set status launched for scan
:param scanid: id returned by scan_new
    :param scan_report_dict: scan details output by brain
:return: None
:raise: IrmaDatabaseError
"""
with session_transaction() as session:
log.info("scanid: %s is now launched", format(scanid))
scan = Scan.load_from_ext_id(scanid, session=session)
if scan.status == IrmaScanStatus.uploaded:
scan.set_status(IrmaScanStatus.launched)
session.commit()
def set_result(scanid, file_hash, probe, result):
with session_transaction() as session:
scan = Scan.load_from_ext_id(scanid, session=session)
fws = scan.get_filewebs_by_sha256(file_hash)
if len(fws) == 0:
log.error("file %s not found in scan", file_hash)
return
fws_file = File.load_from_sha256(file_hash, session)
fws_file.timestamp_last_scan = compat.timestamp()
fws_file.update(['timestamp_last_scan'], session=session)
sanitized_res = _sanitize_res(result)
# update results for all files with same sha256
for fw in fws:
# Update main reference results with fresh results
pr = _fetch_probe_result(fw, probe)
_update_ref_results(fw, fw.file, pr)
fw.file.update(session=session)
# fill ProbeResult with probe raw results
pr.doc = sanitized_res
pr.status = sanitized_res.get('status', None)
s_type = sanitized_res.get('type', None)
pr.type = IrmaProbeType.normalize(s_type)
pr.update(session=session)
probedone = []
for fw_pr in fw.probe_results:
if fw_pr.doc is not None:
probedone.append(fw_pr.name)
log.info("scanid: %s result from %s probedone %s",
scanid, probe, probedone)
is_finished(scanid)
# ensure there is only one call running at a time
# among the different workers
@interprocess_locked(interprocess_lock_path)
def is_finished(scanid):
with session_transaction() as session:
scan = Scan.load_from_ext_id(scanid, session=session)
if scan.finished() and scan.status != IrmaScanStatus.finished:
scan.set_status(IrmaScanStatus.finished)
session.commit()
# launch flush celery task on brain
log.debug("scanid: %s calling scan_flush", scan.external_id)
celery_brain.scan_flush(scan.external_id)
def handle_output_files(scanid, parent_file_hash, probe, result):
with session_transaction() as session:
scan = Scan.load_from_ext_id(scanid, session=session)
uploaded_files = result.get('uploaded_files', None)
if uploaded_files is None or not scan.resubmit_files:
log.debug("scanid: %s Nothing to resubmit or resubmit disabled",
scanid)
return
log.info("scanid: %s appending new uploaded files %s",
scanid, uploaded_files.keys())
parent_file = File.load_from_sha256(parent_file_hash, session)
# filter already present file in current scan
hash_uploaded = [f.sha256 for f in scan.files]
new_fws = _append_new_files_to_scan(scan, uploaded_files, session)
for fw in new_fws:
parent_file.children.append(fw)
_resubmit_files(scan, parent_file, new_fws, hash_uploaded, session)
|
|
#################################### IMPORTS ###################################
if __name__ == '__main__':
import sys
import os
pkg_dir = os.path.split(os.path.abspath(__file__))[0]
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
if is_pygame_pkg:
from pygame.tests import test_utils
from pygame.tests.test_utils \
import test_not_implemented, unordered_equality, unittest
else:
from test import test_utils
from test.test_utils \
import test_not_implemented, unordered_equality, unittest
import pygame
from pygame import draw
################################################################################
class DrawModuleTest(unittest.TestCase):
def setUp(self):
(self.surf_w, self.surf_h) = self.surf_size = (320, 200)
self.surf = pygame.Surface(self.surf_size, pygame.SRCALPHA)
self.color = (1, 13, 24, 205)
def test_rect__fill(self):
# __doc__ (as of 2008-06-25) for pygame.draw.rect:
# pygame.draw.rect(Surface, color, Rect, width=0): return Rect
# draw a rectangle shape
rect = pygame.Rect(10, 10, 25, 20)
drawn = draw.rect(self.surf, self.color, rect, 0)
self.assert_(drawn == rect)
#Should be colored where it's supposed to be
for pt in test_utils.rect_area_pts(rect):
color_at_pt = self.surf.get_at(pt)
self.assert_(color_at_pt == self.color)
#And not where it shouldn't
for pt in test_utils.rect_outer_bounds(rect):
color_at_pt = self.surf.get_at(pt)
self.assert_(color_at_pt != self.color)
def test_rect__one_pixel_lines(self):
# __doc__ (as of 2008-06-25) for pygame.draw.rect:
# pygame.draw.rect(Surface, color, Rect, width=0): return Rect
# draw a rectangle shape
rect = pygame.Rect(10, 10, 56, 20)
drawn = draw.rect(self.surf, self.color, rect, 1)
self.assert_(drawn == rect)
#Should be colored where it's supposed to be
for pt in test_utils.rect_perimeter_pts(drawn):
color_at_pt = self.surf.get_at(pt)
self.assert_(color_at_pt == self.color)
#And not where it shouldn't
for pt in test_utils.rect_outer_bounds(drawn):
color_at_pt = self.surf.get_at(pt)
self.assert_(color_at_pt != self.color)
def test_line(self):
# __doc__ (as of 2008-06-25) for pygame.draw.line:
# pygame.draw.line(Surface, color, start_pos, end_pos, width=1): return Rect
# draw a straight line segment
drawn = draw.line(self.surf, self.color, (1, 0), (200, 0)) #(l, t), (l, t)
self.assert_(drawn.right == 201,
"end point arg should be (or at least was) inclusive"
)
#Should be colored where it's supposed to be
for pt in test_utils.rect_area_pts(drawn):
self.assert_(self.surf.get_at(pt) == self.color)
#And not where it shouldn't
for pt in test_utils.rect_outer_bounds(drawn):
self.assert_(self.surf.get_at(pt) != self.color)
        #Line width greater than 1
line_width = 2
offset = 5
a = (offset, offset)
b = (self.surf_size[0] - offset, a[1])
c = (a[0], self.surf_size[1] - offset)
d = (b[0], c[1])
e = (a[0] + offset, c[1])
f = (b[0], c[0] + 5)
lines = [(a, d), (b, c), (c, b), (d, a),
(a, b), (b, a), (a, c), (c, a),
(a, e), (e, a), (a, f), (f, a),
(a, a),]
for p1, p2 in lines:
msg = "%s - %s" % (p1, p2)
if p1[0] <= p2[0]:
plow = p1
phigh = p2
else:
plow = p2
phigh = p1
self.surf.fill((0, 0, 0))
rec = draw.line(self.surf, (255, 255, 255), p1, p2, line_width)
xinc = yinc = 0
if abs(p1[0] - p2[0]) > abs(p1[1] - p2[1]):
yinc = 1
else:
xinc = 1
for i in range(line_width):
p = (p1[0] + xinc * i, p1[1] + yinc * i)
self.assert_(self.surf.get_at(p) == (255, 255, 255), msg)
p = (p2[0] + xinc * i, p2[1] + yinc * i)
self.assert_(self.surf.get_at(p) == (255, 255, 255), msg)
p = (plow[0] - 1, plow[1])
self.assert_(self.surf.get_at(p) == (0, 0, 0), msg)
p = (plow[0] + xinc * line_width, plow[1] + yinc * line_width)
self.assert_(self.surf.get_at(p) == (0, 0, 0), msg)
p = (phigh[0] + xinc * line_width, phigh[1] + yinc * line_width)
self.assert_(self.surf.get_at(p) == (0, 0, 0), msg)
if p1[0] < p2[0]:
rx = p1[0]
else:
rx = p2[0]
if p1[1] < p2[1]:
ry = p1[1]
else:
ry = p2[1]
w = abs(p2[0] - p1[0]) + 1 + xinc * (line_width - 1)
h = abs(p2[1] - p1[1]) + 1 + yinc * (line_width - 1)
msg += ", %s" % (rec,)
self.assert_(rec == (rx, ry, w, h), msg)
def todo_test_aaline(self):
# __doc__ (as of 2008-08-02) for pygame.draw.aaline:
# pygame.draw.aaline(Surface, color, startpos, endpos, blend=1): return Rect
# draw fine antialiased lines
#
# Draws an anti-aliased line on a surface. This will respect the
        # clipping rectangle. A bounding box of the affected area is returned
        # as a rectangle. If blend is true, the shades will be
        # blended with existing pixel shades instead of overwriting them. This
# function accepts floating point values for the end points.
#
self.fail()
def todo_test_aalines(self):
# __doc__ (as of 2008-08-02) for pygame.draw.aalines:
# pygame.draw.aalines(Surface, color, closed, pointlist, blend=1): return Rect
#
# Draws a sequence of anti-aliased lines on a surface. You must pass at
# least two points in the sequence of points. The closed argument is a
# simple boolean and if true, a line will be drawn between the first and last points. The
# boolean blend argument set to true will blend the shades with
# existing shades instead of overwriting them. This function accepts
# floating point values for the end points.
#
self.fail()
def todo_test_arc(self):
# __doc__ (as of 2008-08-02) for pygame.draw.arc:
# pygame.draw.arc(Surface, color, Rect, start_angle, stop_angle,
# width=1): return Rect
#
# draw a partial section of an ellipse
#
# Draws an elliptical arc on the Surface. The rect argument is the
# area that the ellipse will fill. The two angle arguments are the
# initial and final angle in radians, with the zero on the right. The
# width argument is the thickness to draw the outer edge.
#
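# A hypothetical call based on the signature above (angles in radians,
# zero on the right); 'surf' is any pygame.Surface:
#
#     import math
#     pygame.draw.arc(surf, (0, 255, 0), pygame.Rect(10, 10, 60, 40), 0, math.pi / 2, 1)
#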
self.fail()
def todo_test_circle(self):
# __doc__ (as of 2008-08-02) for pygame.draw.circle:
# pygame.draw.circle(Surface, color, pos, radius, width=0): return Rect
# draw a circle around a point
#
# Draws a circular shape on the Surface. The pos argument is the
# center of the circle, and radius is the size. The width argument is
# the thickness to draw the outer edge. If width is zero then the
# circle will be filled.
#
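# A usage sketch assuming the documented signature; width=0 fills the
# circle, a positive width draws only the outline:
#
#     pygame.draw.circle(surf, (0, 0, 255), (32, 32), 10)        # filled
#     pygame.draw.circle(surf, (0, 0, 255), (32, 32), 10, 2)     # 2px outline
#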
self.fail()
def todo_test_ellipse(self):
# __doc__ (as of 2008-08-02) for pygame.draw.ellipse:
# pygame.draw.ellipse(Surface, color, Rect, width=0): return Rect
# draw a round shape inside a rectangle
#
# Draws an elliptical shape on the Surface. The given rectangle is the
# area that the circle will fill. The width argument is the thickness
# to draw the outer edge. If width is zero then the ellipse will be
# filled.
#
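# Sketch of the documented call; the ellipse is inscribed in the given Rect:
#
#     pygame.draw.ellipse(surf, (255, 255, 0), pygame.Rect(5, 5, 40, 20))
#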
self.fail()
def todo_test_lines(self):
# __doc__ (as of 2008-08-02) for pygame.draw.lines:
# pygame.draw.lines(Surface, color, closed, pointlist, width=1): return Rect
# draw multiple contiguous line segments
#
# Draw a sequence of lines on a Surface. The pointlist argument is a
# series of points that are connected by a line. If the closed
# argument is true an additional line segment is drawn between the
# first and last points.
#
# This does not draw any endcaps or miter joints. Lines with sharp
# corners and wide line widths can have improper looking corners.
#
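# Hypothetical example of the documented signature; closed=True adds a
# segment from the last point back to the first:
#
#     pygame.draw.lines(surf, (255, 0, 255), True, [(5, 5), (50, 5), (50, 30)], 3)
#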
self.fail()
def todo_test_polygon(self):
# __doc__ (as of 2008-08-02) for pygame.draw.polygon:
# pygame.draw.polygon(Surface, color, pointlist, width=0): return Rect
# draw a shape with any number of sides
#
# Draws a polygonal shape on the Surface. The pointlist argument is
# the vertices of the polygon. The width argument is the thickness to
# draw the outer edge. If width is zero then the polygon will be
# filled.
#
# For aapolygon, use aalines with the 'closed' parameter.
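# Usage sketch assuming the signature above; width=0 gives a filled polygon:
#
#     pygame.draw.polygon(surf, (200, 200, 200), [(10, 10), (60, 10), (35, 50)])
#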
self.fail()
################################################################################
if __name__ == '__main__':
unittest.main()
|
|
from decimal import Decimal
from datetime import date
from django.test import (
TestCase,
TransactionTestCase,
skipUnlessDBFeature,
)
from django.utils.html import strip_tags
import tablib
from import_export import resources
from import_export import fields
from import_export import widgets
from import_export import results
from import_export.instance_loaders import ModelInstanceLoader
from ..models import Book, Author, Category
class MyResource(resources.Resource):
name = fields.Field()
email = fields.Field()
class Meta:
export_order = ('email', 'name')
class ResourceTest(TestCase):
def setUp(self):
self.my_resource = MyResource()
def test_fields(self):
fields = self.my_resource.fields
self.assertIn('name', fields)
def test_field_column_name(self):
field = self.my_resource.fields['name']
self.assertEqual(field.column_name, 'name')
def test_meta(self):
self.assertIsInstance(self.my_resource._meta,
resources.ResourceOptions)
def test_get_export_order(self):
self.assertEqual(self.my_resource.get_export_headers(),
['email', 'name'])
class BookResource(resources.ModelResource):
published = fields.Field(column_name='published_date')
class Meta:
model = Book
exclude = ('imported', )
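# A minimal usage sketch for BookResource (mirrors what the tests below
# exercise; the queryset and dataset contents are illustrative only):
#
#     dataset = BookResource().export(Book.objects.all())      # tablib.Dataset
#     result = BookResource().import_data(dataset, dry_run=True)
#     result.has_errors()
#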
class ModelResourceTest(TestCase):
def setUp(self):
self.resource = BookResource()
self.book = Book.objects.create(name="Some book")
self.dataset = tablib.Dataset(headers=['id', 'name', 'author_email',
'price'])
row = [self.book.pk, 'Some book', 'test@example.com', "10.25"]
self.dataset.append(row)
def test_default_instance_loader_class(self):
self.assertIs(self.resource._meta.instance_loader_class,
ModelInstanceLoader)
def test_fields(self):
fields = self.resource.fields
self.assertIn('id', fields)
self.assertIn('name', fields)
self.assertIn('author_email', fields)
self.assertIn('price', fields)
def test_fields_foreign_key(self):
fields = self.resource.fields
self.assertIn('author', fields)
widget = fields['author'].widget
self.assertIsInstance(widget, widgets.ForeignKeyWidget)
self.assertEqual(widget.model, Author)
def test_fields_m2m(self):
fields = self.resource.fields
self.assertIn('categories', fields)
def test_excluded_fields(self):
self.assertNotIn('imported', self.resource.fields)
def test_init_instance(self):
instance = self.resource.init_instance()
self.assertIsInstance(instance, Book)
def test_get_instance(self):
instance_loader = self.resource._meta.instance_loader_class(
self.resource)
instance = self.resource.get_instance(instance_loader,
self.dataset.dict[0])
self.assertEqual(instance, self.book)
def test_get_export_headers(self):
headers = self.resource.get_export_headers()
self.assertEqual(headers, ['published_date',
'id', 'name', 'author', 'author_email', 'price', 'categories',
])
def test_export(self):
dataset = self.resource.export(Book.objects.all())
self.assertEqual(len(dataset), 1)
def test_get_diff(self):
book2 = Book(name="Some other book")
diff = self.resource.get_diff(self.book, book2)
headers = self.resource.get_export_headers()
self.assertEqual(diff[headers.index('name')],
u'<span>Some </span><ins style="background:#e6ffe6;">'
u'other </ins><span>book</span>')
self.assertFalse(diff[headers.index('author_email')])
def test_import_data(self):
result = self.resource.import_data(self.dataset, raise_errors=True)
self.assertFalse(result.has_errors())
self.assertEqual(len(result.rows), 1)
self.assertTrue(result.rows[0].diff)
self.assertEqual(result.rows[0].import_type,
results.RowResult.IMPORT_TYPE_UPDATE)
instance = Book.objects.get(pk=self.book.pk)
self.assertEqual(instance.author_email, 'test@example.com')
self.assertEqual(instance.price, Decimal("10.25"))
def test_import_data_error_saving_model(self):
row = list(self.dataset.pop())
# set pk to something that would yield error
row[0] = 'foo'
self.dataset.append(row)
result = self.resource.import_data(self.dataset, raise_errors=False)
self.assertTrue(result.has_errors())
self.assertTrue(result.rows[0].errors)
msg = 'ValueError("invalid literal for int() with base 10: \'foo\'",)'
self.assertTrue(result.rows[0].errors[0].error, msg)
def test_import_data_delete(self):
class B(BookResource):
delete = fields.Field(widget=widgets.BooleanWidget())
def for_delete(self, row, instance):
return self.fields['delete'].clean(row)
row = [self.book.pk, self.book.name, '1']
dataset = tablib.Dataset(*[row], headers=['id', 'name', 'delete'])
result = B().import_data(dataset, raise_errors=True)
self.assertFalse(result.has_errors())
self.assertEqual(result.rows[0].import_type,
results.RowResult.IMPORT_TYPE_DELETE)
self.assertFalse(Book.objects.filter(pk=self.book.pk))
def test_relationships_fields(self):
class B(resources.ModelResource):
class Meta:
model = Book
fields = ('author__name',)
author = Author.objects.create(name="Author")
self.book.author = author
resource = B()
result = resource.fields['author__name'].export(self.book)
self.assertEqual(result, author.name)
def test_dehydrating_fields(self):
class B(resources.ModelResource):
full_title = fields.Field()
class Meta:
model = Book
fields = ('author__name', 'full_title')
def dehydrate_full_title(self, obj):
return '%s by %s' % (obj.name, obj.author.name)
author = Author.objects.create(name="Author")
self.book.author = author
resource = B()
full_title = resource.export_field(resource.get_fields()[0], self.book)
self.assertEqual(full_title, '%s by %s' % (self.book.name, self.book.author.name))
def test_widget_format_in_fk_field(self):
class B(resources.ModelResource):
class Meta:
model = Book
fields = ('author__birthday',)
widgets = {
'author__birthday': {'format': '%Y-%m-%d'},
}
author = Author.objects.create(name="Author")
self.book.author = author
resource = B()
result = resource.fields['author__birthday'].export(self.book)
self.assertEqual(result, str(date.today()))
def test_widget_kwargs_for_field(self):
class B(resources.ModelResource):
class Meta:
model = Book
fields = ('published',)
widgets = {
'published': {'format': '%d.%m.%Y'},
}
resource = B()
self.book.published = date(2012, 8, 13)
result = resource.fields['published'].export(self.book)
self.assertEqual(result, "13.08.2012")
def test_foreign_keys_export(self):
author1 = Author.objects.create(name='Foo')
self.book.author = author1
self.book.save()
dataset = self.resource.export(Book.objects.all())
self.assertEqual(dataset.dict[0]['author'], author1.pk)
def test_foreign_keys_import(self):
author2 = Author.objects.create(name='Bar')
headers = ['id', 'name', 'author']
row = [None, 'FooBook', author2.pk]
dataset = tablib.Dataset(row, headers=headers)
self.resource.import_data(dataset, raise_errors=True)
book = Book.objects.get(name='FooBook')
self.assertEqual(book.author, author2)
def test_m2m_export(self):
cat1 = Category.objects.create(name='Cat 1')
cat2 = Category.objects.create(name='Cat 2')
self.book.categories.add(cat1)
self.book.categories.add(cat2)
dataset = self.resource.export(Book.objects.all())
self.assertEqual(dataset.dict[0]['categories'],
'%d,%d' % (cat1.pk, cat2.pk))
def test_m2m_import(self):
cat1 = Category.objects.create(name='Cat 1')
headers = ['id', 'name', 'categories']
row = [None, 'FooBook', "%s" % cat1.pk]
dataset = tablib.Dataset(row, headers=headers)
self.resource.import_data(dataset, raise_errors=True)
book = Book.objects.get(name='FooBook')
self.assertIn(cat1, book.categories.all())
class ModelResourceTransactionTest(TransactionTestCase):
def setUp(self):
self.resource = BookResource()
@skipUnlessDBFeature('supports_transactions')
def test_m2m_import_with_transactions(self):
cat1 = Category.objects.create(name='Cat 1')
headers = ['id', 'name', 'categories']
row = [None, 'FooBook', "%s" % cat1.pk]
dataset = tablib.Dataset(row, headers=headers)
result = self.resource.import_data(dataset, dry_run=True,
use_transactions=True)
row_diff = result.rows[0].diff
fields = self.resource.get_fields()
id_field = self.resource.fields['id']
id_diff = row_diff[fields.index(id_field)]
#id diff should exist because FooBook has been saved
#inside the rolled-back transaction
self.assertTrue(id_diff)
category_field = self.resource.fields['categories']
categories_diff = row_diff[fields.index(category_field)]
self.assertEqual(strip_tags(categories_diff), unicode(cat1.pk))
#check that the import was really rolled back
self.assertFalse(Book.objects.filter(name='FooBook'))
class ModelResourceFactoryTest(TestCase):
def test_create(self):
BookResource = resources.modelresource_factory(Book)
self.assertIn('id', BookResource.fields)
self.assertEqual(BookResource._meta.model, Book)
|
|
"""Objects representing Flow entities, like boards, topics, and posts."""
#
# (C) Pywikibot team, 2015-2022
#
# Distributed under the terms of the MIT license.
#
import abc
import datetime
import logging
from typing import Any, Type, Union
from urllib.parse import parse_qs, urlparse
import pywikibot
from pywikibot.backports import Dict, Iterator, List, Mapping
from pywikibot.exceptions import (
LockedPageError,
NoPageError,
UnknownExtensionError,
)
from pywikibot.page import BasePage, PageSourceType, User
logger = logging.getLogger('pywiki.wiki.flow')
# Flow page-like objects (boards and topics)
class FlowPage(BasePage, abc.ABC):
"""The base page meta class for the Flow extension.
It cannot be instantiated directly.
"""
def __init__(self, source: PageSourceType, title: str = '') -> None:
"""Initializer.
:param source: A Flow-enabled site or a Link or Page on such a site
:param title: normalized title of the page
:raises TypeError: incorrect use of parameters
:raises ValueError: use of non-Flow-enabled Site
"""
super().__init__(source, title)
if not self.site.has_extension('Flow'):
raise UnknownExtensionError('site is not Flow-enabled')
@abc.abstractmethod
def _load(self, force: bool = False) -> Dict[str, Any]:
"""Abstract method to load and cache the Flow data.
Subclasses must overwrite the _load() method to load and cache
the object's internal data from the API.
"""
raise NotImplementedError
@property
def uuid(self) -> str:
"""Return the UUID of the page.
:return: UUID of the page
"""
if not hasattr(self, '_uuid'):
self._uuid = self._load()['workflowId']
return self._uuid
def get(self, force: bool = False, get_redirect: bool = False
) -> Dict[str, Any]:
"""Get the page's content."""
if get_redirect or force:
raise NotImplementedError(
"Neither 'force' nor 'get_redirect' parameter is implemented "
'in {}.get()'.format(self.__class__.__name__))
# TODO: Return more useful data
return getattr(self, '_data', {})
class Board(FlowPage):
"""A Flow discussion board."""
def _load(self, force: bool = False) -> Dict[str, Any]:
"""Load and cache the Board's data, derived from its topic list.
:param force: Whether to force a reload if the data is already loaded
"""
if not hasattr(self, '_data') or force:
self._data = self.site.load_board(self)
return self._data
def _parse_url(self, links: Mapping[str, Any]) -> Dict[str, Any]:
"""Parse a URL retrieved from the API."""
rule = links['fwd']
parsed_url = urlparse(rule['url'])
params = parse_qs(parsed_url.query)
new_params = {} # type: Dict[str, Any]
for key, value in params.items():
if key != 'title':
key = key.replace('topiclist_', '').replace('-', '_')
if key == 'offset_dir':
new_params['reverse'] = (value == 'rev')
else:
new_params[key] = value
return new_params
def topics(self, content_format: str = 'wikitext', limit: int = 100,
sort_by: str = 'newest',
offset: Union[str, datetime.datetime, None] = None,
offset_uuid: str = '', reverse: bool = False,
include_offset: bool = False, toc_only: bool = False
) -> Iterator['Topic']:
"""Load this board's topics.
:param content_format: The content format to request the data in;
must be either 'wikitext', 'html', or 'fixed-html'
:param limit: The number of topics to fetch in each request.
:param sort_by: Algorithm to sort topics by;
must be either 'newest' or 'updated'
:param offset: The timestamp to start at (when sortby is 'updated').
:param offset_uuid: The UUID to start at (when sortby is 'newest').
:param reverse: Whether to reverse the topic ordering.
:param include_offset: Whether to include the offset topic.
:param toc_only: Whether to only include information for the TOC.
:yield: A generator of this board's topics.
"""
data = self.site.load_topiclist(self, content_format=content_format,
limit=limit, sortby=sort_by,
toconly=toc_only, offset=offset,
offset_id=offset_uuid, reverse=reverse,
include_offset=include_offset)
while data['roots']:
for root in data['roots']:
topic = Topic.from_topiclist_data(self, root, data)
yield topic
cont_args = self._parse_url(data['links']['pagination'])
data = self.site.load_topiclist(self, **cont_args)
def new_topic(self, title: str, content: str,
content_format: str = 'wikitext') -> 'Topic':
"""Create and return a Topic object for a new topic on this Board.
:param title: The title of the new topic (must be in plaintext)
:param content: The content of the topic's initial post
:param content_format: The content format of the supplied content;
either 'wikitext' or 'html'
:return: The new topic
"""
return Topic.create_topic(self, title, content, content_format)
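# A usage sketch for Board (illustrative only; 'site' is assumed to be a
# Flow-enabled pywikibot.Site and the board title is hypothetical):
#
#     board = Board(site, 'Talk:Sandbox')
#     for topic in board.topics(limit=10):
#         ...
#     board.new_topic('Hello', 'First post', 'wikitext')
#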
class Topic(FlowPage):
"""A Flow discussion topic."""
def _load(self, force: bool = False, content_format: str = 'wikitext'
) -> Dict[str, Any]:
"""Load and cache the Topic's data.
:param force: Whether to force a reload if the data is already loaded
:param content_format: The post format in which to load
"""
if not hasattr(self, '_data') or force:
self._data = self.site.load_topic(self, content_format)
return self._data
def _reload(self) -> None:
"""Forcibly reload the topic's root post."""
self.root._load(load_from_topic=True)
@classmethod
def create_topic(cls: Type['Topic'], board: 'Board', title: str,
content: str, content_format: str = 'wikitext'
) -> 'Topic':
"""Create and return a Topic object for a new topic on a Board.
:param board: The topic's parent board
:param title: The title of the new topic (must be in plaintext)
:param content: The content of the topic's initial post
:param content_format: The content format of the supplied content;
either 'wikitext' or 'html'
:return: The new topic
"""
data = board.site.create_new_topic(board, title, content,
content_format)
return cls(board.site, data['topic-page'])
@classmethod
def from_topiclist_data(cls: Type['Topic'], board: 'Board',
root_uuid: str,
topiclist_data: Dict[str, Any]) -> 'Topic':
"""Create a Topic object from API data.
:param board: The topic's parent Flow board
:param root_uuid: The UUID of the topic and its root post
:param topiclist_data: The data returned by view-topiclist
:return: A Topic object derived from the supplied data
:raises TypeError: any passed parameters have wrong types
:raises ValueError: the passed topiclist_data is missing required data
"""
if not isinstance(board, Board):
raise TypeError('board must be a pywikibot.flow.Board object.')
if not isinstance(root_uuid, str):
raise TypeError('Topic/root UUID must be a string.')
topic = cls(board.site, 'Topic:' + root_uuid)
topic._root = Post.fromJSON(topic, root_uuid, topiclist_data)
topic._uuid = root_uuid
return topic
@property
def root(self) -> 'Post':
"""The root post of this topic."""
if not hasattr(self, '_root'):
self._root = Post.fromJSON(self, self.uuid, self._data)
return self._root
@property
def is_locked(self) -> bool:
"""Whether this topic is locked."""
return self.root._current_revision['isLocked']
@property
def is_moderated(self) -> bool:
"""Whether this topic is moderated."""
return self.root._current_revision['isModerated']
def replies(self, content_format: str = 'wikitext', force: bool = False
) -> List['Post']:
"""A list of replies to this topic's root post.
:param content_format: Content format to return contents in;
must be 'wikitext', 'html', or 'fixed-html'
:param force: Whether to reload from the API instead of using the cache
:return: The replies of this topic's root post
"""
return self.root.replies(content_format=content_format, force=force)
def reply(self, content: str, content_format: str = 'wikitext') -> 'Post':
"""A convenience method to reply to this topic's root post.
:param content: The content of the new post
:param content_format: The format of the given content;
must be 'wikitext' or 'html'
:return: The new reply to this topic's root post
"""
return self.root.reply(content, content_format)
# Moderation
def lock(self, reason: str) -> None:
"""Lock this topic.
:param reason: The reason for locking this topic
"""
self.site.lock_topic(self, True, reason)
self._reload()
def unlock(self, reason: str) -> None:
"""Unlock this topic.
:param reason: The reason for unlocking this topic
"""
self.site.lock_topic(self, False, reason)
self._reload()
def delete_mod(self, reason: str) -> None:
"""Delete this topic through the Flow moderation system.
:param reason: The reason for deleting this topic.
"""
self.site.delete_topic(self, reason)
self._reload()
def hide(self, reason: str) -> None:
"""Hide this topic.
:param reason: The reason for hiding this topic.
"""
self.site.hide_topic(self, reason)
self._reload()
def suppress(self, reason: str) -> None:
"""Suppress this topic.
:param reason: The reason for suppressing this topic.
"""
self.site.suppress_topic(self, reason)
self._reload()
def restore(self, reason: str) -> None:
"""Restore this topic.
:param reason: The reason for restoring this topic.
"""
self.site.restore_topic(self, reason)
self._reload()
# Flow non-page-like objects
class Post:
"""A post to a Flow discussion topic."""
def __init__(self, page: 'Topic', uuid: str) -> None:
"""
Initializer.
:param page: Flow topic
:param uuid: UUID of a Flow post
:raises TypeError: incorrect types of parameters
"""
if not isinstance(page, Topic):
raise TypeError('Page must be a Topic object')
if not page.exists():
raise NoPageError(page, 'Topic must exist: %s')
if not isinstance(uuid, str):
raise TypeError('Post UUID must be a string')
self._page = page
self._uuid = uuid
self._content = {} # type: Dict[str, Any]
@classmethod
def fromJSON(cls, page: 'Topic', post_uuid: str, # noqa: N802
data: Dict[str, Any]) -> 'Post':
"""
Create a Post object using the data returned from the API call.
:param page: A Flow topic
:param post_uuid: The UUID of the post
:param data: The JSON data returned from the API
:return: A Post object
:raises TypeError: data is not a dict
:raises ValueError: data is missing required entries
"""
post = cls(page, post_uuid)
post._set_data(data)
return post
def _set_data(self, data: Dict[str, Any]) -> None:
"""Set internal data and cache content.
:param data: The data to store internally
:raises TypeError: data is not a dict
:raises ValueError: missing data entries or post/revision not found
"""
if not isinstance(data, dict):
raise TypeError('Illegal post data (must be a dictionary).')
if ('posts' not in data) or ('revisions' not in data):
raise ValueError('Illegal post data (missing required data).')
if self.uuid not in data['posts']:
raise ValueError('Post not found in supplied data.')
current_revision_id = data['posts'][self.uuid][0]
if current_revision_id not in data['revisions']:
raise ValueError('Current revision of post '
'not found in supplied data.')
self._current_revision = data['revisions'][current_revision_id]
if 'content' in self._current_revision:
content = self._current_revision.pop('content')
assert isinstance(content, dict)
assert isinstance(content['content'], str)
self._content[content['format']] = content['content']
def _load(self, force: bool = True, content_format: str = 'wikitext',
load_from_topic: bool = False) -> Dict[str, Any]:
"""Load and cache the Post's data using the given content format.
:param load_from_topic: Whether to load the post from the whole topic
"""
if load_from_topic:
data = self.page._load(force=force, content_format=content_format)
else:
data = self.site.load_post_current_revision(self.page, self.uuid,
content_format)
self._set_data(data)
return self._current_revision
@property
def uuid(self) -> str:
"""Return the UUID of the post.
:return: UUID of the post
"""
return self._uuid
@property
def site(self) -> 'pywikibot.site.BaseSite':
"""Return the site associated with the post.
:return: Site associated with the post
"""
return self._page.site
@property
def page(self) -> 'Topic':
"""Return the page associated with the post.
:return: Page associated with the post
"""
return self._page
@property
def is_moderated(self) -> bool:
"""Whether this post is moderated."""
if not hasattr(self, '_current_revision'):
self._load()
return self._current_revision['isModerated']
@property
def creator(self) -> User:
"""The creator of this post."""
if not hasattr(self, '_current_revision'):
self._load()
if not hasattr(self, '_creator'):
self._creator = User(self.site,
self._current_revision['creator']['name'])
return self._creator
def get(self, content_format: str = 'wikitext',
force: bool = False) -> str:
"""Return the contents of the post in the given format.
:param force: Whether to reload from the API instead of using the cache
:param content_format: Content format to return contents in
:return: The contents of the post in the given content format
"""
if content_format not in self._content or force:
self._load(content_format=content_format)
return self._content[content_format]
def replies(self, content_format: str = 'wikitext', force: bool = False
) -> List['Post']:
"""Return this post's replies.
:param content_format: Content format to return contents in;
must be 'wikitext', 'html', or 'fixed-html'
:param force: Whether to reload from the API instead of using the cache
:return: This post's replies
"""
if content_format not in ('wikitext', 'html', 'fixed-html'):
raise ValueError('Invalid content format.')
if hasattr(self, '_replies') and not force:
return self._replies # type: ignore[has-type]
# load_from_topic workaround due to T106733
# (replies not returned by view-post)
if not hasattr(self, '_current_revision') or force:
self._load(content_format=content_format, load_from_topic=True)
reply_uuids = self._current_revision['replies']
self._replies = [Post(self.page, uuid) for uuid in reply_uuids]
return self._replies
def reply(self, content: str, content_format: str = 'wikitext') -> 'Post':
"""Reply to this post.
:param content: The content of the new post
:param content_format: The format of the given content;
must be 'wikitext' or 'html'
:return: The new reply post
"""
self._load()
if self.page.is_locked:
raise LockedPageError(self.page, 'Topic %s is locked.')
reply_url = self._current_revision['actions']['reply']['url']
parsed_url = urlparse(reply_url)
params = parse_qs(parsed_url.query)
reply_to = params['topic_postId']
if self.uuid == reply_to:
del self._current_revision
del self._replies
data = self.site.reply_to_post(self.page, reply_to, content,
content_format)
post = Post(self.page, data['post-id'])
return post
# Moderation
def delete(self, reason: str) -> None:
"""Delete this post through the Flow moderation system.
:param reason: The reason for deleting this post.
"""
self.site.delete_post(self, reason)
self._load()
def hide(self, reason: str) -> None:
"""Hide this post.
:param reason: The reason for hiding this post.
"""
self.site.hide_post(self, reason)
self._load()
def suppress(self, reason: str) -> None:
"""Suppress this post.
:param reason: The reason for suppressing this post.
"""
self.site.suppress_post(self, reason)
self._load()
def restore(self, reason: str) -> None:
"""Restore this post.
:param reason: The reason for restoring this post.
"""
self.site.restore_post(self, reason)
self._load()
def thank(self) -> None:
"""Thank the user who made this post."""
self.site.thank_post(self)
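# A usage sketch tying the classes above together (illustrative only;
# 'board' is assumed to be a Board on a Flow-enabled site):
#
#     topic = Topic.create_topic(board, 'Title', 'Opening post')
#     reply = topic.reply('A reply', 'wikitext')
#     for post in topic.replies():
#         post.get('wikitext')
#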
|
|
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testscenarios
from neutron import context
from neutron.db import agents_db
from neutron.db import bgp_db
from neutron.db import bgp_dragentscheduler_db as bgp_dras_db
from neutron.db import common_db_mixin
from neutron.services.bgp.scheduler import bgp_dragent_scheduler as bgp_dras
from neutron.tests.common import helpers
from neutron.tests.unit import testlib_api
# Required to generate tests from scenarios. Not compatible with nose.
load_tests = testscenarios.load_tests_apply_scenarios
class TestAutoSchedule(testlib_api.SqlTestCase,
bgp_dras_db.BgpDrAgentSchedulerDbMixin,
agents_db.AgentDbMixin,
common_db_mixin.CommonDbMixin):
"""Test various scenarios for schedule_unscheduled_bgp_speakers.
Below is the brief description of the scenario variables
--------------------------------------------------------
host_count
number of hosts.
agent_count
number of BGP dynamic routing agents.
down_agent_count
number of DRAgents which are inactive.
bgp_speaker_count
Number of bgp_speakers.
hosted_bgp_speakers
A mapping of agent id to the ids of the bgp_speakers that they
should be initially hosting.
expected_schedule_return_value
Expected return value of 'schedule_unscheduled_bgp_speakers'.
expected_hosted_bgp_speakers
This stores the expected bgp_speakers that should have been
scheduled (or that could have already been scheduled) for each
agent after the 'schedule_unscheduled_bgp_speakers' function is
called.
"""
scenarios = [
('No BgpDrAgent scheduled, if no DRAgent is present',
dict(host_count=1,
agent_count=0,
down_agent_count=0,
bgp_speaker_count=1,
hosted_bgp_speakers={},
expected_schedule_return_value=False)),
('No BgpDrAgent scheduled, if no BGP speakers are present',
dict(host_count=1,
agent_count=1,
down_agent_count=0,
bgp_speaker_count=0,
hosted_bgp_speakers={},
expected_schedule_return_value=False,
expected_hosted_bgp_speakers={'agent-0': []})),
('No BgpDrAgent scheduled, if BGP speaker already hosted',
dict(host_count=1,
agent_count=1,
down_agent_count=0,
bgp_speaker_count=1,
hosted_bgp_speakers={'agent-0': ['bgp-speaker-0']},
expected_schedule_return_value=False,
expected_hosted_bgp_speakers={'agent-0': ['bgp-speaker-0']})),
('BgpDrAgent scheduled to the speaker, if the speaker is not hosted',
dict(host_count=1,
agent_count=1,
down_agent_count=0,
bgp_speaker_count=1,
hosted_bgp_speakers={},
expected_schedule_return_value=True,
expected_hosted_bgp_speakers={'agent-0': ['bgp-speaker-0']})),
('No BgpDrAgent scheduled, if all the agents are down',
dict(host_count=2,
agent_count=2,
down_agent_count=2,
bgp_speaker_count=1,
hosted_bgp_speakers={},
expected_schedule_return_value=False,
expected_hosted_bgp_speakers={'agent-0': [],
'agent-1': [], })),
]
def _strip_host_index(self, name):
"""Strips the host index.
Eg. if name = '2-agent-3', then 'agent-3' is returned.
"""
return name[name.find('-') + 1:]
def _extract_index(self, name):
"""Extracts the index number and returns.
Eg. if name = '2-agent-3', then 3 is returned
"""
return int(name.split('-')[-1])
def _get_hosted_bgp_speakers_on_dragent(self, agent_id):
query = self.ctx.session.query(
bgp_dras_db.BgpSpeakerDrAgentBinding.bgp_speaker_id)
query = query.filter(
bgp_dras_db.BgpSpeakerDrAgentBinding.agent_id ==
agent_id)
return [item[0] for item in query]
def _create_and_set_agents_down(self, hosts, agent_count=0,
down_agent_count=0, admin_state_up=True):
agents = []
if agent_count:
for i, host in enumerate(hosts):
is_alive = i >= down_agent_count
agents.append(helpers.register_bgp_dragent(
host,
admin_state_up=admin_state_up,
alive=is_alive))
return agents
def _save_bgp_speakers(self, bgp_speakers):
cls = bgp_db.BgpDbMixin()
bgp_speaker_body = {
'bgp_speaker': {'name': 'fake_bgp_speaker',
'ip_version': '4',
'local_as': '123',
'advertise_floating_ip_host_routes': '0',
'advertise_tenant_networks': '0',
'peers': [],
'networks': []}}
i = 1
for bgp_speaker_id in bgp_speakers:
bgp_speaker_body['bgp_speaker']['local_as'] = i
cls._save_bgp_speaker(self.ctx, bgp_speaker_body,
uuid=bgp_speaker_id)
i = i + 1
def _test_auto_schedule(self, host_index):
scheduler = bgp_dras.ChanceScheduler()
self.ctx = context.get_admin_context()
msg = 'host_index = %s' % host_index
# create hosts
hosts = ['%s-agent-%s' % (host_index, i)
for i in range(self.host_count)]
bgp_dragents = self._create_and_set_agents_down(hosts,
self.agent_count,
self.down_agent_count)
# create bgp_speakers
self._bgp_speakers = ['%s-bgp-speaker-%s' % (host_index, i)
for i in range(self.bgp_speaker_count)]
self._save_bgp_speakers(self._bgp_speakers)
# pre schedule the bgp_speakers to the agents defined in
# self.hosted_bgp_speakers before calling auto_schedule_bgp_speaker
for agent, bgp_speakers in self.hosted_bgp_speakers.items():
agent_index = self._extract_index(agent)
for bgp_speaker in bgp_speakers:
bs_index = self._extract_index(bgp_speaker)
scheduler.bind(self.ctx, [bgp_dragents[agent_index]],
self._bgp_speakers[bs_index])
retval = scheduler.schedule_unscheduled_bgp_speakers(self.ctx,
hosts[host_index])
self.assertEqual(self.expected_schedule_return_value, retval,
message=msg)
if self.agent_count:
agent_id = bgp_dragents[host_index].id
hosted_bgp_speakers = self._get_hosted_bgp_speakers_on_dragent(
agent_id)
hosted_bs_ids = [self._strip_host_index(net)
for net in hosted_bgp_speakers]
expected_hosted_bgp_speakers = self.expected_hosted_bgp_speakers[
'agent-%s' % host_index]
self.assertItemsEqual(hosted_bs_ids, expected_hosted_bgp_speakers,
msg)
def test_auto_schedule(self):
for i in range(self.host_count):
self._test_auto_schedule(i)
|
|
from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
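# A minimal baseline sketch of the estimators under test (mirrors the
# patterns exercised below; X and y here are illustrative):
#
#     clf = DummyClassifier(strategy="most_frequent").fit([[0], [0], [0]], [1, 2, 1])
#     clf.predict([[0]])                      # -> array([1])
#     reg = DummyRegressor(strategy="mean").fit([[0], [0]], [1.0, 3.0])
#     reg.predict([[0]])                      # -> array([2.])
#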
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_equal(clf.predict_proba(X[0]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_equal(clf.predict_proba(X[0]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategy_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1/3, decimal=1)
assert_almost_equal(p[2], 1/3, decimal=1)
assert_almost_equal(p[4], 1/3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
if __name__ == '__main__':
import nose
nose.runmodule()
|
|
"""The tests for Climate device conditions."""
import pytest
import voluptuous_serialize
import homeassistant.components.automation as automation
from homeassistant.components.climate import DOMAIN, const, device_condition
from homeassistant.helpers import config_validation as cv, device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_conditions(hass, device_reg, entity_reg):
"""Test we get the expected conditions from a climate."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
hass.states.async_set(
f"{DOMAIN}.test_5678",
const.HVAC_MODE_COOL,
{
const.ATTR_HVAC_MODE: const.HVAC_MODE_COOL,
const.ATTR_PRESET_MODE: const.PRESET_AWAY,
const.ATTR_PRESET_MODES: [const.PRESET_HOME, const.PRESET_AWAY],
},
)
hass.states.async_set("climate.test_5678", "attributes", {"supported_features": 17})
expected_conditions = [
{
"condition": "device",
"domain": DOMAIN,
"type": "is_hvac_mode",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"condition": "device",
"domain": DOMAIN,
"type": "is_preset_mode",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
]
conditions = await async_get_device_automations(hass, "condition", device_entry.id)
assert_lists_same(conditions, expected_conditions)
async def test_get_conditions_hvac_only(hass, device_reg, entity_reg):
"""Test we get the expected conditions from a climate."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
hass.states.async_set(
f"{DOMAIN}.test_5678",
const.HVAC_MODE_COOL,
{
const.ATTR_HVAC_MODE: const.HVAC_MODE_COOL,
const.ATTR_PRESET_MODE: const.PRESET_AWAY,
const.ATTR_PRESET_MODES: [const.PRESET_HOME, const.PRESET_AWAY],
},
)
hass.states.async_set("climate.test_5678", "attributes", {"supported_features": 1})
expected_conditions = [
{
"condition": "device",
"domain": DOMAIN,
"type": "is_hvac_mode",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
}
]
conditions = await async_get_device_automations(hass, "condition", device_entry.id)
assert_lists_same(conditions, expected_conditions)
async def test_if_state(hass, calls):
"""Test for turn_on and turn_off conditions."""
hass.states.async_set(
"climate.entity",
const.HVAC_MODE_COOL,
{
const.ATTR_HVAC_MODE: const.HVAC_MODE_COOL,
const.ATTR_PRESET_MODE: const.PRESET_AWAY,
},
)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "climate.entity",
"type": "is_hvac_mode",
"hvac_mode": "cool",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_hvac_mode - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
{
"trigger": {"platform": "event", "event_type": "test_event2"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "climate.entity",
"type": "is_preset_mode",
"preset_mode": "away",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_preset_mode - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
]
},
)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "is_hvac_mode - event - test_event1"
hass.states.async_set(
"climate.entity",
const.HVAC_MODE_AUTO,
{
const.ATTR_HVAC_MODE: const.HVAC_MODE_AUTO,
const.ATTR_PRESET_MODE: const.PRESET_AWAY,
},
)
# Should not fire
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "is_preset_mode - event - test_event2"
hass.states.async_set(
"climate.entity",
const.HVAC_MODE_AUTO,
{
const.ATTR_HVAC_MODE: const.HVAC_MODE_AUTO,
const.ATTR_PRESET_MODE: const.PRESET_HOME,
},
)
# Should not fire
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 2
async def test_capabilities(hass):
"""Bla."""
hass.states.async_set(
"climate.entity",
const.HVAC_MODE_COOL,
{
const.ATTR_HVAC_MODE: const.HVAC_MODE_COOL,
const.ATTR_PRESET_MODE: const.PRESET_AWAY,
const.ATTR_HVAC_MODES: [const.HVAC_MODE_COOL, const.HVAC_MODE_OFF],
const.ATTR_PRESET_MODES: [const.PRESET_HOME, const.PRESET_AWAY],
},
)
# Test hvac mode
capabilities = await device_condition.async_get_condition_capabilities(
hass,
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "climate.entity",
"type": "is_hvac_mode",
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [
{
"name": "hvac_mode",
"options": [("cool", "cool"), ("off", "off")],
"required": True,
"type": "select",
}
]
# Test preset mode
capabilities = await device_condition.async_get_condition_capabilities(
hass,
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "climate.entity",
"type": "is_preset_mode",
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [
{
"name": "preset_mode",
"options": [("home", "home"), ("away", "away")],
"required": True,
"type": "select",
}
]
|
|
""" Python Character Mapping Codec iso8859_11 generated from 'MAPPINGS/ISO8859/8859-11.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-11',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
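# A usage sketch (not part of the generated module): once registered via
# the encodings package, the codec round-trips Thai text, e.g.
#
#     b'\xa1'.decode('iso8859-11')   # -> '\u0e01' (THAI CHARACTER KO KAI)
#     '\u0e01'.encode('iso8859-11')  # -> b'\xa1'
#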
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
'\u0e0b' # 0xAB -> THAI CHARACTER SO SO
'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
'\u0e0d' # 0xAD -> THAI CHARACTER YO YING
'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
'\u0e19' # 0xB9 -> THAI CHARACTER NO NU
'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
'\u0e1d' # 0xBD -> THAI CHARACTER FO FA
'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
'\u0e21' # 0xC1 -> THAI CHARACTER MO MA
'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
'\u0e24' # 0xC4 -> THAI CHARACTER RU
'\u0e25' # 0xC5 -> THAI CHARACTER LO LING
'\u0e26' # 0xC6 -> THAI CHARACTER LU
'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
'\u0e2d' # 0xCD -> THAI CHARACTER O ANG
'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
'\u0e30' # 0xD0 -> THAI CHARACTER SARA A
'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
'\u0e34' # 0xD4 -> THAI CHARACTER SARA I
'\u0e35' # 0xD5 -> THAI CHARACTER SARA II
'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
'\u0e38' # 0xD8 -> THAI CHARACTER SARA U
'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
    '\ufffe'    # 0xDB -> UNDEFINED
    '\ufffe'    # 0xDC -> UNDEFINED
    '\ufffe'    # 0xDD -> UNDEFINED
    '\ufffe'    # 0xDE -> UNDEFINED
'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
'\u0e40' # 0xE0 -> THAI CHARACTER SARA E
'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
'\u0e42' # 0xE2 -> THAI CHARACTER SARA O
'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
'\u0e50' # 0xF0 -> THAI DIGIT ZERO
'\u0e51' # 0xF1 -> THAI DIGIT ONE
'\u0e52' # 0xF2 -> THAI DIGIT TWO
'\u0e53' # 0xF3 -> THAI DIGIT THREE
'\u0e54' # 0xF4 -> THAI DIGIT FOUR
'\u0e55' # 0xF5 -> THAI DIGIT FIVE
'\u0e56' # 0xF6 -> THAI DIGIT SIX
'\u0e57' # 0xF7 -> THAI DIGIT SEVEN
'\u0e58' # 0xF8 -> THAI DIGIT EIGHT
'\u0e59' # 0xF9 -> THAI DIGIT NINE
'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
    '\ufffe'    # 0xFC -> UNDEFINED
    '\ufffe'    # 0xFD -> UNDEFINED
    '\ufffe'    # 0xFE -> UNDEFINED
    '\ufffe'    # 0xFF -> UNDEFINED
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
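# A minimal usage sketch (illustrative, not part of the generated file): it only
# relies on the Codec class and the tables defined above, and round-trips a few
# Thai characters through the charmap tables.
if __name__ == '__main__':
    _sample = '\u0e01\u0e02\u0e03'          # KO KAI, KHO KHAI, KHO KHUAT
    _encoded, _ = Codec().encode(_sample)   # -> b'\xa1\xa2\xa3'
    _decoded, _ = Codec().decode(_encoded)
    assert _decoded == _sample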
|
|
# Copyright 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import __builtin__
import datetime
import functools
import hashlib
import importlib
import multiprocessing
import os
import os.path
import StringIO
import tempfile
import mox
import netaddr
from oslo.config import cfg
import nova
from nova import exception
from nova.openstack.common import processutils
from nova.openstack.common import timeutils
from nova import test
from nova import utils
CONF = cfg.CONF
class GetMyIP4AddressTestCase(test.NoDBTestCase):
def test_get_my_ipv4_address_with_no_ipv4(self):
response = """172.16.0.0/16 via 172.16.251.13 dev tun1
172.16.251.1 via 172.16.251.13 dev tun1
172.16.251.13 dev tun1 proto kernel scope link src 172.16.251.14
172.24.0.0/16 via 172.16.251.13 dev tun1
192.168.122.0/24 dev virbr0 proto kernel scope link src 192.168.122.1"""
def fake_execute(*args, **kwargs):
return response, None
self.stubs.Set(utils, 'execute', fake_execute)
address = utils.get_my_ipv4_address()
self.assertEqual(address, '127.0.0.1')
def test_get_my_ipv4_address_bad_process(self):
def fake_execute(*args, **kwargs):
raise processutils.ProcessExecutionError()
self.stubs.Set(utils, 'execute', fake_execute)
address = utils.get_my_ipv4_address()
self.assertEqual(address, '127.0.0.1')
def test_get_my_ipv4_address_with_single_interface(self):
response_route = """default via 192.168.1.1 dev wlan0 proto static
192.168.1.0/24 dev wlan0 proto kernel scope link src 192.168.1.137 metric 9
"""
response_addr = """
1: lo inet 127.0.0.1/8 scope host lo
3: wlan0 inet 192.168.1.137/24 brd 192.168.1.255 scope global wlan0
"""
def fake_execute(*args, **kwargs):
if 'route' in args:
return response_route, None
return response_addr, None
self.stubs.Set(utils, 'execute', fake_execute)
address = utils.get_my_ipv4_address()
self.assertEqual(address, '192.168.1.137')
def test_get_my_ipv4_address_with_multi_ipv4_on_single_interface(self):
response_route = """
172.18.56.0/24 dev customer proto kernel scope link src 172.18.56.22
169.254.0.0/16 dev customer scope link metric 1031
default via 172.18.56.1 dev customer
"""
response_addr = (""
"31: customer inet 172.18.56.22/24 brd 172.18.56.255 scope global"
" customer\n"
"31: customer inet 172.18.56.32/24 brd 172.18.56.255 scope global "
"secondary customer")
def fake_execute(*args, **kwargs):
if 'route' in args:
return response_route, None
return response_addr, None
self.stubs.Set(utils, 'execute', fake_execute)
address = utils.get_my_ipv4_address()
self.assertEqual(address, '172.18.56.22')
def test_get_my_ipv4_address_with_multiple_interfaces(self):
response_route = """
169.1.9.0/24 dev eth1 proto kernel scope link src 169.1.9.10
172.17.248.0/21 dev eth0 proto kernel scope link src 172.17.255.9
169.254.0.0/16 dev eth0 scope link metric 1002
169.254.0.0/16 dev eth1 scope link metric 1003
default via 172.17.248.1 dev eth0 proto static
"""
response_addr = """
1: lo inet 127.0.0.1/8 scope host lo
2: eth0 inet 172.17.255.9/21 brd 172.17.255.255 scope global eth0
3: eth1 inet 169.1.9.10/24 scope global eth1
"""
def fake_execute(*args, **kwargs):
if 'route' in args:
return response_route, None
return response_addr, None
self.stubs.Set(utils, 'execute', fake_execute)
address = utils.get_my_ipv4_address()
self.assertEqual(address, '172.17.255.9')
class GenericUtilsTestCase(test.NoDBTestCase):
def test_parse_server_string(self):
result = utils.parse_server_string('::1')
self.assertEqual(('::1', ''), result)
result = utils.parse_server_string('[::1]:8773')
self.assertEqual(('::1', '8773'), result)
result = utils.parse_server_string('2001:db8::192.168.1.1')
self.assertEqual(('2001:db8::192.168.1.1', ''), result)
result = utils.parse_server_string('[2001:db8::192.168.1.1]:8773')
self.assertEqual(('2001:db8::192.168.1.1', '8773'), result)
result = utils.parse_server_string('192.168.1.1')
self.assertEqual(('192.168.1.1', ''), result)
result = utils.parse_server_string('192.168.1.2:8773')
self.assertEqual(('192.168.1.2', '8773'), result)
result = utils.parse_server_string('192.168.1.3')
self.assertEqual(('192.168.1.3', ''), result)
result = utils.parse_server_string('www.example.com:8443')
self.assertEqual(('www.example.com', '8443'), result)
result = utils.parse_server_string('www.example.com')
self.assertEqual(('www.example.com', ''), result)
# error case
result = utils.parse_server_string('www.exa:mple.com:8443')
self.assertEqual(('', ''), result)
def test_hostname_unicode_sanitization(self):
hostname = u"\u7684.test.example.com"
self.assertEqual("test.example.com",
utils.sanitize_hostname(hostname))
def test_hostname_sanitize_periods(self):
hostname = "....test.example.com..."
self.assertEqual("test.example.com",
utils.sanitize_hostname(hostname))
def test_hostname_sanitize_dashes(self):
hostname = "----test.example.com---"
self.assertEqual("test.example.com",
utils.sanitize_hostname(hostname))
def test_hostname_sanitize_characters(self):
hostname = "(#@&$!(@*--#&91)(__=+--test-host.example!!.com-0+"
self.assertEqual("91----test-host.example.com-0",
utils.sanitize_hostname(hostname))
def test_hostname_translate(self):
hostname = "<}\x1fh\x10e\x08l\x02l\x05o\x12!{>"
self.assertEqual("hello", utils.sanitize_hostname(hostname))
def test_read_cached_file(self):
self.mox.StubOutWithMock(os.path, "getmtime")
os.path.getmtime(mox.IgnoreArg()).AndReturn(1)
self.mox.ReplayAll()
cache_data = {"data": 1123, "mtime": 1}
data = utils.read_cached_file("/this/is/a/fake", cache_data)
self.assertEqual(cache_data["data"], data)
def test_read_modified_cached_file(self):
self.mox.StubOutWithMock(os.path, "getmtime")
self.mox.StubOutWithMock(__builtin__, 'open')
os.path.getmtime(mox.IgnoreArg()).AndReturn(2)
fake_contents = "lorem ipsum"
fake_file = self.mox.CreateMockAnything()
fake_file.read().AndReturn(fake_contents)
fake_context_manager = self.mox.CreateMockAnything()
fake_context_manager.__enter__().AndReturn(fake_file)
fake_context_manager.__exit__(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
__builtin__.open(mox.IgnoreArg()).AndReturn(fake_context_manager)
self.mox.ReplayAll()
cache_data = {"data": 1123, "mtime": 1}
self.reload_called = False
def test_reload(reloaded_data):
self.assertEqual(reloaded_data, fake_contents)
self.reload_called = True
data = utils.read_cached_file("/this/is/a/fake", cache_data,
reload_func=test_reload)
self.assertEqual(data, fake_contents)
self.assertTrue(self.reload_called)
def test_generate_password(self):
password = utils.generate_password()
self.assertTrue([c for c in password if c in '0123456789'])
self.assertTrue([c for c in password
if c in 'abcdefghijklmnopqrstuvwxyz'])
self.assertTrue([c for c in password
if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'])
def test_read_file_as_root(self):
def fake_execute(*args, **kwargs):
if args[1] == 'bad':
raise processutils.ProcessExecutionError()
return 'fakecontents', None
self.stubs.Set(utils, 'execute', fake_execute)
contents = utils.read_file_as_root('good')
self.assertEqual(contents, 'fakecontents')
self.assertRaises(exception.FileNotFound,
utils.read_file_as_root, 'bad')
def test_temporary_chown(self):
def fake_execute(*args, **kwargs):
if args[0] == 'chown':
fake_execute.uid = args[1]
self.stubs.Set(utils, 'execute', fake_execute)
with tempfile.NamedTemporaryFile() as f:
with utils.temporary_chown(f.name, owner_uid=2):
self.assertEqual(fake_execute.uid, 2)
self.assertEqual(fake_execute.uid, os.getuid())
def test_xhtml_escape(self):
self.assertEqual('"foo"', utils.xhtml_escape('"foo"'))
self.assertEqual(''foo'', utils.xhtml_escape("'foo'"))
self.assertEqual('&', utils.xhtml_escape('&'))
self.assertEqual('>', utils.xhtml_escape('>'))
self.assertEqual('<', utils.xhtml_escape('<'))
self.assertEqual('<foo>', utils.xhtml_escape('<foo>'))
def test_is_valid_ipv4(self):
self.assertTrue(utils.is_valid_ipv4('127.0.0.1'))
self.assertFalse(utils.is_valid_ipv4('::1'))
self.assertFalse(utils.is_valid_ipv4('bacon'))
self.assertFalse(utils.is_valid_ipv4(""))
self.assertFalse(utils.is_valid_ipv4(10))
def test_is_valid_ipv6(self):
self.assertTrue(utils.is_valid_ipv6("::1"))
self.assertTrue(utils.is_valid_ipv6(
"abcd:ef01:2345:6789:abcd:ef01:192.168.254.254"))
self.assertTrue(utils.is_valid_ipv6(
"0000:0000:0000:0000:0000:0000:0000:0001"))
self.assertFalse(utils.is_valid_ipv6("foo"))
self.assertFalse(utils.is_valid_ipv6("127.0.0.1"))
self.assertFalse(utils.is_valid_ipv6(""))
self.assertFalse(utils.is_valid_ipv6(10))
def test_is_valid_ipv6_cidr(self):
self.assertTrue(utils.is_valid_ipv6_cidr("2600::/64"))
self.assertTrue(utils.is_valid_ipv6_cidr(
"abcd:ef01:2345:6789:abcd:ef01:192.168.254.254/48"))
self.assertTrue(utils.is_valid_ipv6_cidr(
"0000:0000:0000:0000:0000:0000:0000:0001/32"))
self.assertTrue(utils.is_valid_ipv6_cidr(
"0000:0000:0000:0000:0000:0000:0000:0001"))
self.assertFalse(utils.is_valid_ipv6_cidr("foo"))
self.assertFalse(utils.is_valid_ipv6_cidr("127.0.0.1"))
def test_get_shortened_ipv6(self):
self.assertEqual("abcd:ef01:2345:6789:abcd:ef01:c0a8:fefe",
utils.get_shortened_ipv6(
"abcd:ef01:2345:6789:abcd:ef01:192.168.254.254"))
self.assertEqual("::1", utils.get_shortened_ipv6(
"0000:0000:0000:0000:0000:0000:0000:0001"))
self.assertEqual("caca::caca:0:babe:201:102",
utils.get_shortened_ipv6(
"caca:0000:0000:caca:0000:babe:0201:0102"))
self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6,
"127.0.0.1")
self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6,
"failure")
def test_get_shortened_ipv6_cidr(self):
self.assertEqual("2600::/64", utils.get_shortened_ipv6_cidr(
"2600:0000:0000:0000:0000:0000:0000:0000/64"))
self.assertEqual("2600::/64", utils.get_shortened_ipv6_cidr(
"2600::1/64"))
self.assertRaises(netaddr.AddrFormatError,
utils.get_shortened_ipv6_cidr,
"127.0.0.1")
self.assertRaises(netaddr.AddrFormatError,
utils.get_shortened_ipv6_cidr,
"failure")
def test_get_hash_str(self):
base_str = "foo"
value = hashlib.md5(base_str).hexdigest()
self.assertEqual(
value, utils.get_hash_str(base_str))
def test_cpu_count(self):
def fake_cpu_count():
return 8
self.stubs.Set(multiprocessing, 'cpu_count', fake_cpu_count)
self.assertEqual(8, utils.cpu_count())
def test_cpu_count_not_implemented_returns_1(self):
def fake_cpu_count():
raise NotImplementedError()
self.stubs.Set(multiprocessing, 'cpu_count', fake_cpu_count)
self.assertEqual(1, utils.cpu_count())
class MonkeyPatchTestCase(test.NoDBTestCase):
"""Unit test for utils.monkey_patch()."""
def setUp(self):
super(MonkeyPatchTestCase, self).setUp()
self.example_package = 'nova.tests.monkey_patch_example.'
self.flags(
monkey_patch=True,
monkey_patch_modules=[self.example_package + 'example_a' + ':'
+ self.example_package + 'example_decorator'])
def test_monkey_patch(self):
utils.monkey_patch()
nova.tests.monkey_patch_example.CALLED_FUNCTION = []
from nova.tests.monkey_patch_example import example_a
from nova.tests.monkey_patch_example import example_b
self.assertEqual('Example function', example_a.example_function_a())
exampleA = example_a.ExampleClassA()
exampleA.example_method()
ret_a = exampleA.example_method_add(3, 5)
self.assertEqual(ret_a, 8)
self.assertEqual('Example function', example_b.example_function_b())
exampleB = example_b.ExampleClassB()
exampleB.example_method()
ret_b = exampleB.example_method_add(3, 5)
self.assertEqual(ret_b, 8)
package_a = self.example_package + 'example_a.'
self.assertTrue(package_a + 'example_function_a'
in nova.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertTrue(package_a + 'ExampleClassA.example_method'
in nova.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertTrue(package_a + 'ExampleClassA.example_method_add'
in nova.tests.monkey_patch_example.CALLED_FUNCTION)
package_b = self.example_package + 'example_b.'
self.assertFalse(package_b + 'example_function_b'
in nova.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertFalse(package_b + 'ExampleClassB.example_method'
in nova.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertFalse(package_b + 'ExampleClassB.example_method_add'
in nova.tests.monkey_patch_example.CALLED_FUNCTION)
class MonkeyPatchDefaultTestCase(test.NoDBTestCase):
"""Unit test for default monkey_patch_modules value."""
def setUp(self):
super(MonkeyPatchDefaultTestCase, self).setUp()
self.flags(
monkey_patch=True)
def test_monkey_patch_default_mod(self):
# monkey_patch_modules is defined to be
# <module_to_patch>:<decorator_to_patch_with>
# Here we check that both parts of the default values are
# valid
for module in CONF.monkey_patch_modules:
m = module.split(':', 1)
# Check we can import the module to be patched
importlib.import_module(m[0])
# check the decorator is valid
decorator_name = m[1].rsplit('.', 1)
decorator_module = importlib.import_module(decorator_name[0])
getattr(decorator_module, decorator_name[1])
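# last_completed_audit_period() accepts units of the form '<unit>[@<offset>]',
# e.g. 'hour@30' means hour-long periods whose boundaries fall at minute 30.
# The tests below pin the returned (begin, end) tuples against a frozen "now"
# of 2012-03-05 08:12:23.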
class AuditPeriodTest(test.NoDBTestCase):
def setUp(self):
super(AuditPeriodTest, self).setUp()
        # a fairly random time to test with
self.test_time = datetime.datetime(second=23,
minute=12,
hour=8,
day=5,
month=3,
year=2012)
timeutils.set_time_override(override_time=self.test_time)
def tearDown(self):
timeutils.clear_time_override()
super(AuditPeriodTest, self).tearDown()
def test_hour(self):
begin, end = utils.last_completed_audit_period(unit='hour')
self.assertEqual(begin, datetime.datetime(
hour=7,
day=5,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(
hour=8,
day=5,
month=3,
year=2012))
def test_hour_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='hour@10')
self.assertEqual(begin, datetime.datetime(
minute=10,
hour=7,
day=5,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(
minute=10,
hour=8,
day=5,
month=3,
year=2012))
def test_hour_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='hour@30')
self.assertEqual(begin, datetime.datetime(
minute=30,
hour=6,
day=5,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(
minute=30,
hour=7,
day=5,
month=3,
year=2012))
def test_day(self):
begin, end = utils.last_completed_audit_period(unit='day')
self.assertEqual(begin, datetime.datetime(
day=4,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(
day=5,
month=3,
year=2012))
def test_day_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='day@6')
self.assertEqual(begin, datetime.datetime(
hour=6,
day=4,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(
hour=6,
day=5,
month=3,
year=2012))
def test_day_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='day@10')
self.assertEqual(begin, datetime.datetime(
hour=10,
day=3,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(
hour=10,
day=4,
month=3,
year=2012))
def test_month(self):
begin, end = utils.last_completed_audit_period(unit='month')
self.assertEqual(begin, datetime.datetime(
day=1,
month=2,
year=2012))
self.assertEqual(end, datetime.datetime(
day=1,
month=3,
year=2012))
def test_month_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='month@2')
self.assertEqual(begin, datetime.datetime(
day=2,
month=2,
year=2012))
self.assertEqual(end, datetime.datetime(
day=2,
month=3,
year=2012))
def test_month_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='month@15')
self.assertEqual(begin, datetime.datetime(
day=15,
month=1,
year=2012))
self.assertEqual(end, datetime.datetime(
day=15,
month=2,
year=2012))
def test_year(self):
begin, end = utils.last_completed_audit_period(unit='year')
self.assertEqual(begin, datetime.datetime(
day=1,
month=1,
year=2011))
self.assertEqual(end, datetime.datetime(
day=1,
month=1,
year=2012))
def test_year_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='year@2')
self.assertEqual(begin, datetime.datetime(
day=1,
month=2,
year=2011))
self.assertEqual(end, datetime.datetime(
day=1,
month=2,
year=2012))
def test_year_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='year@6')
self.assertEqual(begin, datetime.datetime(
day=1,
month=6,
year=2010))
self.assertEqual(end, datetime.datetime(
day=1,
month=6,
year=2011))
class MkfsTestCase(test.NoDBTestCase):
def test_mkfs(self):
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('mkfs', '-t', 'ext4', '-F', '/my/block/dev',
run_as_root=False)
utils.execute('mkfs', '-t', 'msdos', '/my/msdos/block/dev',
run_as_root=False)
utils.execute('mkswap', '/my/swap/block/dev',
run_as_root=False)
self.mox.ReplayAll()
utils.mkfs('ext4', '/my/block/dev')
utils.mkfs('msdos', '/my/msdos/block/dev')
utils.mkfs('swap', '/my/swap/block/dev')
def test_mkfs_with_label(self):
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('mkfs', '-t', 'ext4', '-F',
'-L', 'ext4-vol', '/my/block/dev', run_as_root=False)
utils.execute('mkfs', '-t', 'msdos',
'-n', 'msdos-vol', '/my/msdos/block/dev',
run_as_root=False)
utils.execute('mkswap', '-L', 'swap-vol', '/my/swap/block/dev',
run_as_root=False)
self.mox.ReplayAll()
utils.mkfs('ext4', '/my/block/dev', 'ext4-vol')
utils.mkfs('msdos', '/my/msdos/block/dev', 'msdos-vol')
utils.mkfs('swap', '/my/swap/block/dev', 'swap-vol')
class LastBytesTestCase(test.NoDBTestCase):
"""Test the last_bytes() utility method."""
def setUp(self):
super(LastBytesTestCase, self).setUp()
self.f = StringIO.StringIO('1234567890')
def test_truncated(self):
self.f.seek(0, os.SEEK_SET)
out, remaining = utils.last_bytes(self.f, 5)
self.assertEqual(out, '67890')
self.assertTrue(remaining > 0)
def test_read_all(self):
self.f.seek(0, os.SEEK_SET)
out, remaining = utils.last_bytes(self.f, 1000)
self.assertEqual(out, '1234567890')
self.assertFalse(remaining > 0)
def test_seek_too_far_real_file(self):
        # StringIO doesn't raise IOError if you seek past the start of the file.
flo = tempfile.TemporaryFile()
content = '1234567890'
flo.write(content)
self.assertEqual((content, 0), utils.last_bytes(flo, 1000))
class IntLikeTestCase(test.NoDBTestCase):
def test_is_int_like(self):
self.assertTrue(utils.is_int_like(1))
self.assertTrue(utils.is_int_like("1"))
self.assertTrue(utils.is_int_like("514"))
self.assertTrue(utils.is_int_like("0"))
self.assertFalse(utils.is_int_like(1.1))
self.assertFalse(utils.is_int_like("1.1"))
self.assertFalse(utils.is_int_like("1.1.1"))
self.assertFalse(utils.is_int_like(None))
self.assertFalse(utils.is_int_like("0."))
self.assertFalse(utils.is_int_like("aaaaaa"))
self.assertFalse(utils.is_int_like("...."))
self.assertFalse(utils.is_int_like("1g"))
self.assertFalse(
utils.is_int_like("0cc3346e-9fef-4445-abe6-5d2b2690ec64"))
self.assertFalse(utils.is_int_like("a1"))
class MetadataToDictTestCase(test.NoDBTestCase):
def test_metadata_to_dict(self):
self.assertEqual(utils.metadata_to_dict(
[{'key': 'foo1', 'value': 'bar'},
{'key': 'foo2', 'value': 'baz'}]),
{'foo1': 'bar', 'foo2': 'baz'})
def test_metadata_to_dict_empty(self):
self.assertEqual(utils.metadata_to_dict([]), {})
def test_dict_to_metadata(self):
expected = [{'key': 'foo1', 'value': 'bar1'},
{'key': 'foo2', 'value': 'bar2'}]
self.assertEqual(utils.dict_to_metadata(dict(foo1='bar1',
foo2='bar2')),
expected)
def test_dict_to_metadata_empty(self):
self.assertEqual(utils.dict_to_metadata({}), [])
class WrappedCodeTestCase(test.NoDBTestCase):
"""Test the get_wrapped_function utility method."""
def _wrapper(self, function):
@functools.wraps(function)
def decorated_function(self, *args, **kwargs):
function(self, *args, **kwargs)
return decorated_function
def test_single_wrapped(self):
@self._wrapper
def wrapped(self, instance, red=None, blue=None):
pass
func = utils.get_wrapped_function(wrapped)
func_code = func.func_code
self.assertEqual(4, len(func_code.co_varnames))
self.assertIn('self', func_code.co_varnames)
self.assertIn('instance', func_code.co_varnames)
self.assertIn('red', func_code.co_varnames)
self.assertIn('blue', func_code.co_varnames)
def test_double_wrapped(self):
@self._wrapper
@self._wrapper
def wrapped(self, instance, red=None, blue=None):
pass
func = utils.get_wrapped_function(wrapped)
func_code = func.func_code
self.assertEqual(4, len(func_code.co_varnames))
self.assertIn('self', func_code.co_varnames)
self.assertIn('instance', func_code.co_varnames)
self.assertIn('red', func_code.co_varnames)
self.assertIn('blue', func_code.co_varnames)
def test_triple_wrapped(self):
@self._wrapper
@self._wrapper
@self._wrapper
def wrapped(self, instance, red=None, blue=None):
pass
func = utils.get_wrapped_function(wrapped)
func_code = func.func_code
self.assertEqual(4, len(func_code.co_varnames))
self.assertIn('self', func_code.co_varnames)
self.assertIn('instance', func_code.co_varnames)
self.assertIn('red', func_code.co_varnames)
self.assertIn('blue', func_code.co_varnames)
class ExpectedArgsTestCase(test.NoDBTestCase):
def test_passes(self):
@utils.expects_func_args('foo', 'baz')
def dec(f):
return f
@dec
def func(foo, bar, baz="lol"):
pass
def test_raises(self):
@utils.expects_func_args('foo', 'baz')
def dec(f):
return f
def func(bar, baz):
pass
self.assertRaises(TypeError, dec, func)
def test_var_no_of_args(self):
@utils.expects_func_args('foo')
def dec(f):
return f
@dec
def func(bar, *args, **kwargs):
pass
def test_more_layers(self):
@utils.expects_func_args('foo', 'baz')
def dec(f):
return f
def dec_2(f):
def inner_f(*a, **k):
return f()
return inner_f
@dec_2
def func(bar, baz):
pass
self.assertRaises(TypeError, dec, func)
class StringLengthTestCase(test.NoDBTestCase):
def test_check_string_length(self):
self.assertIsNone(utils.check_string_length(
'test', 'name', max_length=255))
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
11, 'name', max_length=255)
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
'', 'name', min_length=1)
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
'a' * 256, 'name', max_length=255)
class ValidateIntegerTestCase(test.NoDBTestCase):
def test_valid_inputs(self):
self.assertEqual(
utils.validate_integer(42, "answer"), 42)
self.assertEqual(
utils.validate_integer("42", "answer"), 42)
self.assertEqual(
utils.validate_integer(
"7", "lucky", min_value=7, max_value=8), 7)
self.assertEqual(
utils.validate_integer(
7, "lucky", min_value=6, max_value=7), 7)
self.assertEqual(
utils.validate_integer(
300, "Spartaaa!!!", min_value=300), 300)
self.assertEqual(
utils.validate_integer(
"300", "Spartaaa!!!", max_value=300), 300)
def test_invalid_inputs(self):
self.assertRaises(exception.InvalidInput,
utils.validate_integer,
"im-not-an-int", "not-an-int")
self.assertRaises(exception.InvalidInput,
utils.validate_integer,
3.14, "Pie")
self.assertRaises(exception.InvalidInput,
utils.validate_integer,
"299", "Sparta no-show",
min_value=300, max_value=300)
self.assertRaises(exception.InvalidInput,
utils.validate_integer,
55, "doing 55 in a 54",
max_value=54)
self.assertRaises(exception.InvalidInput,
utils.validate_integer,
unichr(129), "UnicodeError",
max_value=1000)
class ValidateNeutronConfiguration(test.NoDBTestCase):
def setUp(self):
super(ValidateNeutronConfiguration, self).setUp()
utils.reset_is_neutron()
def test_nova_network(self):
self.flags(network_api_class='nova.network.api.API')
self.assertFalse(utils.is_neutron())
def test_neutron(self):
self.flags(network_api_class='nova.network.neutronv2.api.API')
self.assertTrue(utils.is_neutron())
def test_quantum(self):
self.flags(network_api_class='nova.network.quantumv2.api.API')
self.assertTrue(utils.is_neutron())
class AutoDiskConfigUtilTestCase(test.NoDBTestCase):
def test_is_auto_disk_config_disabled(self):
self.assertTrue(utils.is_auto_disk_config_disabled("Disabled "))
def test_is_auto_disk_config_disabled_none(self):
self.assertFalse(utils.is_auto_disk_config_disabled(None))
def test_is_auto_disk_config_disabled_false(self):
self.assertFalse(utils.is_auto_disk_config_disabled("false"))
class GetSystemMetadataFromImageTestCase(test.NoDBTestCase):
def get_image(self):
image_meta = {
"id": "fake-image",
"name": "fake-name",
"min_ram": 1,
"min_disk": 1,
"disk_format": "raw",
"container_format": "bare",
}
return image_meta
def get_flavor(self):
flavor = {
"id": "fake.flavor",
"root_gb": 10,
}
return flavor
def test_base_image_properties(self):
image = self.get_image()
# Verify that we inherit all the needed keys
sys_meta = utils.get_system_metadata_from_image(image)
for key in utils.SM_INHERITABLE_KEYS:
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
self.assertEqual(image[key], sys_meta.get(sys_key))
# Verify that everything else is ignored
self.assertEqual(len(sys_meta), len(utils.SM_INHERITABLE_KEYS))
def test_inherit_image_properties(self):
image = self.get_image()
image["properties"] = {"foo1": "bar", "foo2": "baz"}
sys_meta = utils.get_system_metadata_from_image(image)
# Verify that we inherit all the image properties
for key, expected in image["properties"].iteritems():
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
self.assertEqual(sys_meta[sys_key], expected)
def test_vhd_min_disk_image(self):
image = self.get_image()
flavor = self.get_flavor()
image["disk_format"] = "vhd"
sys_meta = utils.get_system_metadata_from_image(image, flavor)
# Verify that the min_disk property is taken from
# flavor's root_gb when using vhd disk format
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, "min_disk")
self.assertEqual(sys_meta[sys_key], flavor["root_gb"])
def test_dont_inherit_empty_values(self):
image = self.get_image()
for key in utils.SM_INHERITABLE_KEYS:
image[key] = None
sys_meta = utils.get_system_metadata_from_image(image)
# Verify that the empty properties have not been inherited
for key in utils.SM_INHERITABLE_KEYS:
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
self.assertNotIn(sys_key, sys_meta)
class GetImageFromSystemMetadataTestCase(test.NoDBTestCase):
def get_system_metadata(self):
sys_meta = {
"image_min_ram": 1,
"image_min_disk": 1,
"image_disk_format": "raw",
"image_container_format": "bare",
}
return sys_meta
def test_image_from_system_metadata(self):
sys_meta = self.get_system_metadata()
sys_meta["%soo1" % utils.SM_IMAGE_PROP_PREFIX] = "bar"
sys_meta["%soo2" % utils.SM_IMAGE_PROP_PREFIX] = "baz"
image = utils.get_image_from_system_metadata(sys_meta)
# Verify that we inherit all the needed keys
for key in utils.SM_INHERITABLE_KEYS:
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
self.assertEqual(image[key], sys_meta.get(sys_key))
# Verify that we inherit the rest of metadata as properties
self.assertIn("properties", image)
for key, value in image["properties"].iteritems():
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
self.assertEqual(image["properties"][key], sys_meta[sys_key])
def test_dont_inherit_empty_values(self):
sys_meta = self.get_system_metadata()
for key in utils.SM_INHERITABLE_KEYS:
sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
sys_meta[sys_key] = None
image = utils.get_image_from_system_metadata(sys_meta)
# Verify that the empty properties have not been inherited
for key in utils.SM_INHERITABLE_KEYS:
self.assertNotIn(key, image)
def test_non_inheritable_image_properties(self):
sys_meta = self.get_system_metadata()
sys_meta["%soo1" % utils.SM_IMAGE_PROP_PREFIX] = "bar"
CONF.non_inheritable_image_properties = ["foo1"]
image = utils.get_image_from_system_metadata(sys_meta)
# Verify that the foo1 key has not been inherited
self.assertNotIn("foo1", image)
class VersionTestCase(test.NoDBTestCase):
def test_convert_version_to_int(self):
self.assertEqual(utils.convert_version_to_int('6.2.0'), 6002000)
self.assertEqual(utils.convert_version_to_int((6, 4, 3)), 6004003)
self.assertEqual(utils.convert_version_to_int((5, )), 5)
self.assertRaises(exception.NovaException,
utils.convert_version_to_int, '5a.6b')
def test_convert_version_to_string(self):
self.assertEqual(utils.convert_version_to_str(6007000), '6.7.0')
self.assertEqual(utils.convert_version_to_str(4), '4')
def test_convert_version_to_tuple(self):
self.assertEqual(utils.convert_version_to_tuple('6.7.0'), (6, 7, 0))
|
|
import re
from functools import reduce
import sqlalchemy as sa
from pyparsing import ParseException
from sqlalchemy import event
from sqlalchemy.dialects.postgresql.base import RESERVED_WORDS
from sqlalchemy.schema import DDL
from sqlalchemy_utils import TSVectorType
from .parser import SearchQueryParser
from .vectorizers import Vectorizer
__version__ = '0.10.0'
parser = SearchQueryParser()
vectorizer = Vectorizer()
def parse_search_query(query, parser=parser):
query = query.strip()
# Convert hyphens between words to spaces but leave all hyphens which are
# at the beginning of the word (negation operator)
query = re.sub(r'(?i)(?<=[^\s|^])-(?=[^\s])', ' ', query)
parts = query.split()
parts = [
parser.remove_special_chars(part).strip() for part in parts if part
]
query = ' '.join(parts)
if not query:
return u''
try:
return parser.parse(query)
except ParseException:
return u''
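# Note: the exact tsquery text produced depends on how SearchQueryParser is
# configured; callers of parse_search_query() should only rely on the empty
# string being returned for blank or unparseable input.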
class SearchQueryMixin(object):
def search(self, search_query, vector=None, regconfig=None, sort=False):
"""
Search given query with full text search.
:param search_query: the search query
:param vector: search vector to use
:param regconfig: postgresql regconfig to be used
:param sort: order results by relevance (quality of hit)
"""
return search(
self,
search_query,
vector=vector,
regconfig=regconfig,
sort=sort
)
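# Helper used by search(): collect every TSVectorType-typed column of a mapped
# class. When no explicit vector is passed to search(), the first of these
# columns is used.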
def inspect_search_vectors(entity):
return [
getattr(entity, key).property.columns[0]
for key, column
in sa.inspect(entity).columns.items()
if isinstance(column.type, TSVectorType)
]
def search(query, search_query, vector=None, regconfig=None, sort=False):
"""
Search given query with full text search.
:param search_query: the search query
:param vector: search vector to use
:param regconfig: postgresql regconfig to be used
:param sort: order results by relevance (quality of hit)
"""
if not search_query:
return query
search_query = parse_search_query(search_query)
if not search_query:
return query
if vector is None:
entity = query._entities[0].entity_zero.class_
search_vectors = inspect_search_vectors(entity)
vector = search_vectors[0]
kwargs = {}
if regconfig is not None:
kwargs['postgresql_regconfig'] = regconfig
query = query.filter(vector.match(search_query, **kwargs))
if sort:
query = query.order_by(
sa.desc(
sa.func.ts_rank_cd(
vector,
sa.func.to_tsquery(search_query)
)
)
)
return query.params(term=search_query)
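# Illustrative sketch (hypothetical `Article` model with a TSVectorType column
# named `search_vector`, and an active `session`):
#
#     query = search(session.query(Article), 'star wars', sort=True)
#
# which, roughly, adds a `search_vector @@ to_tsquery(...)` filter and, because
# sort=True, an ORDER BY ts_rank_cd(search_vector, to_tsquery(...)) DESC.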
def quote_identifier(identifier):
"""Adds double quotes to given identifier. Since PostgreSQL is the only
supported dialect we don't need dialect specific stuff here"""
return '"%s"' % identifier
class SQLConstruct(object):
def __init__(
self,
tsvector_column,
conn=None,
indexed_columns=None,
options=None
):
self.table = tsvector_column.table
self.tsvector_column = tsvector_column
self.conn = conn
self.options = self.init_options(options)
if indexed_columns:
self.indexed_columns = list(indexed_columns)
else:
self.indexed_columns = list(self.tsvector_column.type.columns)
self.params = {}
def init_options(self, options=None):
if not options:
options = {}
for key, value in SearchManager.default_options.items():
try:
option = self.tsvector_column.type.options[key]
except (KeyError, AttributeError):
option = value
options.setdefault(key, option)
return options
@property
def table_name(self):
if self.table.schema:
return '%s."%s"' % (self.table.schema, self.table.name)
else:
return '"' + self.table.name + '"'
@property
def search_function_name(self):
return self.options['search_trigger_function_name'].format(
table=self.table.name,
column=self.tsvector_column.name
)
@property
def search_trigger_name(self):
return self.options['search_trigger_name'].format(
table=self.table.name,
column=self.tsvector_column.name
)
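    # Builds the to_tsvector() expression for a single indexed column of the
    # trigger function body: NEW.<column> is coalesced to '', optionally has the
    # configured remove_symbols stripped via regexp_replace, is converted with
    # the configured regconfig and, if a weight is defined for the column,
    # wrapped in setweight().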
def column_vector(self, column):
if column.name in RESERVED_WORDS:
column.name = quote_identifier(column.name)
value = sa.text('NEW.{column}'.format(column=column.name))
try:
vectorizer_func = vectorizer[column]
except KeyError:
pass
else:
value = vectorizer_func(value)
value = sa.func.coalesce(value, sa.text("''"))
if self.options['remove_symbols']:
value = sa.func.regexp_replace(
value,
sa.text("'[{0}]'".format(self.options['remove_symbols'])),
sa.text("' '"),
sa.text("'g'")
)
value = sa.func.to_tsvector(self.options['regconfig'], value)
if column.name in self.options['weights']:
weight = self.options['weights'][column.name]
value = sa.func.setweight(value, weight)
return value
@property
def search_vector(self):
vectors = (
self.column_vector(getattr(self.table.c, column_name))
for column_name in self.indexed_columns
)
concatenated = reduce(lambda x, y: x.op('||')(y), vectors)
compiled = concatenated.compile(self.conn)
self.params = compiled.params
return compiled
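# The SQLConstruct subclasses below render the raw DDL used to create and drop
# the trigger function and the trigger that keep a tsvector column up to date
# on INSERT and UPDATE.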
class CreateSearchFunctionSQL(SQLConstruct):
def __str__(self):
return (
"""CREATE FUNCTION
{search_trigger_function_name}() RETURNS TRIGGER AS $$
BEGIN
NEW.{search_vector_name} = {ts_vector};
RETURN NEW;
END
$$ LANGUAGE 'plpgsql';
"""
).format(
search_trigger_function_name=self.search_function_name,
search_vector_name=self.tsvector_column.name,
ts_vector=self.search_vector
)
class CreateSearchTriggerSQL(SQLConstruct):
@property
def search_trigger_function_with_trigger_args(self):
if self.options['remove_symbols']:
return self.search_function_name + '()'
return 'tsvector_update_trigger({arguments})'.format(
arguments=', '.join(
[
self.tsvector_column.name,
"'%s'" % self.options['regconfig']
] +
self.indexed_columns
)
)
def __str__(self):
return (
"CREATE TRIGGER {search_trigger_name}"
" BEFORE UPDATE OR INSERT ON {table}"
" FOR EACH ROW EXECUTE PROCEDURE"
" {procedure_ddl}"
.format(
search_trigger_name=self.search_trigger_name,
table=self.table_name,
procedure_ddl=(
self.search_trigger_function_with_trigger_args
)
)
)
class DropSearchFunctionSQL(SQLConstruct):
def __str__(self):
return 'DROP FUNCTION IF EXISTS %s()' % self.search_function_name
class DropSearchTriggerSQL(SQLConstruct):
def __str__(self):
return 'DROP TRIGGER IF EXISTS %s ON %s' % (
self.search_trigger_name,
self.table_name
)
class SearchManager():
default_options = {
'remove_symbols': '-@.',
'search_trigger_name': '{table}_{column}_trigger',
'search_trigger_function_name': '{table}_{column}_update',
'regconfig': 'pg_catalog.english',
'weights': (),
}
def __init__(self, options={}):
self.options = self.default_options
self.options.update(options)
self.processed_columns = []
self.classes = set()
self.listeners = []
def option(self, column, name):
try:
return column.type.options[name]
except (AttributeError, KeyError):
return self.options[name]
def search_function_ddl(self, column):
def after_create(target, connection, **kw):
clause = CreateSearchFunctionSQL(column, conn=connection)
connection.execute(str(clause), **clause.params)
return after_create
def search_trigger_ddl(self, column):
"""
Returns the ddl for creating an automatically updated search trigger.
:param column: TSVectorType typed SQLAlchemy column object
"""
return DDL(str(CreateSearchTriggerSQL(column)))
def inspect_columns(self, cls):
"""
Inspects all searchable columns for given class.
:param cls: SQLAlchemy declarative class
"""
return [
column for column in cls.__table__.c
if isinstance(column.type, TSVectorType)
]
def append_index(self, cls, column):
if not hasattr(cls, '__table_args__') or cls.__table_args__ is None:
cls.__table_args__ = []
        # list.append() returns None, so build the list first and assign it back;
        # otherwise __table_args__ would be clobbered with None.
        table_args = list(cls.__table_args__)
        table_args.append(
            sa.Index(
                '_'.join(('ix', column.table.name, column.name)),
                column,
                postgresql_using='gin'
            )
        )
        cls.__table_args__ = table_args
def process_mapper(self, mapper, cls):
columns = self.inspect_columns(cls)
for column in columns:
if column in self.processed_columns:
continue
self.append_index(cls, column)
self.processed_columns.append(column)
def add_listener(self, args):
self.listeners.append(args)
event.listen(*args)
def attach_ddl_listeners(self):
# Remove all previously added listeners, so that same listener don't
# get added twice in situations where class configuration happens in
# multiple phases (issue #31).
for listener in self.listeners:
event.remove(*listener)
self.listeners = []
for column in self.processed_columns:
# This sets up the trigger that keeps the tsvector column up to
# date.
if column.type.columns:
table = column.table
if self.option(column, 'remove_symbols'):
self.add_listener((
table,
'after_create',
self.search_function_ddl(column)
))
self.add_listener((
table,
'after_drop',
DDL(str(DropSearchFunctionSQL(column)))
))
self.add_listener((
table,
'after_create',
self.search_trigger_ddl(column)
))
search_manager = SearchManager()
def sync_trigger(
conn,
table_name,
tsvector_column,
indexed_columns,
metadata=None,
options=None
):
"""
Synchronizes search trigger and trigger function for given table and given
search index column. Internally this function executes the following SQL
queries:
* Drops search trigger for given table (if it exists)
* Drops search function for given table (if it exists)
* Creates search function for given table
* Creates search trigger for given table
* Updates all rows for given search vector by running a column=column
update query for given table.
Example::
from sqlalchemy_searchable import sync_trigger
sync_trigger(
conn,
'article',
'search_vector',
['name', 'content']
)
This function is especially useful when working with alembic migrations.
In the following example we add a content column to article table and then
sync the trigger to contain this new column. ::
from alembic import op
from sqlalchemy_searchable import sync_trigger
def upgrade():
conn = op.get_bind()
op.add_column('article', sa.Column('content', sa.Text))
sync_trigger(conn, 'article', 'search_vector', ['name', 'content'])
# ... same for downgrade
If you are using vectorizers you need to initialize them in your migration
file and pass them to this function. ::
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects.postgresql import HSTORE
from sqlalchemy_searchable import sync_trigger, vectorizer
def upgrade():
vectorizer.clear()
conn = op.get_bind()
op.add_column('article', sa.Column('name_translations', HSTORE))
metadata = sa.MetaData(bind=conn)
articles = sa.Table('article', metadata, autoload=True)
@vectorizer(articles.c.name_translations)
def hstore_vectorizer(column):
return sa.cast(sa.func.avals(column), sa.Text)
op.add_column('article', sa.Column('content', sa.Text))
sync_trigger(
conn,
'article',
'search_vector',
['name_translations', 'content'],
metadata=metadata
)
# ... same for downgrade
:param conn: SQLAlchemy Connection object
:param table_name: name of the table to apply search trigger syncing
:param tsvector_column:
TSVector typed column which is used as the search index column
:param indexed_columns:
Full text indexed column names as a list
:param metadata:
Optional SQLAlchemy metadata object that is being used for autoloaded
Table. If None is given then new MetaData object is initialized within
this function.
:param options: Dictionary of configuration options
"""
if metadata is None:
metadata = sa.MetaData()
table = sa.Table(
table_name,
metadata,
autoload=True,
autoload_with=conn
)
params = dict(
tsvector_column=getattr(table.c, tsvector_column),
indexed_columns=indexed_columns,
options=options,
conn=conn
)
classes = [
DropSearchTriggerSQL,
DropSearchFunctionSQL,
CreateSearchFunctionSQL,
CreateSearchTriggerSQL,
]
for class_ in classes:
sql = class_(**params)
conn.execute(str(sql), **sql.params)
update_sql = table.update().values(
{indexed_columns[0]: sa.text(indexed_columns[0])}
)
conn.execute(update_sql)
def make_searchable(
mapper=sa.orm.mapper,
manager=search_manager,
options={}
):
manager.options.update(options)
event.listen(
mapper, 'instrument_class', manager.process_mapper
)
event.listen(
mapper, 'after_configured', manager.attach_ddl_listeners
)
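# Typical wiring sketch (hypothetical names, Flask-SQLAlchemy style): mix
# SearchQueryMixin into the query class, declare a TSVectorType column on the
# model and call make_searchable() before mappers are configured:
#
#     class ArticleQuery(BaseQuery, SearchQueryMixin):
#         pass
#
#     class Article(db.Model):
#         query_class = ArticleQuery
#         name = sa.Column(sa.Unicode(255))
#         search_vector = sa.Column(TSVectorType('name'))
#
#     make_searchable()
#     Article.query.search(u'first world war').limit(5).all()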
|
|
from __future__ import print_function, division
import hashlib
from copy import deepcopy
import h5py
import numpy as np
from ..util.meshgrid import meshgrid_nd
from ..util.functions import FreezableClass, is_numpy_array, monotonically_increasing, link_or_copy
from astropy import log as logger
from .grid_helpers import single_grid_dims
class CylindricalPolarGrid(FreezableClass):
'''
A cylindrical polar grid.
The grid can be initialized by passing the w, z, and phi coordinates of cell walls::
>>> grid = CylindricalPolarGrid(w_wall, z_wall, p_wall)
where ``w_wall``, ``z_wall``, and ``p_wall`` are 1-d sequences of wall
positions. The number of cells in the resulting grid will be one less
    in each dimension than the length of these arrays.
:class:`~hyperion.grid.CylindricalPolarGrid` objects may contain multiple
quantities (e.g. density, specific energy). To access these, you can
specify the name of the quantity as an item::
>>> grid['density']
which is no longer a :class:`~hyperion.grid.CylindricalPolarGrid` object, but
a :class:`~hyperion.grid.CylindricalPolarGridView` object. When setting
this for the first time, this can be set either to another
:class:`~hyperion.grid.CylindricalPolarGridView` object, an external h5py
link, or an empty list. For example, the following should work:
>>> grid['density_new'] = grid['density']
:class:`~hyperion.grid.CylindricalPolarGridView` objects allow the
specific dust population to be selected as an index:
>>> grid['density'][0]
Which is also a :class:`~hyperion.grid.CylindricalPolarGridView` object. The
data can then be accessed with the ``array`` attribute::
>>> grid['density'][0].array
which is a 3-d array of the requested quantity.
'''
def __init__(self, *args):
self.shape = None
self.w_wall = None
self.z_wall = None
self.p_wall = None
self.w = None
self.z = None
self.p = None
self.gw = None
self.gz = None
self.gp = None
self.volumes = None
self.areas = None
self.widths = None
self.quantities = {}
self._freeze()
if len(args) > 0:
if isinstance(args[0], CylindricalPolarGrid):
self.set_walls(args[0].w_wall, args[0].z_wall, args[0].p_wall)
else:
self.set_walls(*args)
def set_walls(self, w_wall, z_wall, p_wall):
if type(w_wall) in [list, tuple]:
w_wall = np.array(w_wall)
if type(z_wall) in [list, tuple]:
z_wall = np.array(z_wall)
if type(p_wall) in [list, tuple]:
p_wall = np.array(p_wall)
if not is_numpy_array(w_wall) or w_wall.ndim != 1:
raise ValueError("w_wall should be a 1-D sequence")
if not is_numpy_array(z_wall) or z_wall.ndim != 1:
raise ValueError("z_wall should be a 1-D sequence")
if not is_numpy_array(p_wall) or p_wall.ndim != 1:
raise ValueError("p_wall should be a 1-D sequence")
if not monotonically_increasing(w_wall):
raise ValueError("w_wall should be monotonically increasing")
if not monotonically_increasing(z_wall):
raise ValueError("z_wall should be monotonically increasing")
if not monotonically_increasing(p_wall):
raise ValueError("p_wall should be monotonically increasing")
if np.any(p_wall < 0.) or np.any(p_wall > 2. * np.pi):
raise ValueError("p_wall values be in the range [0:2*pi]")
# Find grid shape
self.shape = (len(p_wall) - 1, len(z_wall) - 1, len(w_wall) - 1)
# Store wall positions
self.w_wall = w_wall
self.z_wall = z_wall
self.p_wall = p_wall
# Compute cell centers
if w_wall[0] == 0.:
self.w = np.zeros(len(w_wall) - 1)
self.w[0] = w_wall[1] / 2.
self.w[1:] = 10. ** ((np.log10(w_wall[1:-1]) + np.log10(w_wall[2:])) / 2.)
else:
self.w = 10. ** ((np.log10(w_wall[:-1]) + np.log10(w_wall[1:])) / 2.)
self.z = (z_wall[:-1] + z_wall[1:]) / 2.
self.p = (p_wall[:-1] + p_wall[1:]) / 2.
# Generate 3D versions of r, t, p
#(each array is 3D and defined in every cell)
self.gw, self.gz, self.gp = meshgrid_nd(self.w, self.z, self.p)
# Generate 3D versions of the inner and outer wall positions respectively
gw_wall_min, gz_wall_min, gp_wall_min = \
meshgrid_nd(w_wall[:-1], z_wall[:-1], p_wall[:-1])
gw_wall_max, gz_wall_max, gp_wall_max = \
meshgrid_nd(w_wall[1:], z_wall[1:], p_wall[1:])
# USEFUL QUANTITIES
dr = gw_wall_max - gw_wall_min
dr2 = gw_wall_max ** 2 - gw_wall_min ** 2
dz = gz_wall_max - gz_wall_min
dp = gp_wall_max - gp_wall_min
# CELL VOLUMES
# dV = dr * dz * (r*dphi)
# V = [r_2^2 - r_1^2] / 2. * [z_2 - z_1] * [phi_2 - phi_1]
self.volumes = dr2 * dz * dp / 2.
# WALL AREAS
self.areas = np.zeros((6,) + self.shape)
# R walls:
# dA = r * dz * dphi
        # A = r * [z_2 - z_1] * [phi_2 - phi_1]
self.areas[0, :, :, :] = gw_wall_min * dz * dp
self.areas[1, :, :, :] = gw_wall_max * dz * dp
# z walls:
# dA = r * dr * dphi
# A = 0.5 * [r_2^2 - r_1^2] * [phi_2 - phi_1]
self.areas[2, :, :, :] = 0.5 * dr2 * dp
self.areas[3, :, :, :] = 0.5 * dr2 * dp
# Phi walls:
# dA = dr * dz
# A = [r_2 - r_1] * [z_2 - z_1]
self.areas[4, :, :, :] = dr * dz
self.areas[5, :, :, :] = dr * dz
# CELL WIDTHS
self.widths = np.zeros((3,) + self.shape)
# R direction:
# dS = dr
# S = r_2 - r_1
self.widths[0, :, :, :] = dr
# z direction:
# dS = dz
# S = [z_2 - z_1]
self.widths[1, :, :, :] = dz
# Phi direction:
# dS = r * dphi
# S = r * [phi_2 - phi_1]
self.widths[2, :, :, :] = self.gw * dp
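    # Quick sanity sketch (illustrative values): for a single-cell grid covering
    # the full azimuthal range, the volumes computed above sum to pi * R**2 * H:
    #
    #     grid = CylindricalPolarGrid([0., 1.], [0., 2.], [0., 2. * np.pi])
    #     np.isclose(grid.volumes.sum(), np.pi * 1. ** 2 * 2.)   # -> True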
def __getattr__(self, attribute):
if attribute == 'n_dust':
n_dust = None
for quantity in self.quantities:
n_dust_q, shape_q = single_grid_dims(self.quantities[quantity])
if n_dust is None:
n_dust = n_dust_q
elif n_dust_q is not None:
if n_dust != n_dust_q:
raise ValueError("Not all dust lists in the grid have the same size")
return n_dust
else:
return FreezableClass.__getattribute__(self, attribute)
def _check_array_dimensions(self, array=None):
'''
Check that a grid's array dimensions agree with this grid's metadata
Parameters
----------
array : np.ndarray or list of np.ndarray, optional
The array for which to test the dimensions. If this is not
specified, this method performs a self-consistency check of array
dimensions and meta-data.
'''
n_pop_ref = None
if isinstance(array, CylindricalPolarGridView):
array = array.quantities[array.viewed_quantity]
for quantity in self.quantities:
if array is None:
n_pop, shape = single_grid_dims(self.quantities[quantity])
else:
n_pop, shape = single_grid_dims(array)
if shape != self.shape:
raise ValueError("Quantity arrays do not have the right "
"dimensions: %s instead of %s"
% (shape, self.shape))
if n_pop is not None:
if n_pop_ref is None:
n_pop_ref = n_pop
elif n_pop != n_pop_ref:
raise ValueError("Not all dust lists in the grid have the same size")
def read(self, group, quantities='all'):
'''
Read the geometry and physical quantities from a cylindrical polar grid
Parameters
----------
group : h5py.Group
The HDF5 group to read the grid from. This group should contain
groups named 'Geometry' and 'Quantities'.
quantities : 'all' or list
Which physical quantities to read in. Use 'all' to read in all
quantities or a list of strings to read only specific quantities.
'''
# Read in geometry
self.read_geometry(group['Geometry'])
# Read in physical quantities
self.read_quantities(group['Quantities'], quantities=quantities)
# Self-consistently check geometry and physical quantities
self._check_array_dimensions()
def read_geometry(self, group):
'''
Read in geometry information from a cylindrical polar grid
Parameters
----------
group : h5py.Group
The HDF5 group to read the grid geometry from.
'''
if group.attrs['grid_type'].decode('utf-8') != 'cyl_pol':
raise ValueError("Grid is not cylindrical polar")
self.set_walls(group['walls_1']['w'],
group['walls_2']['z'],
group['walls_3']['p'])
# Check that advertised hash matches real hash
if group.attrs['geometry'].decode('utf-8') != self.get_geometry_id():
raise Exception("Calculated geometry hash does not match hash in file")
def read_quantities(self, group, quantities='all'):
'''
Read in physical quantities from a cylindrical polar grid
Parameters
----------
group : h5py.Group
The HDF5 group to read the grid quantities from
quantities : 'all' or list
Which physical quantities to read in. Use 'all' to read in all
quantities or a list of strings to read only specific quantities.
'''
# Read in physical quantities
if quantities is not None:
for quantity in group:
if quantities == 'all' or quantity in quantities:
array = np.array(group[quantity])
if array.ndim == 4: # if array is 4D, it is a list of 3D arrays
self.quantities[quantity] = [array[i] for i in range(array.shape[0])]
else:
self.quantities[quantity] = array
# Self-consistently check geometry and physical quantities
self._check_array_dimensions()
def write(self, group, quantities='all', copy=True, absolute_paths=False, compression=True, wall_dtype=float, physics_dtype=float):
'''
Write out the cylindrical polar grid
Parameters
----------
group : h5py.Group
The HDF5 group to write the grid to
quantities : 'all' or list
Which physical quantities to write out. Use 'all' to write out all
quantities or a list of strings to write only specific quantities.
copy : bool
Whether to copy external links, or leave them as links.
absolute_paths : bool
If copy is False, then this indicates whether to use absolute or
relative paths for links.
compression : bool
Whether to compress the arrays in the HDF5 file
wall_dtype : type
The datatype to use to write the wall positions
physics_dtype : type
The datatype to use to write the physical quantities
'''
# Create HDF5 groups if needed
if 'Geometry' not in group:
g_geometry = group.create_group('Geometry')
else:
g_geometry = group['Geometry']
if 'Quantities' not in group:
g_quantities = group.create_group('Quantities')
else:
g_quantities = group['Quantities']
# Write out geometry
g_geometry.attrs['grid_type'] = np.string_('cyl_pol'.encode('utf-8'))
g_geometry.attrs['geometry'] = np.string_(self.get_geometry_id().encode('utf-8'))
dset = g_geometry.create_dataset("walls_1", data=np.array(list(zip(self.w_wall)), dtype=[('w', wall_dtype)]), compression=compression)
dset.attrs['Unit'] = np.string_('cm'.encode('utf-8'))
dset = g_geometry.create_dataset("walls_2", data=np.array(list(zip(self.z_wall)), dtype=[('z', wall_dtype)]), compression=compression)
dset.attrs['Unit'] = np.string_('cm'.encode('utf-8'))
dset = g_geometry.create_dataset("walls_3", data=np.array(list(zip(self.p_wall)), dtype=[('p', wall_dtype)]), compression=compression)
dset.attrs['Unit'] = np.string_('rad'.encode('utf-8'))
# Self-consistently check geometry and physical quantities
self._check_array_dimensions()
# Write out physical quantities
for quantity in self.quantities:
if quantities == 'all' or quantity in quantities:
if isinstance(self.quantities[quantity], h5py.ExternalLink):
link_or_copy(g_quantities, quantity, self.quantities[quantity], copy, absolute_paths=absolute_paths)
else:
dset = g_quantities.create_dataset(quantity, data=self.quantities[quantity],
compression=compression,
dtype=physics_dtype)
dset.attrs['geometry'] = np.string_(self.get_geometry_id().encode('utf-8'))
def write_single_array(self, group, name, array, copy=True, absolute_paths=False, compression=True, physics_dtype=float):
'''
Write out a single quantity, checking for consistency with geometry
Parameters
----------
group : h5py.Group
The HDF5 group to write the grid to
name : str
The name of the array in the group
array : np.ndarray
The array to write out
copy : bool
Whether to copy external links, or leave them as links.
absolute_paths : bool
If copy is False, then this indicates whether to use absolute or
relative paths for links.
compression : bool
Whether to compress the arrays in the HDF5 file
wall_dtype : type
The datatype to use to write the wall positions
physics_dtype : type
The datatype to use to write the physical quantities
'''
# Check consistency of array dimensions with grid
self._check_array_dimensions(array)
if isinstance(array, h5py.ExternalLink):
link_or_copy(group, name, array, copy, absolute_paths=absolute_paths)
else:
dset = group.create_dataset(name, data=array,
compression=compression,
dtype=physics_dtype)
dset.attrs['geometry'] = np.string_(self.get_geometry_id().encode('utf-8'))
def get_geometry_id(self):
geo_hash = hashlib.md5()
geo_hash.update(self.w_wall.tostring())
geo_hash.update(self.z_wall.tostring())
geo_hash.update(self.p_wall.tostring())
return geo_hash.hexdigest()
def __getitem__(self, item):
return CylindricalPolarGridView(self, item)
def __setitem__(self, item, value):
if isinstance(value, CylindricalPolarGridView):
if self.w_wall is None and self.z_wall is None and self.p_wall is None:
logger.warn("No geometry in target grid - copying from original grid")
self.set_walls(value.w_wall, value.z_wall, value.p_wall)
self.quantities[item] = deepcopy(value.quantities[value.viewed_quantity])
elif isinstance(value, h5py.ExternalLink):
self.quantities[item] = value
elif value == []:
self.quantities[item] = []
else:
            raise ValueError('value should be an empty list, an ExternalLink, or a CylindricalPolarGridView instance')
def __contains__(self, item):
return self.quantities.__contains__(item)
def reset_quantities(self):
self.quantities = {}
def add_derived_quantity(self, name, function):
if name in self.quantities:
raise KeyError(name + ' already exists')
function(self.quantities)
class CylindricalPolarGridView(CylindricalPolarGrid):
def __init__(self, grid, quantity):
self.viewed_quantity = quantity
CylindricalPolarGrid.__init__(self)
self.set_walls(grid.w_wall, grid.z_wall, grid.p_wall)
self.quantities = {quantity: grid.quantities[quantity]}
def append(self, grid):
'''
Used to append quantities from another grid
Parameters
----------
grid : 3D Numpy array or CylindricalPolarGridView instance
The grid to copy the quantity from
'''
if isinstance(grid, CylindricalPolarGridView):
if self.quantities[self.viewed_quantity] is grid.quantities[grid.viewed_quantity]:
raise Exception("Calling append recursively")
if type(grid.quantities[grid.viewed_quantity]) is list:
raise Exception("Can only append a single grid")
self._check_array_dimensions(grid.quantities[grid.viewed_quantity])
self.quantities[self.viewed_quantity].append(deepcopy(grid.quantities[grid.viewed_quantity]))
elif type(grid) is np.ndarray:
self._check_array_dimensions(grid)
self.quantities[self.viewed_quantity].append(deepcopy(grid))
else:
raise ValueError("grid should be a Numpy array or a CylindricalPolarGridView instance")
def add(self, grid):
'''
Used to add quantities from another grid
Parameters
----------
grid : 3D Numpy array or CylindricalPolarGridView instance
The grid to copy the quantity from
'''
if type(self.quantities[self.viewed_quantity]) is list:
raise Exception("need to first specify the item to add to")
if isinstance(grid, CylindricalPolarGridView):
if type(grid.quantities[grid.viewed_quantity]) is list:
raise Exception("need to first specify the item to add")
self._check_array_dimensions(grid.quantities[grid.viewed_quantity])
self.quantities[self.viewed_quantity] += grid.quantities[grid.viewed_quantity]
elif type(grid) is np.ndarray:
self._check_array_dimensions(grid)
self.quantities[self.viewed_quantity] += grid
else:
raise ValueError("grid should be a Numpy array or a CylindricalPolarGridView instance")
def __getitem__(self, item):
if type(item) is int:
grid = CylindricalPolarGridView(self, self.viewed_quantity)
grid.quantities = {grid.viewed_quantity: grid.quantities[grid.viewed_quantity][item]}
return grid
else:
return CylindricalPolarGrid.__getitem__(self, item)
def __getattr__(self, attribute):
if attribute == 'array':
return self.quantities[self.viewed_quantity]
else:
return CylindricalPolarGrid.__getattr__(self, attribute)
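# A minimal usage sketch for the grid classes above (illustrative only, not
# part of the library API). It assumes this module's own imports of numpy as
# np and h5py are available, and that quantity arrays have shape
# (n_phi, n_z, n_w), i.e. one cell fewer than walls along each direction.
if __name__ == '__main__':  # pragma: no cover
    w = np.linspace(0., 10., 11)         # 10 radial cells [cm]
    z = np.linspace(-5., 5., 11)         # 10 vertical cells [cm]
    p = np.linspace(0., 2. * np.pi, 9)   # 8 azimuthal cells [rad]
    grid = CylindricalPolarGrid()
    grid.set_walls(w, z, p)
    # Quantities are stored as lists of 3D arrays: start with an empty list,
    # then append one component through the view returned by __getitem__.
    grid['density'] = []
    grid['density'].append(np.zeros((8, 10, 10)))
    # Write the geometry and quantities out to an HDF5 group.
    with h5py.File('example_grid.h5', 'w') as f:
        grid.write(f.create_group('Grid'))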
|
|
# -*- test-case-name: twisted.application.runner.test.test_runner -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Twisted application runner.
"""
__all__ = [
"Runner",
"RunnerOptions",
]
from sys import stderr
from signal import SIGTERM
from os import getpid, kill
from constantly import Names, NamedConstant
from twisted.logger import (
globalLogBeginner, textFileLogObserver,
FilteringLogObserver, LogLevelFilterPredicate,
LogLevel, Logger,
)
from twisted.internet import default as defaultReactor
from ._exit import exit, ExitStatus
class Runner(object):
"""
Twisted application runner.
"""
log = Logger()
def __init__(self, options):
"""
@param options: Configuration options for this runner.
@type options: mapping of L{RunnerOptions} to values
"""
self.options = options
def run(self):
"""
Run this command.
Equivalent to::
self.killIfRequested()
self.writePIDFile()
self.startLogging()
self.startReactor()
self.reactorExited()
self.removePIDFile()
Additional steps may be added over time, but the order won't change.
"""
self.killIfRequested()
self.writePIDFile()
self.startLogging()
self.startReactor()
self.reactorExited()
self.removePIDFile()
def killIfRequested(self):
"""
Kill a running instance of this application if L{RunnerOptions.kill} is
specified and L{True} in C{self.options}.
This requires that L{RunnerOptions.pidFilePath} also be specified;
exit with L{ExitStatus.EX_USAGE} if kill is requested with no PID file.
"""
pidFilePath = self.options.get(RunnerOptions.pidFilePath)
if self.options.get(RunnerOptions.kill, False):
if pidFilePath is None:
exit(ExitStatus.EX_USAGE, "No PID file specified")
return # When testing, patched exit doesn't exit
else:
pid = ""
try:
for pid in pidFilePath.open():
break
except EnvironmentError:
exit(ExitStatus.EX_IOERR, "Unable to read PID file.")
return # When testing, patched exit doesn't exit
try:
pid = int(pid)
except ValueError:
exit(ExitStatus.EX_DATAERR, "Invalid PID file.")
return # When testing, patched exit doesn't exit
self.startLogging()
self.log.info("Terminating process: {pid}", pid=pid)
kill(pid, SIGTERM)
exit(ExitStatus.EX_OK)
return # When testing, patched exit doesn't exit
def writePIDFile(self):
"""
Write a PID file for this application if L{RunnerOptions.pidFilePath}
is specified in C{self.options}.
"""
pidFilePath = self.options.get(RunnerOptions.pidFilePath)
if pidFilePath is not None:
pid = getpid()
pidFilePath.setContent(u"{}\n".format(pid).encode("utf-8"))
def removePIDFile(self):
"""
Remove the PID file for this application if L{RunnerOptions.pidFilePath}
is specified in C{self.options}.
"""
pidFilePath = self.options.get(RunnerOptions.pidFilePath)
if pidFilePath is not None:
pidFilePath.remove()
def startLogging(self):
"""
Start the L{twisted.logger} logging system.
"""
logFile = self.options.get(RunnerOptions.logFile, stderr)
fileLogObserverFactory = self.options.get(
RunnerOptions.fileLogObserverFactory, textFileLogObserver
)
fileLogObserver = fileLogObserverFactory(logFile)
logLevelPredicate = LogLevelFilterPredicate(
defaultLogLevel=self.options.get(
RunnerOptions.defaultLogLevel, LogLevel.info
)
)
filteringObserver = FilteringLogObserver(
fileLogObserver, [logLevelPredicate]
)
globalLogBeginner.beginLoggingTo([filteringObserver])
def startReactor(self):
"""
Register C{self.whenRunning} with the reactor so that it is called once
the reactor is running and start the reactor.
If L{RunnerOptions.reactor} is specified in C{self.options}, use that
reactor; otherwise use the default reactor.
"""
        reactor = self.options.get(RunnerOptions.reactor)
        if reactor is None:
            defaultReactor.install()
            from twisted.internet import reactor
            self.options[RunnerOptions.reactor] = reactor
reactor.callWhenRunning(self.whenRunning)
self.log.info("Starting reactor...")
reactor.run()
def whenRunning(self):
"""
If L{RunnerOptions.whenRunning} is specified in C{self.options}, call
it.
@note: This method is called when the reactor is running.
"""
whenRunning = self.options.get(RunnerOptions.whenRunning)
if whenRunning is not None:
whenRunning(self.options)
def reactorExited(self):
"""
If L{RunnerOptions.reactorExited} is specified in C{self.options}, call
it.
@note: This method is called after the reactor has exited.
"""
reactorExited = self.options.get(RunnerOptions.reactorExited)
if reactorExited is not None:
reactorExited(self.options)
class RunnerOptions(Names):
"""
Names for options recognized by L{Runner}.
These are meant to be used as keys in the options given to L{Runner}, with
corresponding values as noted below.
@cvar reactor: The reactor to start.
Corresponding value: L{IReactorCore}.
@type reactor: L{NamedConstant}
@cvar pidFilePath: The path to the PID file.
Corresponding value: L{IFilePath}.
@type pidFilePath: L{NamedConstant}
@cvar kill: Whether this runner should kill an existing running instance.
Corresponding value: L{bool}.
@type kill: L{NamedConstant}
@cvar defaultLogLevel: The default log level to start the logging system
with.
Corresponding value: L{NamedConstant} from L{LogLevel}.
@type defaultLogLevel: L{NamedConstant}
@cvar logFile: A file stream to write logging output to.
Corresponding value: writable file like object.
@type logFile: L{NamedConstant}
@cvar fileLogObserverFactory: What file log observer to use when starting
the logging system.
Corresponding value: callable that returns a
L{twisted.logger.FileLogObserver}
@type fileLogObserverFactory: L{NamedConstant}
@cvar whenRunning: Hook to call when the reactor is running.
This can be considered the Twisted equivalent to C{main()}.
Corresponding value: callable that takes the options mapping given to
the runner as an argument.
@type whenRunning: L{NamedConstant}
@cvar reactorExited: Hook to call when the reactor has exited.
        Corresponding value: callable that takes the options mapping given to
        the runner as an argument.
@type reactorExited: L{NamedConstant}
"""
reactor = NamedConstant()
pidFilePath = NamedConstant()
kill = NamedConstant()
defaultLogLevel = NamedConstant()
logFile = NamedConstant()
fileLogObserverFactory = NamedConstant()
whenRunning = NamedConstant()
reactorExited = NamedConstant()
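# A minimal, illustrative sketch of driving Runner directly (not part of the
# module API; this module uses relative imports, so in practice these names
# would be imported from the package, and a real application would build the
# options mapping from parsed command-line arguments instead).
if __name__ == "__main__":  # pragma: no cover
    from twisted.internet import reactor  # importing installs the default reactor

    def whenRunning(options):
        # startReactor() keeps the reactor in the options mapping, so the
        # hook can stop it again and let the sketch exit cleanly.
        options[RunnerOptions.reactor].stop()

    Runner({
        RunnerOptions.reactor: reactor,
        RunnerOptions.defaultLogLevel: LogLevel.debug,
        RunnerOptions.whenRunning: whenRunning,
    }).run()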
|
|
# -*-coding:utf-8 -*
# Copyright (c) 2011-2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Configuration creation, renaming and deletion testcases
List of tested functions :
--------------------------
- [listConfigurations] function
- [createConfiguration] function
- [deleteConfiguration] function
- [renameConfiguration] function
Test cases :
------------
- Testing configuration creation error
- Testing configuration renaming error
- Testing configuration deletion error
- Testing nominal case
"""
import os
from Util.PfwUnitTestLib import PfwTestCase
from Util import ACTLogging
log=ACTLogging.Logger()
# Test of Domains - Rename
class TestCases(PfwTestCase):
def setUp(self):
self.pfw.sendCmd("setTuningMode", "on")
self.domain_name = "domain_test"
self.conf_test = "conf_white"
self.conf_test_renamed = "conf_black"
self.new_conf_number = 5
def tearDown(self):
self.pfw.sendCmd("setTuningMode", "off")
def test_Conf_Creation_Error(self):
"""
Testing configuration creation error
------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- Create an already existent configuration
- Create a configuration with no name specified
- Create a configuration on a wrong domain name
Tested commands :
~~~~~~~~~~~~~~~~~
- [createConfiguration] function
- [createDomain] function
- [listConfigurations] function
- [deleteConfiguration] function
- [deleteDomain] function
Expected result :
~~~~~~~~~~~~~~~~~
- no configuration created
- existent configurations not affected by error
"""
log.D(self.test_Conf_Creation_Error.__doc__)
# New domain creation for testing purpose
log.I("New domain creation for testing purpose : %s" % (self.domain_name))
log.I("command [createDomain]")
out, err = self.pfw.sendCmd("createDomain",self.domain_name, "")
assert out == "Done", out
assert err == None, "ERROR : command [createDomain] - Error while creating domain %s" % (self.domain_name)
log.I("command [createDomain] correctly executed")
log.I("Domain %s created" % (self.domain_name))
# New configurations creation for testing purpose
for iteration in range (self.new_conf_number):
new_conf_name = "".join([self.conf_test, "_", str(iteration)])
log.I("New configuration %s creation for domain %s" % (new_conf_name,self.domain_name))
log.I("command [createConfiguration]")
out, err = self.pfw.sendCmd("createConfiguration",self.domain_name,new_conf_name)
assert out == "Done", out
assert err == None, "ERROR : command [createConfiguration] - Error while creating configuration %s" % (new_conf_name)
log.I("command [createConfiguration] correctly executed")
log.I("Configuration %s created for domain %s" % (new_conf_name,self.domain_name))
# Domain configurations listing backup
log.I("Configurations listing for domain %s" % (self.domain_name))
log.I("command [listConfigurations]")
out, err = self.pfw.sendCmd("listConfigurations",self.domain_name, "")
assert err == None, "ERROR : command [listConfigurations] - Error while listing configurations for domain %s" % (self.domain_name)
log.I("command [listConfigurations] correctly executed")
# Saving configurations names
f_configurations_backup = open("f_configurations_backup", "w")
f_configurations_backup.write(out)
f_configurations_backup.close()
# New configurations creation error
log.I("Creating an already existent configurations names")
for iteration in range (self.new_conf_number):
new_conf_name = "".join([self.conf_test, "_", str(iteration)])
log.I("Trying to create already existent %s configuration for domain %s" % (new_conf_name,self.domain_name))
log.I("command [createConfiguration]")
out, err = self.pfw.sendCmd("createConfiguration",self.domain_name,new_conf_name)
assert out != "Done", "ERROR : command [createConfiguration] - Error not detected while creating already existent configuration %s" % (new_conf_name)
assert err == None, "ERROR : command [createConfiguration] - Error while creating configuration %s" % (new_conf_name)
log.I("command [createConfiguration] correctly executed")
log.I("error correctly detected, no configuration created")
log.I("Creating a configuration without specifying a name")
out, err = self.pfw.sendCmd("createConfiguration",self.domain_name)
assert out != "Done", "ERROR : command [createConfiguration] - Error not detected while creating a configuration without specifying a name"
assert err == None, "ERROR : command [createConfiguration] - Error while creating configuration"
log.I("error correctly detected")
log.I("Creating a configuration on a wrong domain name")
new_conf_name = "new_conf"
out, err = self.pfw.sendCmd("createConfiguration","wrong_domain_name",new_conf_name)
assert out != "Done", "ERROR : command [createConfiguration] - Error not detected while creating a configuration on a wrong domain name"
assert err == None, "ERROR : command [createConfiguration] - Error while creating configuration"
log.I("error correctly detected")
# New domain configurations listing
log.I("Configurations listing for domain %s" % (self.domain_name))
log.I("command [listConfigurations]" )
out, err = self.pfw.sendCmd("listConfigurations",self.domain_name, "")
assert err == None, "ERROR : command [listConfigurations] - Error while listing configurations for domain %s" % (self.domain_name)
log.I("command [listConfigurations] correctly executed")
# Saving configurations names
f_configurations = open("f_configurations", "w")
f_configurations.write(out)
f_configurations.close()
# Checking configurations names integrity
log.I("Configurations listing conformity check")
f_configurations = open("f_configurations", "r")
f_configurations_backup = open("f_configurations_backup", "r")
for iteration in range(self.new_conf_number):
listed_conf_backup = f_configurations_backup.readline().strip('\n')
listed_conf = f_configurations.readline().strip('\n')
assert listed_conf==listed_conf_backup, "ERROR : Error while listing configuration %s (found %s)" % (listed_conf_backup, listed_conf)
log.I("No change detected, listed configurations names conform to expected values")
# New domain deletion
log.I("End of test, new domain deletion")
log.I("command [deleteDomain]")
out, err = self.pfw.sendCmd("deleteDomain",self.domain_name, "")
assert out == "Done", "ERROR : %s" % (out)
assert err == None, "ERROR : command [deleteDomain] - Error while deleting domain %s" % (self.domain_name)
log.I("command [deleteDomain] correctly executed")
# Closing and deleting temp files
f_configurations_backup.close()
os.remove("f_configurations_backup")
f_configurations.close()
os.remove("f_configurations")
def test_Conf_Renaming_Error(self):
"""
Testing configuration renaming error
------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- Rename a configuration with an already used name
- Rename a configuration with no name specified
- Rename a configuration on a wrong domain name
Tested commands :
~~~~~~~~~~~~~~~~~
- [renameConfiguration] function
- [createDomain] function
- [listConfigurations] function
- [createConfiguration] function
- [deleteConfiguration] function
- [deleteDomain] function
Expected result :
~~~~~~~~~~~~~~~~~
- error detected
- no configuration created
- existent configurations not affected by error
"""
log.D(self.test_Conf_Renaming_Error.__doc__)
# New domain creation for testing purpose
log.I("New domain creation for testing purpose : %s" % (self.domain_name))
log.I("command [createDomain]")
out, err = self.pfw.sendCmd("createDomain",self.domain_name, "")
assert out == "Done", out
assert err == None, "ERROR : command [createDomain] - Error while creating domain %s" % (self.domain_name)
log.I("command [createDomain] correctly executed")
log.I("Domain %s created" % (self.domain_name))
# New configurations creation for testing purpose
for iteration in range (self.new_conf_number):
new_conf_name = "".join([self.conf_test, "_", str(iteration)])
log.I("New configuration %s creation for domain %s" % (new_conf_name,self.domain_name))
log.I("command [createConfiguration]")
out, err = self.pfw.sendCmd("createConfiguration",self.domain_name,new_conf_name)
assert out == "Done", out
assert err == None, "ERROR : command [createConfiguration] - Error while creating configuration %s" % (new_conf_name)
log.I("command [createConfiguration] correctly executed")
log.I("Configuration %s created for domain %s" % (new_conf_name,self.domain_name))
# Domain configurations listing backup
log.I("Configurations listing for domain %s" % (self.domain_name))
log.I("command [listConfigurations]")
out, err = self.pfw.sendCmd("listConfigurations",self.domain_name, "")
assert err == None, "ERROR : command [listConfigurations] - Error while listing configurations for domain %s" % (self.domain_name)
log.I("command [listConfigurations] correctly executed")
# Saving configurations names
f_configurations_backup = open("f_configurations_backup", "w")
f_configurations_backup.write(out)
f_configurations_backup.close()
# New configurations renaming error
log.I("renaming a configuration with an already used name")
for iteration in range (self.new_conf_number-1):
conf_name = "".join([self.conf_test, "_", str(iteration)])
new_conf_name = "".join([self.conf_test, "_", str(iteration+1)])
log.I("Trying to rename %s on domain %s with an already used name : %s" % (conf_name,self.domain_name,new_conf_name))
log.I("command [renameConfiguration]" )
out, err = self.pfw.sendCmd("renameConfiguration",self.domain_name,conf_name,new_conf_name)
assert out != "Done", "ERROR : command [renameConfiguration] - Error not detected while renaming configuration %s with an already used name" % (new_conf_name)
assert err == None, "ERROR : command [renameConfiguration] - Error while renaming configuration %s" % (new_conf_name)
log.I("command [renameConfiguration] correctly executed")
log.I("error correctly detected, no configuration renamed")
log.I("renaming a configuration without specifying a new name")
out, err = self.pfw.sendCmd("renameConfiguration",self.domain_name,new_conf_name)
assert out != "Done", "ERROR : command [renameConfiguration] - Error not detected while renaming a configuration without specifying a new name"
assert err == None, "ERROR : command [renameConfiguration] - Error while renaming configuration"
log.I("error correctly detected, no configuration renamed")
log.I("renaming a configuration on a wrong domain name")
new_conf_name = "new_conf"
out, err = self.pfw.sendCmd("renameConfiguration","wrong_domain_name",new_conf_name,"Configuration")
assert out != "Done", "ERROR : command [renameConfiguration] - Error not detected while renaming a configuration on a wrong domain name"
assert err == None, "ERROR : command [renameConfiguration] - Error while renaming configuration"
log.I("error correctly detected, no configuration renamed")
# New domain configurations listing
log.I("Configurations listing for domain %s" % (self.domain_name))
log.I("command [listConfigurations]")
out, err = self.pfw.sendCmd("listConfigurations",self.domain_name, "")
assert err == None, "ERROR : command [listConfigurations] - Error while listing configurations for domain %s" % (self.domain_name)
log.I("command [listConfigurations] correctly executed")
# Saving configurations names
f_configurations = open("f_configurations", "w")
f_configurations.write(out)
f_configurations.close()
# Checking configurations names integrity
log.I("Configurations listing conformity check")
f_configurations = open("f_configurations", "r")
f_configurations_backup = open("f_configurations_backup", "r")
for iteration in range(self.new_conf_number):
listed_conf_backup = f_configurations_backup.readline().strip('\n')
listed_conf = f_configurations.readline().strip('\n')
assert listed_conf==listed_conf_backup, "ERROR : Error while listing configuration %s (found %s)" % (listed_conf_backup, listed_conf)
log.I("No change detected, listed configurations names conform to expected values")
# Testing domain deletion
log.I("End of test, new domain deletion")
log.I("command [deleteDomain]")
out, err = self.pfw.sendCmd("deleteDomain",self.domain_name, "")
assert out == "Done", "ERROR : %s" % (out)
assert err == None, "ERROR : command [deleteDomain] - Error while deleting domain %s" % (self.domain_name)
log.I("command [deleteDomain] correctly executed")
# Closing and deleting temp files
f_configurations_backup.close()
os.remove("f_configurations_backup")
f_configurations.close()
os.remove("f_configurations")
def test_Conf_Deletion_Error(self):
"""
Testing configuration deletion error
------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- Delete a configuration with a non existent name
- Delete a configuration with no name specified
- Delete a configuration on a wrong domain name
Tested commands :
~~~~~~~~~~~~~~~~~
- [deleteConfiguration] function
- [createDomain] function
- [listConfigurations] function
- [createConfiguration] function
- [deleteDomain] function
Expected result :
~~~~~~~~~~~~~~~~~
- error detected
- no configuration created
- existent configurations not affected by error
"""
        log.D(self.test_Conf_Deletion_Error.__doc__)
# New domain creation for testing purpose
log.I("New domain creation for testing purpose : %s" % (self.domain_name))
log.I("command [createDomain]")
out, err = self.pfw.sendCmd("createDomain",self.domain_name, "")
assert out == "Done", out
assert err == None, "ERROR : command [createDomain] - Error while creating domain %s" % (self.domain_name)
log.I("command [createDomain] correctly executed")
log.I("Domain %s created" % (self.domain_name))
# New configurations creation for testing purpose
for iteration in range (self.new_conf_number):
new_conf_name = "".join([self.conf_test, "_", str(iteration)])
log.I("New configuration %s creation for domain %s" % (new_conf_name,self.domain_name))
log.I("command [createConfiguration]")
out, err = self.pfw.sendCmd("createConfiguration",self.domain_name,new_conf_name)
assert out == "Done", out
assert err == None, "ERROR : command [createConfiguration] - Error while creating configuration %s" % (new_conf_name)
log.I("command [createConfiguration] correctly executed")
log.I("Configuration %s created for domain %s" % (new_conf_name,self.domain_name))
# Domain configurations listing backup
log.I("Configurations listing for domain %s" % (self.domain_name))
log.I("command [listConfigurations]")
out, err = self.pfw.sendCmd("listConfigurations",self.domain_name, "")
assert err == None, "ERROR : command [listConfigurations] - Error while listing configurations for domain %s" % (self.domain_name)
log.I("command [listConfigurations] correctly executed")
# Saving configurations names
f_configurations_backup = open("f_configurations_backup", "w")
f_configurations_backup.write(out)
f_configurations_backup.close()
# Configurations deletion errors
log.I("Trying various deletions error test cases")
log.I("Trying to delete a wrong configuration name on domain %s" % (self.domain_name))
log.I("command [deleteConfiguration]")
out, err = self.pfw.sendCmd("deleteConfiguration",self.domain_name,"wrong_configuration_name")
assert out != "Done", "ERROR : command [deleteConfiguration] - Error not detected while deleting non existent configuration name"
assert err == None, "ERROR : command [deleteConfiguration] - Error while deleting configuration"
log.I("command [deleteConfiguration] correctly executed")
log.I("error correctly detected, no configuration deleted")
log.I("deleting a configuration with no name specified")
out, err = self.pfw.sendCmd("deleteConfiguration",self.domain_name)
assert out != "Done", "ERROR : command [deleteConfiguration] - Error not detected while deleting a configuration without specifying a name"
assert err == None, "ERROR : command [deleteConfiguration] - Error while deleting configuration"
log.I("error correctly detected, no configuration deleted")
log.I("deleting a configuration on a wrong domain name")
out, err = self.pfw.sendCmd("deleteConfiguration","wrong_domain_name",new_conf_name)
assert out != "Done", "ERROR : command [deleteConfiguration] - Error not detected while deleting a configuration on a wrong domain name"
assert err == None, "ERROR : command [deleteConfiguration] - Error while deleting configuration"
log.I("error correctly detected, no configuration deleted")
# New domain configurations listing
log.I("Configurations listing for domain %s" % (self.domain_name))
log.I("command [listConfigurations]")
out, err = self.pfw.sendCmd("listConfigurations",self.domain_name, "")
assert err == None, "ERROR : command [listConfigurations] - Error while listing configurations for domain %s" % (self.domain_name)
log.I("command [listConfigurations] correctly executed")
# Saving configurations names
f_configurations = open("f_configurations", "w")
f_configurations.write(out)
f_configurations.close()
# Checking configurations names integrity
log.I("Configurations listing conformity check")
f_configurations = open("f_configurations", "r")
f_configurations_backup = open("f_configurations_backup", "r")
for iteration in range(self.new_conf_number):
listed_conf_backup = f_configurations_backup.readline().strip('\n')
listed_conf = f_configurations.readline().strip('\n')
assert listed_conf==listed_conf_backup, "ERROR : Error while listing configuration %s (found %s)" % (listed_conf_backup, listed_conf)
log.I("No change detected, listed configurations names conform to expected values")
# Testing domain deletion
log.I("End of test, new domain deletion")
log.I("command [deleteDomain]")
out, err = self.pfw.sendCmd("deleteDomain",self.domain_name, "")
assert out == "Done", "ERROR : %s" % (out)
assert err == None, "ERROR : command [deleteDomain] - Error while deleting domain %s" % (self.domain_name)
log.I("command [deleteDomain] correctly executed")
# Closing and deleting temp files
f_configurations_backup.close()
os.remove("f_configurations_backup")
f_configurations.close()
os.remove("f_configurations")
def test_Nominal_Case(self):
"""
Testing nominal cases
---------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- Create new configurations
- List domain configurations
- Rename configurations
- Delete configurations
Tested commands :
~~~~~~~~~~~~~~~~~
- [listConfigurations] function
- [createConfiguration] function
- [renameConfiguration] function
- [deleteConfiguration] function
- [createDomain] function
- [deleteDomain] function
Expected result :
~~~~~~~~~~~~~~~~~
- all operations succeed
"""
log.D(self.test_Nominal_Case.__doc__)
# New domain creation
log.I("New domain creation for testing purpose : %s" % (self.domain_name))
log.I("command [createDomain]")
out, err = self.pfw.sendCmd("createDomain",self.domain_name, "")
assert out == "Done", out
assert err == None, "ERROR : command [createDomain] - Error while creating domain %s" % (self.domain_name)
log.I("command [createDomain] correctly executed")
log.I("Domain %s created" % (self.domain_name))
# New configurations creation
for iteration in range (self.new_conf_number):
new_conf_name = "".join([self.conf_test, "_", str(iteration)])
log.I("New configuration %s creation for domain %s" % (new_conf_name,self.domain_name))
log.I("command [createConfiguration]" )
out, err = self.pfw.sendCmd("createConfiguration",self.domain_name,new_conf_name)
assert out == "Done", out
assert err == None, "ERROR : command [createConfiguration] - Error while creating configuration %s" % (new_conf_name)
log.I("command [createConfiguration] correctly executed")
log.I("Configuration %s created for domain %s" % (new_conf_name,self.domain_name))
# Listing domain configurations
log.I("Configurations listing for domain %s" % (self.domain_name))
log.I("command [listConfigurations]")
out, err = self.pfw.sendCmd("listConfigurations",self.domain_name, "")
assert err == None, "ERROR : command [listConfigurations] - Error while listing configurations for domain %s" % (self.domain_name)
log.I("command [listConfigurations] correctly executed")
# Saving configurations names
f_configurations = open("f_configurations", "w")
f_configurations.write(out)
f_configurations.close()
# Checking configurations names integrity
log.I("Configurations listing conformity check")
f_configurations = open("f_configurations", "r")
for iteration in range(self.new_conf_number):
new_conf_name = "".join([self.conf_test, "_", str(iteration)])
listed_conf = f_configurations.readline().strip('\n')
            assert listed_conf==new_conf_name, "ERROR : Error while listing configuration %s (found %s)" % (new_conf_name, listed_conf)
log.I("Listed configurations names conform to expected values")
# Configuration renaming
log.I("Configurations renaming")
for iteration in range (self.new_conf_number):
conf_name = "".join([self.conf_test, "_", str(iteration)])
new_conf_name = "".join([self.conf_test_renamed, "_", str(iteration)])
log.I("Configuration %s renamed to %s in domain %s" % (conf_name,new_conf_name,self.domain_name))
log.I("command [renameConfiguration]")
out, err = self.pfw.sendCmd("renameConfiguration",self.domain_name,conf_name,new_conf_name)
assert out == "Done", out
assert err == None, "ERROR : command [renameConfiguration] - Error while renaming configuration %s to %s" % (conf_name,new_conf_name)
log.I("command [renameConfiguration] correctly executed")
log.I("Configuration %s renamed to %s for domain %s" % (conf_name,new_conf_name,self.domain_name))
# Listing domain configurations
log.I("Configurations listing to check configurations renaming")
log.I("command [listConfigurations]")
out, err = self.pfw.sendCmd("listConfigurations",self.domain_name, "")
assert err == None, "ERROR : command [listConfigurations] - Error while listing configurations for domain %s" % (self.domain_name)
log.I("command [listConfigurations] correctly executed")
# Saving configurations names
f_configurations_renamed = open("f_configurations_renamed", "w")
f_configurations_renamed.write(out)
f_configurations_renamed.close()
# Checking configurations names integrity
log.I("Configurations listing conformity check")
f_configurations_renamed = open("f_configurations_renamed", "r")
for iteration in range(self.new_conf_number):
new_conf_name = "".join([self.conf_test_renamed, "_", str(iteration)])
listed_conf = f_configurations_renamed.readline().strip('\n')
assert listed_conf==new_conf_name, "ERROR : Error while renaming configuration %s (found %s)" % (new_conf_name,listed_conf)
log.I("Listed configurations names conform to expected values, renaming successfull")
# New domain deletion
log.I("End of test, new domain deletion")
log.I("command [deleteDomain]")
out, err = self.pfw.sendCmd("deleteDomain",self.domain_name, "")
assert out == "Done", "ERROR : %s" % (out)
assert err == None, "ERROR : command [deleteDomain] - Error while deleting domain %s" % (self.domain_name)
log.I("command [deleteDomain] correctly executed")
# Closing and deleting temp file
f_configurations.close()
os.remove("f_configurations")
f_configurations_renamed.close()
os.remove("f_configurations_renamed")
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import collections
import itertools
import operator
import types
from .common import * # pylint: disable=redefined-builtin
from .datastructures import Context
from .exceptions import *
from .undefined import Undefined
from .util import listify
try:
from collections import OrderedDict
except ImportError:
from .datastructures import OrderedDict
###
# Transform loops
###
def import_loop(cls, instance_or_dict, field_converter=None, trusted_data=None,
mapping=None, partial=False, strict=False, init_values=False,
apply_defaults=False, convert=True, validate=False, new=False,
oo=False, recursive=False, app_data=None, context=None):
"""
The import loop is designed to take untrusted data and convert it into the
native types, as described in ``cls``. It does this by calling
``field_converter`` on every field.
    Errors are aggregated and raised as a ``DataError``.
:param cls:
The class for the model.
:param instance_or_dict:
A dict of data to be converted into types according to ``cls``.
:param field_converter:
This function is applied to every field found in ``instance_or_dict``.
:param trusted_data:
A ``dict``-like structure that may contain already validated data.
:param partial:
Allow partial data to validate; useful for PATCH requests.
Essentially drops the ``required=True`` arguments from field
definitions. Default: False
:param strict:
Complain about unrecognized keys. Default: False
:param apply_defaults:
Whether to set fields to their default values when not present in input data.
:param app_data:
An arbitrary container for application-specific data that needs to
be available during the conversion.
:param context:
A ``Context`` object that encapsulates configuration options and ``app_data``.
The context object is created upon the initial invocation of ``import_loop``
and is then propagated through the entire process.
"""
if instance_or_dict is None:
got_data = False
else:
got_data = True
if got_data and not isinstance(instance_or_dict, (cls, dict)):
raise ConversionError('Model conversion requires a model or dict')
context = Context._make(context)
try:
context.initialized
except:
if type(field_converter) is types.FunctionType:
field_converter = BasicConverter(field_converter)
context._setdefaults({
'initialized': True,
'field_converter': field_converter,
'trusted_data': trusted_data or {},
'mapping': mapping or {},
'partial': partial,
'strict': strict,
'init_values': init_values,
'apply_defaults': apply_defaults,
'convert': convert,
'validate': validate,
'new': new,
'oo': oo,
'recursive': recursive,
'app_data': app_data if app_data is not None else {}
})
instance_or_dict = context.field_converter.pre(cls, instance_or_dict, context)
_field_converter = context.field_converter
_model_mapping = context.mapping.get('model_mapping')
data = dict(context.trusted_data) if context.trusted_data else {}
errors = {}
if got_data:
# Determine all acceptable field input names
all_fields = cls._valid_input_keys
if context.mapping:
mapped_keys = (set(itertools.chain(*(
listify(input_keys) for target_key, input_keys in context.mapping.items()
if target_key != 'model_mapping'))))
all_fields = all_fields | mapped_keys
if context.strict:
# Check for rogues if strict is set
rogue_fields = set(instance_or_dict) - all_fields
if rogue_fields:
for field in rogue_fields:
errors[field] = 'Rogue field'
for field_name, field in cls._field_list:
value = Undefined
serialized_field_name = field.serialized_name or field_name
if got_data:
for key in field.get_input_keys(context.mapping):
if key and key in instance_or_dict:
value = instance_or_dict[key]
break
if value is Undefined:
if field_name in data:
continue
if context.apply_defaults:
value = field.default
if value is Undefined and context.init_values:
value = None
if got_data:
if field.is_compound:
if context.trusted_data and context.recursive:
td = context.trusted_data.get(field_name)
else:
td = {}
if _model_mapping:
submap = _model_mapping.get(field_name)
else:
submap = {}
field_context = context._branch(trusted_data=td, mapping=submap)
else:
field_context = context
try:
value = _field_converter(field, value, field_context)
except (FieldError, CompoundError) as exc:
errors[serialized_field_name] = exc
if isinstance(exc, DataError):
data[field_name] = exc.partial_data
continue
if value is Undefined:
continue
data[field_name] = value
if errors:
raise DataError(errors, data)
data = context.field_converter.post(cls, data, context)
return data
def export_loop(cls, instance_or_dict, field_converter=None, role=None, raise_error_on_role=True,
export_level=None, app_data=None, context=None):
"""
The export_loop function is intended to be a general loop definition that
can be used for any form of data shaping, such as application of roles or
how a field is transformed.
:param cls:
The model definition.
:param instance_or_dict:
The structure where fields from cls are mapped to values. The only
        expectation for this structure is that it implements a ``dict``
interface.
:param field_converter:
This function is applied to every field found in ``instance_or_dict``.
:param role:
The role used to determine if fields should be left out of the
transformation.
:param raise_error_on_role:
This parameter enforces strict behavior which requires substructures
to have the same role definition as their parent structures.
:param app_data:
An arbitrary container for application-specific data that needs to
be available during the conversion.
:param context:
A ``Context`` object that encapsulates configuration options and ``app_data``.
The context object is created upon the initial invocation of ``import_loop``
and is then propagated through the entire process.
"""
context = Context._make(context)
try:
context.initialized
except:
if type(field_converter) is types.FunctionType:
field_converter = BasicConverter(field_converter)
context._setdefaults({
'initialized': True,
'field_converter': field_converter,
'role': role,
'raise_error_on_role': raise_error_on_role,
'export_level': export_level,
'app_data': app_data if app_data is not None else {}
})
instance_or_dict = context.field_converter.pre(cls, instance_or_dict, context)
if cls._options.export_order:
data = OrderedDict()
else:
data = {}
filter_func = cls._options.roles.get(context.role)
if filter_func is None:
if context.role and context.raise_error_on_role:
error_msg = '%s Model has no role "%s"'
raise ValueError(error_msg % (cls.__name__, context.role))
else:
filter_func = cls._options.roles.get("default")
_field_converter = context.field_converter
for field_name, field, value in atoms(cls, instance_or_dict):
serialized_name = field.serialized_name or field_name
if filter_func is not None and filter_func(field_name, value):
continue
_export_level = field.get_export_level(context)
if _export_level == DROP:
continue
elif value not in (None, Undefined):
value = _field_converter(field, value, context)
if value is Undefined:
if _export_level <= DEFAULT:
continue
elif value is None:
if _export_level <= NOT_NONE:
continue
elif field.is_compound and len(value) == 0:
if _export_level <= NONEMPTY:
continue
if value is Undefined:
value = None
data[serialized_name] = value
data = context.field_converter.post(cls, data, context)
return data
def atoms(cls, instance_or_dict):
"""
Iterator for the atomic components of a model definition and relevant
data that creates a 3-tuple of the field's name, its type instance and
its value.
:param cls:
The model definition.
:param instance_or_dict:
The structure where fields from cls are mapped to values. The only
expectation for this structure is that it implements a ``Mapping``
interface.
"""
field_getter = serializable_getter = instance_or_dict.get
try:
field_getter = instance_or_dict._data.get
except AttributeError:
pass
sequences = ((cls._field_list, field_getter),
(cls._serializables.items(), serializable_getter))
for sequence, get in sequences:
for field_name, field in sequence:
yield (field_name, field, get(field_name, Undefined))
###
# Field filtering
###
@str_compat
class Role(collections.Set):
"""
A ``Role`` object can be used to filter specific fields against a sequence.
    The ``Role`` is two things: a set of names and a function. The function
    acts as the filter: it takes a field name as input and returns either
    ``True`` or ``False``, indicating whether that field should be skipped.
    A ``Role`` can be operated on as a ``Set`` object representing the fields
    it has an opinion on. When Roles are combined with other roles, the
    filtering behavior of the first role is used.
"""
def __init__(self, function, fields):
self.function = function
self.fields = set(fields)
def _from_iterable(self, iterable):
return Role(self.function, iterable)
def __contains__(self, value):
return value in self.fields
def __iter__(self):
return iter(self.fields)
def __len__(self):
return len(self.fields)
def __eq__(self, other):
return (self.function.__name__ == other.function.__name__ and
self.fields == other.fields)
def __str__(self):
return '%s(%s)' % (self.function.__name__,
', '.join("'%s'" % f for f in self.fields))
def __repr__(self):
return '<Role %s>' % str(self)
# edit role fields
def __add__(self, other):
fields = self.fields.union(other)
return self._from_iterable(fields)
def __sub__(self, other):
fields = self.fields.difference(other)
return self._from_iterable(fields)
# apply role to field
def __call__(self, name, value):
return self.function(name, value, self.fields)
# static filter functions
@staticmethod
def wholelist(name, value, seq):
"""
        Accepts a field name, value, and a field list. This function
        implements acceptance of all fields by never requesting that a field
        be skipped, and thus returns False for all input.
:param name:
The field name to inspect.
:param value:
The field's value.
:param seq:
The list of fields associated with the ``Role``.
"""
return False
@staticmethod
def whitelist(name, value, seq):
"""
Implements the behavior of a whitelist by requesting a field be skipped
        whenever its name is not in the list of fields.
:param name:
The field name to inspect.
:param value:
The field's value.
:param seq:
The list of fields associated with the ``Role``.
"""
if seq is not None and len(seq) > 0:
return name not in seq
return True
@staticmethod
def blacklist(name, value, seq):
"""
Implements the behavior of a blacklist by requesting a field be skipped
        whenever its name is found in the list of fields.
        :param name:
            The field name to inspect.
        :param value:
The field's value.
:param seq:
The list of fields associated with the ``Role``.
"""
if seq is not None and len(seq) > 0:
return name in seq
return False
def wholelist(*field_list):
"""
Returns a function that evicts nothing. Exists mainly to be an explicit
    allowance of all fields instead of using an empty blacklist.
"""
return Role(Role.wholelist, field_list)
def whitelist(*field_list):
"""
Returns a function that operates as a whitelist for the provided list of
fields.
A whitelist is a list of fields explicitly named that are allowed.
"""
return Role(Role.whitelist, field_list)
def blacklist(*field_list):
"""
Returns a function that operates as a blacklist for the provided list of
fields.
A blacklist is a list of fields explicitly named that are not allowed.
"""
return Role(Role.blacklist, field_list)
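# A small, illustrative check of the role factories above (the field names
# are made up). Role.__call__ returns True when a field should be skipped and
# False when it should be kept. This module uses relative imports, so treat
# this guard as a doctest-style sketch rather than a script entry point.
if __name__ == '__main__':  # pragma: no cover
    public = whitelist('id', 'name')
    assert public('name', 'Alice') is False        # kept: listed in the whitelist
    assert public('password', 'hunter2') is True   # skipped: not listed
    hidden = blacklist('password')
    assert hidden('password', 'hunter2') is True   # skipped: listed in the blacklist
    assert hidden('name', 'Alice') is False        # kept
    assert wholelist()('anything', 42) is False    # wholelist never skips anything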
###
# Field converter interface
###
class Converter(object):
def __call__(self, field, value, context):
raise NotImplementedError
def pre(self, model_class, instance_or_dict, context):
return instance_or_dict
def post(self, model_class, data, context):
return data
class BasicConverter(Converter):
def __init__(self, func):
self.func = func
def __call__(self, *args):
return self.func(*args)
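# An illustrative sketch of the converter interface above: decorating a plain
# function with @BasicConverter makes it callable like a Converter while
# inheriting the no-op pre()/post() hooks (the upper-casing rule is made up).
if __name__ == '__main__':  # pragma: no cover
    @BasicConverter
    def shout_converter(field, value, context):
        # Made-up rule: upper-case whatever value comes in.
        return value.upper()

    assert shout_converter(None, 'abc', None) == 'ABC'
    assert shout_converter.pre(None, {'x': 1}, None) == {'x': 1}  # inherited no-op hook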
###
# Standard export converters
###
@BasicConverter
def to_native_converter(field, value, context):
return field.export(value, NATIVE, context)
@BasicConverter
def to_primitive_converter(field, value, context):
return field.export(value, PRIMITIVE, context)
###
# Standard import converters
###
@BasicConverter
def import_converter(field, value, context):
field.check_required(value, context)
if value in (None, Undefined):
return value
return field.convert(value, context)
@BasicConverter
def validation_converter(field, value, context):
field.check_required(value, context)
if value in (None, Undefined):
return value
return field.validate(value, context)
###
# Context stub factories
###
def get_import_context(field_converter=import_converter, **options):
import_options = {
'field_converter': field_converter,
'partial': False,
'strict': False,
'convert': True,
'validate': False,
'new': False,
'oo': False
}
import_options.update(options)
return Context(**import_options)
def get_export_context(field_converter=to_native_converter, **options):
export_options = {
'field_converter': field_converter,
'export_level': None
}
export_options.update(options)
return Context(**export_options)
###
# Import and export functions
###
def convert(cls, instance_or_dict, **kwargs):
return import_loop(cls, instance_or_dict, import_converter, **kwargs)
def to_native(cls, instance_or_dict, **kwargs):
return export_loop(cls, instance_or_dict, to_native_converter, **kwargs)
def to_primitive(cls, instance_or_dict, **kwargs):
return export_loop(cls, instance_or_dict, to_primitive_converter, **kwargs)
__all__ = module_exports(__name__)
|
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from boto import exception as boto_exception
from neutronclient.common import exceptions as neutron_exceptions
from saharaclient.api import base as saharaclient_base
from rally.common import log as logging
from rally.plugins.openstack.context.cleanup import base
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.keystone import utils as kutils
from rally.plugins.openstack.wrappers import keystone as keystone_wrapper
LOG = logging.getLogger(__name__)
def get_order(start):
return iter(range(start, start + 99))
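# Illustrative note: each service section below reserves a block of 99
# consecutive cleanup orders and hands them out with next(); the numbers in
# this quick check mirror the nova block defined further down.
if __name__ == "__main__":  # pragma: no cover
    _demo_order = get_order(200)
    assert next(_demo_order) == 200
    assert next(_demo_order) == 201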
class SynchronizedDeletion(object):
def is_deleted(self):
return True
class QuotaMixin(SynchronizedDeletion):
def id(self):
return self.raw_resource
def delete(self):
self._manager().delete(self.raw_resource)
def list(self):
return [self.tenant_uuid] if self.tenant_uuid else []
# HEAT
@base.resource("heat", "stacks", order=100, tenant_resource=True)
class HeatStack(base.ResourceManager):
pass
# NOVA
_nova_order = get_order(200)
@base.resource("nova", "servers", order=next(_nova_order))
class NovaServer(base.ResourceManager):
def list(self):
"""List all servers."""
if hasattr(self._manager().api, "api_version"):
# NOTE(andreykurilin): novaclient v2.27.0 includes ability to
# return all servers(see https://review.openstack.org/#/c/217101
# for more details). This release can be identified by presence
# of "api_version" property of ``novaclient.client.Client`` cls.
return self._manager().list(limit=-1)
else:
# FIXME(andreykurilin): Remove code below, when minimum version of
# novaclient in requirements will allow it.
# NOTE(andreykurilin): Nova API returns only limited number(
# 'osapi_max_limit' option in nova.conf) of servers, so we need
# to use 'marker' option to list all pages of servers.
result = []
marker = None
while True:
servers = self._manager().list(marker=marker)
if not servers:
break
result.extend(servers)
marker = servers[-1].id
return result
def delete(self):
if getattr(self.raw_resource, "OS-EXT-STS:locked", False):
self.raw_resource.unlock()
super(NovaServer, self).delete()
@base.resource("nova", "floating_ips", order=next(_nova_order))
class NovaFloatingIPs(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("nova", "keypairs", order=next(_nova_order))
class NovaKeypair(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("nova", "security_groups", order=next(_nova_order))
class NovaSecurityGroup(SynchronizedDeletion, base.ResourceManager):
def list(self):
return filter(lambda x: x.name != "default",
super(NovaSecurityGroup, self).list())
@base.resource("nova", "quotas", order=next(_nova_order),
admin_required=True, tenant_resource=True)
class NovaQuotas(QuotaMixin, base.ResourceManager):
pass
@base.resource("nova", "floating_ips_bulk", order=next(_nova_order),
admin_required=True)
class NovaFloatingIpsBulk(SynchronizedDeletion, base.ResourceManager):
def id(self):
return self.raw_resource.address
def list(self):
return [floating_ip for floating_ip in self._manager().list()
if floating_ip.pool.startswith("rally_fip_pool_")]
@base.resource("nova", "networks", order=next(_nova_order),
admin_required=True, tenant_resource=True)
class NovaNetworks(SynchronizedDeletion, base.ResourceManager):
def list(self):
return [net for net in self._manager().list()
if net.label.startswith("rally_novanet")]
# EC2
_ec2_order = get_order(250)
class EC2Mixin(object):
def _manager(self):
return getattr(self.user, self._service)()
@base.resource("ec2", "servers", order=next(_ec2_order))
class EC2Server(EC2Mixin, base.ResourceManager):
def is_deleted(self):
try:
instances = self._manager().get_only_instances(
instance_ids=[self.id()])
except boto_exception.EC2ResponseError as e:
# NOTE(wtakase): Nova EC2 API returns 'InvalidInstanceID.NotFound'
# if instance not found. In this case, we consider
# instance has already been deleted.
return getattr(e, "error_code") == "InvalidInstanceID.NotFound"
        # NOTE(wtakase): After instance deletion, the instance can be in the
        #                'terminated' state. If all instance states are
        #                'terminated', this returns True. And if
        #                get_only_instances() returns an empty list, this also
        #                returns True because we consider the instance to have
        #                already been deleted.
return all(map(lambda i: i.state == "terminated", instances))
def delete(self):
self._manager().terminate_instances(instance_ids=[self.id()])
def list(self):
return self._manager().get_only_instances()
# NEUTRON
_neutron_order = get_order(300)
@base.resource(service=None, resource=None, admin_required=True)
class NeutronMixin(SynchronizedDeletion, base.ResourceManager):
# Neutron has the best client ever, so we need to override everything
def supports_extension(self, extension):
exts = self._manager().list_extensions().get("extensions", [])
if any(ext.get("alias") == extension for ext in exts):
return True
return False
def _manager(self):
client = self._admin_required and self.admin or self.user
return getattr(client, self._service)()
def id(self):
return self.raw_resource["id"]
def delete(self):
delete_method = getattr(self._manager(), "delete_%s" % self._resource)
delete_method(self.id())
def list(self):
resources = self._resource + "s"
list_method = getattr(self._manager(), "list_%s" % resources)
return filter(lambda r: r["tenant_id"] == self.tenant_uuid,
list_method({"tenant_id": self.tenant_uuid})[resources])
class NeutronLbaasV1Mixin(NeutronMixin):
def list(self):
if self.supports_extension("lbaas"):
return super(NeutronLbaasV1Mixin, self).list()
return []
@base.resource("neutron", "vip", order=next(_neutron_order),
tenant_resource=True)
class NeutronV1Vip(NeutronLbaasV1Mixin):
pass
@base.resource("neutron", "pool", order=next(_neutron_order),
tenant_resource=True)
class NeutronV1Pool(NeutronLbaasV1Mixin):
pass
@base.resource("neutron", "port", order=next(_neutron_order),
tenant_resource=True)
class NeutronPort(NeutronMixin):
def delete(self):
if (self.raw_resource["device_owner"] == "network:router_interface" or
self.raw_resource["device_owner"] ==
"network:router_interface_distributed"):
self._manager().remove_interface_router(
self.raw_resource["device_id"],
{"port_id": self.raw_resource["id"]})
else:
try:
self._manager().delete_port(self.id())
except neutron_exceptions.PortNotFoundClient:
# Port can be already auto-deleted, skip silently
LOG.debug("Port %s was not deleted. Skip silently because "
"port can be already auto-deleted."
% self.id())
@base.resource("neutron", "router", order=next(_neutron_order),
tenant_resource=True)
class NeutronRouter(NeutronMixin):
pass
@base.resource("neutron", "subnet", order=next(_neutron_order),
tenant_resource=True)
class NeutronSubnet(NeutronMixin):
pass
@base.resource("neutron", "network", order=next(_neutron_order),
tenant_resource=True)
class NeutronNetwork(NeutronMixin):
pass
@base.resource("neutron", "floatingip", order=next(_neutron_order),
tenant_resource=True)
class NeutronFloatingIP(NeutronMixin):
pass
@base.resource("neutron", "quota", order=next(_neutron_order),
admin_required=True, tenant_resource=True)
class NeutronQuota(QuotaMixin, NeutronMixin):
def delete(self):
self._manager().delete_quota(self.tenant_uuid)
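# Example (sketch, hypothetical resource): registering another Neutron cleanup
# resource would follow the same pattern as the classes above, e.g.:
#
#   @base.resource("neutron", "security_group", order=next(_neutron_order),
#                  tenant_resource=True)
#   class NeutronSecurityGroup(NeutronMixin):
#       pass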
# CINDER
_cinder_order = get_order(400)
@base.resource("cinder", "backups", order=next(_cinder_order),
tenant_resource=True)
class CinderVolumeBackup(base.ResourceManager):
pass
@base.resource("cinder", "volume_snapshots", order=next(_cinder_order),
tenant_resource=True)
class CinderVolumeSnapshot(base.ResourceManager):
pass
@base.resource("cinder", "transfers", order=next(_cinder_order),
tenant_resource=True)
class CinderVolumeTransfer(base.ResourceManager):
pass
@base.resource("cinder", "volumes", order=next(_cinder_order),
tenant_resource=True)
class CinderVolume(base.ResourceManager):
pass
@base.resource("cinder", "quotas", order=next(_cinder_order),
admin_required=True, tenant_resource=True)
class CinderQuotas(QuotaMixin, base.ResourceManager):
pass
# MANILA
_manila_order = get_order(450)
@base.resource("manila", "shares", order=next(_manila_order),
tenant_resource=True)
class ManilaShare(base.ResourceManager):
pass
@base.resource("manila", "share_networks", order=next(_manila_order),
tenant_resource=True)
class ManilaShareNetwork(base.ResourceManager):
pass
@base.resource("manila", "security_services", order=next(_manila_order),
tenant_resource=True)
class ManilaSecurityService(base.ResourceManager):
pass
# GLANCE
@base.resource("glance", "images", order=500, tenant_resource=True)
class GlanceImage(base.ResourceManager):
def list(self):
return self._manager().list(owner=self.tenant_uuid)
# SAHARA
_sahara_order = get_order(600)
@base.resource("sahara", "job_executions", order=next(_sahara_order),
tenant_resource=True)
class SaharaJobExecution(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("sahara", "jobs", order=next(_sahara_order),
tenant_resource=True)
class SaharaJob(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("sahara", "job_binary_internals", order=next(_sahara_order),
tenant_resource=True)
class SaharaJobBinaryInternals(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("sahara", "job_binaries", order=next(_sahara_order),
tenant_resource=True)
class SaharaJobBinary(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("sahara", "data_sources", order=next(_sahara_order),
tenant_resource=True)
class SaharaDataSource(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("sahara", "clusters", order=next(_sahara_order),
tenant_resource=True)
class SaharaCluster(base.ResourceManager):
# Need special treatment for Sahara Cluster because of the way the
# exceptions are described in:
# https://github.com/openstack/python-saharaclient/blob/master/
# saharaclient/api/base.py#L145
def is_deleted(self):
try:
self._manager().get(self.id())
return False
except saharaclient_base.APIException as e:
return e.error_code == 404
@base.resource("sahara", "cluster_templates", order=next(_sahara_order),
tenant_resource=True)
class SaharaClusterTemplate(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("sahara", "node_group_templates", order=next(_sahara_order),
tenant_resource=True)
class SaharaNodeGroup(SynchronizedDeletion, base.ResourceManager):
pass
# CEILOMETER
@base.resource("ceilometer", "alarms", order=700, tenant_resource=True)
class CeilometerAlarms(SynchronizedDeletion, base.ResourceManager):
def id(self):
return self.raw_resource.alarm_id
def list(self):
query = [{
"field": "project_id",
"op": "eq",
"value": self.tenant_uuid
}]
return self._manager().list(q=query)
# ZAQAR
@base.resource("zaqar", "queues", order=800)
class ZaqarQueues(SynchronizedDeletion, base.ResourceManager):
def list(self):
return self.user.zaqar().queues()
# DESIGNATE
_designate_order = get_order(900)
@base.resource("designate", "domains", order=next(_designate_order))
class Designate(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("designate", "servers", order=next(_designate_order),
admin_required=True, perform_for_admin_only=True)
class DesignateServer(SynchronizedDeletion, base.ResourceManager):
pass
# SWIFT
_swift_order = get_order(1000)
class SwiftMixin(SynchronizedDeletion, base.ResourceManager):
def _manager(self):
client = self._admin_required and self.admin or self.user
return getattr(client, self._service)()
def id(self):
return self.raw_resource
def delete(self):
delete_method = getattr(self._manager(), "delete_%s" % self._resource)
# NOTE(weiwu): *self.raw_resource is required because when deleting a
# container we pass only the container name, while deleting an object
# requires the container name as the first argument and the object
# name as the second.
delete_method(*self.raw_resource)
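# Illustration: SwiftObject.list() below builds [container, object] pairs, so
# delete() expands to delete_object(container, object); SwiftContainer.list()
# builds single-item [container] lists, so delete() expands to
# delete_container(container).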
@base.resource("swift", "object", order=next(_swift_order),
tenant_resource=True)
class SwiftObject(SwiftMixin):
def list(self):
object_list = []
containers = self._manager().get_account(full_listing=True)[1]
for con in containers:
objects = self._manager().get_container(con["name"],
full_listing=True)[1]
for obj in objects:
raw_resource = [con["name"], obj["name"]]
object_list.append(raw_resource)
return object_list
@base.resource("swift", "container", order=next(_swift_order),
tenant_resource=True)
class SwiftContainer(SwiftMixin):
def list(self):
containers = self._manager().get_account(full_listing=True)[1]
return [[con["name"]] for con in containers]
# MISTRAL
@base.resource("mistral", "workbooks", order=1100, tenant_resource=True)
class MistralWorkbooks(SynchronizedDeletion, base.ResourceManager):
def delete(self):
self._manager().delete(self.raw_resource.name)
# MURANO
_murano_order = get_order(1200)
@base.resource("murano", "environments", tenant_resource=True,
order=next(_murano_order))
class MuranoEnvironments(base.ResourceManager):
pass
@base.resource("murano", "packages", tenant_resource=True,
order=next(_murano_order))
class MuranoPackages(base.ResourceManager):
def list(self):
return filter(lambda x: x.name != "Core library",
super(MuranoPackages, self).list())
# IRONIC
_ironic_order = get_order(1300)
@base.resource("ironic", "node", admin_required=True,
order=next(_ironic_order), perform_for_admin_only=True)
class IronicNodes(base.ResourceManager):
def id(self):
return self.raw_resource.uuid
# FUEL
@base.resource("fuel", "environment", order=1400,
admin_required=True, perform_for_admin_only=True)
class FuelEnvironment(base.ResourceManager):
"""Fuel environment.
This is the only resource that fuelclient can delete explicitly.
"""
def id(self):
return self.raw_resource["id"]
def is_deleted(self):
return not self._manager().get(self.id())
def list(self):
return [env for env in self._manager().list()
if env["name"].startswith(
scenario.OpenStackScenario.RESOURCE_NAME_PREFIX)]
# KEYSTONE
_keystone_order = get_order(9000)
class KeystoneMixin(SynchronizedDeletion):
def _manager(self):
return keystone_wrapper.wrap(getattr(self.admin, self._service)())
def delete(self):
delete_method = getattr(self._manager(), "delete_%s" % self._resource)
delete_method(self.id())
def list(self):
# TODO(boris-42): We should use such stuff in all list commands.
resources = self._resource + "s"
list_method = getattr(self._manager(), "list_%s" % resources)
return filter(kutils.is_temporary, list_method())
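# Note (sketch): the filter above keeps only the resources that
# kutils.is_temporary() recognizes, which presumably limits cleanup to
# Rally-created users, projects, roles and services.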
@base.resource("keystone", "user", order=next(_keystone_order),
admin_required=True, perform_for_admin_only=True)
class KeystoneUser(KeystoneMixin, base.ResourceManager):
pass
@base.resource("keystone", "project", order=next(_keystone_order),
admin_required=True, perform_for_admin_only=True)
class KeystoneProject(KeystoneMixin, base.ResourceManager):
pass
@base.resource("keystone", "service", order=next(_keystone_order),
admin_required=True, perform_for_admin_only=True)
class KeystoneService(KeystoneMixin, base.ResourceManager):
pass
@base.resource("keystone", "role", order=next(_keystone_order),
admin_required=True, perform_for_admin_only=True)
class KeystoneRole(KeystoneMixin, base.ResourceManager):
pass
@base.resource("keystone", "ec2", tenant_resource=True,
order=next(_keystone_order))
class KeystoneEc2(SynchronizedDeletion, base.ResourceManager):
def list(self):
return self._manager().list(self.raw_resource)
|
|
"""HTTP wrapper for apitools.
This library wraps the underlying http library we use, which is
currently :mod:`httplib2`.
"""
import collections
import contextlib
import logging
import socket
import time
import httplib2
import six
from six.moves import http_client
from six.moves.urllib import parse
from gcloud.streaming.exceptions import BadStatusCodeError
from gcloud.streaming.exceptions import RequestError
from gcloud.streaming.exceptions import RetryAfterError
from gcloud.streaming.util import calculate_wait_for_retry
# 308 and 429 don't have names in httplib.
RESUME_INCOMPLETE = 308
TOO_MANY_REQUESTS = 429
_REDIRECT_STATUS_CODES = (
http_client.MOVED_PERMANENTLY,
http_client.FOUND,
http_client.SEE_OTHER,
http_client.TEMPORARY_REDIRECT,
RESUME_INCOMPLETE,
)
_RETRYABLE_EXCEPTIONS = (
http_client.BadStatusLine,
http_client.IncompleteRead,
http_client.ResponseNotReady,
socket.error,
httplib2.ServerNotFoundError,
ValueError,
RequestError,
BadStatusCodeError,
RetryAfterError,
)
class _ExceptionRetryArgs(
collections.namedtuple(
'_ExceptionRetryArgs',
['http', 'http_request', 'exc', 'num_retries', 'max_retry_wait'])):
"""Bundle of information for retriable exceptions.
:type http: :class:`httplib2.Http` (or conforming alternative)
:param http: instance used to perform requests.
:type http_request: :class:`Request`
:param http_request: the request whose response was a retriable error
:type exc: :class:`Exception` subclass
:param exc: the exception being raised.
:type num_retries: integer
:param num_retries: Number of retries consumed; used for exponential
                    backoff.
:type max_retry_wait: integer
:param max_retry_wait: Maximum number of seconds to wait when retrying.
"""
@contextlib.contextmanager
def _httplib2_debug_level(http_request, level, http=None):
"""Temporarily change the value of httplib2.debuglevel, if necessary.
If http_request has a `loggable_body` distinct from `body`, then we
need to prevent httplib2 from logging the full body. This sets
httplib2.debuglevel for the duration of the `with` block; however,
that alone won't change the value of existing HTTP connections. If
an httplib2.Http object is provided, we'll also change the level on
any cached connections attached to it.
:type http_request: :class:`Request`
:param http_request: the request to be logged.
:type level: integer
:param level: the debuglevel for logging.
:type http: :class:`httplib2.Http`, or ``None``
:param http: the instance on whose connections to set the debuglevel.
"""
if http_request.loggable_body is None:
yield
return
old_level = httplib2.debuglevel
http_levels = {}
httplib2.debuglevel = level
if http is not None:
for connection_key, connection in http.connections.items():
# httplib2 stores two kinds of values in this dict, connection
# classes and instances. Since the connection types are all
# old-style classes, we can't easily distinguish by connection
# type -- so instead we use the key pattern.
if ':' not in connection_key:
continue
http_levels[connection_key] = connection.debuglevel
connection.set_debuglevel(level)
yield
httplib2.debuglevel = old_level
if http is not None:
for connection_key, old_level in http_levels.items():
http.connections[connection_key].set_debuglevel(old_level)
class Request(object):
"""Encapsulates the data for an HTTP request.
:type url: str
:param url: the URL for the request
:type http_method: str
:param http_method: the HTTP method to use for the request
:type headers: mapping or None
:param headers: headers to be sent with the request
:type body: str
:param body: body to be sent with the request
"""
def __init__(self, url='', http_method='GET', headers=None, body=''):
self.url = url
self.http_method = http_method
self.headers = headers or {}
self.__body = None
self.__loggable_body = None
self.body = body
@property
def loggable_body(self):
"""Request body for logging purposes
:rtype: str
"""
return self.__loggable_body
@loggable_body.setter
def loggable_body(self, value):
"""Update request body for logging purposes
:type value: str
:param value: updated body
:raises: :exc:`RequestError` if the request does not have a body.
"""
if self.body is None:
raise RequestError(
'Cannot set loggable body on request with no body')
self.__loggable_body = value
@property
def body(self):
"""Request body
:rtype: str
"""
return self.__body
@body.setter
def body(self, value):
"""Update the request body
Handles logging and length measurement.
:type value: str
:param value: updated body
"""
self.__body = value
if value is not None:
# Avoid calling len() which cannot exceed 4GiB in 32-bit python.
body_length = getattr(
self.__body, 'length', None) or len(self.__body)
self.headers['content-length'] = str(body_length)
else:
self.headers.pop('content-length', None)
# This line ensures we don't try to print large requests.
if not isinstance(value, (type(None), six.string_types)):
self.loggable_body = '<media body>'
def _process_content_range(content_range):
"""Convert a 'Content-Range' header into a length for the response.
Helper for :meth:`Response.length`.
:type content_range: str
:param content_range: the header value being parsed.
:rtype: integer
:returns: the length of the response chunk.
"""
_, _, range_spec = content_range.partition(' ')
byte_range, _, _ = range_spec.partition('/')
start, _, end = byte_range.partition('-')
return int(end) - int(start) + 1
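# Example: for a header value of 'bytes 0-99/1234',
# _process_content_range('bytes 0-99/1234') returns 100.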
# Note: currently the order of fields here is important, since we want
# to be able to pass in the result from httplib2.request.
_ResponseTuple = collections.namedtuple(
'HttpResponse', ['info', 'content', 'request_url'])
class Response(_ResponseTuple):
"""Encapsulates data for an HTTP response.
"""
__slots__ = ()
def __len__(self):
return self.length
@property
def length(self):
"""Length of this response.
Exposed as an attribute since using ``len()`` directly can fail
for responses larger than ``sys.maxint``.
:rtype: integer or long
"""
if 'content-encoding' in self.info and 'content-range' in self.info:
# httplib2 rewrites content-length in the case of a compressed
# transfer; we can't trust the content-length header in that
# case, but we *can* trust content-range, if it's present.
return _process_content_range(self.info['content-range'])
elif 'content-length' in self.info:
return int(self.info.get('content-length'))
elif 'content-range' in self.info:
return _process_content_range(self.info['content-range'])
return len(self.content)
@property
def status_code(self):
"""HTTP status code
:rtype: integer
"""
return int(self.info['status'])
@property
def retry_after(self):
"""Retry interval (if set).
:rtype: integer
:returns: interval in seconds
"""
if 'retry-after' in self.info:
return int(self.info['retry-after'])
@property
def is_redirect(self):
"""Does this response contain a redirect
:rtype: boolean
:returns: True if the status code indicates a redirect and the
'location' header is present.
"""
return (self.status_code in _REDIRECT_STATUS_CODES and
'location' in self.info)
def _check_response(response):
"""Validate a response
:type response: :class:`Response`
:param response: the response to validate
:raises: :exc:`gcloud.streaming.exceptions.RequestError` if response is
None, :exc:`gcloud.streaming.exceptions.BadStatusCodeError` if
response status code indicates an error, or
:exc:`gcloud.streaming.exceptions.RetryAfterError` if response
indicates a retry interval.
"""
if response is None:
# Caller shouldn't call us if the response is None, but handle anyway.
raise RequestError(
'Request did not return a response.')
elif (response.status_code >= 500 or
response.status_code == TOO_MANY_REQUESTS):
raise BadStatusCodeError.from_response(response)
elif response.retry_after:
raise RetryAfterError.from_response(response)
def _reset_http_connections(http):
"""Rebuild all http connections in the httplib2.Http instance.
httplib2 overloads the map in http.connections to contain two different
types of values:
{ scheme string: connection class } and
{ scheme + authority string : actual http connection }
Here we remove all of the entries for actual connections so that on the
next request httplib2 will rebuild them from the connection types.
:type http: :class:`httplib2.Http`
:param http: the instance whose connections are to be rebuilt
"""
if getattr(http, 'connections', None):
for conn_key in list(http.connections.keys()):
if ':' in conn_key:
del http.connections[conn_key]
def _make_api_request_no_retry(http, http_request, redirections=5,
check_response_func=_check_response):
"""Send an HTTP request via the given http instance.
This wrapper exists to handle translation between the plain httplib2
request/response types and the Request and Response types above.
:type http: :class:`httplib2.Http`
:param http: an instance which implements the `Http` API.
:type http_request: :class:`Request`
:param http_request: the request to send.
:type redirections: integer
:param redirections: Number of redirects to follow.
:type check_response_func: function taking a single :class:`Response` argument.
:param check_response_func: Function to validate the HTTP response.
:rtype: :class:`Response`
:returns: an object representing the server's response
:raises: :exc:`gcloud.streaming.exceptions.RequestError` if no response
could be parsed.
"""
connection_type = None
# Handle overrides for connection types. This is used if the caller
# wants control over the underlying connection for managing callbacks
# or hash digestion.
if getattr(http, 'connections', None):
url_scheme = parse.urlsplit(http_request.url).scheme
if url_scheme and url_scheme in http.connections:
connection_type = http.connections[url_scheme]
# Custom printing only at debuglevel 4
new_debuglevel = 4 if httplib2.debuglevel == 4 else 0
with _httplib2_debug_level(http_request, new_debuglevel, http=http):
info, content = http.request(
str(http_request.url), method=str(http_request.http_method),
body=http_request.body, headers=http_request.headers,
redirections=redirections, connection_type=connection_type)
if info is None:
raise RequestError()
response = Response(info, content, http_request.url)
check_response_func(response)
return response
def make_api_request(http, http_request,
retries=7,
max_retry_wait=60,
redirections=5,
check_response_func=_check_response,
wo_retry_func=_make_api_request_no_retry):
"""Send an HTTP request via the given http, performing error/retry handling.
:type http: :class:`httplib2.Http`
:param http: an instance which implements the `Http` API.
:type http_request: :class:`Request`
:param http_request: the request to send.
:type retries: integer
:param retries: Number of retries to attempt on retryable
responses (such as 429 or 5XX).
:type max_retry_wait: integer
:param max_retry_wait: Maximum number of seconds to wait when retrying.
:type redirections: integer
:param redirections: Number of redirects to follow.
:type check_response_func: function taking a single :class:`Response` argument.
:param check_response_func: Function to validate the HTTP response.
:type wo_retry_func: function taking
(http, request, redirections, check_response_func)
:param wo_retry_func: Function to make HTTP request without retries.
:rtype: :class:`Response`
:returns: an object representing the server's response
:raises: :exc:`gcloud.streaming.exceptions.RequestError` if no response
could be parsed.
"""
retry = 0
while True:
try:
return wo_retry_func(
http, http_request, redirections=redirections,
check_response_func=check_response_func)
except _RETRYABLE_EXCEPTIONS as exc:
retry += 1
if retry >= retries:
raise
retry_after = getattr(exc, 'retry_after', None)
if retry_after is None:
retry_after = calculate_wait_for_retry(retry, max_retry_wait)
_reset_http_connections(http)
logging.debug('Retrying request to url %s after exception %s',
http_request.url, type(exc).__name__)
time.sleep(retry_after)
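# Usage sketch (the URL is illustrative, not part of this module):
#
#   request = Request(url='https://www.example.com/', http_method='GET')
#   response = make_api_request(get_http(), request)
#   status = response.status_code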
_HTTP_FACTORIES = []
def _register_http_factory(factory):
"""Register a custom HTTP factory.
:type factory: callable taking keyword arguments, returning an Http
instance (or an instance implementing the same API);
:param factory: the new factory (it may return ``None`` to defer to
a later factory or the default).
"""
_HTTP_FACTORIES.append(factory)
def get_http(**kwds):
"""Construct an Http instance.
:type kwds: dict
:param kwds: keyword arguments to pass to factories.
:rtype: :class:`httplib2.Http` (or a workalike)
"""
for factory in _HTTP_FACTORIES:
http = factory(**kwds)
if http is not None:
return http
return httplib2.Http(**kwds)
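# Example (sketch): a registered factory may return None to defer to the
# default httplib2.Http; this one only injects a timeout, which httplib2.Http
# accepts as a keyword argument.
#
#   def _timeout_factory(**kwds):
#       kwds.setdefault('timeout', 60)
#       return httplib2.Http(**kwds)
#
#   _register_http_factory(_timeout_factory)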
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualHubIpConfigurationOperations(object):
"""VirtualHubIpConfigurationOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
ip_config_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.HubIpConfiguration"
"""Retrieves the details of a Virtual Hub Ip configuration.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param ip_config_name: The name of the ipconfig.
:type ip_config_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: HubIpConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.HubIpConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.HubIpConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
'ipConfigName': self._serialize.url("ip_config_name", ip_config_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('HubIpConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/ipConfigurations/{ipConfigName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
ip_config_name, # type: str
parameters, # type: "_models.HubIpConfiguration"
**kwargs # type: Any
):
# type: (...) -> "_models.HubIpConfiguration"
cls = kwargs.pop('cls', None) # type: ClsType["_models.HubIpConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
'ipConfigName': self._serialize.url("ip_config_name", ip_config_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'HubIpConfiguration')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('HubIpConfiguration', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('HubIpConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/ipConfigurations/{ipConfigName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
ip_config_name, # type: str
parameters, # type: "_models.HubIpConfiguration"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.HubIpConfiguration"]
"""Creates a VirtualHubIpConfiguration resource if it doesn't exist else updates the existing
VirtualHubIpConfiguration.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param ip_config_name: The name of the ipconfig.
:type ip_config_name: str
:param parameters: Hub Ip Configuration parameters.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.HubIpConfiguration
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either HubIpConfiguration or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_05_01.models.HubIpConfiguration]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.HubIpConfiguration"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_hub_name=virtual_hub_name,
ip_config_name=ip_config_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('HubIpConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
'ipConfigName': self._serialize.url("ip_config_name", ip_config_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/ipConfigurations/{ipConfigName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
ip_config_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
'ipConfigName': self._serialize.url("ip_config_name", ip_config_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/ipConfigurations/{ipConfigName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
ip_config_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a VirtualHubIpConfiguration.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param ip_config_name: The name of the ipconfig.
:type ip_config_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_hub_name=virtual_hub_name,
ip_config_name=ip_config_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
'ipConfigName': self._serialize.url("ip_config_name", ip_config_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/ipConfigurations/{ipConfigName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVirtualHubIpConfigurationResults"]
"""Retrieves the details of all VirtualHubIpConfigurations.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVirtualHubIpConfigurationResults or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_05_01.models.ListVirtualHubIpConfigurationResults]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVirtualHubIpConfigurationResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVirtualHubIpConfigurationResults', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/ipConfigurations'} # type: ignore
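# Usage sketch (illustrative; the client class, credential and the
# `virtual_hub_ip_configuration` attribute name are assumptions about the
# surrounding SDK, and `parameters` is a HubIpConfiguration model instance):
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.network import NetworkManagementClient
#
#   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   poller = client.virtual_hub_ip_configuration.begin_create_or_update(
#       "my-rg", "my-hub", "ipconfig1", parameters)
#   ip_config = poller.result()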
|
|
#
# Copyright (c), 2016-2021, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
from collections.abc import MutableMapping, MutableSequence
from typing import TYPE_CHECKING, cast, Any, Dict, Iterator, Iterable, \
List, Optional, Type, Tuple, Union
from ..exceptions import XMLSchemaTypeError
from ..names import XSI_NAMESPACE
from ..etree import etree_element, ElementData
from ..aliases import NamespacesType, ElementType, BaseXsdType
from ..namespaces import NamespaceMapper
if TYPE_CHECKING:
from ..validators import XsdElement
class XMLSchemaConverter(NamespaceMapper):
"""
Generic XML Schema based converter class. A converter is used to compose
decoded XML data for an Element into a data structure and to build an Element
from encoded data structure. There are two methods for interfacing the
converter with the decoding/encoding process. The method *element_decode*
accepts an ElementData tuple, containing the element parts, and returns
a data structure. The method *element_encode* accepts a data structure and
returns an ElementData tuple. By default, character data parts are ignored.
Prefixes and the text key can also be changed to other alphanumeric values, but
ambiguities with schema elements could affect re-encoding of the XML data.
:param namespaces: map from namespace prefixes to URI.
:param dict_class: dictionary class to use for decoded data. Default is `dict`.
:param list_class: list class to use for decoded data. Default is `list`.
:param etree_element_class: the class that has to be used to create new XML elements, \
if not provided uses the ElementTree's Element class.
:param text_key: the key to apply to the element's decoded text data.
:param attr_prefix: controls the mapping of XML attributes, to the same name or \
with a prefix. If `None` the converter ignores attributes.
:param cdata_prefix: is used for including and prefixing the character data parts \
of a mixed content, that are labeled with an integer instead of a string. \
Character data parts are ignored if this argument is `None`.
:param indent: number of spaces for XML indentation (default is 4).
:param strip_namespaces: if set to `True` removes namespace declarations from data and \
namespace information from names, during decoding or encoding. Defaults to `False`.
:param preserve_root: if set to `True` the root element is preserved, wrapped into a \
single-item dictionary. Applicable only to default converter, to \
:class:`UnorderedConverter` and to :class:`ParkerConverter`.
:param force_dict: if set to `True` complex elements with simple content are decoded \
with a dictionary also if there are no decoded attributes. Applicable only to default \
converter and to :class:`UnorderedConverter`. Defaults to `False`.
:param force_list: if set to `True` child elements are decoded within a list in any case. \
Applicable only to default converter and to :class:`UnorderedConverter`. Defaults to `False`.
:ivar dict: dictionary class to use for decoded data.
:ivar list: list class to use for decoded data.
:ivar etree_element_class: Element class to use
:ivar text_key: key for decoded Element text
:ivar attr_prefix: prefix for attribute names
:ivar cdata_prefix: prefix for character data parts
:ivar indent: indentation to use for rebuilding XML trees
:ivar preserve_root: preserve the root element on decoding
:ivar force_dict: force dictionary for complex elements with simple content
:ivar force_list: force list for child elements
"""
ns_prefix: str
dict: Type[Dict[str, Any]] = dict
list: Type[List[Any]] = list
etree_element_class: Type[ElementType]
etree_element_class = etree_element
__slots__ = ('text_key', 'ns_prefix', 'attr_prefix', 'cdata_prefix',
'indent', 'preserve_root', 'force_dict', 'force_list')
def __init__(self, namespaces: Optional[NamespacesType] = None,
dict_class: Optional[Type[Dict[str, Any]]] = None,
list_class: Optional[Type[List[Any]]] = None,
etree_element_class: Optional[Type[ElementType]] = None,
text_key: Optional[str] = '$',
attr_prefix: Optional[str] = '@',
cdata_prefix: Optional[str] = None,
indent: int = 4,
strip_namespaces: bool = False,
preserve_root: bool = False,
force_dict: bool = False,
force_list: bool = False,
**kwargs: Any) -> None:
super(XMLSchemaConverter, self).__init__(namespaces, strip_namespaces)
if dict_class is not None:
self.dict = dict_class
if list_class is not None:
self.list = list_class
if etree_element_class is not None:
self.etree_element_class = etree_element_class
self.text_key = text_key
self.attr_prefix = attr_prefix
self.cdata_prefix = cdata_prefix
self.ns_prefix = 'xmlns' if attr_prefix is None else f'{attr_prefix}xmlns'
self.indent = indent
self.preserve_root = preserve_root
self.force_dict = force_dict
self.force_list = force_list
def __setattr__(self, name: str, value: Any) -> None:
if name in {'attr_prefix', 'text_key', 'cdata_prefix'}:
if value is not None and not isinstance(value, str):
msg = '{} must be a str or None, not {}'
raise XMLSchemaTypeError(msg.format(name, type(value).__name__))
elif name in {'strip_namespaces', 'preserve_root', 'force_dict', 'force_list'}:
if not isinstance(value, bool):
msg = '{} must be a bool, not {}'
raise XMLSchemaTypeError(msg.format(name, type(value).__name__))
elif name == 'indent':
if isinstance(value, bool) or not isinstance(value, int):
msg = '{} must be an int, not {}'
raise XMLSchemaTypeError(msg.format(name, type(value).__name__))
elif name == 'dict':
if not issubclass(value, MutableMapping):
msg = '{!r} must be a MutableMapping subclass, not {}'
raise XMLSchemaTypeError(msg.format(name, value))
elif name == 'list':
if not issubclass(value, MutableSequence):
msg = '{!r} must be a MutableSequence subclass, not {}'
raise XMLSchemaTypeError(msg.format(name, value))
super(XMLSchemaConverter, self).__setattr__(name, value)
@property
def lossy(self) -> bool:
"""The converter ignores some kind of XML data during decoding/encoding."""
return self.cdata_prefix is None or self.text_key is None or self.attr_prefix is None
@property
def losslessly(self) -> bool:
"""
The XML data is decoded without loss of information, either in the data or in
the shape of the data model. Only lossless converters can always be used to
encode XML data that is strictly conformant to the schema.
"""
return False
def copy(self, **kwargs: Any) -> 'XMLSchemaConverter':
return type(self)(
namespaces=kwargs.get('namespaces', self._namespaces),
dict_class=kwargs.get('dict_class', self.dict),
list_class=kwargs.get('list_class', self.list),
etree_element_class=kwargs.get('etree_element_class'),
text_key=kwargs.get('text_key', self.text_key),
attr_prefix=kwargs.get('attr_prefix', self.attr_prefix),
cdata_prefix=kwargs.get('cdata_prefix', self.cdata_prefix),
indent=kwargs.get('indent', self.indent),
strip_namespaces=kwargs.get('strip_namespaces', self.strip_namespaces),
preserve_root=kwargs.get('preserve_root', self.preserve_root),
force_dict=kwargs.get('force_dict', self.force_dict),
force_list=kwargs.get('force_list', self.force_list),
)
def map_attributes(self, attributes: Iterable[Tuple[str, Any]]) \
-> Iterator[Tuple[str, Any]]:
"""
Creates an iterator for converting decoded attributes to a data structure with
appropriate prefixes. If the instance has a non-empty namespace map, the mapped
URIs and prefixes are registered.
:param attributes: A sequence or an iterator of pairs with the name of \
the attribute and the decoded value. Default is `None` (for `simpleType` \
elements, which don't have attributes).
"""
if self.attr_prefix is None or not attributes:
return
elif self.attr_prefix:
for name, value in attributes:
yield '%s%s' % (self.attr_prefix, self.map_qname(name)), value
else:
for name, value in attributes:
yield self.map_qname(name), value
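# Illustration: with attr_prefix='@' and the 'ns' prefix mapped to
# 'http://example.com/ns', the pair ('{http://example.com/ns}a', 1) is
# yielded as ('@ns:a', 1); with attr_prefix='' it would be ('ns:a', 1).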
def map_content(self, content: Iterable[Tuple[str, Any, Any]]) \
-> Iterator[Tuple[str, Any, Any]]:
"""
A generator function for converting decoded content to a data structure.
If the instance has a non-empty namespace map, the mapped URIs and prefixes
are registered.
:param content: A sequence or an iterator of tuples with the name of the \
element, the decoded value and the `XsdElement` instance associated.
"""
if not content:
return
for name, value, xsd_child in content:
try:
if name[0] == '{':
yield self.map_qname(name), value, xsd_child
else:
yield name, value, xsd_child
except TypeError:
if self.cdata_prefix is not None:
yield '%s%s' % (self.cdata_prefix, name), value, xsd_child
def etree_element(self, tag: str,
text: Optional[str] = None,
children: Optional[List[ElementType]] = None,
attrib: Optional[Dict[str, str]] = None,
level: int = 0) -> ElementType:
"""
Builds an ElementTree's Element using arguments and the element class and
the indent spacing stored in the converter instance.
:param tag: the Element tag string.
:param text: the Element text.
:param children: the list of Element children/subelements.
:param attrib: a dictionary with Element attributes.
:param level: the level related to the encoding process (0 means the root).
:return: an instance of the Element class set for the converter instance.
"""
if type(self.etree_element_class) is type(etree_element):
if attrib is None:
elem = self.etree_element_class(tag)
else:
elem = self.etree_element_class(tag, self.dict(attrib))
else:
# FIXME: need a more refined check
nsmap = {prefix if prefix else None: uri
for prefix, uri in self._namespaces.items() if uri}
elem = self.etree_element_class(tag, nsmap=nsmap) # type: ignore[arg-type]
elem.attrib.update(attrib) # type: ignore[arg-type]
if children:
elem.extend(children)
elem.text = text or '\n' + ' ' * self.indent * (level + 1)
elem.tail = '\n' + ' ' * self.indent * level
else:
elem.text = text
elem.tail = '\n' + ' ' * self.indent * level
return elem
def element_decode(self, data: ElementData, xsd_element: 'XsdElement',
xsd_type: Optional[BaseXsdType] = None, level: int = 0) -> Any:
"""
Converts a decoded element data to a data structure.
:param data: ElementData instance decoded from an Element node.
:param xsd_element: the `XsdElement` associated with the decoded data.
:param xsd_type: optional XSD type for supporting dynamic type through \
*xsi:type* or xs:alternative.
:param level: the level related to the decoding process (0 means the root).
:return: a data structure containing the decoded data.
"""
xsd_type = xsd_type or xsd_element.type
result_dict = self.dict()
if level == 0 and xsd_element.is_global() and not self.strip_namespaces and self:
schema_namespaces = set(xsd_element.namespaces.values())
result_dict.update(
('%s:%s' % (self.ns_prefix, k) if k else self.ns_prefix, v)
for k, v in self._namespaces.items()
if v in schema_namespaces or v == XSI_NAMESPACE
)
xsd_group = xsd_type.model_group
if xsd_group is None:
if data.attributes or self.force_dict and not xsd_type.is_simple():
result_dict.update(t for t in self.map_attributes(data.attributes))
if data.text is not None and data.text != '' and self.text_key is not None:
result_dict[self.text_key] = data.text
return result_dict
else:
return data.text if data.text != '' else None
else:
if data.attributes:
result_dict.update(t for t in self.map_attributes(data.attributes))
has_single_group = xsd_group.is_single()
if data.content:
for name, value, xsd_child in self.map_content(data.content):
try:
result = result_dict[name]
except KeyError:
if xsd_child is None or has_single_group and xsd_child.is_single():
result_dict[name] = self.list([value]) if self.force_list else value
else:
result_dict[name] = self.list([value])
else:
if not isinstance(result, MutableSequence) or not result:
result_dict[name] = self.list([result, value])
elif isinstance(result[0], MutableSequence) or \
not isinstance(value, MutableSequence):
result.append(value)
else:
result_dict[name] = self.list([result, value])
elif data.text is not None and data.text != '' and self.text_key is not None:
result_dict[self.text_key] = data.text
if level == 0 and self.preserve_root:
return self.dict(
[(self.map_qname(data.tag), result_dict if result_dict else None)]
)
if not result_dict:
return None
elif len(result_dict) == 1 and self.text_key in result_dict:
return result_dict[self.text_key]
return result_dict
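# Illustration (default converter, sketch): an element such as
#   <item id="1">text</item>
# whose complex type has simple content decodes to {'@id': 1, '$': 'text'},
# while an element with a plain simple type and no attributes decodes to
# just 'text' (actual values depend on the XSD types involved).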
def element_encode(self, obj: Any, xsd_element: 'XsdElement', level: int = 0) -> ElementData:
"""
Extracts XML decoded data from a data structure for encoding into an ElementTree.
:param obj: the decoded object.
:param xsd_element: the `XsdElement` associated with the decoded data structure.
:param level: the level related to the encoding process (0 means the root).
:return: an ElementData instance.
"""
if level != 0:
tag = xsd_element.name
else:
if xsd_element.is_global():
tag = xsd_element.qualified_name
else:
tag = xsd_element.name
if self.preserve_root and isinstance(obj, MutableMapping):
match_local_name = cast(bool, self.strip_namespaces or self.default_namespace)
match = xsd_element.get_matching_item(obj, self.ns_prefix, match_local_name)
if match is not None:
obj = match
if not isinstance(obj, MutableMapping):
if xsd_element.type.simple_type is not None:
return ElementData(tag, obj, None, {})
elif xsd_element.type.mixed and isinstance(obj, (str, bytes)):
return ElementData(tag, None, [(1, obj)], {})
else:
return ElementData(tag, None, obj, {})
text = None
content: List[Tuple[Union[int, str], Any]] = []
attributes = {}
for name, value in obj.items():
if name == self.text_key:
text = value
elif self.cdata_prefix is not None and \
name.startswith(self.cdata_prefix) and \
name[len(self.cdata_prefix):].isdigit():
index = int(name[len(self.cdata_prefix):])
content.append((index, value))
elif name == self.ns_prefix:
self[''] = value
elif name.startswith('%s:' % self.ns_prefix):
if not self.strip_namespaces:
self[name[len(self.ns_prefix) + 1:]] = value
elif self.attr_prefix and \
name.startswith(self.attr_prefix) and \
name != self.attr_prefix:
attr_name = name[len(self.attr_prefix):]
ns_name = self.unmap_qname(attr_name, xsd_element.attributes)
attributes[ns_name] = value
elif not isinstance(value, MutableSequence) or not value:
content.append((self.unmap_qname(name), value))
elif isinstance(value[0], (MutableMapping, MutableSequence)):
ns_name = self.unmap_qname(name)
content.extend((ns_name, item) for item in value)
else:
xsd_group = xsd_element.type.model_group
if xsd_group is None:
# fallback to xs:anyType encoder
xsd_group = xsd_element.any_type.model_group
assert xsd_group is not None
ns_name = self.unmap_qname(name)
for xsd_child in xsd_group.iter_elements():
matched_element = xsd_child.match(ns_name, resolve=True)
if matched_element is not None:
if matched_element.type and matched_element.type.is_list():
content.append((ns_name, value))
else:
content.extend((ns_name, item) for item in value)
break
else:
if self.attr_prefix == '' and ns_name not in attributes:
for key, xsd_attribute in xsd_element.attributes.items():
if key and xsd_attribute.is_matching(ns_name):
attributes[key] = value
break
else:
content.append((ns_name, value))
else:
content.append((ns_name, value))
return ElementData(tag, text, content, attributes)
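# Usage sketch (assuming an XMLSchema instance from this package; file names
# are illustrative):
#
#   import xmlschema
#   schema = xmlschema.XMLSchema('example.xsd')
#   data = schema.decode('example.xml',
#                        converter=XMLSchemaConverter(attr_prefix='@',
#                                                     text_key='$'))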
|
|
import json
import logging
import os
import shutil
import sys
import time
import urllib2
import warnings
# Dropping a table inexplicably produces a warning despite
# the 'IF EXISTS' clause. Squelch these warnings.
warnings.simplefilter('ignore')
import MySQLdb
import environment
import utils
from mysql_flavor import mysql_flavor
from protocols_flavor import protocols_flavor
from vtdb import tablet
tablet_cell_map = {
62344: 'nj',
62044: 'nj',
41983: 'nj',
31981: 'ny',
}
def get_backup_storage_flags():
return ['-backup_storage_implementation', 'file',
'-file_backup_storage_root',
os.path.join(environment.tmproot, 'backupstorage')]
def get_all_extra_my_cnf(extra_my_cnf):
all_extra_my_cnf = [environment.vttop + '/config/mycnf/default-fast.cnf']
flavor_my_cnf = mysql_flavor().extra_my_cnf()
if flavor_my_cnf:
all_extra_my_cnf.append(flavor_my_cnf)
if extra_my_cnf:
all_extra_my_cnf.append(extra_my_cnf)
return all_extra_my_cnf
class Tablet(object):
"""This class helps manage a vttablet or vtocc instance.
To use it for vttablet, you need to use init_tablet and/or
start_vttablet. For vtocc, you can just call start_vtocc.
If you use it to start vtocc, many of the support functions
that are meant for vttablet will not work.
"""
default_uid = 62344
seq = 0
tablets_running = 0
default_db_config = {
'app': {
'uname': 'vt_app',
'charset': 'utf8'
},
'dba': {
'uname': 'vt_dba',
'charset': 'utf8'
},
'filtered': {
'uname': 'vt_filtered',
'charset': 'utf8'
},
'repl': {
'uname': 'vt_repl',
'charset': 'utf8'
}
}
# this will eventually be coming from the proto3
tablet_type_value = {
'UNKNOWN': 0,
'IDLE': 1,
'MASTER': 2,
'REPLICA': 3,
'RDONLY': 4,
'BATCH': 4,
'SPARE': 5,
'EXPERIMENTAL': 6,
'SCHEMA_UPGRADE': 7,
'BACKUP': 8,
'RESTORE': 9,
'WORKER': 10,
'SCRAP': 11,
}
def __init__(self, tablet_uid=None, port=None, mysql_port=None, cell=None,
use_mysqlctld=False):
self.tablet_uid = tablet_uid or (Tablet.default_uid + Tablet.seq)
self.port = port or (environment.reserve_ports(1))
self.mysql_port = mysql_port or (environment.reserve_ports(1))
self.grpc_port = environment.reserve_ports(1)
self.use_mysqlctld = use_mysqlctld
Tablet.seq += 1
if cell:
self.cell = cell
else:
self.cell = tablet_cell_map.get(tablet_uid, 'nj')
self.proc = None
# filled in during init_tablet
self.keyspace = None
self.shard = None
# utility variables
self.tablet_alias = 'test_%s-%010d' % (self.cell, self.tablet_uid)
self.zk_tablet_path = (
'/zk/test_%s/vt/tablets/%010d' % (self.cell, self.tablet_uid))
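# Usage sketch (utils.wait_procs is assumed to be the helper used elsewhere
# in this test framework to wait on the returned subprocess):
#
#   t = Tablet(62344, cell='nj')
#   utils.wait_procs([t.init_mysql()])
#   utils.wait_procs([t.start_mysql()])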
def update_stream_python_endpoint(self):
protocol = protocols_flavor().binlog_player_python_protocol()
port = self.port
if protocol == 'gorpc':
from vtdb import gorpc_update_stream
elif protocol == 'grpc':
# import the grpc update stream client implementation, change the port
from vtdb import grpc_update_stream
port = self.grpc_port
return (protocol, 'localhost:%d' % port)
def mysqlctl(self, cmd, extra_my_cnf=None, with_ports=False, verbose=False):
extra_env = {}
all_extra_my_cnf = get_all_extra_my_cnf(extra_my_cnf)
if all_extra_my_cnf:
extra_env['EXTRA_MY_CNF'] = ':'.join(all_extra_my_cnf)
args = environment.binary_args('mysqlctl') + [
'-log_dir', environment.vtlogroot,
'-tablet_uid', str(self.tablet_uid)]
if self.use_mysqlctld:
args.extend(
['-mysqlctl_socket', os.path.join(self.tablet_dir, 'mysqlctl.sock')])
if with_ports:
args.extend(['-port', str(self.port),
'-mysql_port', str(self.mysql_port)])
self._add_dbconfigs(args)
if verbose:
args.append('-alsologtostderr')
args.extend(cmd)
return utils.run_bg(args, extra_env=extra_env)
def mysqlctld(self, cmd, extra_my_cnf=None, with_ports=False, verbose=False):
extra_env = {}
all_extra_my_cnf = get_all_extra_my_cnf(extra_my_cnf)
if all_extra_my_cnf:
extra_env['EXTRA_MY_CNF'] = ':'.join(all_extra_my_cnf)
args = environment.binary_args('mysqlctld') + [
'-log_dir', environment.vtlogroot,
'-tablet_uid', str(self.tablet_uid),
'-mysql_port', str(self.mysql_port),
'-socket_file', os.path.join(self.tablet_dir, 'mysqlctl.sock')]
self._add_dbconfigs(args)
if verbose:
args.append('-alsologtostderr')
args.extend(cmd)
return utils.run_bg(args, extra_env=extra_env)
def init_mysql(self, extra_my_cnf=None):
if self.use_mysqlctld:
return self.mysqlctld(
['-bootstrap_archive', mysql_flavor().bootstrap_archive()],
extra_my_cnf=extra_my_cnf)
else:
return self.mysqlctl(
['init', '-bootstrap_archive', mysql_flavor().bootstrap_archive()],
extra_my_cnf=extra_my_cnf, with_ports=True)
def start_mysql(self):
return self.mysqlctl(['start'], with_ports=True)
def shutdown_mysql(self):
return self.mysqlctl(['shutdown'], with_ports=True)
def teardown_mysql(self):
if utils.options.keep_logs:
return self.shutdown_mysql()
return self.mysqlctl(['teardown', '-force'])
def remove_tree(self):
if utils.options.keep_logs:
return
try:
shutil.rmtree(self.tablet_dir)
except OSError as e:
if utils.options.verbose == 2:
print >> sys.stderr, e, self.tablet_dir
def mysql_connection_parameters(self, dbname, user='vt_dba'):
return dict(user=user,
unix_socket=self.tablet_dir + '/mysql.sock',
db=dbname)
def connect(self, dbname='', user='vt_dba', **params):
params.update(self.mysql_connection_parameters(dbname, user))
conn = MySQLdb.Connect(**params)
return conn, conn.cursor()
def connect_dict(self, dbname='', user='vt_dba', **params):
params.update(self.mysql_connection_parameters(dbname, user))
conn = MySQLdb.Connect(**params)
return conn, MySQLdb.cursors.DictCursor(conn)
# Query the MySQL instance directly
def mquery(
self, dbname, query, write=False, user='vt_dba', conn_params=None):
if conn_params is None:
conn_params = {}
conn, cursor = self.connect(dbname, user=user, **conn_params)
if write:
conn.begin()
if isinstance(query, basestring):
query = [query]
for q in query:
# logging.debug('mysql(%s,%s): %s', self.tablet_uid, dbname, q)
cursor.execute(q)
if write:
conn.commit()
try:
return cursor.fetchall()
finally:
conn.close()
def assert_table_count(self, dbname, table, n, where=''):
result = self.mquery(dbname, 'select count(*) from ' + table + ' ' + where)
if result[0][0] != n:
raise utils.TestError('expected %d rows in %s' % (n, table), result)
def reset_replication(self):
self.mquery('', mysql_flavor().reset_replication_commands())
  def populate(self, dbname, create_sql, insert_sqls=None):
    insert_sqls = insert_sqls or []
self.create_db(dbname)
if isinstance(create_sql, basestring):
create_sql = [create_sql]
for q in create_sql:
self.mquery(dbname, q)
for q in insert_sqls:
self.mquery(dbname, q, write=True)
def has_db(self, name):
rows = self.mquery('', 'show databases')
for row in rows:
dbname = row[0]
if dbname == name:
return True
return False
def drop_db(self, name):
self.mquery('', 'drop database if exists %s' % name)
while self.has_db(name):
logging.debug('%s sleeping while waiting for database drop: %s',
self.tablet_alias, name)
time.sleep(0.3)
self.mquery('', 'drop database if exists %s' % name)
def create_db(self, name):
self.drop_db(name)
self.mquery('', 'create database %s' % name)
def clean_dbs(self):
logging.debug('mysql(%s): removing all databases', self.tablet_uid)
rows = self.mquery('', 'show databases')
for row in rows:
dbname = row[0]
if dbname in ['information_schema', 'mysql']:
continue
self.drop_db(dbname)
def wait_check_db_var(self, name, value):
for _ in range(3):
try:
return self.check_db_var(name, value)
except utils.TestError as e:
print >> sys.stderr, 'WARNING: ', e
time.sleep(1.0)
raise e
def check_db_var(self, name, value):
row = self.get_db_var(name)
if row != (name, value):
raise utils.TestError('variable not set correctly', name, row)
def get_db_var(self, name):
conn, cursor = self.connect()
try:
cursor.execute("show variables like '%s'" % name)
return cursor.fetchone()
finally:
conn.close()
def update_addrs(self):
args = [
'UpdateTabletAddrs',
'-hostname', 'localhost',
'-ip-addr', '127.0.0.1',
'-mysql-port', '%d' % self.mysql_port,
'-vt-port', '%d' % self.port,
self.tablet_alias
]
return utils.run_vtctl(args)
def scrap(self, force=False, skip_rebuild=False):
args = ['ScrapTablet']
if force:
args.append('-force')
if skip_rebuild:
args.append('-skip-rebuild')
args.append(self.tablet_alias)
utils.run_vtctl(args, auto_log=True)
def init_tablet(self, tablet_type, keyspace=None, shard=None, force=True,
start=False, dbname=None, parent=True, wait_for_start=True,
include_mysql_port=True, **kwargs):
self.tablet_type = tablet_type
self.keyspace = keyspace
self.shard = shard
if dbname is None:
self.dbname = 'vt_' + (self.keyspace or 'database')
else:
self.dbname = dbname
args = ['InitTablet',
'-hostname', 'localhost',
'-port', str(self.port)]
if include_mysql_port:
args.extend(['-mysql_port', str(self.mysql_port)])
if force:
args.append('-force')
if parent:
args.append('-parent')
if dbname:
args.extend(['-db-name-override', dbname])
if keyspace:
args.extend(['-keyspace', keyspace])
if shard:
args.extend(['-shard', shard])
args.extend([self.tablet_alias, tablet_type])
utils.run_vtctl(args)
if start:
if not wait_for_start:
expected_state = None
elif (tablet_type == 'master' or tablet_type == 'replica' or
tablet_type == 'rdonly' or tablet_type == 'batch'):
expected_state = 'SERVING'
else:
expected_state = 'NOT_SERVING'
self.start_vttablet(wait_for_state=expected_state, **kwargs)
def conn(self, user=None, password=None):
conn = tablet.TabletConnection(
'localhost:%d' % self.port, self.tablet_type, self.keyspace,
self.shard, 30, caller_id='dev')
conn.dial()
return conn
@property
def tablet_dir(self):
return '%s/vt_%010d' % (environment.vtdataroot, self.tablet_uid)
def grpc_enabled(self):
return (
protocols_flavor().tabletconn_protocol() == 'grpc' or
protocols_flavor().tablet_manager_protocol() == 'grpc' or
protocols_flavor().binlog_player_protocol() == 'grpc')
def flush(self):
utils.curl('http://localhost:%s%s' %
(self.port, environment.flush_logs_url),
stderr=utils.devnull, stdout=utils.devnull)
def _start_prog(
self, binary, port=None, memcache=False,
wait_for_state='SERVING', filecustomrules=None, zkcustomrules=None,
schema_override=None,
repl_extra_flags=None, table_acl_config=None,
lameduck_period=None, security_policy=None,
extra_args=None, extra_env=None):
if repl_extra_flags is None:
repl_extra_flags = {}
environment.prog_compile(binary)
args = environment.binary_args(binary)
args.extend(['-port', '%s' % (port or self.port),
'-log_dir', environment.vtlogroot])
self._add_dbconfigs(args, repl_extra_flags)
if memcache:
args.extend(['-rowcache-bin', environment.memcached_bin()])
memcache_socket = os.path.join(self.tablet_dir, 'memcache.sock')
args.extend(['-rowcache-socket', memcache_socket])
args.extend(['-enable-rowcache'])
if filecustomrules:
args.extend(['-filecustomrules', filecustomrules])
if zkcustomrules:
args.extend(['-zkcustomrules', zkcustomrules])
if schema_override:
args.extend(['-schema-override', schema_override])
if table_acl_config:
args.extend(['-table-acl-config', table_acl_config])
args.extend(['-queryserver-config-strict-table-acl'])
if protocols_flavor().service_map():
args.extend(['-service_map', ','.join(protocols_flavor().service_map())])
if self.grpc_enabled():
args.extend(['-grpc_port', str(self.grpc_port)])
if lameduck_period:
args.extend(['-lameduck-period', lameduck_period])
if security_policy:
args.extend(['-security_policy', security_policy])
if extra_args:
args.extend(extra_args)
args.extend(['-enable-autocommit'])
stderr_fd = open(
os.path.join(environment.vtlogroot, '%s-%d.stderr' %
(binary, self.tablet_uid)), 'w')
# increment count only the first time
if not self.proc:
Tablet.tablets_running += 1
self.proc = utils.run_bg(args, stderr=stderr_fd, extra_env=extra_env)
log_message = (
'Started vttablet: %s (%s) with pid: %s - Log files: '
'%s/vttablet.*.{INFO,WARNING,ERROR,FATAL}.*.%s' %
(self.tablet_uid, self.tablet_alias, self.proc.pid,
environment.vtlogroot, self.proc.pid))
# This may race with the stderr output from the process (though
# that's usually empty).
stderr_fd.write(log_message + '\n')
stderr_fd.close()
logging.debug(log_message)
# wait for query service to be in the right state
if wait_for_state:
if binary == 'vttablet':
self.wait_for_vttablet_state(wait_for_state, port=port)
else:
self.wait_for_vtocc_state(wait_for_state, port=port)
return self.proc
def start_vttablet(
self, port=None, memcache=False,
wait_for_state='SERVING', filecustomrules=None, zkcustomrules=None,
schema_override=None,
repl_extra_flags=None, table_acl_config=None,
lameduck_period=None, security_policy=None,
target_tablet_type=None, full_mycnf_args=False,
extra_args=None, extra_env=None, include_mysql_port=True,
init_tablet_type=None, init_keyspace=None,
init_shard=None, init_db_name_override=None,
supports_backups=False):
"""Starts a vttablet process, and returns it.
The process is also saved in self.proc, so it's easy to kill as well.
"""
if repl_extra_flags is None:
repl_extra_flags = {}
args = []
# Use 'localhost' as hostname because Travis CI worker hostnames
# are too long for MySQL replication.
args.extend(['-tablet_hostname', 'localhost'])
args.extend(['-tablet-path', self.tablet_alias])
args.extend(environment.topo_server().flags())
args.extend(['-binlog_player_protocol',
protocols_flavor().binlog_player_protocol()])
args.extend(['-tablet_manager_protocol',
protocols_flavor().tablet_manager_protocol()])
args.extend(['-pid_file', os.path.join(self.tablet_dir, 'vttablet.pid')])
if self.use_mysqlctld:
args.extend(
['-mysqlctl_socket', os.path.join(self.tablet_dir, 'mysqlctl.sock')])
if full_mycnf_args:
      # this flag is used to specify all the mycnf_ flags explicitly, to make
      # sure that code path works and that it can fork actions.
relay_log_path = os.path.join(self.tablet_dir, 'relay-logs',
'vt-%010d-relay-bin' % self.tablet_uid)
args.extend([
'-mycnf_server_id', str(self.tablet_uid),
'-mycnf_data_dir', os.path.join(self.tablet_dir, 'data'),
'-mycnf_innodb_data_home_dir', os.path.join(self.tablet_dir,
'innodb', 'data'),
'-mycnf_innodb_log_group_home_dir', os.path.join(self.tablet_dir,
'innodb', 'logs'),
'-mycnf_socket_file', os.path.join(self.tablet_dir, 'mysql.sock'),
'-mycnf_error_log_path', os.path.join(self.tablet_dir, 'error.log'),
'-mycnf_slow_log_path', os.path.join(self.tablet_dir,
'slow-query.log'),
'-mycnf_relay_log_path', relay_log_path,
'-mycnf_relay_log_index_path', relay_log_path + '.index',
'-mycnf_relay_log_info_path', os.path.join(self.tablet_dir,
'relay-logs',
'relay-log.info'),
'-mycnf_bin_log_path', os.path.join(
self.tablet_dir, 'bin-logs', 'vt-%010d-bin' % self.tablet_uid),
'-mycnf_master_info_file', os.path.join(self.tablet_dir,
'master.info'),
'-mycnf_pid_file', os.path.join(self.tablet_dir, 'mysql.pid'),
'-mycnf_tmp_dir', os.path.join(self.tablet_dir, 'tmp'),
'-mycnf_slave_load_tmp_dir', os.path.join(self.tablet_dir, 'tmp'),
])
if include_mysql_port:
args.extend(['-mycnf_mysql_port', str(self.mysql_port)])
if target_tablet_type:
self.tablet_type = target_tablet_type
args.extend(['-target_tablet_type', target_tablet_type,
'-health_check_interval', '2s',
'-enable_replication_lag_check',
'-degraded_threshold', '5s'])
# this is used to run InitTablet as part of the vttablet startup
if init_tablet_type:
self.tablet_type = init_tablet_type
args.extend(['-init_tablet_type', init_tablet_type])
if init_keyspace:
self.keyspace = init_keyspace
self.shard = init_shard
args.extend(['-init_keyspace', init_keyspace,
'-init_shard', init_shard])
if init_db_name_override:
self.dbname = init_db_name_override
args.extend(['-init_db_name_override', init_db_name_override])
else:
self.dbname = 'vt_' + init_keyspace
if supports_backups:
args.extend(['-restore_from_backup'] + get_backup_storage_flags())
args.extend(['-rpc-error-only-in-reply=true'])
if extra_args:
args.extend(extra_args)
return self._start_prog(
binary='vttablet', port=port,
memcache=memcache, wait_for_state=wait_for_state,
filecustomrules=filecustomrules,
zkcustomrules=zkcustomrules,
schema_override=schema_override,
repl_extra_flags=repl_extra_flags,
table_acl_config=table_acl_config,
lameduck_period=lameduck_period, extra_args=args,
security_policy=security_policy, extra_env=extra_env)
def start_vtocc(self, port=None, memcache=False,
wait_for_state='SERVING', filecustomrules=None,
schema_override=None,
repl_extra_flags=None, table_acl_config=None,
lameduck_period=None, security_policy=None,
keyspace=None, shard=False,
extra_args=None):
"""Starts a vtocc process, and returns it.
The process is also saved in self.proc, so it's easy to kill as well.
"""
if repl_extra_flags is None:
repl_extra_flags = {}
self.keyspace = keyspace
self.shard = shard
self.dbname = 'vt_' + (self.keyspace or 'database')
args = []
args.extend(['-db-config-app-unixsocket', self.tablet_dir + '/mysql.sock'])
args.extend(['-db-config-dba-unixsocket', self.tablet_dir + '/mysql.sock'])
args.extend(['-db-config-app-keyspace', keyspace])
args.extend(['-db-config-app-shard', shard])
args.extend(['-binlog-path', 'foo'])
if extra_args:
args.extend(extra_args)
return self._start_prog(binary='vtocc', port=port,
memcache=memcache, wait_for_state=wait_for_state,
filecustomrules=filecustomrules,
schema_override=schema_override,
repl_extra_flags=repl_extra_flags,
table_acl_config=table_acl_config,
lameduck_period=lameduck_period, extra_args=args,
security_policy=security_policy)
def wait_for_vttablet_state(self, expected, timeout=60.0, port=None):
self.wait_for_vtocc_state(expected, timeout=timeout, port=port)
def wait_for_vtocc_state(self, expected, timeout=60.0, port=None):
while True:
v = utils.get_vars(port or self.port)
last_seen_state = '?'
      if v is None:
if self.proc.poll() is not None:
raise utils.TestError(
'vttablet died while test waiting for state %s' % expected)
logging.debug(
' vttablet %s not answering at /debug/vars, waiting...',
self.tablet_alias)
else:
if 'TabletStateName' not in v:
logging.debug(
' vttablet %s not exporting TabletStateName, waiting...',
self.tablet_alias)
else:
s = v['TabletStateName']
last_seen_state = s
if s != expected:
logging.debug(
' vttablet %s in state %s != %s', self.tablet_alias, s,
expected)
else:
break
timeout = utils.wait_step(
'waiting for state %s (last seen state: %s)' %
(expected, last_seen_state),
timeout, sleep_time=0.1)
def wait_for_mysqlctl_socket(self, timeout=30.0):
mysql_sock = os.path.join(self.tablet_dir, 'mysql.sock')
mysqlctl_sock = os.path.join(self.tablet_dir, 'mysqlctl.sock')
while True:
if os.path.exists(mysql_sock) and os.path.exists(mysqlctl_sock):
return
timeout = utils.wait_step(
'waiting for mysql and mysqlctl socket files: %s %s' %
(mysql_sock, mysqlctl_sock), timeout)
def _add_dbconfigs(self, args, repl_extra_flags=None):
if repl_extra_flags is None:
repl_extra_flags = {}
config = dict(self.default_db_config)
if self.keyspace:
config['app']['dbname'] = self.dbname
config['repl']['dbname'] = self.dbname
config['repl'].update(repl_extra_flags)
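    # Each (section, option) pair becomes a command-line flag; e.g. the default
    # 'app' section yields: -db-config-app-uname vt_app -db-config-app-charset utf8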
for key1 in config:
for key2 in config[key1]:
args.extend(['-db-config-' + key1 + '-' + key2, config[key1][key2]])
def get_status(self):
return utils.get_status(self.port)
def get_healthz(self):
return urllib2.urlopen('http://localhost:%d/healthz' % self.port).read()
def kill_vttablet(self, wait=True):
logging.debug('killing vttablet: %s, wait: %s', self.tablet_alias,
str(wait))
if self.proc is not None:
Tablet.tablets_running -= 1
if self.proc.poll() is None:
self.proc.terminate()
if wait:
self.proc.wait()
self.proc = None
def hard_kill_vttablet(self):
logging.debug('hard killing vttablet: %s', self.tablet_alias)
if self.proc is not None:
Tablet.tablets_running -= 1
if self.proc.poll() is None:
self.proc.kill()
self.proc.wait()
self.proc = None
def wait_for_binlog_server_state(self, expected, timeout=30.0):
while True:
v = utils.get_vars(self.port)
      if v is None:
if self.proc.poll() is not None:
raise utils.TestError(
'vttablet died while test waiting for binlog state %s' %
expected)
logging.debug(' vttablet not answering at /debug/vars, waiting...')
else:
if 'UpdateStreamState' not in v:
logging.debug(
              ' vttablet not exporting UpdateStreamState, waiting...')
else:
s = v['UpdateStreamState']
if s != expected:
logging.debug(" vttablet's binlog server in state %s != %s", s,
expected)
else:
break
timeout = utils.wait_step(
'waiting for binlog server state %s' % expected,
timeout, sleep_time=0.5)
logging.debug('tablet %s binlog service is in state %s',
self.tablet_alias, expected)
def wait_for_binlog_player_count(self, expected, timeout=30.0):
while True:
v = utils.get_vars(self.port)
      if v is None:
if self.proc.poll() is not None:
raise utils.TestError(
'vttablet died while test waiting for binlog count %s' %
expected)
logging.debug(' vttablet not answering at /debug/vars, waiting...')
else:
if 'BinlogPlayerMapSize' not in v:
logging.debug(
' vttablet not exporting BinlogPlayerMapSize, waiting...')
else:
s = v['BinlogPlayerMapSize']
if s != expected:
logging.debug(" vttablet's binlog player map has count %d != %d",
s, expected)
else:
break
timeout = utils.wait_step(
'waiting for binlog player count %d' % expected,
timeout, sleep_time=0.5)
logging.debug('tablet %s binlog player has %d players',
self.tablet_alias, expected)
@classmethod
def check_vttablet_count(klass):
if Tablet.tablets_running > 0:
raise utils.TestError('This test is not killing all its vttablets')
def execute(self, sql, bindvars=None, transaction_id=None, auto_log=True):
"""execute uses 'vtctl VtTabletExecute' to execute a command.
"""
args = [
'VtTabletExecute',
'-keyspace', self.keyspace,
'-shard', self.shard,
]
if bindvars:
args.extend(['-bind_variables', json.dumps(bindvars)])
if transaction_id:
args.extend(['-transaction_id', str(transaction_id)])
args.extend([self.tablet_alias, sql])
return utils.run_vtctl_json(args, auto_log=auto_log)
def begin(self, auto_log=True):
"""begin uses 'vtctl VtTabletBegin' to start a transaction.
"""
args = [
'VtTabletBegin',
'-keyspace', self.keyspace,
'-shard', self.shard,
self.tablet_alias,
]
result = utils.run_vtctl_json(args, auto_log=auto_log)
return result['transaction_id']
def commit(self, transaction_id, auto_log=True):
"""commit uses 'vtctl VtTabletCommit' to commit a transaction.
"""
args = [
'VtTabletCommit',
'-keyspace', self.keyspace,
'-shard', self.shard,
self.tablet_alias,
str(transaction_id),
]
return utils.run_vtctl(args, auto_log=auto_log)
def rollback(self, transaction_id, auto_log=True):
"""rollback uses 'vtctl VtTabletRollback' to rollback a transaction.
"""
args = [
'VtTabletRollback',
'-keyspace', self.keyspace,
'-shard', self.shard,
self.tablet_alias,
str(transaction_id),
]
return utils.run_vtctl(args, auto_log=auto_log)
def kill_tablets(tablets):
for t in tablets:
logging.debug('killing vttablet: %s', t.tablet_alias)
if t.proc is not None:
Tablet.tablets_running -= 1
t.proc.terminate()
for t in tablets:
if t.proc is not None:
t.proc.wait()
t.proc = None
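# A minimal usage sketch (illustration only) for the Tablet helper above. It
# assumes the test framework has already set up environment/utils and a topo
# server; the uid, keyspace and shard values are made up, and utils.wait_procs
# is assumed to be the usual process-waiting helper from the accompanying
# utils module.
def _example_tablet_lifecycle():
  t = Tablet(tablet_uid=62344)
  # bootstrap mysqld for this tablet and wait for it to come up
  utils.wait_procs([t.init_mysql()])
  # register the tablet in the topology and start vttablet as a serving replica
  t.init_tablet('replica', keyspace='test_keyspace', shard='0', start=True)
  try:
    # direct MySQL access through the dba user
    t.mquery('vt_test_keyspace', 'show tables')
  finally:
    t.kill_vttablet()
    utils.wait_procs([t.teardown_mysql()])
    t.remove_tree()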
|
|
from wq.build import wq
import click
import os
import json
import logging
from wq.build.commands.collect import readfiles
@wq.command()
@wq.pass_config
def optimize(config):
"""
(DEPRECATED) Use r.js to optimize JS and CSS. This command requires an
"optimize" section in your configuration file, which will be passed to
r.js for compilation. See http://requirejs.org/docs/optimization.html
for available options.
Note that r.js-based compilation is deprecated and will be removed in
wq.app 2.0. For full control over the compilation process, use
`wq start --with-npm` instead.
"""
try:
import requirejs
except ImportError:
raise NotInstalled('requirejs')
conf = config.get('optimize', None)
if not conf:
raise click.UsageError(
"optimize section not found in %s" % config.filename
)
# Defer to r.js for actual processing
click.echo("Optimizing with r.js...")
try:
requirejs.optimize(conf)
except requirejs.RJSException as e:
raise click.ClickException(e.args[0])
click.echo("Optimization complete")
@wq.command()
@wq.pass_config
def babel(config):
"""
    (DEPRECATED) Use babel.js to transpile ES6/2015+ sources into ES5-compatible
    JavaScript for older browsers. Note that wq babel is run after
wq optimize, on the compiled modules created by r.js. For more control
over the compilation process, use `wq start --with-npm` instead of
an r.js-based build.
Note that this command will be removed in wq.app 2.0 in favor of
`wq start --with-npm`.
"""
try:
from babeljs import transformer as babeljs
except ImportError:
raise NotInstalled('PyBabeljs')
rconf = config.get('optimize', None)
if not rconf:
raise click.UsageError(
"optimize section not found in %s" % config.filename
)
babel = config.get('babel', {})
files = []
if 'modules' in rconf and 'dir' in rconf:
base_url = rconf.get('baseUrl', '.')
for module in rconf['modules']:
path = module['name']
if path in rconf.get('paths', {}):
path = rconf['paths'][path]
path = os.path.join(rconf['dir'], base_url, path)
files.append(path + '.js')
for filename in files:
label = os.path.normpath(filename)
try:
with open(filename) as f:
content = f.read()
except OSError:
raise click.ClickException(
"Error loading %s - run wq optimize first?" % label
)
try:
print("Transforming %s with Babel..." % label)
output = babeljs.transform_string(content, **babel)
except babeljs.TransformError as e:
raise click.ClickException(e.args[0])
with open(filename, 'w') as f:
f.write(output)
@wq.command()
@click.option(
'--indir', type=click.Path(exists=True), default="scss",
help="Path to SCSS/SASS files"
)
@click.option(
'--outdir', type=click.Path(exists=True), default="css",
help="Path to CSS files"
)
def scss(**conf):
"""
(DEPRECATED) Render SCSS/SASS into CSS. The input folder will be searched
for *.scss files, which will be compiled to corresponding *.css files in
the output directory.
Note: This command will be removed in wq.app 2.0 in favor of
Material UI themes.
"""
try:
import scss as pyScss
except ImportError:
raise NotInstalled("pyScss")
compiler = pyScss.Scss(scss_opts={'compress': 0})
logging.getLogger("scss").addHandler(logging.StreamHandler())
def compile(path, source):
css = compiler.compile(source)
        with open(path, 'w') as outfile:
            outfile.write(css)
files = readfiles(conf['indir'], "scss")
pyScss.config.LOAD_PATHS = [
conf['indir'],
os.path.join(conf['indir'], 'lib'),
# FIXME: Why aren't these paths automatically picked up on Windows?
os.path.join(conf['indir'], 'lib', 'compass'),
os.path.join(conf['indir'], 'lib', 'compass', 'css3'),
]
for name, source in files.items():
if isinstance(source, dict):
continue
path = "%s/%s.css" % (conf['outdir'], name)
compile(path, source)
click.echo("%s compiled from %s/%s.scss" % (path, conf['indir'], name))
class path_or_dict:
def __init__(self, value):
self.value = value
if isinstance(value, dict):
self.is_dict = True
else:
self.is_dict = False
assert isinstance(value, str)
@wq.command()
@click.option('--template', help="Path to template")
@click.option('--partials', help="Path to partials",
type=path_or_dict)
@click.option('--context', help="Path to context (JSON or YAML)",
type=path_or_dict)
@click.option(
'--output', type=click.Path(), default="output.html",
help="Output filename"
)
def mustache(**conf):
"""
(DEPRECATED) Render mustache into HTML files. The template context can be
    provided via a nested object in wq.yml, or by pointing to a folder
containing JSON or YAML files. Similarly, the partials can be defined as a
nested object in wq.yml or by a folder path.
Example YAML configuration:
\b
mustache:
template: "<html><body>{{>header}}{{>footer}}</body></html>"
partials:
header: "<h3>{{title}}</h3>"
footer: "<a href='mailto:{{email}}'>{{email}}</a>"
context:
title: "Example"
email: "email@example.com"
output: index.html
Example command line configuration:
wq mustache --template tmpl.html --partials partials/ --context conf/
Note: This command will be removed in wq.app 2.0 in favor of JSX.
"""
try:
import pystache
except ImportError:
raise NotInstalled('pystache')
template = conf['template']
if template is None:
return
if os.path.exists(template) or template.endswith('.html'):
try:
template = open(template).read()
except IOError as e:
raise click.FileError(template, hint=str(e))
context_arg = conf["context"] or path_or_dict({})
if context_arg.is_dict:
context = context_arg.value
else:
if context_arg.value.startswith('{'):
context = json.loads(context_arg.value)
else:
path = context_arg.value
context = readfiles(path, "yaml", "yml")
context.update(**readfiles(path, "json"))
partials_arg = conf['partials'] or path_or_dict({})
if partials_arg.is_dict:
partials = partials_arg.value
else:
partials = readfiles(partials_arg.value, "html")
click.echo("Generating %s from %s" % (conf['output'], conf['template']))
renderer = pystache.Renderer(partials=partials)
html = renderer.render(template, context)
    with open(conf['output'], 'w') as f:
        f.write(html)
class NotInstalled(click.ClickException):
def __init__(self, dep):
super().__init__(
"Could not find {}. Install compat dependencies via:"
"\n pip install wq.app[compat]".format(dep)
)
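# A minimal sketch (illustrative values only) of the "optimize" section that the
# optimize and babel commands above read from the project configuration. The
# module and directory names are hypothetical; the keys mirror the ones these
# commands access: baseUrl, dir, paths and modules.
_EXAMPLE_OPTIMIZE_CONF = {
    'baseUrl': 'js',                # root used to resolve module names
    'dir': 'js/compiled',           # output directory written by r.js
    'paths': {'app': 'myapp/app'},  # optional name -> path overrides
    'modules': [{'name': 'app'}],   # modules to optimize (and later transpile)
}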
|
|
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <patrick@netflix.com>
"""
import stat
### VERSION ###
__version__ = '0.9.3'
### FLASK ###
from flask import Flask
from flask import render_template
from flask_sqlalchemy import SQLAlchemy
import os
app = Flask(__name__, static_url_path='/static')
# If SECURITY_MONKEY_SETTINGS is set, then use that.
# Otherwise, use env-config/config.py
if os.environ.get('SECURITY_MONKEY_SETTINGS'):
app.config.from_envvar('SECURITY_MONKEY_SETTINGS')
else:
# find env-config/config.py
from os.path import dirname, join, isfile
path = dirname(dirname(__file__))
path = join(path, 'env-config')
path = join(path, 'config.py')
if isfile(path):
app.config.from_pyfile(path)
else:
print('PLEASE SET A CONFIG FILE WITH SECURITY_MONKEY_SETTINGS OR PUT ONE AT env-config/config.py')
exit(-1)
"""
Govcloud works in the following way.
If the AWS_GOVCLOUD configuration is set to True:
the arn prefix is set to: arn:aws-us-gov:...
and the default region is set to: us-gov-west-1
else:
the arn prefix is set to: arn:aws:...
and the default region is set to: us-east-1
"""
ARN_PARTITION = 'aws'
AWS_DEFAULT_REGION = 'us-east-1'
if app.config.get("AWS_GOVCLOUD"):
ARN_PARTITION = 'aws-us-gov'
AWS_DEFAULT_REGION = 'us-gov-west-1'
ARN_PREFIX = 'arn:' + ARN_PARTITION
db = SQLAlchemy(app)
# For ELB and/or Eureka
@app.route('/healthcheck')
def healthcheck():
return 'ok'
### Flask Mail ###
from flask_mail import Mail
mail = Mail(app=app)
from security_monkey.common.utils import send_email as common_send_email
### Flask-WTF CSRF Protection ###
from flask_wtf.csrf import CSRFProtect, CSRFError
csrf = CSRFProtect()
csrf.init_app(app)
@app.errorhandler(CSRFError)
def csrf_error(reason):
app.logger.debug("CSRF ERROR: {}".format(reason))
return render_template('csrf_error.json', reason=reason), 400
from security_monkey.datastore import User, Role
### Flask-Security ###
from flask_security.core import Security
from flask_security.datastore import SQLAlchemyUserDatastore
# Flask-Security Custom Form
# Implementing DoD Compliance for Password
from flask_security.forms import ChangePasswordForm, PasswordField, Required, validators, ValidatorMixin
from security_monkey.datastore import UserPasswordHistory
from flask_security import current_user
from flask_security.utils import verify_password
class Regexp(ValidatorMixin, validators.regexp):
pass
password_dod_compliance_message = """
Password must be at least 12 characters and must have
- two lowercase letters
- two uppercase letters
- two numbers
- two special characters
(e.g., 3mP@gD2!c2nyt)
"""
app.config['SECURITY_CHANGEABLE'] = True
app.config['SECURITY_MSG_PASSWORD_MISSING_DOD_COMPLIANCE'] = (password_dod_compliance_message, 'error')
app.config['SECURITY_MSG_PASSWORD_PREVENT_PASSWORD_REUSE'] = (
'You must not reuse any of your previous 24 passwords', 'error')
password_required = Required(message='PASSWORD_NOT_PROVIDED')
password_dod_compliance = Regexp(
    regex=r'^(?=(?:.*[A-Z]){2,})(?=(?:.*[a-z]){2,})(?=(?:.*\d){2,})(?=(?:.*[!@#$%^&*()\-_=+{};:,<.>]){2,})(?!.*(.)\1{2})([A-Za-z0-9!@#$%^&*()\-_=+{};:,<.>]{12,30})',
message='PASSWORD_MISSING_DOD_COMPLIANCE')
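# Illustration only (not used by the application): a self-contained way to test
# a candidate password against the DoD policy described above. This assumes the
# underlying wtforms Regexp validator exposes its compiled pattern as `.regex`;
# the sample password in the docstring is made up.
def _check_dod_password(candidate):
    """Return True if `candidate` satisfies the DoD regex, e.g. '3mP@gD2!c2nyt'."""
    return password_dod_compliance.regex.match(candidate) is not None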
# Extend Forms
class ExtendedChangePasswordForm(ChangePasswordForm):
new_password = PasswordField('Password', validators=[password_required, password_dod_compliance])
def validate(self):
if not super(ExtendedChangePasswordForm, self).validate():
return False
hashpw_recs = UserPasswordHistory.query.filter(UserPasswordHistory.user_id == current_user.id).order_by(
UserPasswordHistory.changed_at.desc()).limit(24).all()
for rec in hashpw_recs:
if verify_password(self.new_password.data, rec.password):
self.password.errors.append(app.config['SECURITY_MSG_PASSWORD_PREVENT_PASSWORD_REUSE'][0])
return False
return True
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(app, user_datastore, change_password_form=ExtendedChangePasswordForm)
@security.send_mail_task
def send_email(msg):
"""
Overrides the Flask-Security/Flask-Mail integration
to send emails out via boto and ses.
"""
common_send_email(subject=msg.subject, recipients=msg.recipients, html=msg.html)
from auth.modules import RBAC
rbac = RBAC(app=app)
from flask_security.views import login, logout, register, confirm_email, reset_password, forgot_password, \
change_password, send_confirmation
rbac.exempt(login)
rbac.exempt(logout)
rbac.exempt(register)
rbac.exempt(confirm_email)
rbac.exempt(send_confirmation)
rbac.exempt(reset_password)
rbac.exempt(forgot_password)
rbac.exempt(change_password)
rbac.exempt(healthcheck)
### Sentry definition ###
sentry = None
### FLASK API ###
from flask_restful import Api
api = Api(app, decorators=[csrf.exempt])
from security_monkey.views.account import AccountGetPutDelete
from security_monkey.views.account import AccountPostList
api.add_resource(AccountGetPutDelete, '/api/1/accounts/<int:account_id>')
api.add_resource(AccountPostList, '/api/1/accounts')
from security_monkey.views.distinct import Distinct
api.add_resource(Distinct, '/api/1/distinct/<string:key_id>')
from security_monkey.views.ignore_list import IgnoreListGetPutDelete
from security_monkey.views.ignore_list import IgnorelistListPost
api.add_resource(IgnoreListGetPutDelete, '/api/1/ignorelistentries/<int:item_id>')
api.add_resource(IgnorelistListPost, '/api/1/ignorelistentries')
from security_monkey.views.item import ItemList
from security_monkey.views.item import ItemGet
api.add_resource(ItemList, '/api/1/items')
api.add_resource(ItemGet, '/api/1/items/<int:item_id>')
from security_monkey.views.item_comment import ItemCommentPost
from security_monkey.views.item_comment import ItemCommentDelete
from security_monkey.views.item_comment import ItemCommentGet
api.add_resource(ItemCommentPost, '/api/1/items/<int:item_id>/comments')
api.add_resource(ItemCommentDelete, '/api/1/items/<int:item_id>/comments/<int:comment_id>')
api.add_resource(ItemCommentGet, '/api/1/items/<int:item_id>/comments/<int:comment_id>')
from security_monkey.views.item_issue import ItemAuditGet
from security_monkey.views.item_issue import ItemAuditList
api.add_resource(ItemAuditList, '/api/1/issues')
api.add_resource(ItemAuditGet, '/api/1/issues/<int:audit_id>')
from security_monkey.views.item_issue_justification import JustifyPostDelete
api.add_resource(JustifyPostDelete, '/api/1/issues/<int:audit_id>/justification')
from security_monkey.views.logout import Logout
api.add_resource(Logout, '/api/1/logout')
from security_monkey.views.revision import RevisionList
from security_monkey.views.revision import RevisionGet
api.add_resource(RevisionList, '/api/1/revisions')
api.add_resource(RevisionGet, '/api/1/revisions/<int:revision_id>')
from security_monkey.views.revision_comment import RevisionCommentPost
from security_monkey.views.revision_comment import RevisionCommentGet
from security_monkey.views.revision_comment import RevisionCommentDelete
api.add_resource(RevisionCommentPost, '/api/1/revisions/<int:revision_id>/comments')
api.add_resource(RevisionCommentGet, '/api/1/revisions/<int:revision_id>/comments/<int:comment_id>')
api.add_resource(RevisionCommentDelete, '/api/1/revisions/<int:revision_id>/comments/<int:comment_id>')
from security_monkey.views.user_settings import UserSettings
api.add_resource(UserSettings, '/api/1/settings')
from security_monkey.views.users import UserList, Roles, UserDetail
api.add_resource(UserList, '/api/1/users')
api.add_resource(UserDetail, '/api/1/users/<int:user_id>')
api.add_resource(Roles, '/api/1/roles')
from security_monkey.views.whitelist import WhitelistGetPutDelete
from security_monkey.views.whitelist import WhitelistListPost
api.add_resource(WhitelistGetPutDelete, '/api/1/whitelistcidrs/<int:item_id>')
api.add_resource(WhitelistListPost, '/api/1/whitelistcidrs')
from security_monkey.views.auditor_settings import AuditorSettingsGet
from security_monkey.views.auditor_settings import AuditorSettingsPut
api.add_resource(AuditorSettingsGet, '/api/1/auditorsettings')
api.add_resource(AuditorSettingsPut, '/api/1/auditorsettings/<int:as_id>')
from security_monkey.views.account_config import AccountConfigGet
api.add_resource(AccountConfigGet, '/api/1/account_config/<string:account_fields>')
from security_monkey.views.audit_scores import AuditScoresGet
from security_monkey.views.audit_scores import AuditScoreGetPutDelete
api.add_resource(AuditScoresGet, '/api/1/auditscores')
api.add_resource(AuditScoreGetPutDelete, '/api/1/auditscores/<int:id>')
from security_monkey.views.tech_methods import TechMethodsGet
api.add_resource(TechMethodsGet, '/api/1/techmethods/<string:tech_ids>')
from security_monkey.views.account_pattern_audit_score import AccountPatternAuditScoreGet
from security_monkey.views.account_pattern_audit_score import AccountPatternAuditScorePost
from security_monkey.views.account_pattern_audit_score import AccountPatternAuditScoreGetPutDelete
api.add_resource(AccountPatternAuditScoreGet, '/api/1/auditscores/<int:auditscores_id>/accountpatternauditscores')
api.add_resource(AccountPatternAuditScorePost, '/api/1/accountpatternauditscores')
api.add_resource(AccountPatternAuditScoreGetPutDelete, '/api/1/accountpatternauditscores/<int:id>')
from security_monkey.views.account_bulk_update import AccountListPut
api.add_resource(AccountListPut, '/api/1/accounts_bulk/batch')
from security_monkey.views.watcher_config import WatcherConfigGetList
from security_monkey.views.watcher_config import WatcherConfigPut
api.add_resource(WatcherConfigGetList, '/api/1/watcher_config')
api.add_resource(WatcherConfigPut, '/api/1/watcher_config/<int:id>')
# Start: Inherit from webui-threatalert-branding by Pritam
# Get a List of POA&M Items
from security_monkey.views.poam import POAMItemList
api.add_resource(POAMItemList, '/api/1/poamitems')
# Vulnerabilities By Technology Chart Data
from security_monkey.views.charts import VulnerabilitiesByTech
api.add_resource(VulnerabilitiesByTech, '/api/1/vulnbytech')
# Vulnerabilities By Severity Chart Data
from security_monkey.views.charts import VulnerabilitiesBySeverity
api.add_resource(VulnerabilitiesBySeverity, '/api/1/vulnbyseverity')
# GuardDutyEvent Data -> WorldMap Data API
from security_monkey.views.guard_duty_event import GuardDutyEventMapPointsList
api.add_resource(GuardDutyEventMapPointsList, '/api/1/worldmapguarddutydata')
# GuardDutyEvent Data -> Top 10 Countries List
from security_monkey.views.guard_duty_event import GuardDutyEventTop10Countries
api.add_resource(GuardDutyEventTop10Countries, '/api/1/top10countryguarddutydata')
# Get Issue count over Time for Time Series Graph
from security_monkey.views.charts import IssuesCountByMonth
api.add_resource(IssuesCountByMonth, '/api/1/issuescountbymonth')
# End: Inherit from webui-threatalert-branding by Pritam
# Start: Inherit from Develop Branch
from security_monkey.views.guard_duty_event import GuardDutyEventService
api.add_resource(GuardDutyEventService, '/api/1/gde')
# End: Inherit from Develop Branch
# Start: Anchore-Engine Configuration Management API
from security_monkey.views.anchoreconfig import AnchoreGetPutDelete
from security_monkey.views.anchoreconfig import AnchorePostList
api.add_resource(AnchoreGetPutDelete, '/api/1/anchoreconfig/<int:anchore_id>')
api.add_resource(AnchorePostList, '/api/1/anchoreconfig')
# End: Anchore-Engine Configuration Management API
## Jira Sync
from security_monkey.jirasync import JiraSync
jirasync_file = os.environ.get('SECURITY_MONKEY_JIRA_SYNC')
if jirasync_file:
try:
jirasync = JiraSync(jirasync_file)
except Exception as e:
app.logger.error(repr(e))
jirasync = None
else:
jirasync = None
# Blueprints
from security_monkey.sso.views import mod as sso_bp
from security_monkey.export import export_blueprint
BLUEPRINTS = [sso_bp, export_blueprint]
for bp in BLUEPRINTS:
app.register_blueprint(bp, url_prefix="/api/1")
# Logging
import sys
from logging import Formatter, handlers
from logging.handlers import RotatingFileHandler
from logging import StreamHandler
from logging.config import dictConfig
from logging import DEBUG
# Use this handler to have the log rotator give newly minted logfiles group-write permission
class GroupWriteRotatingFileHandler(handlers.RotatingFileHandler):
def doRollover(self):
"""
Override base class method to make the new log file group writable.
"""
# Rotate the file first.
handlers.RotatingFileHandler.doRollover(self)
# Add group write to the current permissions.
try:
currMode = os.stat(self.baseFilename).st_mode
os.chmod(self.baseFilename, currMode | stat.S_IWGRP)
except OSError:
pass
handlers.GroupWriteRotatingFileHandler = GroupWriteRotatingFileHandler
def setup_logging():
"""
Logging in security_monkey can be configured in two ways.
1) Vintage: Set LOG_FILE and LOG_LEVEL in your config.
LOG_FILE will default to stderr if no value is supplied.
LOG_LEVEL will default to DEBUG if no value is supplied.
LOG_LEVEL = "DEBUG"
LOG_FILE = "/var/log/security_monkey/securitymonkey.log"
2) Set LOG_CFG in your config to a PEP-0391 compatible
logging configuration.
LOG_CFG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]'
}
},
'handlers': {
'file': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'DEBUG',
'formatter': 'standard',
'filename': '/var/log/security_monkey/securitymonkey.log',
'maxBytes': 10485760,
'backupCount': 100,
'encoding': 'utf8'
},
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'standard',
'stream': 'ext://sys.stdout'
}
},
'loggers': {
'security_monkey': {
'handlers': ['file', 'console'],
'level': 'DEBUG'
},
'apscheduler': {
'handlers': ['file', 'console'],
'level': 'INFO'
}
}
}
"""
if not app.debug:
if app.config.get('LOG_CFG'):
# initialize the Flask logger (removes all handlers)
_ = app.logger
dictConfig(app.config.get('LOG_CFG'))
else:
            # compatibility with previous config settings
# Should have LOG_FILE and LOG_LEVEL set
if app.config.get('LOG_FILE') is not None:
handler = RotatingFileHandler(app.config.get('LOG_FILE'), maxBytes=10000000, backupCount=100)
else:
handler = StreamHandler(stream=sys.stderr)
handler.setFormatter(
Formatter('%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]')
)
app.logger.setLevel(app.config.get('LOG_LEVEL', DEBUG))
app.logger.addHandler(handler)
setup_logging()
### Sentry ###
try:
from raven.contrib.flask import Sentry
sentry = Sentry()
sentry.init_app(app)
except ImportError:
app.logger.debug('Sentry not installed, skipping...')
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User land driver for the spacemouse device.
This program iterates through all the USB devices, printing out their VID
and PIDs. If it finds a 3DConnexion device, it will say so! And if it finds a
SpaceNavigator, it will connect and wait for you to start moving the mouse or
pressing buttons.
# VID and PID from https://3dconnexion.com/faq/27
"""
import binascii
import logging
import multiprocessing as mp
import queue
import time
from typing import Any, List, Optional, Tuple
# Try importing usb1. But don't crash if the library is not installed.
try:
# pylint:disable=g-import-not-at-top
import usb1 # type: ignore
_have_usb = True
except ModuleNotFoundError:
_have_usb = False
_VID_3DCONNEXION_OLD = 0x046D # From when 3DConnexion was a Logitech division
_VID_3DCONNEXION = 0x256F
_ALL_THE_3DCONNEXION_THINGS = [
{
"pid": 0xC62E,
"name": "SpaceMouse Wireless Receiver (Cabled)"
},
{
"pid": 0xC62F,
"name": "SpaceMouse Wireless Receiver"
},
{
"pid": 0xC631,
"name": "SpaceMouse Pro Wireless Receiver (Cabled)"
},
{
"pid": 0xC632,
"name": "SpaceMouse Pro Wireless Receiver"
},
{
"pid": 0xC633,
"name": "SpaceMouse Enterprise"
},
{
"pid": 0xC635,
"name": "SpaceMouse Compact"
},
{
"pid": 0xC650,
"name": "CadMouse"
},
{
"pid": 0xC651,
"name": "CadMouse Wireless"
},
{
"pid": 0xC652,
"name": "Universal Receiver"
},
{
"pid": 0xC654,
"name": "CadMouse Pro Wireless"
},
{
"pid": 0xC657,
"name": "CadMouse Pro Wireless Left"
},
]
_ALL_THE_LOGITECH_THINGS = [
{
"pid": 0xC603,
"name": "SpaceMouse Plus USB"
},
{
"pid": 0xC605,
"name": "CadMan"
},
{
"pid": 0xC606,
"name": "SpaceMouse Classic USB"
},
{
"pid": 0xC623,
"name": "SpaceBall 5000 USB"
},
{
"pid": 0xC623,
"name": "SpaceTraveler"
},
{
"pid": 0xC625,
"name": "SpacePilot"
},
{
"pid": 0xC626,
"name": "SpaceNavigator",
"leds": [0x08, 0x4B]
},
{
"pid": 0xC627,
"name": "SpaceExplorer"
},
{
"pid": 0xC628,
"name": "SpaceNavigator For Notebooks"
},
{
"pid": 0xC629,
"name": "SpacePilot Pro"
},
{
"pid": 0xC62B,
"name": "SpaceMouse Pro"
},
]
def _show_info(handle: "usb1.USBDeviceHandle") -> None:
manufacturer = handle.getManufacturer()
name = handle.getProduct()
sn = handle.getSerialNumber()
print(f" {manufacturer} {name}: S/N {sn}")
def _show_device_info(device: "usb1.USBDevice") -> None:
"""Print the information of a USB device.
Args:
device: the USB device.
"""
print(f" == {device}")
print(f" Num configurations: {device.getNumConfigurations()}")
for c in device.iterConfiguations():
print(f" Configuration value {c.getConfigurationValue()}")
print(f" Number of interfaces {c.getNumInterfaces()}")
for i in c:
print(f" Number of settings: {i.getNumSettings()}")
for s in i:
print(f" Setting number {s.getNumber()}")
print(f" Number of endpoints {s.getNumEndpoints()}")
print(f" Class/Subclass: {s.getClass()}/{s.getSubClass()}")
hid_data = s.getExtra()
print(f" HID num extra descriptors: {len(hid_data)}")
# The data is a standard USB descriptor:
# bLength: 09
# bDescriptorType: 21 (HID)
# bcdHID: 1.11
# bCountryCode: 0
# bDescriptorType[0]: 22 (HID)
# wDescriptorLength[0]: 00D9 (217)
print(f" Data: {binascii.hexlify(hid_data[0]).decode('utf-8')}")
def _to_int_16(data: bytes) -> int:
# Little endian
x = (data[1] << 8) | data[0]
if x > 0x7FFF:
return x - 0x10000
return x
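# For example (little-endian, two's complement):
#   _to_int_16(b"\x00\x80") == -32768
#   _to_int_16(b"\xff\x7f") == 32767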
RSPNAV_EVENT_ANY = 0
RSPNAV_EVENT_MOTION = 1
RSPNAV_EVENT_BUTTON = 2
_rspnav_qs: List["queue.Queue[RSpnavEvent]"] = []
_rspnav_processes = []
class RSpnavEvent(object):
"""A spacemouse event."""
def __init__(self, device: int, ev_type: int) -> None:
"""A spacemouse event.
Args:
device: the device id.
ev_type: the event type, RSPNAV_EVENT_MOTION or RSPNAV_EVENT_BUTTON.
"""
self.device: int = device
self.ev_type: int = ev_type
class RSpnavButtonEvent(RSpnavEvent):
"""Button event."""
def __init__(self, device: int, bnum: int, press: bool) -> None:
"""Returns a new RspnavButtonEvent.
Args:
device: the USB device.
bnum: the button number (0, 1, ...).
press: True if the button was pressed, False if released.
"""
super().__init__(device, RSPNAV_EVENT_BUTTON)
self.bnum: int = bnum
self.press: bool = press
class RSpnavMotionEvent(RSpnavEvent):
"""A motion event."""
def __init__(self, device: int, translation: Tuple[int, int, int],
rotation: Tuple[int, int, int], period: Any) -> None:
"""A motion event.
Args:
device: the USB device.
translation: 3-tuple of translation force ints (Y, Z, X)
rotation: 3-tuple of rotation torque ints (Ry, Rz, Rx)
period: is unknown, and is present only for compatibility with spnav.
"""
super().__init__(device, RSPNAV_EVENT_MOTION)
# +Y is towards the mouse cable, and +Z is up.
# Counterintuitively, translation and rotation are X Y Z.
self.translation: Tuple[int, int, int] = (translation[0], translation[1],
translation[2])
self.rotation: Tuple[int, int,
int] = (rotation[0], rotation[1], rotation[2])
self.period: Any = period
def _rspnav_hexdump(bs: bytes) -> str:
hex_string = str(binascii.hexlify(bs), "ascii")
return " ".join(hex_string[i:i + 2] for i in range(0, len(hex_string), 2))
def _scan_for_spnav() -> List[int]:
"""Scans USB devices.
Returns:
The list of ids for the discovered USB devices.
"""
found: List[int] = []
if not _have_usb:
return found
with usb1.USBContext() as context:
devices = context.getDeviceList()
i = 0
for device in devices:
vid = device.getVendorID()
pid = device.getProductID()
print(f"VID {vid:04X} PID {pid:04X}")
if vid == _VID_3DCONNEXION_OLD:
for d in _ALL_THE_LOGITECH_THINGS:
if d["pid"] == pid:
name = d["name"]
print(f" it's a {name}")
if vid == _VID_3DCONNEXION:
for d in _ALL_THE_3DCONNEXION_THINGS:
if d["pid"] == pid:
name = d["name"]
print(f" it's a {name}")
# Make sure cabled takes precedence over universal receiver so that
# the universal receiver can be kept plugged in while using the cabled
# version.
if vid == _VID_3DCONNEXION and pid == 0xC635:
# SpaceMouse Compact
found.append(i)
elif vid == _VID_3DCONNEXION and pid == 0xC62E:
# SpaceMouse Wireless (Cabled)
found.append(i)
elif vid == _VID_3DCONNEXION and pid == 0xC652:
# Universal Receiver
found.append(i)
elif vid == _VID_3DCONNEXION_OLD and pid == 0xC626:
# SpaceNavigator
found.append(i)
i += 1
return found
def _interpret_space_navigator(device_num: int, handle: "usb1.USBDeviceHandle",
event_queue: "queue.Queue[RSpnavEvent]") -> None:
"""Processing space navigator events.
This functions loops on the provided USB device handle, reads and enqueues
every event.
Args:
device_num: the USB device id.
handle: the handle to the USB device.
event_queue: spacemouse event queue.
"""
last_buttons = 0
last_translate = None
last_rotate = None
num_buttons = 2
while True:
data = handle.interruptRead(1, 16) # endpoint and length, no timeout
logging.debug(_rspnav_hexdump(data))
if data[0] == 0x01: # translate
x = _to_int_16(data[1:])
y = -_to_int_16(data[3:])
z = -_to_int_16(data[5:])
# print(f"X {x} Y {y} Z {z}")
last_translate = (x, y, z)
if last_rotate is not None:
event_queue.put(
RSpnavMotionEvent(device_num, last_translate, last_rotate, 0))
elif data[0] == 0x02: # rotate
x = _to_int_16(data[1:])
y = -_to_int_16(data[3:])
z = -_to_int_16(data[5:])
# print(f"RX {x} RY {y} RZ {z}")
last_rotate = (x, y, z)
if last_translate is not None:
event_queue.put(
RSpnavMotionEvent(device_num, last_translate, last_rotate, 0))
elif data[0] == 0x03: # buttons
press_mask = _to_int_16(data[1:])
# print(f"Button mask {press_mask:02X}")
for i in range(num_buttons):
bit = press_mask & (1 << i)
if bit != (last_buttons & (1 << i)):
event_queue.put(RSpnavButtonEvent(device_num, i, bit != 0))
last_buttons = press_mask
else:
print(f" unknown event: {_rspnav_hexdump(data)}")
def _interpret_space_mouse_wireless(
device_num: int, handle: "usb1.USBDeviceHandle", endpoint: int,
event_queue: "queue.Queue[RSpnavEvent]") -> None:
"""Processing space mouse wireless events.
This functions loops on the provided USB device handle, reads and enqueues
every event.
Args:
device_num: the USB device id.
handle: the handle to the USB device.
endpoint: the end point number.
event_queue: spacemouse event queue.
"""
last_buttons = 0
last_translate = None
last_rotate = None
num_buttons = 2
while True:
data = handle.interruptRead(endpoint, 16) # endpoint and length, no timeout
logging.debug(_rspnav_hexdump(data))
if data[0] == 0x01: # translate + rotate
x = _to_int_16(data[1:])
y = -_to_int_16(data[3:])
z = -_to_int_16(data[5:])
rx = _to_int_16(data[7:])
ry = -_to_int_16(data[9:])
rz = -_to_int_16(data[11:])
# print(f"X {x} Y {y} Z {z} RX {rx} RY {ry} RZ {rz}")
last_translate = (x, y, z)
last_rotate = (rx, ry, rz)
event_queue.put(
RSpnavMotionEvent(device_num, last_translate, last_rotate, 0))
elif data[0] == 0x03: # buttons
press_mask = _to_int_16(data[1:])
# print(f"Button mask {press_mask:02X}")
for i in range(num_buttons):
bit = press_mask & (1 << i)
if bit != (last_buttons & (1 << i)):
event_queue.put(RSpnavButtonEvent(device_num, i, bit != 0))
last_buttons = press_mask
else:
print(f" unknown event: {_rspnav_hexdump(data)}")
def _start_rspnav(device_num: int, device_index: int,
event_queue: "queue.Queue[RSpnavEvent]") -> None:
"""Starting a space mouse device.
Args:
device_num: the device number.
device_index: the device index.
event_queue: event queue.
"""
print(f"Starting for device {device_num} index {device_index}")
with usb1.USBContext() as context:
devices = context.getDeviceList()
device = devices[device_index]
vid = device.getVendorID()
pid = device.getProductID()
handle = device.open()
if handle is None:
print("ERROR: failed to open device")
return
_show_device_info(device)
# _show_info(handle)
if handle.kernelDriverActive(0):
print("Detaching kernel driver")
handle.detachKernelDriver(0)
with handle.claimInterface(0):
# We don't actually use this data. I kept it here because it could be
# useful to look at the descriptor at some point.
_ = handle.controlRead(
usb1.RECIPIENT_INTERFACE,
usb1.REQUEST_GET_DESCRIPTOR,
usb1.DT_REPORT << 8,
0, # index
300, # max length
5000, # timeout (msec)
)
# Here's the real place we parse the data.
# Make sure cabled takes precedence over universal receiver so that
# the universal receiver can be kept plugged in while using the cabled
# version.
if vid == _VID_3DCONNEXION and pid == 0xC635:
# SpaceMouse Compact
_interpret_space_navigator(device_num, handle, event_queue)
      elif vid == _VID_3DCONNEXION and pid == 0xC62E:
        # SpaceMouse Wireless (Cabled)
_interpret_space_mouse_wireless(device_num, handle, 3, event_queue)
elif vid == _VID_3DCONNEXION and pid == 0xC652:
# Universal Receiver
_interpret_space_mouse_wireless(device_num, handle, 1, event_queue)
elif vid == _VID_3DCONNEXION_OLD and pid == 0xC626:
# SpaceNavigator
_interpret_space_navigator(device_num, handle, event_queue)
else:
print(f"ERROR: can't parse data for VID {vid:04X} PID {pid:04X}")
return
def rspnav_open() -> None:
"""Scans for all Space Mice and starts up a queue for events.
Raises:
RSpnavConnectionException if connection cannot be established
"""
global _rspnav_processes
global _rspnav_qs
devices = _scan_for_spnav()
print(f"Found {len(devices)} Space Navigator devices")
for i in range(len(devices)):
_rspnav_qs.append(mp.Queue())
_rspnav_processes.append(
mp.Process(target=_start_rspnav, args=(
i,
devices[i],
_rspnav_qs[i],
)))
_rspnav_processes[i].start()
def rspnav_howmany() -> int:
"""Returns the number of Space Mice found."""
return len(_rspnav_qs)
# Keep the index of which queue we last polled, so that we can
# ensure round-robin when checking all the queues.
_poll_index = 0
def _get_poll_index() -> int:
global _poll_index
global _rspnav_qs
i = _poll_index
_poll_index = (_poll_index + 1) % len(_rspnav_qs)
return i
def rspnav_wait_event() -> RSpnavEvent:
"""Blocks waiting for a Space Mouse event.
Returns:
    An instance of RSpnavMotionEvent or RSpnavButtonEvent.
"""
e = None
while e is None:
e = rspnav_poll_event()
if e is None:
time.sleep(0.001)
return e
def rspnav_poll_event() -> Optional[RSpnavEvent]:
"""Polls for Space Navigator events.
Returns:
None if no waiting events, otherwise an instance of
    RSpnavMotionEvent or RSpnavButtonEvent.
"""
global _rspnav_qs
for _ in range(len(_rspnav_qs)):
try:
return _rspnav_qs[_get_poll_index()].get_nowait()
except queue.Empty:
pass
return None
def rspnav_remove_events(unused_ev_type: int) -> None:
"""Removes pending Space Navigator events from all queues.
Unlike the original spnav library, this call ignores the event type, instead
removing ALL events.
Args:
unused_ev_type: event type
"""
global _rspnav_qs
for q in _rspnav_qs:
try:
while q.get_nowait() is not None:
pass
except queue.Empty:
continue
def rspnav_kill() -> None:
global _rspnav_processes
for rspnav_process in _rspnav_processes:
rspnav_process.kill()
_rspnav_processes = []
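# A minimal usage sketch for the rspnav_* API above: run this module directly to
# dump a handful of events from any connected Space Mouse. The 10-event limit is
# arbitrary.
if __name__ == "__main__":
  rspnav_open()
  if rspnav_howmany() == 0:
    print("No Space Mouse found")
  else:
    for _ in range(10):
      ev = rspnav_wait_event()
      if isinstance(ev, RSpnavMotionEvent):
        print(f"motion: translation={ev.translation} rotation={ev.rotation}")
      elif isinstance(ev, RSpnavButtonEvent):
        print(f"button {ev.bnum} {'pressed' if ev.press else 'released'}")
    rspnav_kill()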
|
|
from __future__ import division
from builtins import map
from builtins import zip
from builtins import range
import ROOT
from collections import defaultdict
from operator import itemgetter
from PyAnalysisTools.base import InvalidInputError, _logger
from PyAnalysisTools.PlottingUtils import Formatting as fm
from PyAnalysisTools.PlottingUtils import HistTools as ht
from PyAnalysisTools.PlottingUtils.PlotConfig import get_draw_option_as_root_str, get_style_setters_and_values
from PyAnalysisTools.PlottingUtils.PlotConfig import get_default_plot_config
from PyAnalysisTools.base.ProcessConfig import find_process_config
import PyAnalysisTools.PlottingUtils.PlotableObject as PO
def retrieve_new_canvas(name, title='', size_x=800, size_y=600):
"""
Retrieve a new TCanvas
:param name: canvas name
:param title: canvas title
:param size_x: x size
:param size_y: y size
:return: empty canvas
"""
canvas = ROOT.TCanvas(name, title, size_x, size_y)
canvas.SetRightMargin(0.07)
ROOT.SetOwnership(canvas, False)
return canvas
def plot_obj(obj, plot_config, **kwargs):
"""
    Base wrapper to plot an object. Based on the object type, the appropriate plotting function is called.
:param obj: object to plot
:param plot_config: plot configuration
:param kwargs: additional arguments
:return: canvas with plotted object
"""
if isinstance(obj, ROOT.TH2):
return plot_2d_hist(obj, plot_config, **kwargs)
if isinstance(obj, ROOT.TH1):
return plot_hist(obj, plot_config, **kwargs)
if isinstance(obj, ROOT.TEfficiency) or isinstance(obj, ROOT.TGraph):
return plot_graph(obj, plot_config, **kwargs)
def project_hist(tree, hist, var_name, cut_string='', weight=None, is_data=False):
if cut_string is None:
cut_string = ''
if weight:
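        # Weight strings may combine plain factors with 'MC:'- or 'DATA:'-prefixed
        # factors (illustrative example: 'weight_total * MC:weight_pileup'); the
        # prefixed factors are applied only to MC or only to data, respectively.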
if 'MC:' in weight:
weight = weight.split('*')
mc_weights = [w for w in weight if 'MC:' in w]
for mc_w in mc_weights:
weight.remove(mc_w)
weight = '*'.join(weight)
if not is_data:
mc_weights = [mc_w.replace('MC:', '') for mc_w in mc_weights]
for mc_w in mc_weights:
weight += '* {:s}'.format(mc_w)
if 'DATA:' in weight:
weight = weight.split('*')
data_weights = [w for w in weight if 'DATA:' in w]
for data_w in data_weights:
weight.remove(data_w)
weight = '*'.join(weight)
if is_data:
data_weights = [data_w.replace('DATA:', '') for data_w in data_weights]
for data_w in data_weights:
weight += '* {:s}'.format(data_w)
if cut_string == '':
cut_string = weight
else:
cut_string = '%s * (%s)' % (weight, cut_string)
n_selected_events = tree.Project(hist.GetName(), var_name, cut_string)
_logger.debug("Selected %i events from tree %s for distribution %s and cut %s." % (n_selected_events,
tree.GetName(),
var_name,
cut_string))
    if n_selected_events == -1:
        _logger.error("Unable to project {:s} from tree {:s} with cut {:s}".format(var_name, tree.GetName(),
                                                                                   cut_string))
        raise RuntimeError("TTree::Project failed")
    if n_selected_events != hist.GetEntries():
        _logger.error("Number of selected events does not match histogram entries. Probably the FileHandle has been "
                      "initialised after the histogram definition was received.")
        raise RuntimeError("Inconsistency in TTree::Project")
    return hist
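# Illustration of how project_hist resolves the 'MC:'/'DATA:' weight prefixes.
# The weight and cut strings below are hypothetical, shown only to make the
# string handling above easier to follow:
#   weight = 'event_weight*MC:pileup_weight', cut_string = 'pt > 25'
#   - for simulation (is_data=False) the 'MC:' prefix is stripped and the
#     factor kept, giving the projection cut 'event_weight* pileup_weight * (pt > 25)'
#   - for data (is_data=True) the 'MC:'-tagged factor is dropped entirely,
#     giving 'event_weight * (pt > 25)'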
def plot_objects(objects, plot_config, process_configs=None):
"""
Base interface to plot multiple objects
:param objects: objects to be plotted, e.g. TH1, TEfficiency
:type objects: list or dict
:param plot_config: plot configuration
:type plot_config: PlotConfig
:param process_configs: physics processes configuration containing e.g. colors and plot styles
:type process_configs: ProcessConfig
:return: canvas with plotted objects
:rtype: TCanvas
"""
if len(objects) == 0:
_logger.warning("Requested plot objects with zero objects")
return
if isinstance(objects, dict):
first_obj = list(objects.values())[0]
elif isinstance(objects, list):
first_obj = objects[0]
if isinstance(first_obj, PO.PlotableObject):
if isinstance(first_obj.plot_object, ROOT.TH1):
return plot_histograms(objects, plot_config, process_configs)
# if isinstance(first_obj, ROOT.TEfficiency) or isinstance(first_obj, ROOT.TGraph):
# return plot_graphs(objects, plot_config)
if isinstance(first_obj, ROOT.TH1):
return plot_histograms(objects, plot_config, process_configs)
if isinstance(first_obj, ROOT.TEfficiency) or isinstance(first_obj, ROOT.TGraph):
return plot_graphs(objects, plot_config)
_logger.error("Unsupported type {:s} passed for plot_objects".format(type(list(objects.values())[0])))
def add_object_to_canvas(canvas, obj, plot_config, process_config=None, index=None):
"""
Add an object to a canvas
:param canvas: canvas to which the object should be added
:type canvas: ROOT.TCanvas
:param obj: plot object
:type obj: ROOT.TH1, ROOT.TGraph, etc
:param plot_config: plot configuration defining outline
:type plot_config: PlotConfig
:param process_config: specific process configuration (optional)
:type process_config: ProcessConfig
:param index: index of plot object in list of all plot objects to set specific styles like colors attached to
plot_config (optional)
:type index: int
:return: nothing
:rtype: None
"""
if isinstance(obj, ROOT.TH1):
add_histogram_to_canvas(canvas, obj, plot_config, process_config, index)
if isinstance(obj, ROOT.TGraphAsymmErrors) or isinstance(obj, ROOT.TEfficiency) or isinstance(obj, ROOT.TGraph):
add_graph_to_canvas(canvas, obj, plot_config)
def plot_hist(hist, plot_config, **kwargs):
kwargs.setdefault("y_max", plot_config.yscale * hist.GetMaximum())
# kwargs.setdefault("y_max", 1.1 * hist[0].GetMaximum()) - sm dev
kwargs.setdefault("index", None)
canvas = retrieve_new_canvas(plot_config.name, '', plot_config.canvas_size_x, plot_config.canvas_size_y)
canvas.cd()
ROOT.SetOwnership(hist, False)
process_config = None
draw_option = get_draw_option_as_root_str(plot_config, process_config)
hist = format_obj(hist, plot_config)
hist.Draw(draw_option)
hist.SetMarkerSize(0.7)
fm.apply_style(hist, plot_config, process_config, index=kwargs["index"])
if plot_config.ymin is not None:
fm.set_minimum_y(hist, plot_config.ymin)
if plot_config.ymax is not None:
fm.set_maximum_y(hist, plot_config.ymax)
if plot_config.logy:
hist.SetMaximum(hist.GetMaximum() * plot_config.yscale_log)
if plot_config.ymin:
hist.SetMinimum(max(0.1, plot_config.ymin))
if hist.GetMinimum() == 0.:
hist.SetMinimum(0.9)
if plot_config.normalise:
# hist.SetMinimum(0.000001)
hist.SetMinimum(0.)
fm.set_minimum_y(hist, plot_config.ymin)
canvas.SetLogy()
if plot_config.logx:
canvas.SetLogx()
ht.set_axis_labels(hist, plot_config)
canvas.Update()
return canvas
def plot_2d_hist(hist, plot_config, **kwargs):
canvas = retrieve_new_canvas(plot_config.name, plot_config.title, plot_config.canvas_size_x,
plot_config.canvas_size_y)
canvas.cd()
hist = format_obj(hist, plot_config)
ROOT.SetOwnership(hist, False)
draw_option = plot_config.draw_option
if draw_option is None:
_logger.warning("No draw option provided for TH2 for pc: {:s}. "
"Fall back to default: COLZ".format(plot_config.name))
draw_option = 'COLZ'
hist.Draw(draw_option)
if plot_config.logx:
canvas.SetLogx()
if plot_config.logy:
canvas.SetLogy()
if plot_config.logz:
canvas.SetLogz()
if plot_config.style is not None:
hist.SetMarkerStyle(plot_config.style)
canvas.SetRightMargin(0.2)
canvas.Modified()
canvas.Update()
return canvas
# todo: remove
# def fetch_process_config_old(process, process_config):
# if process is None or process_config is None:
# return None
# if process not in process_config:
# _logger.warning("Could not find process %s in process config" % process)
# return None
# return process_config[process]
def format_obj(obj, plot_config):
if isinstance(obj, ROOT.TH1):
return format_hist(obj, plot_config)
if isinstance(obj, ROOT.TGraphAsymmErrors) or isinstance(obj, ROOT.TGraph) or isinstance(obj, ROOT.TMultiGraph):
return format_hist(obj, plot_config)
if isinstance(obj, ROOT.TEfficiency):
return format_tefficiency(obj, plot_config)
    _logger.warning('Could not find format implementation for {:s}'.format(str(type(obj))))
return obj
def get_title_from_plot_config(plot_config):
xtitle = None
if plot_config.xtitle is not None:
xtitle = plot_config.xtitle
if hasattr(plot_config, "unit"):
xtitle += " [" + plot_config.unit + "]"
y_title = "Entries"
if plot_config.ytitle is not None:
y_title = plot_config.ytitle
return xtitle, y_title
def format_tefficiency(obj, plot_config):
xtitle, ytitle = get_title_from_plot_config(plot_config)
if xtitle is None:
xtitle = ""
obj.SetTitle(";{:s};{:s}".format(xtitle, ytitle))
if plot_config.xmin is not None and plot_config.xmax is not None:
ROOT.gPad.Update()
obj.GetPaintedGraph().GetXaxis().SetRangeUser(plot_config.xmin, plot_config.xmax)
obj.GetPaintedGraph().Set(0)
return obj
def format_hist(hist, plot_config):
if plot_config is None:
return hist
xtitle, ytitle = get_title_from_plot_config(plot_config)
if xtitle:
fm.set_title_x(hist, xtitle)
if plot_config.xtitle_offset is not None:
fm.set_title_x_offset(hist, plot_config.xtitle_offset)
if plot_config.xtitle_size is not None:
fm.set_title_x_size(hist, plot_config.xtitle_size)
if hasattr(plot_config, "unit"):
ytitle += " / %.1f %s" % (hist.GetXaxis().GetBinWidth(0), plot_config.unit)
fm.set_title_y(hist, ytitle)
if plot_config.ytitle_offset is not None:
fm.set_title_y_offset(hist, plot_config.ytitle_offset)
if plot_config.ytitle_size is not None:
fm.set_title_y_size(hist, plot_config.ytitle_size)
if isinstance(hist, ROOT.TH2):
if plot_config.ztitle is not None:
hist.GetZaxis().SetTitle(plot_config.ztitle)
if plot_config.ztitle_offset is not None:
fm.set_title_z_offset(hist, plot_config.ztitle_offset)
if plot_config.ztitle_size is not None:
fm.set_title_z_size(hist, plot_config.ztitle_size)
if hasattr(plot_config, "rebinX") and hasattr(plot_config.rebinY):
hist = ht.rebin2D(hist, plot_config.rebinX, plot_config.rebinY)
if hasattr(plot_config, "zmin") and hasattr(plot_config, "zmax"):
fm.set_range_z(hist, plot_config.zmin, plot_config.zmax)
if plot_config.normalise:
ht.normalise(hist, plot_config.normalise_range, plot_config.norm_scale)
ymax = plot_config.yscale * hist.GetMaximum()
if plot_config.ymax is not None:
plot_config.ymax = max(plot_config.ymax, ymax)
else:
plot_config.ymax = ymax
if plot_config.rebin and not isinstance(hist, ROOT.THStack) and not plot_config.ignore_rebin:
hist = ht.rebin(hist, plot_config.rebin, plot_config.disable_bin_width_division)
ymax = plot_config.yscale*hist.GetMaximum()
if plot_config.ymax is not None:
plot_config.ymax = min(plot_config.ymax, ymax)
else:
plot_config.ymax = ymax
ROOT.SetOwnership(hist, False)
return hist
def make_graph(name, x_vals, y_vals):
"""
Create a TGraph based on x and y-values
:param name: name of graph
:type name: str
:param x_vals: x-values
:type x_vals: list
:param y_vals: y-values
    :type y_vals: list
:return: graph
:rtype: ROOT.TGraph
"""
g = ROOT.TGraph(len(x_vals))
g.SetName(name)
for i, x in enumerate(x_vals):
g.SetPoint(i, x, y_vals[i])
ROOT.SetOwnership(g, False)
return g
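# Minimal usage sketch for make_graph; the name and the values below are
# invented purely for illustration.
def _example_make_graph():
    x_vals = [10., 20., 30., 40.]
    y_vals = [0.62, 0.71, 0.80, 0.85]
    return make_graph('g_example_eff_vs_pt', x_vals, y_vals)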
def plot_graphs(graphs, plot_config):
"""
Plot function for graphs
:param graphs: graphs to plot
:type graphs: list or dict of TGraphs
:param plot_config: plot configuration
:type plot_config: PlotConfig
:return: canvas with drawn graphs
:rtype: ROOT.TCanvas
"""
if isinstance(graphs, dict):
graphs = list(graphs.values())
canvas = plot_graph(graphs[0], plot_config)
for index, graph in enumerate(graphs[1:]):
add_graph_to_canvas(canvas, graph, plot_config, index+1)
return canvas
def add_signal_to_canvas(signal, canvas, plot_config, process_configs):
"""
Overlay a signal histogram to an existing canvas
:param signal: tuple of signal name, histogram
:type signal: list
:param canvas: existing canvas to which signal should be added
:type canvas: ROOT.TCanvas
:param plot_config: plot configuration
:type plot_config: PlotConfig
:param process_configs: process configuration
:type process_configs: ProcessConfig
:return: nothing
:rtype: None
"""
add_histogram_to_canvas(canvas, signal[1], plot_config, process_configs[signal[0]])
def plot_histograms(hists, plot_config, process_configs=None, switchOff=False):
"""
Plot histograms in canvas and apply styles according to plot and process configuration
:param hists: histograms to plots
:type hists: list or dict
:param plot_config: plot configuration
:type plot_config: PlotConfig
:param process_configs: process configuration
:type process_configs: ProcessConfig
:param switchOff: switch off style application (optional)
:type switchOff: bool
:return: canvas with plotted histograms
:rtype: ROOT.TCanvas
"""
if plot_config is None:
plot_config = get_default_plot_config(hists[0])
canvas = retrieve_new_canvas(plot_config.name, '', plot_config.canvas_size_x, plot_config.canvas_size_y)
canvas.cd()
is_first = True
max_y = None
if isinstance(hists, dict):
hist_defs = list(hists.items())
# import re
# hist_defs.sort(key=lambda s: int(re.findall('\d+', s[0])[0]))
elif isinstance(hists, list):
hist_defs = list(zip([None] * len(hists), hists))
if isinstance(hist_defs[0][1], PO.PlotableObject):
if not switchOff and not isinstance(hist_defs[0][1].plot_object, ROOT.TH2):
max_y = 1.4 * max([item[1].plot_object.GetMaximum() for item in hist_defs])
elif isinstance(hist_defs[0][1].plot_object, ROOT.TH2):
max_y = plot_config.ymax
# if plot_config.ordering is not None:
# sorted(hist_defs, key=lambda k: plot_config.ordering.index(k[0]))
for process, hist in hist_defs:
hist.plot_object = format_hist(hist.plot_object, plot_config)
process_config = find_process_config(process, process_configs)
if not (plot_config.is_set_to_value("ignore_style", True)) and \
plot_config.is_set_to_value("ignore_style", False):
setattr(plot_config, 'draw', hist.draw_option)
draw_option = get_draw_option_as_root_str(plot_config, process_config)
else:
draw_option = "hist"
if not is_first and "same" not in draw_option:
draw_option += "sames"
hist.plot_object.Draw(draw_option)
fm.apply_style_plotableObject(hist)
if is_first:
if isinstance(hist.plot_object, ROOT.TH2) and draw_option.lower() == "colz":
canvas.SetRightMargin(0.15)
fm.set_minimum_y(hist.plot_object, plot_config.ymin)
if plot_config.logz:
canvas.SetLogz()
if switchOff:
fm.set_maximum_y(hist.plot_object, plot_config.ymax)
else:
fm.set_maximum_y(hist.plot_object, max_y)
fm.set_minimum_y(hist.plot_object, plot_config.ymin)
if plot_config.xmin and not plot_config.xmax:
fm.set_minimum(hist.plot_object, plot_config.xmin, "x")
elif plot_config.xmin and plot_config.xmax:
fm.set_range(hist.plot_object, plot_config.xmin, plot_config.xmax, "x")
if plot_config.logx:
canvas.SetLogx()
# format_hist(hist.plot_object, plot_config)
if not isinstance(hist.plot_object, ROOT.TH2):
if plot_config.ymax:
hist.plot_object.SetMaximum(plot_config.ymax)
else:
hist.plot_object.SetMaximum(hist.plot_object.GetMaximum() * 1.2)
if plot_config.logy:
hist.plot_object.SetMaximum(hist.plot_object.GetMaximum() * 100.)
if plot_config.ymin > 0.:
hist.plot_object.SetMinimum(plot_config.ymin)
# if hasattr(plot_config, "ymin"):
# hist.plot_object.SetMinimum(max(0.1, plot_config.ymin))
else:
hist.plot_object.SetMinimum(0.9)
if hist.plot_object.GetMinimum() == 0.:
hist.plot_object.SetMinimum(0.9)
canvas.SetLogy()
canvas.Update()
is_first = False
if plot_config.normalise:
hist_defs[0][1].plot_object.SetMaximum(plot_config.ymax)
canvas.Update()
return canvas
if not switchOff and plot_config.ymax is None:
max_y = 1.4 * max([item[1].GetMaximum() for item in hist_defs])
if plot_config.ordering is not None:
hist_defs = sorted(hist_defs, key=lambda k: plot_config.ordering.index(k[0]))
for process, hist in hist_defs:
index = list(map(itemgetter(1), hist_defs)).index(hist)
hist = format_hist(hist, plot_config)
try:
process_config = find_process_config(process, process_configs)
except AttributeError:
process_config = None
if not (plot_config.is_set_to_value("ignore_style", True)) and \
plot_config.is_set_to_value("ignore_style", False):
draw_option = get_draw_option_as_root_str(plot_config, process_config)
else:
draw_option = "hist"
if not is_first and "same" not in draw_option:
draw_option += "sames"
hist.Draw(draw_option)
fm.apply_style(hist, plot_config, process_config, index=index)
if is_first:
if isinstance(hist, ROOT.TH2) and draw_option.lower() == "colz":
canvas.SetRightMargin(0.15)
format_hist(hist, plot_config)
if plot_config.ymax:
fm.set_maximum_y(hist, plot_config.ymax)
else:
fm.set_maximum_y(hist, max([h.GetMaximum() for _, h in hist_defs]) * 1.2)
if plot_config.ymin:
fm.set_minimum_y(hist, plot_config.ymin)
if plot_config.logy:
if not plot_config.normalise:
hist.SetMaximum(hist.GetMaximum() * 100.)
if plot_config.ymin > 0.:
hist.SetMinimum(plot_config.ymin)
else:
hist.SetMinimum(0.9)
if hist.GetMinimum() == 0.:
hist.SetMinimum(0.9)
canvas.SetLogy()
canvas.Update()
is_first = False
if plot_config.normalise:
hist_defs[0][1].SetMaximum(plot_config.ymax)
canvas.Update()
return canvas
def add_fit_to_canvas(canvas, fit_result, pdf=None, frame=None):
canvas.cd()
if frame:
pdf.paramOn(frame, ROOT.RooFit.Layout(0.50, 0.9, 0.8))
chi2 = frame.chiSquare("model", "data", 3)
txt = ROOT.TText(2, 100, "#chi^{2} = " + "{:.2f}".format(chi2))
ROOT.SetOwnership(txt, False)
txt.SetTextSize(0.04)
txt.SetTextColor(ROOT.kRed)
frame.addObject(txt)
else:
for i in range(len(fit_result.floatParsFinal()) - 1):
var = fit_result.floatParsFinal()[i]
var_string = "{:s} = {:.2f} \\pm {:.2f}".format(var.GetName(), var.getValV(), var.getError())
fm.add_text_to_canvas(canvas, var_string, pos={'x': 0.15, 'y': 0.9 - i * 0.05}, size=0.04, color=None)
canvas.Update()
def apply_style(obj, style_setter, style_attr, color):
"""
Apply defined styles to plottable object
:param obj: plot object to be styled
:type obj: TGraph, TH1, ...
    :param style_setter: attributes to be set, e.g. Fill, Marker, Line
    :type style_setter: list of str
    :param style_attr: attribute value, either a single value or a dict mapping style setter to value
    :type style_attr: int or dict
    :param color: color for attribute
    :type color: int
:return: None
:rtype: None
"""
if style_attr is not None:
if isinstance(style_attr, dict):
for ss, attr in style_attr.items():
getattr(obj, "Set" + ss + "Style")(attr)
else:
for ss in style_setter:
getattr(obj, "Set" + ss + "Style")(style_attr)
if color is not None:
for ss in style_setter:
getattr(obj, "Set" + ss + "Color")(color)
def add_histogram_to_canvas(canvas, hist, plot_config, process_config=None, index=None):
canvas.cd()
draw_option = get_draw_option_as_root_str(plot_config, process_config)
hist = format_obj(hist, plot_config)
apply_style(hist, *get_style_setters_and_values(plot_config, process_config, index))
if "same" not in draw_option:
draw_option += "sames"
hist.Draw(draw_option)
canvas.Update()
def plot_graph(graph, plot_config, **kwargs):
"""
Plot a TGraph object
:param graph: object to be plotted
:type graph: TGraph
:param plot_config: plot configuration defining style
:type plot_config: PlotConfig
:param kwargs: additional arguments like canvas name and title
:type kwargs:
:return: canvas containing plotted and formatted TGraph
:rtype: TCanvas
"""
kwargs.setdefault('index', 0)
kwargs.setdefault('canvas_name', plot_config.name)
kwargs.setdefault('canvas_title', '')
canvas = retrieve_new_canvas(kwargs['canvas_name'], kwargs['canvas_title'], plot_config.canvas_size_x,
plot_config.canvas_size_y)
canvas.cd()
draw_option = 'a' + get_draw_option_as_root_str(plot_config)
if plot_config.ymax is not None:
fm.set_range_y(graph, plot_config.ymin, plot_config.ymax)
graph.Draw(draw_option)
graph = format_obj(graph, plot_config)
apply_style(graph, *get_style_setters_and_values(plot_config, index=kwargs['index']))
ROOT.SetOwnership(graph, False)
if plot_config.logy:
canvas.SetLogy()
canvas.Update()
return canvas
def add_graph_to_canvas(canvas, graph, plot_config, index=None):
"""
Add a TGraph or associated object to existing canvas
    :param canvas: canvas containing at least one graph
:type canvas: ROOT.TCanvas
:param graph: graph object to be added
    :type graph: TGraph (or any class inheriting from it)
:param plot_config: plot style configuration
:type plot_config: PlotConfig
:param index: index identifier for ith graph object in canvas for style choice (optional)
:type index: int
:return: nothing
:rtype: None
"""
canvas.cd()
draw_option = get_draw_option_as_root_str(plot_config)
if "same" not in draw_option:
draw_option += "same"
draw_option = draw_option.lstrip('a')
apply_style(graph, *get_style_setters_and_values(plot_config, index=index))
graph.Draw(draw_option)
ROOT.SetOwnership(graph, False)
canvas.Update()
def apply_ordering(hist_defs, ordering):
for process, _ in hist_defs:
if process not in ordering:
ordering.append(process)
return sorted(hist_defs, key=lambda k: ordering.index(k[0]))
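# Small sketch of what apply_ordering does; the process names are invented.
# Processes listed in `ordering` keep that order, anything not listed is
# appended to `ordering` and therefore ends up at the back.
def _example_apply_ordering():
    hist_defs = [('ttbar', None), ('data', None), ('wjets', None)]  # placeholder histograms
    ordered = apply_ordering(hist_defs, ['data', 'ttbar'])
    # ordered is now [('data', None), ('ttbar', None), ('wjets', None)]
    return ordered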
def plot_stack(hists, plot_config, **kwargs):
    """
    Plot a THStack of the provided histograms
    :param hists: histograms to be stacked
    :type hists: list or dict
    :param plot_config: plot configuration
    :type plot_config: PlotConfig
    :param kwargs: additional arguments, e.g. process_configs
    :return: canvas with drawn stack
    :rtype: ROOT.TCanvas
    """
kwargs.setdefault("process_configs", None)
process_configs = kwargs["process_configs"]
canvas = retrieve_new_canvas(plot_config.name, '', plot_config.canvas_size_x, plot_config.canvas_size_y)
canvas.Clear()
canvas.cd()
if isinstance(hists, dict) or isinstance(hists, defaultdict):
hist_defs = list(hists.items())
elif isinstance(hists, list):
hist_defs = list(zip([None] * len(hists), hists))
else:
_logger.error('Cannot deal with provided input {:s}'.format(hists.__str__()))
raise InvalidInputError()
stack = ROOT.THStack('hs', '')
ROOT.SetOwnership(stack, False)
data = None
if plot_config.ordering is not None:
hist_defs = apply_ordering(hist_defs, plot_config.ordering)
for index, histograms in enumerate(hist_defs):
process, hist = histograms
try:
if "data" in process.lower():
if data is None:
data = [(process, hist)]
else:
data.append((process, hist))
continue
except AttributeError:
pass
hist = format_hist(hist, plot_config)
process_config = find_process_config(process, process_configs)
draw_option = get_draw_option_as_root_str(plot_config, process_config)
fm.apply_style(hist, plot_config, process_config, index)
_logger.debug('Add hist {:s} to stack with draw option {:s}'.format(hist.GetName(), draw_option))
stack.Add(hist, draw_option)
stack.Draw()
canvas.Update()
format_hist(stack, plot_config)
min_y, max_y = fm.get_min_max_y(canvas, plot_config)
fm.set_range(stack, min_y, max_y)
if data is not None:
        for data_campaign in data:
            add_data_to_stack(canvas, data_campaign[1], plot_config)
if plot_config.logy:
canvas.SetLogy()
if plot_config.logx:
try:
xmin, xmax = plot_config.xmin, plot_config.xmax
except AttributeError:
xmin, xmax = max(0.0001, stack.GetXaxis().GetXmin()), stack.GetXaxis().GetXmax()
fm.set_range_x(stack, xmin, xmax)
canvas.SetLogx()
return canvas
def add_data_to_stack(canvas, data, plot_config=None, blind=None):
if blind:
blind_data(data, blind)
canvas.cd()
ROOT.SetOwnership(data, False)
data = format_hist(data, plot_config)
data.Draw("Esames")
def blind_data(data, blind):
"""
Apply blinding to a given distribution above blind cut value
:param data: histogram
:type data: TH1X
:param blind: cut value above which distribution should be blinded
:type blind: float
:return: nothing
:rtype: None
"""
for b in range(data.GetNbinsX() + 1):
if data.GetBinCenter(b) < blind:
continue
else:
data.SetBinContent(b, 0.)
data.SetBinError(b, 0.)
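# Illustration of blind_data with a hypothetical blinding cut at 100.: bins
# whose centre is at or above the cut are emptied, bins below are untouched.
def _example_blind_data():
    hist = ROOT.TH1F('h_blind_example', '', 4, 0., 200.)  # bin centres at 25, 75, 125, 175
    for b in range(1, hist.GetNbinsX() + 1):
        hist.SetBinContent(b, 10.)
    blind_data(hist, 100.)
    # bins 3 and 4 (centres 125 and 175) are now zero
    return hist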
def add_signal_to_stack(canvas, signal, signal_strength=1., overlay=False, stack=None):
if overlay:
if not stack:
raise InvalidInputError("Requested overlay of signal, but no stack provided.")
clone = None
for h in stack.GetHists():
if clone is None:
clone = h.Clone()
else:
clone.Add(h)
canvas.cd()
for process in signal:
process.SetLineColor(ROOT.kRed)
process.Scale(signal_strength)
if overlay:
process.Add(clone)
process.Draw("histsames")
def add_ratio_to_canvas(canvas, ratio, y_min=None, y_max=None, y_title=None, name=None, title=''):
_logger.error('add_ratio_to_canvas moved to RatioPlotter. Please update your code')
return None # RatioPlotter.add_ratio_to_canvas(canvas, ratio, y_min, y_max, y_title, name, title)
|
|
#!/usr/bin/env python
"""
analyze.py
Analyze web access logs generated by Confluence.
Reference: https://confluence.atlassian.com/display/CONFKB/How+to+Enable+User+Access+Logging
Conversion Pattern used in Log4j is explained here: http://www.tutorialspoint.com/log4j/log4j_patternlayout.htm
Here is configuration that we have defined in WEB-INF/classes/log4j.properties
log4j.appender.accesslog=org.apache.log4j.DailyRollingFileAppender
log4j.appender.accesslog.Threshold=DEBUG
log4j.appender.accesslog.File=${catalina.home}/logs/atlassian-confluence-access.log
log4j.appender.accesslog.DatePattern='.'yyyy-MM-dd
log4j.appender.accesslog.layout=com.atlassian.confluence.util.PatternLayoutWithStackTrace
log4j.appender.accesslog.layout.ConversionPattern=%d %p [%t] [%c{4}] %M %m%n
log4j.category.com.atlassian.confluence.util.AccessLogFilter=INFO, accesslog
log4j.additivity.com.atlassian.confluence.util.AccessLogFilter=false
Also we will be logging all urls. This is configured in <confluence-4.3.7-dev-std>/confluence/WEB-INF/web.xml
<!-- Filter for access logging -->
<filter-mapping>
<filter-name>AccessLogFilter</filter-name>
<url-pattern>/*</url-pattern>
</filter-mapping>
"""
from logentry import LogEntry
from wikiurl import WikiUrl
import sys
import psycopg2
if len(sys.argv) < 2:
print "usage: analyze.py logfile"
exit()
else:
filename = sys.argv[1]
lineNmbr = 0
totalHits = 0
pageViews = 0
spaceHits = {}
pageHits = {}
urlcount = {}
ipcount = {}
firstTimestamp = lastTimestamp = None
f = open(filename, "r")
csvfile = open("log.csv", "w")
ignorelist = []
with open("ignore.list") as ignorelistfile:
ignorelist = [i.strip() for i in ignorelistfile.readlines()]
for line in f:
lineNmbr += 1
print lineNmbr, "\r",
# Skip lines that may show AccessLogFilter initialized message. We need to get better at doing this.
# 2013-02-15 17:01:18,140 INFO [main] [atlassian.confluence.util.AccessLogFilter] init AccessLogFilter initialized. Format is: <user> <url> <starting memory free (kb)> +- <difference in free mem (kb)> <query time (ms)> <remote address>
if "init AccessLogFilter initialized" in line:
continue
#print line
logentry = LogEntry(line)
#print logentry
if firstTimestamp == None:
firstTimestamp = logentry.getTimestamp()
lastTimestamp = logentry.getTimestamp()
# Skip empty url because these URLs get redirected to Dashboard URL like homepage.action. In our case it's HOME wiki.
if (logentry.relativeurl == ''):
continue
wikiurl = WikiUrl(logentry.relativeurl, logentry.userid, logentry.datetimestamp, logentry.ipaddress)
#print wikiurl
    # Here are a few entries that we don't want to log into our database as they are duplicates.
# Example: a) http://<wiki base url>/display
# b) http://<wiki base url>/display/HOME
# c) http://<wiki base url>/homepage.action
# d) http://<wiki base url>/pages/editpage.action, createpage.action and so on
# Because
# Entries like a) are redirected to dashboard and hence they appear again in access log.
# Entries like b) /display/HOME, display/HR are redirected to /display/HOME/home and display/HR/home and hence they appear again in access log.
# Entries like c) homepage.action is re-directed to wiki page as set by Confluence Administrator.
# Entries like d) editpage.action, createpage.action, createblogpost.action - these all actions happen in two steps.
# editpage.action is followed by doeditpage.action, createpage.action is followed by docreatepage.action and so on.
    # For example: when a user wants to edit a page she clicks on the edit page link and editpage.action is referenced. Later, when she is done editing and clicks "Save/Submit",
    # another action called doeditpage.action gets called. So we would like to track this second action, as it confirms the user really edited the page,
    # versus the first action (editpage.action), which only shows that she intends to edit the page but has not done it yet!
    # By skipping these entries we avoid counting pages twice for a single user action.
if((wikiurl.actionName in ignorelist) or (wikiurl.actionType == "display" and wikiurl.spacekey != "" and wikiurl.title == "") or (wikiurl.actionType == "display" and wikiurl.spacekey == "") ):
continue
# Write data to csv file.
csvfile.write(wikiurl.userid + '#'+ wikiurl.ipaddress + '#' + wikiurl.actionType + '#' + wikiurl.userAction + '#' + wikiurl.userSubAction + '#' + wikiurl.unknownActionUrl + '#' + wikiurl.spacekey + '#' + wikiurl.title + '#' + wikiurl.pageId + '#' + wikiurl.queryString + '#' + wikiurl.datetimestamp + '#' + wikiurl.actionName + '\n')
# perform hit accounting
totalHits += 1
if wikiurl.actionType == "display" or (wikiurl.actionType == "pages" and wikiurl.userAction == "view" and wikiurl.userSubAction == "page"):
pageViews += 1
if(wikiurl.actionType == "display"):
page = wikiurl.spacekey + "/" + wikiurl.title
else:
            page = wikiurl.pageId
if not page in pageHits:
pageHits[page] = 1
else:
pageHits[page] += 1
if not wikiurl.spacekey in spaceHits:
spaceHits[wikiurl.spacekey] = 1
else:
spaceHits[wikiurl.spacekey] += 1
f.close()
csvfile.close()
# Time to import this data into the database.
# First let's export the existing data.
con = None
fout = None
fin = None
try:
con = psycopg2.connect(database='<dbname>', user='<username>', password='<password>', host='localhost')
#Export existing data from table into file.
cur = con.cursor()
fout = open('logentries-export.sql','w')
cur.copy_to(fout, 'logentries', sep="#")
fin = open('log.csv', 'r')
cur.copy_from(fin, 'logentries', sep="#")
    con.commit()
except psycopg2.DatabaseError, e:
print 'Error %s' % e
sys.exit(1)
except IOError, e:
print 'Error %s' % e
sys.exit(1)
finally:
if con:
con.close()
if fin:
fin.close()
if fout:
fout.close()
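# Note: the 'logentries' table is assumed to have one text column per
# '#'-separated field written to log.csv above, in the same order, i.e.
# userid, ipaddress, actiontype, useraction, usersubaction, unknownactionurl,
# spacekey, title, pageid, querystring, datetimestamp, actionname.
# This is only a sketch of a compatible schema; the real table definition is
# maintained outside this script.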
# print report to stdout ____________________________________________________
"""
print
print "For time period", firstTimestamp.strftime("%a, %b %d, %Y %H:%M:%S")
print " to", lastTimestamp.strftime("%a, %b %d, %Y %H:%M:%S")
print
print "total hits ==>", totalHits
print
print "page views ==>", pageViews
print
print "space hit summary"
spaceHitsKeys = spaceHits.keys()
spaceHitsKeys.sort()
print "-------------------"
print "| hits | space "
print "+--------+---------"
for spaceHitsKey in spaceHitsKeys:
print '| %6d | %s' % (spaceHits.get(spaceHitsKey), spaceHitsKey)
print
print "page hit summary"
pageHitsKeys = pageHits.keys()
pageHitsKeys.sort()
print "-------------------"
print "| hits | page "
print "+--------+---------"
for pageHitsKey in pageHitsKeys:
print '| %6d | %s' % (pageHits.get(pageHitsKey), pageHitsKey)
"""
# eof analyze.py ------------------------------------------------------------
|
|
"""
Python job scheduling for humans.
An in-process scheduler for periodic jobs that uses the builder pattern
for configuration. Schedule lets you run Python functions (or any other
callable) periodically at pre-determined intervals using a simple,
human-friendly syntax.
Inspired by Addam Wiggins' article "Rethinking Cron" [1] and the
"clockwork" Ruby module [2][3].
Features:
- A simple to use API for scheduling jobs.
- Very lightweight and no external dependencies.
- Excellent test coverage.
- Works with Python 2.7 and 3.3
Usage:
>>> import schedule
>>> import time
>>> def job(message='stuff'):
>>> print("I'm working on:", message)
>>> schedule.every(10).minutes.do(job)
>>> schedule.every().hour.do(job, message='things')
>>> schedule.every().day.at("10:30").do(job)
>>> while True:
>>> schedule.run_pending()
>>> time.sleep(1)
[1] http://adam.heroku.com/past/2010/4/13/rethinking_cron/
[2] https://github.com/tomykaira/clockwork
[3] http://adam.heroku.com/past/2010/6/30/replace_cron_with_clockwork/
"""
import datetime
import functools
import logging
import random
import time
from dateutil import parser
from dateutil.tz import tzlocal
from .tz import tz_offsets
logger = logging.getLogger('schedule')
class Scheduler(object):
def __init__(self):
self.jobs = []
def run_pending(self):
"""Run all jobs that are scheduled to run.
Please note that it is *intended behavior that tick() does not
run missed jobs*. For example, if you've registered a job that
should run every minute and you only call tick() in one hour
increments then your job won't be run 60 times in between but
only once.
"""
runnable_jobs = (job for job in self.jobs if job.should_run)
for job in sorted(runnable_jobs):
job.run()
def run_all(self, delay_seconds=0):
"""Run all jobs regardless if they are scheduled to run or not.
A delay of `delay` seconds is added between each job. This helps
distribute system load generated by the jobs more evenly
over time."""
logger.info('Running *all* %i jobs with %is delay inbetween',
len(self.jobs), delay_seconds)
for job in self.jobs:
job.run()
time.sleep(delay_seconds)
def clear(self):
"""Deletes all scheduled jobs."""
del self.jobs[:]
def every(self, interval=1):
"""Schedule a new periodic job."""
job = Job(interval)
self.jobs.append(job)
return job
def on(self, *days):
"""Schedule a new job to run on specific weekdays.
See the docstring for `Job.on()`.
"""
job = self.every()
job.unit = 'days'
return job.on(*days)
@property
def next_run(self):
"""Datetime when the next job should run."""
if not self.jobs:
return None
return min(self.jobs).next_run
@property
def idle_seconds(self):
"""Number of seconds until `next_run`."""
return (self.next_run - datetime.datetime.now(tzlocal())
).total_seconds()
class Job(object):
"""A periodic job as used by `Scheduler`."""
WEEKDAYS = {'sunday': 0, 'monday': 1, 'tuesday': 2, 'wednesday': 3,
'thursday': 4, 'friday': 5, 'saturday': 6}
def __init__(self, interval):
self.interval = interval # pause interval * unit between runs
        self.job_func = None  # the function (callable) to run
self.unit = None # time units, e.g. 'minutes', 'hours', ...
self.at_time = None # optional time at which this job runs
self.between_times = ()
self.run_days = []
self.start_run = None # datetime after which this job will start
self.last_run = None # datetime of the last run
self.next_run = None # datetime of the next run
        self.period = None  # timedelta between runs, computed in _schedule_next_run()
def __lt__(self, other):
"""PeriodicJobs are sortable based on the scheduled time
they run next."""
return self.next_run < other.next_run
def __repr__(self):
fmt_dt = "%Y-%m-%d %H:%M:%S %Z"
fmt_t = "%H:%M:%S %Z"
def format_time(t):
return t.strftime(fmt_dt) if t else '[never]'
timestats = '(last run: %s, next run: %s)' % (
format_time(self.last_run), format_time(self.next_run))
job_func_name = self.job_func.__name__
args = [repr(x) for x in self.job_func.args]
kwargs = ['%s=%s' % (k, repr(v))
for k, v in self.job_func.keywords.items()]
call_repr = job_func_name + '(' + ', '.join(args + kwargs) + ')'
if self.run_days:
final_days = []
for day in self.run_days:
days_str = [k.title() for k, i in Job.WEEKDAYS.items()
for d in day if i == d]
final_days.append(' or '.join(days_str))
repr_str = 'Every %s' % ' and '.join(final_days)
else:
repr_str = 'Every %s %s' % (
self.interval,
self.unit[:-1] if self.interval == 1 else self.unit)
if self.between_times:
repr_str += ' between %s' % ' and '.join(
t.strftime(fmt_t).strip()
for t in self.between_times)
elif self.at_time:
repr_str += ' at %s' % self.at_time.strftime(fmt_t).strip()
if self.start_run:
repr_str += ' starting %s' % self.start_run.strftime(fmt_dt)
repr_str += ' do %s %s' % (call_repr, timestats)
return repr_str
@property
def second(self):
assert self.interval == 1
return self.seconds
@property
def seconds(self):
self.unit = 'seconds'
return self
@property
def minute(self):
assert self.interval == 1
return self.minutes
@property
def minutes(self):
self.unit = 'minutes'
return self
@property
def hour(self):
assert self.interval == 1
return self.hours
@property
def hours(self):
self.unit = 'hours'
return self
@property
def day(self):
assert self.interval == 1
return self.days
@property
def days(self):
self.unit = 'days'
return self
@property
def week(self):
assert self.interval == 1
return self.weeks
@property
def weeks(self):
self.unit = 'weeks'
return self
def on(self, *days):
"""Schedule the job to run on specific weekdays.
`days` can be a string (or sequence of strings) with the name of the
weekday (case insensitive), e.g. 'Monday', 'sunday', etc, or a starting
substring of the name of the weekday, e.g. 'tue', 'Sat', etc.
If you specify multiple days, e.g. ('mon', 'wed'), the job will run
every Monday and Wednesday.
You can also specify OR conditions by separating the day names with a
pipe, e.g. ('sun|mon', 'wed|thu'). In this case the job will run
every Sunday *or* Monday, and every Wednesday *or* Thursday.
"""
weeknums = []
for day in days:
day_or = set()
for d in day.split('|'):
for n, i in Job.WEEKDAYS.items():
if n.startswith(d.lower()):
day_or.add(i)
if day_or:
weeknums.append(day_or)
self.run_days = weeknums
return self
def at(self, time_str):
"""Schedule the job every day at a specific time.
Calling this is only valid for jobs scheduled to run every
N day(s).
"""
assert self.unit == 'days'
self.at_time = parser.parse(time_str, tzinfos=tz_offsets)
if not self.at_time.tzinfo:
self.at_time = self.at_time.replace(tzinfo=tzlocal())
return self
def between(self, time_str):
"""Schedule the job at a random time between two timestamps."""
times = []
for t in time_str.split('-'):
dt = parser.parse(t, tzinfos=tz_offsets)
if not dt.tzinfo:
dt = dt.replace(tzinfo=tzlocal())
times.append(dt)
self.between_times = tuple(times)
return self
def starting(self, date_str):
self.start_run = parser.parse(date_str, tzinfos=tz_offsets)
if not self.start_run.tzinfo:
self.start_run = self.start_run.replace(tzinfo=tzlocal())
return self
def do(self, job_func, *args, **kwargs):
"""Specifies the job_func that should be called every time the
job runs.
Any additional arguments are passed on to job_func when
the job runs.
"""
self.job_func = functools.partial(job_func, *args, **kwargs)
functools.update_wrapper(self.job_func, job_func)
self._schedule_next_run()
return self
@property
def should_run(self):
"""True if the job should be run now."""
return datetime.datetime.now(tzlocal()) >= self.next_run
def run(self):
"""Run the job and immediately reschedule it."""
logger.info('Running job %s', self)
self.job_func()
self.last_run = datetime.datetime.now(tzlocal())
self._schedule_next_run()
def _schedule_next_run(self):
"""Compute the instant when this job should run next."""
# Allow *, ** magic temporarily:
# pylint: disable=W0142
assert self.unit in ('seconds', 'minutes', 'hours', 'days', 'weeks')
starting = self.start_run or datetime.datetime.now(tzlocal())
self.period = datetime.timedelta(**{self.unit: self.interval})
self.next_run = starting + self.period
if self.run_days:
run_days = self.run_days[:]
if self.last_run:
starting = self.last_run
# Don't consider this day group if it has been run already
for day in self.run_days:
if self.last_run.isoweekday() in day:
run_days.remove(day)
days = set()
for day in run_days:
days.add(random.sample(day, 1)[0])
if not days:
days_delta = 0
else:
# Calculate the closest day from the starting date
delta_all = sorted([(i - starting.isoweekday()) % 7
for i in days])
days_delta = delta_all[0]
if (days_delta == 0 and self.last_run and
self.last_run.date() == starting.date()):
# Make sure the job doesn't run today twice
if self.unit == 'days':
days_delta = 7
elif self.unit == 'weeks':
days_delta = self.interval * 7
self.next_run = starting + datetime.timedelta(days=days_delta)
if self.between_times:
start, end = self.between_times
# Choose a random time between both timestamps
self.at_time = (start + datetime.timedelta(
seconds=random.randint(0, int(
(end - start).total_seconds()))))
if self.at_time:
self.next_run = self.next_run.replace(hour=self.at_time.hour,
minute=self.at_time.minute,
second=self.at_time.second,
microsecond=0,
tzinfo=self.at_time.tzinfo)
# If we are running for the first time, make sure we run
# at the specified time *today* as well
if (not self.last_run and not self.run_days and
self.at_time > datetime.datetime.now(tzlocal())):
self.next_run = self.next_run - datetime.timedelta(days=1)
logger.info('Scheduled job %s', self)
# The following methods are shortcuts for not having to
# create a Scheduler instance:
default_scheduler = Scheduler()
jobs = default_scheduler.jobs # todo: should this be a copy, e.g. jobs()?
def every(interval=1):
"""Schedule a new periodic job."""
return default_scheduler.every(interval)
def on(*days):
"""Schedule a new job to run on specific weekdays.
See the docstring for `Job.on()`.
"""
return default_scheduler.on(*days)
def run_pending():
"""Run all jobs that are scheduled to run.
Please note that it is *intended behavior that run_pending()
does not run missed jobs*. For example, if you've registered a job
that should run every minute and you only call run_pending()
in one hour increments then your job won't be run 60 times in
between but only once.
"""
default_scheduler.run_pending()
def run_all(delay_seconds=0):
"""Run all jobs regardless if they are scheduled to run or not.
A delay of `delay` seconds is added between each job. This can help
to distribute the system load generated by the jobs more evenly over
time."""
default_scheduler.run_all(delay_seconds=delay_seconds)
def clear():
"""Deletes all scheduled jobs."""
default_scheduler.clear()
def next_run():
"""Datetime when the next job should run."""
return default_scheduler.next_run
def idle_seconds():
"""Number of seconds until `next_run`."""
return default_scheduler.idle_seconds
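# Usage sketch for the extensions this variant adds on top of the classic
# schedule API (weekday selection via on(), random times in a window via
# between(), and a start date via starting()). The job function and the
# times used below are arbitrary examples, not part of the library itself.
def _example_usage():
    def example_job():
        print('running example job')

    # every Monday or Wednesday at 10:30
    on('mon|wed').at('10:30').do(example_job)
    # every day at a random time between 09:00 and 11:00
    every().day.between('09:00-11:00').do(example_job)
    # every 2 hours, with scheduling anchored to the given start date
    every(2).hours.starting('2015-01-01 08:00').do(example_job)

    while True:
        run_pending()
        time.sleep(1)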
|
|
#!/usr/bin/env python3
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script to extract build metadata from bazel BUILD.
# To avoid having two sources of truth for the build metadata (build
# targets, source files, header files etc.), this script analyzes the contents
# of bazel BUILD files and generates a YAML file (currently called
# build_autogenerated.yaml). The format and semantics of the generated YAML file
# are chosen to match the format of a "build.yaml" file, which used
# to be the source of truth for the gRPC build before bazel became
# the primary build system.
# A good basic overview of the "build.yaml" format is available here:
# https://github.com/grpc/grpc/blob/master/templates/README.md. Note that
# while useful as an overview, the doc does not act as formal spec
# (formal spec does not exist in fact) and the doc can be incomplete,
# inaccurate or slightly out of date.
# TODO(jtattermusch): In the future we want to get rid of the legacy build.yaml
# format entirely or simplify it to a point where it becomes self-explanatory
# and doesn't need any detailed documentation.
import collections
import os
import re
import subprocess
import sys
from typing import Any, Dict, Iterable, List, Optional
import xml.etree.ElementTree as ET
import build_cleaner
import yaml
BuildMetadata = Dict[str, Any]
BuildDict = Dict[str, BuildMetadata]
BuildYaml = Dict[str, Any]
def _bazel_query_xml_tree(query: str) -> ET.Element:
"""Get xml output of bazel query invocation, parsed as XML tree"""
output = subprocess.check_output(
['tools/bazel', 'query', '--noimplicit_deps', '--output', 'xml', query])
return ET.fromstring(output)
def _rule_dict_from_xml_node(rule_xml_node):
"""Converts XML node representing a rule (obtained from "bazel query --output xml") to a dictionary that contains all the metadata we will need."""
result = {
'class': rule_xml_node.attrib.get('class'),
'name': rule_xml_node.attrib.get('name'),
'srcs': [],
'hdrs': [],
'deps': [],
'data': [],
'tags': [],
'args': [],
'generator_function': None,
'size': None,
'flaky': False,
}
for child in rule_xml_node:
# all the metadata we want is stored under "list" tags
if child.tag == 'list':
list_name = child.attrib['name']
if list_name in ['srcs', 'hdrs', 'deps', 'data', 'tags', 'args']:
result[list_name] += [item.attrib['value'] for item in child]
if child.tag == 'string':
string_name = child.attrib['name']
if string_name in ['generator_function', 'size']:
result[string_name] = child.attrib['value']
if child.tag == 'boolean':
bool_name = child.attrib['name']
if bool_name in ['flaky']:
result[bool_name] = child.attrib['value'] == 'true'
return result
def _extract_rules_from_bazel_xml(xml_tree):
"""Extract bazel rules from an XML tree node obtained from "bazel query --output xml" command."""
result = {}
for child in xml_tree:
if child.tag == 'rule':
rule_dict = _rule_dict_from_xml_node(child)
rule_clazz = rule_dict['class']
rule_name = rule_dict['name']
if rule_clazz in [
'cc_library',
'cc_binary',
'cc_test',
'cc_proto_library',
'proto_library',
'upb_proto_library',
'upb_proto_reflection_library',
]:
if rule_name in result:
raise Exception('Rule %s already present' % rule_name)
result[rule_name] = rule_dict
return result
def _get_bazel_label(target_name: str) -> str:
if ':' in target_name:
return '//%s' % target_name
else:
return '//:%s' % target_name
def _extract_source_file_path(label: str) -> str:
"""Gets relative path to source file from bazel deps listing"""
if label.startswith('//'):
label = label[len('//'):]
# labels in form //:src/core/lib/surface/call_test_only.h
if label.startswith(':'):
label = label[len(':'):]
# labels in form //test/core/util:port.cc
label = label.replace(':', '/')
return label
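# Examples of the label-to-path translation performed above (both label forms
# are taken from the comments inside the function):
#   '//:src/core/lib/surface/call_test_only.h' -> 'src/core/lib/surface/call_test_only.h'
#   '//test/core/util:port.cc'                 -> 'test/core/util/port.cc'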
def _extract_public_headers(bazel_rule: BuildMetadata) -> List[str]:
"""Gets list of public headers from a bazel rule"""
result = []
for dep in bazel_rule['hdrs']:
if dep.startswith('//:include/') and dep.endswith('.h'):
result.append(_extract_source_file_path(dep))
return list(sorted(result))
def _extract_nonpublic_headers(bazel_rule: BuildMetadata) -> List[str]:
"""Gets list of non-public headers from a bazel rule"""
result = []
for dep in bazel_rule['hdrs']:
if dep.startswith('//') and not dep.startswith(
'//:include/') and dep.endswith('.h'):
result.append(_extract_source_file_path(dep))
return list(sorted(result))
def _extract_sources(bazel_rule: BuildMetadata) -> List[str]:
"""Gets list of source files from a bazel rule"""
result = []
for dep in bazel_rule['srcs']:
if dep.startswith('//') and (dep.endswith('.cc') or dep.endswith('.c')
or dep.endswith('.proto')):
result.append(_extract_source_file_path(dep))
return list(sorted(result))
def _extract_deps(bazel_rule: BuildMetadata,
bazel_rules: BuildDict) -> List[str]:
"""Gets list of deps from from a bazel rule"""
return list(sorted(bazel_rule['deps']))
def _create_target_from_bazel_rule(target_name: str,
bazel_rules: BuildDict) -> BuildMetadata:
"""Create build.yaml-like target definition from bazel metadata"""
bazel_rule = bazel_rules[_get_bazel_label(target_name)]
# Create a template for our target from the bazel rule. Initially we only
# populate some "private" fields with the original info we got from bazel
# and only later we will populate the public fields (once we do some extra
# postprocessing).
result = {
'name': target_name,
'_PUBLIC_HEADERS_BAZEL': _extract_public_headers(bazel_rule),
'_HEADERS_BAZEL': _extract_nonpublic_headers(bazel_rule),
'_SRC_BAZEL': _extract_sources(bazel_rule),
'_DEPS_BAZEL': _extract_deps(bazel_rule, bazel_rules),
'public_headers': bazel_rule['_COLLAPSED_PUBLIC_HEADERS'],
'headers': bazel_rule['_COLLAPSED_HEADERS'],
'src': bazel_rule['_COLLAPSED_SRCS'],
'deps': bazel_rule['_COLLAPSED_DEPS'],
}
return result
def _external_dep_name_from_bazel_dependency(bazel_dep: str) -> Optional[str]:
"""Returns name of dependency if external bazel dependency is provided or None"""
if bazel_dep.startswith('@com_google_absl//'):
        # special case for adding a dependency on one of the absl libraries (there is not just one absl library)
prefixlen = len('@com_google_absl//')
return bazel_dep[prefixlen:]
elif bazel_dep == '//external:upb_lib':
return 'upb'
elif bazel_dep == '//external:benchmark':
return 'benchmark'
elif bazel_dep == '//external:libssl':
return 'libssl'
else:
# all the other external deps such as protobuf, cares, zlib
# don't need to be listed explicitly, they are handled automatically
# by the build system (make, cmake)
return None
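# Illustrative mappings performed by the function above (the absl target is
# just an example of the '@com_google_absl//' case):
#   '@com_google_absl//absl/strings' -> 'absl/strings'
#   '//external:benchmark'           -> 'benchmark'
#   any other external label         -> None (handled automatically by make/cmake)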
def _compute_transitive_metadata(
rule_name: str, bazel_rules: Any,
bazel_label_to_dep_name: Dict[str, str]) -> None:
"""Computes the final build metadata for Bazel target with rule_name.
The dependencies that will appear on the deps list are:
* Public build targets including binaries and tests;
* External targets, like absl, re2.
All other intermediate dependencies will be merged, which means their
source file, headers, etc. will be collected into one build target. This
step of processing will greatly reduce the complexity of the generated
build specifications for other build systems, like CMake, Make, setuptools.
The final build metadata are:
* _TRANSITIVE_DEPS: all the transitive dependencies including intermediate
targets;
    * _COLLAPSED_DEPS: dependencies that fit our requirement above, with
                       duplicated items removed, producing the shortest
                       possible dependency list in alphabetical order;
* _COLLAPSED_SRCS: the merged source files;
* _COLLAPSED_PUBLIC_HEADERS: the merged public headers;
* _COLLAPSED_HEADERS: the merged non-public headers;
* _EXCLUDE_DEPS: intermediate targets to exclude when performing collapsing
of sources and dependencies.
    For the collapsed_deps, the algorithm improves cases like:
The result in the past:
end2end_tests -> [grpc_test_util, grpc, gpr, address_sorting, upb]
grpc_test_util -> [grpc, gpr, address_sorting, upb, ...]
grpc -> [gpr, address_sorting, upb, ...]
The result of the algorithm:
end2end_tests -> [grpc_test_util]
grpc_test_util -> [grpc]
grpc -> [gpr, address_sorting, upb, ...]
"""
bazel_rule = bazel_rules[rule_name]
direct_deps = _extract_deps(bazel_rule, bazel_rules)
transitive_deps = set()
collapsed_deps = set()
exclude_deps = set()
collapsed_srcs = set(_extract_sources(bazel_rule))
collapsed_public_headers = set(_extract_public_headers(bazel_rule))
collapsed_headers = set(_extract_nonpublic_headers(bazel_rule))
for dep in direct_deps:
external_dep_name_maybe = _external_dep_name_from_bazel_dependency(dep)
if dep in bazel_rules:
# Descend recursively, but no need to do that for external deps
if external_dep_name_maybe is None:
if "_PROCESSING_DONE" not in bazel_rules[dep]:
# This item is not processed before, compute now
_compute_transitive_metadata(dep, bazel_rules,
bazel_label_to_dep_name)
transitive_deps.update(bazel_rules[dep].get(
'_TRANSITIVE_DEPS', []))
collapsed_deps.update(
collapsed_deps, bazel_rules[dep].get('_COLLAPSED_DEPS', []))
exclude_deps.update(bazel_rules[dep].get('_EXCLUDE_DEPS', []))
# This dep is a public target, add it as a dependency
if dep in bazel_label_to_dep_name:
transitive_deps.update([bazel_label_to_dep_name[dep]])
collapsed_deps.update(collapsed_deps,
[bazel_label_to_dep_name[dep]])
# Add all the transitive deps of our every public dep to exclude
# list since we want to avoid building sources that are already
# built by our dependencies
exclude_deps.update(bazel_rules[dep]['_TRANSITIVE_DEPS'])
continue
# This dep is an external target, add it as a dependency
if external_dep_name_maybe is not None:
transitive_deps.update([external_dep_name_maybe])
collapsed_deps.update(collapsed_deps, [external_dep_name_maybe])
continue
# Direct dependencies are part of transitive dependencies
transitive_deps.update(direct_deps)
# Calculate transitive public deps (needed for collapsing sources)
transitive_public_deps = set(
[x for x in transitive_deps if x in bazel_label_to_dep_name])
# Remove intermediate targets that our public dependencies already depend
    # on. This is the step that further shortens the deps list.
collapsed_deps = set([x for x in collapsed_deps if x not in exclude_deps])
# Compute the final source files and headers for this build target whose
# name is `rule_name` (input argument of this function).
#
    # Imagine a public target PX has transitive deps [IA, IB, PY, IC, PZ]. PX,
# PY and PZ are public build targets. And IA, IB, IC are intermediate
# targets. In addition, PY depends on IC.
#
# Translate the condition into dependency graph:
# PX -> [IA, IB, PY, IC, PZ]
# PY -> [IC]
# Public targets: [PX, PY, PZ]
#
# The collapsed dependencies of PX: [PY, PZ].
    # The excluded dependencies of PX: [PY, IC, PZ].
# (IC is excluded as a dependency of PX. It is already included in PY, hence
# it would be redundant to include it again.)
#
# Target PX should include source files and headers of [PX, IA, IB] as final
# build metadata.
for dep in transitive_deps:
if dep not in exclude_deps and dep not in transitive_public_deps:
if dep in bazel_rules:
collapsed_srcs.update(_extract_sources(bazel_rules[dep]))
collapsed_public_headers.update(
_extract_public_headers(bazel_rules[dep]))
collapsed_headers.update(
_extract_nonpublic_headers(bazel_rules[dep]))
# This item is a "visited" flag
bazel_rule['_PROCESSING_DONE'] = True
    # Following items are described in the docstring.
bazel_rule['_TRANSITIVE_DEPS'] = list(sorted(transitive_deps))
bazel_rule['_COLLAPSED_DEPS'] = list(sorted(collapsed_deps))
bazel_rule['_COLLAPSED_SRCS'] = list(sorted(collapsed_srcs))
bazel_rule['_COLLAPSED_PUBLIC_HEADERS'] = list(
sorted(collapsed_public_headers))
bazel_rule['_COLLAPSED_HEADERS'] = list(sorted(collapsed_headers))
bazel_rule['_EXCLUDE_DEPS'] = list(sorted(exclude_deps))
# TODO(jtattermusch): deduplicate with transitive_dependencies.py (which has a slightly different logic)
# TODO(jtattermusch): This is done to avoid introducing too many intermediate
# libraries into the build.yaml-based builds (which might cause issues
# building language-specific artifacts) and also because the libraries
# in build.yaml-based build are generally considered units of distributions
# (= public libraries that are visible to the user and are installable),
# while in bazel builds it is customary to define larger number of smaller
# "sublibraries". The need for elision (and expansion)
# of intermediate libraries can be re-evaluated in the future.
def _populate_transitive_metadata(bazel_rules: Any,
public_dep_names: Iterable[str]) -> None:
"""Add 'transitive_deps' field for each of the rules"""
# Create the map between Bazel label and public dependency name
bazel_label_to_dep_name = {}
for dep_name in public_dep_names:
bazel_label_to_dep_name[_get_bazel_label(dep_name)] = dep_name
# Make sure we reached all the Bazel rules
# TODO(lidiz) potentially we could only update a subset of rules
for rule_name in bazel_rules:
if '_PROCESSING_DONE' not in bazel_rules[rule_name]:
_compute_transitive_metadata(rule_name, bazel_rules,
bazel_label_to_dep_name)
def update_test_metadata_with_transitive_metadata(
all_extra_metadata: BuildDict, bazel_rules: BuildDict) -> None:
"""Patches test build metadata with transitive metadata."""
for lib_name, lib_dict in list(all_extra_metadata.items()):
        # Skip if it isn't a test target
if lib_dict.get('build') != 'test' or lib_dict.get('_TYPE') != 'target':
continue
bazel_rule = bazel_rules[_get_bazel_label(lib_name)]
if '//external:benchmark' in bazel_rule['_TRANSITIVE_DEPS']:
lib_dict['benchmark'] = True
lib_dict['defaults'] = 'benchmark'
if '//external:gtest' in bazel_rule['_TRANSITIVE_DEPS']:
lib_dict['gtest'] = True
lib_dict['language'] = 'c++'
def _get_transitive_protos(bazel_rules, t):
que = [
t,
]
visited = set()
ret = []
while que:
name = que.pop(0)
rule = bazel_rules.get(name, None)
if rule:
for dep in rule['deps']:
if dep not in visited:
visited.add(dep)
que.append(dep)
for src in rule['srcs']:
if src.endswith('.proto'):
ret.append(src)
return list(set(ret))
def _expand_upb_proto_library_rules(bazel_rules):
# Expand the .proto files from UPB proto library rules into the pre-generated
# upb.h and upb.c files.
GEN_UPB_ROOT = '//:src/core/ext/upb-generated/'
GEN_UPBDEFS_ROOT = '//:src/core/ext/upbdefs-generated/'
EXTERNAL_LINKS = [('@com_google_protobuf//', ':src/'),
('@com_google_googleapis//', '')]
for name, bazel_rule in bazel_rules.items():
gen_func = bazel_rule.get('generator_function', None)
if gen_func in ('grpc_upb_proto_library',
'grpc_upb_proto_reflection_library'):
# get proto dependency
deps = bazel_rule['deps']
if len(deps) != 1:
raise Exception(
'upb rule "{0}" should have 1 proto dependency but has "{1}"'
.format(name, deps))
# deps is not properly fetched from bazel query for upb_proto_library target
# so add the upb dependency manually
bazel_rule['deps'] = [
'//external:upb_lib', '//external:upb_lib_descriptor',
'//external:upb_generated_code_support__only_for_generated_code_do_not_use__i_give_permission_to_break_me'
]
# populate the upb_proto_library rule with pre-generated upb headers
# and sources using proto_rule
protos = _get_transitive_protos(bazel_rules, deps[0])
if len(protos) == 0:
raise Exception(
'upb rule "{0}" should have at least one proto file.'.
format(name))
srcs = []
hdrs = []
for proto_src in protos:
for external_link in EXTERNAL_LINKS:
if proto_src.startswith(external_link[0]):
proto_src = proto_src[len(external_link[0]) +
len(external_link[1]):]
break
if proto_src.startswith('@'):
raise Exception('"{0}" is unknown workspace.'.format(name))
proto_src = _extract_source_file_path(proto_src)
ext = '.upb' if gen_func == 'grpc_upb_proto_library' else '.upbdefs'
root = GEN_UPB_ROOT if gen_func == 'grpc_upb_proto_library' else GEN_UPBDEFS_ROOT
srcs.append(root + proto_src.replace('.proto', ext + '.c'))
hdrs.append(root + proto_src.replace('.proto', ext + '.h'))
bazel_rule['srcs'] = srcs
bazel_rule['hdrs'] = hdrs
def _generate_build_metadata(build_extra_metadata: BuildDict,
bazel_rules: BuildDict) -> BuildDict:
"""Generate build metadata in build.yaml-like format bazel build metadata and build.yaml-specific "extra metadata"."""
lib_names = list(build_extra_metadata.keys())
result = {}
for lib_name in lib_names:
lib_dict = _create_target_from_bazel_rule(lib_name, bazel_rules)
# populate extra properties from the build.yaml-specific "extra metadata"
lib_dict.update(build_extra_metadata.get(lib_name, {}))
# store to results
result[lib_name] = lib_dict
# Rename targets marked with "_RENAME" extra metadata.
# This is mostly a cosmetic change to ensure that we end up with build.yaml target
# names we're used to from the past (and also to avoid too long target names).
# The rename step needs to be made after we're done with most of processing logic
# otherwise the already-renamed libraries will have different names than expected
for lib_name in lib_names:
to_name = build_extra_metadata.get(lib_name, {}).get('_RENAME', None)
if to_name:
# store lib under the new name and also change its 'name' property
if to_name in result:
raise Exception('Cannot rename target ' + str(lib_name) + ', ' +
str(to_name) + ' already exists.')
lib_dict = result.pop(lib_name)
lib_dict['name'] = to_name
result[to_name] = lib_dict
# dep names need to be updated as well
for lib_dict_to_update in list(result.values()):
lib_dict_to_update['deps'] = list([
to_name if dep == lib_name else dep
for dep in lib_dict_to_update['deps']
])
return result
def _convert_to_build_yaml_like(lib_dict: BuildMetadata) -> BuildYaml:
lib_names = [
lib_name for lib_name in list(lib_dict.keys())
if lib_dict[lib_name].get('_TYPE', 'library') == 'library'
]
target_names = [
lib_name for lib_name in list(lib_dict.keys())
if lib_dict[lib_name].get('_TYPE', 'library') == 'target'
]
test_names = [
lib_name for lib_name in list(lib_dict.keys())
if lib_dict[lib_name].get('_TYPE', 'library') == 'test'
]
# list libraries and targets in predefined order
lib_list = [lib_dict[lib_name] for lib_name in lib_names]
target_list = [lib_dict[lib_name] for lib_name in target_names]
test_list = [lib_dict[lib_name] for lib_name in test_names]
# get rid of temporary private fields prefixed with "_" and some other useless fields
for lib in lib_list:
for field_to_remove in [
k for k in list(lib.keys()) if k.startswith('_')
]:
lib.pop(field_to_remove, None)
for target in target_list:
for field_to_remove in [
k for k in list(target.keys()) if k.startswith('_')
]:
target.pop(field_to_remove, None)
target.pop('public_headers',
None) # public headers make no sense for targets
for test in test_list:
for field_to_remove in [
k for k in list(test.keys()) if k.startswith('_')
]:
test.pop(field_to_remove, None)
test.pop('public_headers',
None) # public headers make no sense for tests
build_yaml_like = {
'libs': lib_list,
'filegroups': [],
'targets': target_list,
'tests': test_list,
}
return build_yaml_like
def _extract_cc_tests(bazel_rules: BuildDict) -> List[str]:
"""Gets list of cc_test tests from bazel rules"""
result = []
for bazel_rule in list(bazel_rules.values()):
if bazel_rule['class'] == 'cc_test':
test_name = bazel_rule['name']
if test_name.startswith('//'):
prefixlen = len('//')
result.append(test_name[prefixlen:])
return list(sorted(result))
def _exclude_unwanted_cc_tests(tests: List[str]) -> List[str]:
"""Filters out bazel tests that we don't want to run with other build systems or we cannot build them reasonably"""
# most qps tests are autogenerated, we are fine without them
tests = [test for test in tests if not test.startswith('test/cpp/qps:')]
# microbenchmarks aren't needed for checking correctness
tests = [
test for test in tests
if not test.startswith('test/cpp/microbenchmarks:')
]
tests = [
test for test in tests
if not test.startswith('test/core/promise/benchmark:')
]
# we have trouble with census dependency outside of bazel
tests = [
test for test in tests
if not test.startswith('test/cpp/ext/filters/census:') and
not test.startswith('test/core/xds:xds_channel_stack_modifier_test')
]
# missing opencensus/stats/stats.h
tests = [
test for test in tests if not test.startswith(
'test/cpp/end2end:server_load_reporting_end2end_test')
]
tests = [
test for test in tests if not test.startswith(
'test/cpp/server/load_reporter:lb_load_reporter_test')
]
# The test uses the --running_under_bazel cmdline argument.
# To avoid the trouble of adjusting it, we just skip the test.
tests = [
test for test in tests if not test.startswith(
'test/cpp/naming:resolver_component_tests_runner_invoker')
]
# the test requires 'client_crash_test_server' to be built
tests = [
test for test in tests
if not test.startswith('test/cpp/end2end:time_change_test')
]
# the test requires 'client_crash_test_server' to be built
tests = [
test for test in tests
if not test.startswith('test/cpp/end2end:client_crash_test')
]
# the test requires 'server_crash_test_client' to be built
tests = [
test for test in tests
if not test.startswith('test/cpp/end2end:server_crash_test')
]
# test never existed under build.yaml and it fails -> skip it
tests = [
test for test in tests
if not test.startswith('test/core/tsi:ssl_session_cache_test')
]
# the binary of this test does not get built with cmake
tests = [
test for test in tests
if not test.startswith('test/cpp/util:channelz_sampler_test')
]
# we don't need to generate fuzzers outside of bazel
tests = [test for test in tests if not test.endswith('_fuzzer')]
return tests
def _generate_build_extra_metadata_for_tests(
tests: List[str], bazel_rules: BuildDict) -> BuildDict:
"""For given tests, generate the "extra metadata" that we need for our "build.yaml"-like output. The extra metadata is generated from the bazel rule metadata by using a bunch of heuristics."""
test_metadata = {}
for test in tests:
test_dict = {'build': 'test', '_TYPE': 'target'}
bazel_rule = bazel_rules[_get_bazel_label(test)]
bazel_tags = bazel_rule['tags']
if 'manual' in bazel_tags:
# don't run the tests marked as "manual"
test_dict['run'] = False
if bazel_rule['flaky']:
# don't run tests that are marked as "flaky" under bazel
# because that would only add noise to the run_tests.py results;
# seeing more failures for tests that we already know are flaky
# doesn't really help anything
test_dict['run'] = False
if 'no_uses_polling' in bazel_tags:
test_dict['uses_polling'] = False
if 'grpc_fuzzer' == bazel_rule['generator_function']:
# currently we hand-list fuzzers instead of generating them automatically
# because there's no way to obtain maxlen property from bazel BUILD file.
print(('skipping fuzzer ' + test))
continue
# if any tags that restrict platform compatibility are present,
# generate the "platforms" field accordingly
# TODO(jtattermusch): there is also a "no_linux" tag, but we cannot take
# it into account as it is applied by grpc_cc_test when poller expansion
# is made (for tests where uses_polling=True). So for now, we just
# assume all tests are compatible with linux and ignore the "no_linux" tag
# completely.
known_platform_tags = set(['no_windows', 'no_mac'])
if set(bazel_tags).intersection(known_platform_tags):
platforms = []
# assume all tests are compatible with linux and posix
platforms.append('linux')
platforms.append(
'posix') # there is no posix-specific tag in bazel BUILD
if 'no_mac' not in bazel_tags:
platforms.append('mac')
if 'no_windows' not in bazel_tags:
platforms.append('windows')
test_dict['platforms'] = platforms
cmdline_args = bazel_rule['args']
if cmdline_args:
test_dict['args'] = list(cmdline_args)
if test.startswith('test/cpp'):
test_dict['language'] = 'c++'
elif test.startswith('test/core'):
test_dict['language'] = 'c'
else:
raise Exception('wrong test: ' + test)
# short test name without the path.
# There can be name collisions, but we will resolve them later
simple_test_name = os.path.basename(_extract_source_file_path(test))
test_dict['_RENAME'] = simple_test_name
test_metadata[test] = test_dict
# detect duplicate test names
tests_by_simple_name = {}
for test_name, test_dict in list(test_metadata.items()):
simple_test_name = test_dict['_RENAME']
if simple_test_name not in tests_by_simple_name:
tests_by_simple_name[simple_test_name] = []
tests_by_simple_name[simple_test_name].append(test_name)
# choose alternative names for tests with a name collision
for collision_list in list(tests_by_simple_name.values()):
if len(collision_list) > 1:
for test_name in collision_list:
long_name = test_name.replace('/', '_').replace(':', '_')
print((
'short name of "%s" collides with another test, renaming to %s'
% (test_name, long_name)))
test_metadata[test_name]['_RENAME'] = long_name
return test_metadata
def _detect_and_print_issues(build_yaml_like: BuildYaml) -> None:
"""Try detecting some unusual situations and warn about them."""
for tgt in build_yaml_like['targets']:
if tgt['build'] == 'test':
for src in tgt['src']:
if src.startswith('src/') and not src.endswith('.proto'):
print(('source file from under "src/" tree used in test ' +
tgt['name'] + ': ' + src))
# extra metadata that will be used to construct build.yaml
# these are mostly extra properties that we weren't able to obtain from the bazel build
# _TYPE: whether this is library, target or test
# _RENAME: whether this target should be renamed to a different name (to match expectations of make and cmake builds)
_BUILD_EXTRA_METADATA = {
'third_party/address_sorting:address_sorting': {
'language': 'c',
'build': 'all',
'_RENAME': 'address_sorting'
},
'gpr': {
'language': 'c',
'build': 'all',
},
'grpc': {
'language': 'c',
'build': 'all',
'baselib': True,
'generate_plugin_registry': True
},
'grpc++': {
'language': 'c++',
'build': 'all',
'baselib': True,
},
'grpc++_alts': {
'language': 'c++',
'build': 'all',
'baselib': True
},
'grpc++_error_details': {
'language': 'c++',
'build': 'all'
},
'grpc++_reflection': {
'language': 'c++',
'build': 'all'
},
'grpc++_unsecure': {
'language': 'c++',
'build': 'all',
'baselib': True,
},
# TODO(jtattermusch): do we need to set grpc_csharp_ext's LDFLAGS for wrapping memcpy in the same way as in build.yaml?
'grpc_csharp_ext': {
'language': 'c',
'build': 'all',
},
'grpc_unsecure': {
'language': 'c',
'build': 'all',
'baselib': True,
'generate_plugin_registry': True
},
'grpcpp_channelz': {
'language': 'c++',
'build': 'all'
},
'grpc++_test': {
'language': 'c++',
'build': 'private',
},
'src/compiler:grpc_plugin_support': {
'language': 'c++',
'build': 'protoc',
'_RENAME': 'grpc_plugin_support'
},
'src/compiler:grpc_cpp_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_cpp_plugin'
},
'src/compiler:grpc_csharp_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_csharp_plugin'
},
'src/compiler:grpc_node_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_node_plugin'
},
'src/compiler:grpc_objective_c_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_objective_c_plugin'
},
'src/compiler:grpc_php_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_php_plugin'
},
'src/compiler:grpc_python_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_python_plugin'
},
'src/compiler:grpc_ruby_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_ruby_plugin'
},
# TODO(jtattermusch): consider adding grpc++_core_stats
# test support libraries
'test/core/util:grpc_test_util': {
'language': 'c',
'build': 'private',
'_RENAME': 'grpc_test_util'
},
'test/core/util:grpc_test_util_unsecure': {
'language': 'c',
'build': 'private',
'_RENAME': 'grpc_test_util_unsecure'
},
# TODO(jtattermusch): consider adding grpc++_test_util_unsecure - it doesn't seem to be used by bazel build (don't forget to set secure: False)
'test/cpp/util:test_config': {
'language': 'c++',
'build': 'private',
'_RENAME': 'grpc++_test_config'
},
'test/cpp/util:test_util': {
'language': 'c++',
'build': 'private',
'_RENAME': 'grpc++_test_util'
},
# end2end test support libraries
'test/core/end2end:end2end_tests': {
'language': 'c',
'build': 'private',
'_RENAME': 'end2end_tests'
},
'test/core/end2end:end2end_nosec_tests': {
'language': 'c',
'build': 'private',
'_RENAME': 'end2end_nosec_tests'
},
# benchmark support libraries
'test/cpp/microbenchmarks:helpers': {
'language': 'c++',
'build': 'test',
'defaults': 'benchmark',
'_RENAME': 'benchmark_helpers'
},
'test/cpp/interop:interop_client': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'interop_client'
},
'test/cpp/interop:interop_server': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'interop_server'
},
'test/cpp/interop:xds_interop_client': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'xds_interop_client'
},
'test/cpp/interop:xds_interop_server': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'xds_interop_server'
},
'test/cpp/interop:http2_client': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'http2_client'
},
'test/cpp/qps:qps_json_driver': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'qps_json_driver'
},
'test/cpp/qps:qps_worker': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'qps_worker'
},
'test/cpp/util:grpc_cli': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'grpc_cli'
},
# TODO(jtattermusch): create_jwt and verify_jwt breaks distribtests because it depends on grpc_test_utils and thus requires tests to be built
# For now it's ok to disable them as these binaries aren't very useful anyway.
#'test/core/security:create_jwt': { 'language': 'c', 'build': 'tool', '_TYPE': 'target', '_RENAME': 'grpc_create_jwt' },
#'test/core/security:verify_jwt': { 'language': 'c', 'build': 'tool', '_TYPE': 'target', '_RENAME': 'grpc_verify_jwt' },
# TODO(jtattermusch): add remaining tools such as grpc_print_google_default_creds_token (they are not used by bazel build)
# TODO(jtattermusch): these fuzzers had no build.yaml equivalent
# test/core/compression:message_compress_fuzzer
# test/core/compression:message_decompress_fuzzer
# test/core/compression:stream_compression_fuzzer
# test/core/compression:stream_decompression_fuzzer
# test/core/slice:b64_decode_fuzzer
# test/core/slice:b64_encode_fuzzer
}
# We need a complete picture of all the targets and dependencies we're interested in
# so we run multiple bazel queries and merge the results.
_BAZEL_DEPS_QUERIES = [
'deps("//test/...")',
'deps("//:all")',
'deps("//src/compiler/...")',
'deps("//src/proto/...")',
# The ^ is needed to differentiate proto_library from go_proto_library
'deps(kind("^proto_library", @envoy_api//envoy/...))',
]
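# For example, the first entry above roughly corresponds to running
#   bazel query --output xml 'deps("//test/...")'
# (illustrative; the exact invocation and flags are handled by _bazel_query_xml_tree below).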
# Step 1: run a bunch of "bazel query --output xml" queries to collect
# the raw build metadata from the bazel build.
# At the end of this step we will have a dictionary of bazel rules
# that are interesting to us (libraries, binaries, etc.) along
# with their most important metadata (sources, headers, dependencies)
#
# Example of a single bazel rule after being populated:
# '//:grpc' : { 'class': 'cc_library',
# 'hdrs': ['//:include/grpc/byte_buffer.h', ... ],
# 'srcs': ['//:src/core/lib/surface/init.cc', ... ],
# 'deps': ['//:grpc_common', ...],
# ... }
bazel_rules = {}
for query in _BAZEL_DEPS_QUERIES:
bazel_rules.update(
_extract_rules_from_bazel_xml(_bazel_query_xml_tree(query)))
# Step 1.5: The sources for UPB protos are pre-generated, so we want
# to expand the UPB proto library bazel rules into the generated
# .upb.h and .upb.c files.
_expand_upb_proto_library_rules(bazel_rules)
# Step 2: Extract the known bazel cc_test tests. While most tests
# will be buildable with other build systems just fine, some of these tests
# would be too difficult to build and run with other build systems,
# so we simply exclude the ones we don't want.
# Note that while making tests buildable with other build systems
# than just bazel is extra effort, we still need to do that for these
# reasons:
# - If our cmake build doesn't have any tests at all, it's hard to make
# sure that what it built actually works (we need at least some "smoke tests").
# This is quite important because the build flags between the bazel and non-bazel builds might differ
# (sometimes for interesting reasons that are not easy to overcome),
# which makes it even more important to have at least some tests for cmake/make
# - Our portability suite actually runs cmake tests and migration of portability
# suite fully towards bazel might be intricate (e.g. it's unclear whether it's
# possible to get a good enough coverage of different compilers / distros etc.
# with bazel)
# - some things that are considered "tests" in build.yaml-based builds are actually binaries
# we'd want to be able to build anyway (qps_json_worker, interop_client, interop_server, grpc_cli)
# so it's unclear how much make/cmake simplification we would gain by removing just some (but not all) tests
# TODO(jtattermusch): Investigate feasibility of running portability suite with bazel.
tests = _exclude_unwanted_cc_tests(_extract_cc_tests(bazel_rules))
# Step 3: Generate the "extra metadata" for all our build targets.
# While the bazel rules give us most of the information we need,
# the legacy "build.yaml" format requires some additional fields that
# we cannot get just from bazel alone (we call that "extra metadata").
# In this step, we basically analyze the build metadata we have from bazel
# and use heuristics to determine (and sometimes guess) the right
# extra metadata to use for each target.
#
# - For some targets (such as the public libraries, helper libraries
# and executables) determining the right extra metadata is hard to do
# automatically. For these targets, the extra metadata is supplied "manually"
# in form of the _BUILD_EXTRA_METADATA dictionary. That allows us to match
# the semantics of the legacy "build.yaml" as closely as possible.
#
# - For test binaries, it is possible to generate the "extra metadata" mostly
# automatically using a rule-based heuristic approach because most tests
# look and behave alike from the build's perspective.
#
# TODO(jtattermusch): Of course neither "_BUILD_EXTRA_METADATA" nor
# the heuristic approach used for tests is ideal and they cannot be made
# to cover all possible situations (and are tailored to work with the way
# the grpc build currently works), but the idea was to start with something
# reasonably simple that matches the "build.yaml"-like semantics as closely
# as possible (to avoid changing too many things at once) and gradually get
# rid of the legacy "build.yaml"-specific fields one by one. Once that is done,
# only very little "extra metadata" would be needed and/or it would be trivial
# to generate it automatically.
all_extra_metadata = {}
all_extra_metadata.update(_BUILD_EXTRA_METADATA)
all_extra_metadata.update(
_generate_build_extra_metadata_for_tests(tests, bazel_rules))
# Step 4: Compute the build metadata that will be used in the final build.yaml.
# The final build metadata includes transitive dependencies, and sources/headers
# expanded without intermediate dependencies.
# Example:
# '//:grpc' : { ...,
# '_TRANSITIVE_DEPS': ['//:gpr_base', ...],
# '_COLLAPSED_DEPS': ['gpr', ...],
# '_COLLAPSED_SRCS': [...],
# '_COLLAPSED_PUBLIC_HEADERS': [...],
# '_COLLAPSED_HEADERS': [...]
# }
_populate_transitive_metadata(bazel_rules, list(all_extra_metadata.keys()))
# Step 4a: Update the existing test metadata with the updated build metadata.
# Certain build metadata of certain test targets depend on the transitive
# metadata that wasn't available earlier.
update_test_metadata_with_transitive_metadata(all_extra_metadata, bazel_rules)
# Step 5: Generate the final metadata for all the targets.
# This is done by combining the bazel build metadata and the "extra metadata"
# we obtained in the previous step.
# In this step, we also perform some interesting massaging of the target metadata
# to end up with a result that is as similar to the legacy build.yaml data
# as possible.
# - Some targets get renamed (to match the legacy build.yaml target names)
# - Some intermediate libraries get elided ("expanded") to better match the set
# of targets provided by the legacy build.yaml build
#
# Originally the target renaming was introduced to address these concerns:
# - avoid changing too many things at the same time and avoid people getting
# confused by some well-known targets suddenly being missing
# - Makefile/cmake and also language-specific generators rely on some build
# targets being called exactly the way they are. Some of our testing
# scripts also invoke executables (e.g. "qps_json_driver") by their name.
# - The autogenerated test name from bazel includes the package path
# (e.g. "test_cpp_TEST_NAME"). Without renaming, the target names would
# end up pretty ugly (e.g. test_cpp_qps_qps_json_driver).
# TODO(jtattermusch): reevaluate the need for target renaming in the future.
#
# Example of a single generated target:
# 'grpc' : { 'language': 'c',
# 'public_headers': ['include/grpc/byte_buffer.h', ... ],
# 'headers': ['src/core/ext/filters/client_channel/client_channel.h', ... ],
# 'src': ['src/core/lib/surface/init.cc', ... ],
# 'deps': ['gpr', 'address_sorting', ...],
# ... }
all_targets_dict = _generate_build_metadata(all_extra_metadata, bazel_rules)
# Step 6: convert the dictionary with all the targets to a dict that has
# the desired "build.yaml"-like layout.
# TODO(jtattermusch): We use the custom "build.yaml"-like layout because
# currently all other build systems use that format as their source of truth.
# In the future, we can get rid of this custom & legacy format entirely,
# but we would need to update the generators for other build systems
# at the same time.
#
# Layout of the result:
# { 'libs': { TARGET_DICT_FOR_LIB_XYZ, ... },
# 'targets': { TARGET_DICT_FOR_BIN_XYZ, ... },
# 'tests': { TARGET_DICT_FOR_TEST_XYZ, ...} }
build_yaml_like = _convert_to_build_yaml_like(all_targets_dict)
# detect and report some suspicious situations we've seen before
_detect_and_print_issues(build_yaml_like)
# Step 7: Store the build_autogenerated.yaml in a deterministic (=sorted)
# and cleaned-up form.
# A basic overview of the resulting "build.yaml"-like format is here:
# https://github.com/grpc/grpc/blob/master/templates/README.md
# TODO(jtattermusch): The "cleanup" function is taken from the legacy
# build system (which used build.yaml) and can be eventually removed.
build_yaml_string = build_cleaner.cleaned_build_yaml_dict_as_string(
build_yaml_like)
with open('build_autogenerated.yaml', 'w') as file:
file.write(build_yaml_string)
|
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import Dense, Dropout, Input
from tensorlayer.layers.core import Layer
from tensorlayer.models import Model
__all__ = ['Seq2seqLuongAttention']
class Encoder(Layer):
def __init__(self, hidden_size, cell, embedding_layer, name=None):
super(Encoder, self).__init__(name)
self.cell = cell(hidden_size)
self.hidden_size = hidden_size
self.embedding_layer = embedding_layer
self.build((None, None, self.embedding_layer.embedding_size))
self._built = True
def build(self, inputs_shape):
self.cell.build(input_shape=tuple(inputs_shape))
self._built = True
if self._trainable_weights is None:
self._trainable_weights = list()
for var in self.cell.trainable_variables:
self._trainable_weights.append(var)
def forward(self, src_seq, initial_state=None):
states = initial_state if initial_state is not None else self.cell.get_initial_state(src_seq)
encoding_hidden_states = list()
total_steps = src_seq.get_shape().as_list()[1]
for time_step in range(total_steps):
if not isinstance(states, list):
states = [states]
output, states = self.cell.call(src_seq[:, time_step, :], states, training=self.is_train)
encoding_hidden_states.append(states[0])
return output, encoding_hidden_states, states[0]
class Decoder_Attention(Layer):
def __init__(self, hidden_size, cell, embedding_layer, method, name=None):
super(Decoder_Attention, self).__init__(name)
self.cell = cell(hidden_size)
self.hidden_size = hidden_size
self.embedding_layer = embedding_layer
self.method = method
self.build((None, hidden_size + self.embedding_layer.embedding_size))
self._built = True
def build(self, inputs_shape):
self.cell.build(input_shape=tuple(inputs_shape))
self._built = True
if self.method is "concat":
self.W = self._get_weights("W", shape=(2 * self.hidden_size, self.hidden_size))
self.V = self._get_weights("V", shape=(self.hidden_size, 1))
elif self.method is "general":
self.W = self._get_weights("W", shape=(self.hidden_size, self.hidden_size))
if self._trainable_weights is None:
self._trainable_weights = list()
for var in self.cell.trainable_variables:
self._trainable_weights.append(var)
def score(self, encoding_hidden, hidden, method):
# encoding = [B, T, H]
# hidden = [B, H]
# combined = [B,T,2H]
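# For reference, the branches below implement the three Luong (2015) score
# functions, with h_t the current decoder hidden state and h_s the encoder
# hidden states (an illustrative summary of the tensor ops that follow):
#   dot:     score = h_t . h_s
#   general: score = (h_t W) . h_s
#   concat:  score = V . tanh(W [h_t ; h_s])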
if method is "concat":
# hidden = [B,H]->[B,1,H]->[B,T,H]
hidden = tf.expand_dims(hidden, 1)
hidden = tf.tile(hidden, [1, encoding_hidden.shape[1], 1])
# combined = [B,T,2H]
combined = tf.concat([hidden, encoding_hidden], 2)
combined = tf.cast(combined, tf.float32)
score = tf.tensordot(combined, self.W, axes=[[2], [0]]) # score = [B,T,H]
score = tf.nn.tanh(score) # score = [B,T,H]
score = tf.tensordot(self.V, score, axes=[[0], [2]]) # score = [1,B,T]
score = tf.squeeze(score, axis=0) # score = [B,T]
elif method is "dot":
# hidden = [B,H]->[B,H,1]
hidden = tf.expand_dims(hidden, 2)
score = tf.matmul(encoding_hidden, hidden)
score = tf.squeeze(score, axis=2)
elif method is "general":
# hidden = [B,H]->[B,H,1]
score = tf.matmul(hidden, self.W)
score = tf.expand_dims(score, 2)
score = tf.matmul(encoding_hidden, score)
score = tf.squeeze(score, axis=2)
score = tf.nn.softmax(score, axis=-1) # score = [B,T]
return score
def forward(self, dec_seq, enc_hiddens, last_hidden, method, return_last_state=False):
# dec_seq = [B, T_, V], enc_hiddens = [B, T, H], last_hidden = [B, H]
total_steps = dec_seq.get_shape().as_list()[1]
states = last_hidden
cell_outputs = list()
for time_step in range(total_steps):
attention_weights = self.score(enc_hiddens, last_hidden, method)
attention_weights = tf.expand_dims(attention_weights, 1) #[B, 1, T]
context = tf.matmul(attention_weights, enc_hiddens) #[B, 1, H]
context = tf.squeeze(context, 1) #[B, H]
inputs = tf.concat([dec_seq[:, time_step, :], context], 1)
if not isinstance(states, list):
states = [states]
cell_output, states = self.cell.call(inputs, states, training=self.is_train)
cell_outputs.append(cell_output)
last_hidden = states[0]
cell_outputs = tf.convert_to_tensor(cell_outputs)
cell_outputs = tf.transpose(cell_outputs, perm=[1, 0, 2])
if return_last_state:
return cell_outputs, last_hidden
return cell_outputs
class Seq2seqLuongAttention(Model):
"""Luong Attention-based Seq2Seq model. Implementation based on https://arxiv.org/pdf/1508.04025.pdf.
Parameters
----------
hidden_size: int
The hidden size of both encoder and decoder RNN cells
cell : TensorFlow cell function
The RNN cell used for both the encoder and decoder, e.g. tf.keras.layers.GRUCell
embedding_layer : tl.Layer
An embedding layer, e.g. tl.layers.Embedding(vocabulary_size=voc_size, embedding_size=emb_dim)
method : str
The method used to calculate the attention scores, one of "dot", "general" or "concat"
name : str
The model name
Returns
-------
A static single-layer attention-based Seq2Seq model.
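Examples
--------
A minimal usage sketch (illustrative only; the vocabulary size, dimensions and token ids are assumptions):
>>> import tensorflow as tf
>>> import tensorlayer as tl
>>> emb = tl.layers.Embedding(vocabulary_size=1000, embedding_size=64)
>>> model = Seq2seqLuongAttention(hidden_size=128, embedding_layer=emb, cell=tf.keras.layers.GRUCell, method="dot")
>>> model.train()  # training mode: forward expects inputs=[src_seq, dec_seq]
>>> # logits = model(inputs=[src_seq, dec_seq])
>>> model.eval()   # inference mode: forward expects inputs=[src_seq] plus seq_length and sos
>>> # token_ids = model(inputs=[src_seq], seq_length=20, sos=0)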
"""
def __init__(self, hidden_size, embedding_layer, cell, method, name=None):
super(Seq2seqLuongAttention, self).__init__(name)
self.enc_layer = Encoder(hidden_size, cell, embedding_layer)
self.dec_layer = Decoder_Attention(hidden_size, cell, embedding_layer, method=method)
self.embedding_layer = embedding_layer
self.dense_layer = tl.layers.Dense(n_units=self.embedding_layer.vocabulary_size, in_channels=hidden_size)
self.method = method
def inference(self, src_seq, encoding_hidden_states, last_hidden_states, seq_length, sos):
"""Inference mode"""
"""
Parameters
----------
src_seq : input tensor
The source sequences
encoding_hidden_states : a list of tensor
The list of encoder's hidden states at each time step
last_hidden_states: tensor
The last hidden_state from encoder
seq_length : int
The expected length of your predicted sequence.
sos : int
The token id of <SOS> ("start of sequence")
"""
batch_size = src_seq.shape[0]
decoding = [[sos] for i in range(batch_size)]
dec_output = self.embedding_layer(decoding)
outputs = [[0] for i in range(batch_size)]
for step in range(seq_length):
dec_output, last_hidden_states = self.dec_layer(
dec_output, encoding_hidden_states, last_hidden_states, method=self.method, return_last_state=True
)
dec_output = tf.reshape(dec_output, [-1, dec_output.shape[-1]])
dec_output = self.dense_layer(dec_output)
dec_output = tf.reshape(dec_output, [batch_size, -1, dec_output.shape[-1]])
dec_output = tf.argmax(dec_output, -1)
outputs = tf.concat([outputs, dec_output], 1)
dec_output = self.embedding_layer(dec_output)
return outputs[:, 1:]
def forward(self, inputs, seq_length=20, sos=None):
src_seq = inputs[0]
src_seq = self.embedding_layer(src_seq)
enc_output, encoding_hidden_states, last_hidden_states = self.enc_layer(src_seq)
encoding_hidden_states = tf.convert_to_tensor(encoding_hidden_states)
encoding_hidden_states = tf.transpose(encoding_hidden_states, perm=[1, 0, 2])
last_hidden_states = tf.convert_to_tensor(last_hidden_states)
if self.is_train:
dec_seq = inputs[1]
dec_seq = self.embedding_layer(dec_seq)
dec_output = self.dec_layer(dec_seq, encoding_hidden_states, last_hidden_states, method=self.method)
batch_size = dec_output.shape[0]
dec_output = tf.reshape(dec_output, [-1, dec_output.shape[-1]])
dec_output = self.dense_layer(dec_output)
dec_output = tf.reshape(dec_output, [batch_size, -1, dec_output.shape[-1]])
else:
dec_output = self.inference(src_seq, encoding_hidden_states, last_hidden_states, seq_length, sos)
return dec_output
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.auth.transport.requests import AuthorizedSession # type: ignore
import json # type: ignore
import grpc # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.api_core import rest_helpers
from google.api_core import rest_streaming
from google.api_core import path_template
from google.api_core import gapic_v1
from requests import __version__ as requests_version
import dataclasses
import re
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import warnings
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.compute_v1.types import compute
from .base import AddressesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
grpc_version=None,
rest_version=requests_version,
)
class AddressesRestInterceptor:
"""Interceptor for Addresses.
Interceptors are used to manipulate requests, request metadata, and responses
in arbitrary ways.
Example use cases include:
* Logging
* Verifying requests according to service or custom semantics
* Stripping extraneous information from responses
These use cases and more can be enabled by injecting an
instance of a custom subclass when constructing the AddressesRestTransport.
.. code-block:: python
class MyCustomAddressesInterceptor(AddressesRestInterceptor):
def pre_aggregated_list(self, request, metadata):
logging.info(f"Received request: {request}")
return request, metadata
def post_aggregated_list(self, response):
logging.info(f"Received response: {response}")
def pre_delete(self, request, metadata):
logging.info(f"Received request: {request}")
return request, metadata
def post_delete(self, response):
logging.info(f"Received response: {response}")
def pre_get(self, request, metadata):
logging.info(f"Received request: {request}")
return request, metadata
def post_get(self, response):
logging.info(f"Received response: {response}")
def pre_insert(self, request, metadata):
logging.info(f"Received request: {request}")
return request, metadata
def post_insert(self, response):
logging.info(f"Received response: {response}")
def pre_list(self, request, metadata):
logging.info(f"Received request: {request}")
return request, metadata
def post_list(self, response):
logging.info(f"Received response: {response}")
transport = AddressesRestTransport(interceptor=MyCustomAddressesInterceptor())
client = AddressesClient(transport=transport)
"""
def pre_aggregated_list(
self,
request: compute.AggregatedListAddressesRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.AggregatedListAddressesRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for aggregated_list
Override in a subclass to manipulate the request or metadata
before they are sent to the Addresses server.
"""
return request, metadata
def post_aggregated_list(
self, response: compute.AddressAggregatedList
) -> compute.AddressAggregatedList:
"""Post-rpc interceptor for aggregated_list
Override in a subclass to manipulate the response
after it is returned by the Addresses server but before
it is returned to user code.
"""
return response
def pre_delete(
self, request: compute.DeleteAddressRequest, metadata: Sequence[Tuple[str, str]]
) -> Tuple[compute.DeleteAddressRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for delete
Override in a subclass to manipulate the request or metadata
before they are sent to the Addresses server.
"""
return request, metadata
def post_delete(self, response: compute.Operation) -> compute.Operation:
"""Post-rpc interceptor for delete
Override in a subclass to manipulate the response
after it is returned by the Addresses server but before
it is returned to user code.
"""
return response
def pre_get(
self, request: compute.GetAddressRequest, metadata: Sequence[Tuple[str, str]]
) -> Tuple[compute.GetAddressRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for get
Override in a subclass to manipulate the request or metadata
before they are sent to the Addresses server.
"""
return request, metadata
def post_get(self, response: compute.Address) -> compute.Address:
"""Post-rpc interceptor for get
Override in a subclass to manipulate the response
after it is returned by the Addresses server but before
it is returned to user code.
"""
return response
def pre_insert(
self, request: compute.InsertAddressRequest, metadata: Sequence[Tuple[str, str]]
) -> Tuple[compute.InsertAddressRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for insert
Override in a subclass to manipulate the request or metadata
before they are sent to the Addresses server.
"""
return request, metadata
def post_insert(self, response: compute.Operation) -> compute.Operation:
"""Post-rpc interceptor for insert
Override in a subclass to manipulate the response
after it is returned by the Addresses server but before
it is returned to user code.
"""
return response
def pre_list(
self, request: compute.ListAddressesRequest, metadata: Sequence[Tuple[str, str]]
) -> Tuple[compute.ListAddressesRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for list
Override in a subclass to manipulate the request or metadata
before they are sent to the Addresses server.
"""
return request, metadata
def post_list(self, response: compute.AddressList) -> compute.AddressList:
"""Post-rpc interceptor for list
Override in a subclass to manipulate the response
after it is returned by the Addresses server but before
it is returned to user code.
"""
return response
@dataclasses.dataclass
class AddressesRestStub:
_session: AuthorizedSession
_host: str
_interceptor: AddressesRestInterceptor
class AddressesRestTransport(AddressesTransport):
"""REST backend transport for Addresses.
The Addresses API.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends JSON representations of protocol buffers over HTTP/1.1
"""
_STUBS: Dict[str, AddressesRestStub] = {}
def __init__(
self,
*,
host: str = "compute.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
url_scheme: str = "https",
interceptor: Optional[AddressesRestInterceptor] = None,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
certificate to configure mutual TLS HTTP channel. It is ignored
if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you are developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
url_scheme: the protocol scheme for the API endpoint. Normally
"https", but for testing or local servers,
"http" can be specified.
"""
# Run the base constructor
# TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
# TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
# credentials object
maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
if maybe_url_match is None:
raise ValueError(
f"Unexpected hostname structure: {host}"
) # pragma: NO COVER
url_match_items = maybe_url_match.groupdict()
host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
self._session = AuthorizedSession(
self._credentials, default_host=self.DEFAULT_HOST
)
if client_cert_source_for_mtls:
self._session.configure_mtls_channel(client_cert_source_for_mtls)
self._interceptor = interceptor or AddressesRestInterceptor()
self._prep_wrapped_messages(client_info)
class _AggregatedList(AddressesRestStub):
def __hash__(self):
return hash("AggregatedList")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.AggregatedListAddressesRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.AddressAggregatedList:
r"""Call the aggregated list method over HTTP.
Args:
request (~.compute.AggregatedListAddressesRequest):
The request object. A request message for
Addresses.AggregatedList. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.AddressAggregatedList:
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/aggregated/addresses",
},
]
request, metadata = self._interceptor.pre_aggregated_list(request, metadata)
request_kwargs = compute.AggregatedListAddressesRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.AggregatedListAddressesRequest.to_json(
compute.AggregatedListAddressesRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.AddressAggregatedList.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_aggregated_list(resp)
return resp
class _Delete(AddressesRestStub):
def __hash__(self):
return hash("Delete")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.DeleteAddressRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the delete method over HTTP.
Args:
request (~.compute.DeleteAddressRequest):
The request object. A request message for
Addresses.Delete. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "delete",
"uri": "/compute/v1/projects/{project}/regions/{region}/addresses/{address}",
},
]
request, metadata = self._interceptor.pre_delete(request, metadata)
request_kwargs = compute.DeleteAddressRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.DeleteAddressRequest.to_json(
compute.DeleteAddressRequest(transcoded_request["query_params"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_delete(resp)
return resp
class _Get(AddressesRestStub):
def __hash__(self):
return hash("Get")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.GetAddressRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Address:
r"""Call the get method over HTTP.
Args:
request (~.compute.GetAddressRequest):
The request object. A request message for Addresses.Get.
See the method description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Address:
Represents an IP Address resource. Google Compute Engine
has two IP Address resources: \* `Global (external and
internal) <https://cloud.google.com/compute/docs/reference/rest/v1/globalAddresses>`__
\* `Regional (external and
internal) <https://cloud.google.com/compute/docs/reference/rest/v1/addresses>`__
For more information, see Reserving a static external IP
address.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/regions/{region}/addresses/{address}",
},
]
request, metadata = self._interceptor.pre_get(request, metadata)
request_kwargs = compute.GetAddressRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.GetAddressRequest.to_json(
compute.GetAddressRequest(transcoded_request["query_params"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Address.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_get(resp)
return resp
class _Insert(AddressesRestStub):
def __hash__(self):
return hash("Insert")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.InsertAddressRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the insert method over HTTP.
Args:
request (~.compute.InsertAddressRequest):
The request object. A request message for
Addresses.Insert. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/compute/v1/projects/{project}/regions/{region}/addresses",
"body": "address_resource",
},
]
request, metadata = self._interceptor.pre_insert(request, metadata)
request_kwargs = compute.InsertAddressRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
# Jsonify the request body
body = compute.Address.to_json(
compute.Address(transcoded_request["body"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.InsertAddressRequest.to_json(
compute.InsertAddressRequest(transcoded_request["query_params"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_insert(resp)
return resp
class _List(AddressesRestStub):
def __hash__(self):
return hash("List")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.ListAddressesRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.AddressList:
r"""Call the list method over HTTP.
Args:
request (~.compute.ListAddressesRequest):
The request object. A request message for Addresses.List.
See the method description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.AddressList:
Contains a list of addresses.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/regions/{region}/addresses",
},
]
request, metadata = self._interceptor.pre_list(request, metadata)
request_kwargs = compute.ListAddressesRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.ListAddressesRequest.to_json(
compute.ListAddressesRequest(transcoded_request["query_params"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.AddressList.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_list(resp)
return resp
@property
def aggregated_list(
self,
) -> Callable[
[compute.AggregatedListAddressesRequest], compute.AddressAggregatedList
]:
stub = self._STUBS.get("aggregated_list")
if not stub:
stub = self._STUBS["aggregated_list"] = self._AggregatedList(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def delete(self) -> Callable[[compute.DeleteAddressRequest], compute.Operation]:
stub = self._STUBS.get("delete")
if not stub:
stub = self._STUBS["delete"] = self._Delete(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def get(self) -> Callable[[compute.GetAddressRequest], compute.Address]:
stub = self._STUBS.get("get")
if not stub:
stub = self._STUBS["get"] = self._Get(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def insert(self) -> Callable[[compute.InsertAddressRequest], compute.Operation]:
stub = self._STUBS.get("insert")
if not stub:
stub = self._STUBS["insert"] = self._Insert(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def list(self) -> Callable[[compute.ListAddressesRequest], compute.AddressList]:
stub = self._STUBS.get("list")
if not stub:
stub = self._STUBS["list"] = self._List(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
def close(self):
self._session.close()
__all__ = ("AddressesRestTransport",)
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Base classes representing defects.
"""
import logging
import numpy as np
from abc import ABCMeta, abstractmethod
from monty.json import MSONable, MontyDecoder
from functools import lru_cache
from pymatgen.core.structure import Structure, PeriodicSite
from pymatgen.core.composition import Composition
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.defects.utils import kb
__author__ = "Danny Broberg, Shyam Dwaraknath"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyam Dwaraknath"
__email__ = "shyamd@lbl.gov"
__status__ = "Development"
__date__ = "Mar 15, 2018"
logger = logging.getLogger(__name__)
class Defect(MSONable, metaclass=ABCMeta):
"""
Abstract class for a single point defect
"""
def __init__(self, structure, defect_site, charge=0., multiplicity=None):
"""
Initializes an abstract defect
Args:
structure: Pymatgen Structure without any defects
defect_site (Site): site for defect within structure
must have same lattice as structure
charge: (int or float) defect charge
default is zero, meaning no change to NELECT after defect is created in the structure
(assuming use_structure_charge=True in vasp input set)
multiplicity (int): multiplicity of defect within
the supercell can be supplied by user. if not
specified, then space group symmetry analysis is
used to generate multiplicity.
"""
self._structure = structure
self._charge = int(charge)
self._defect_site = defect_site
lattice_match = np.allclose(structure.lattice.matrix,
defect_site.lattice.matrix,
atol=1e-5)
if not lattice_match:
raise ValueError("defect_site lattice must be same as structure "
"lattice.")
self._multiplicity = multiplicity if multiplicity else self.get_multiplicity()
@property
def bulk_structure(self):
"""
Returns the structure without any defects.
"""
return self._structure
@property
def charge(self):
"""
Returns the charge of a defect
"""
return self._charge
@property
def site(self):
"""
Returns the defect position as a site object
"""
return self._defect_site
@property
def multiplicity(self):
"""
Returns the multiplicity of a defect site within the structure (needed for concentration analysis)
"""
return self._multiplicity
@property # type: ignore
@abstractmethod
def defect_composition(self):
"""
Returns the defect composition as a Composition object
"""
return
@abstractmethod
def generate_defect_structure(self, supercell=(1, 1, 1)):
"""
Given structure and defect_site (and type of defect) should return a defect_structure that is charged
Args:
supercell (int, [3x1], or [[]] (3x3)): supercell integer, vector, or scaling matrix
"""
return
@property # type: ignore
@abstractmethod
def name(self):
"""
Returns a name for this defect
"""
return
@abstractmethod
def get_multiplicity(self):
"""
Method to determine multiplicity. For non-Interstitial objects, also confirms that defect_site
is a site in bulk_structure.
"""
return
def copy(self):
"""
Convenience method to get a copy of the defect.
Returns:
A copy of the Defect.
"""
return self.from_dict(self.as_dict())
def set_charge(self, new_charge=0.):
"""
Sets the overall charge
Args:
charge (float): new charge to set
"""
self._charge = int(new_charge)
class Vacancy(Defect):
"""
Subclass of Defect to capture essential information for a single Vacancy defect structure.
"""
@property
def defect_composition(self):
"""
Returns: Composition of defect.
"""
temp_comp = self.bulk_structure.composition.as_dict()
temp_comp[str(self.site.specie)] -= 1
return Composition(temp_comp)
def generate_defect_structure(self, supercell=(1, 1, 1)):
"""
Returns Defective Vacancy structure, decorated with charge
Args:
supercell (int, [3x1], or [[]] (3x3)): supercell integer, vector, or scaling matrix
"""
defect_structure = self.bulk_structure.copy()
defect_structure.make_supercell(supercell)
# create a trivial defect structure to find where supercell transformation moves the lattice
struct_for_defect_site = Structure(self.bulk_structure.copy().lattice,
[self.site.specie],
[self.site.frac_coords],
to_unit_cell=True)
struct_for_defect_site.make_supercell(supercell)
defect_site = struct_for_defect_site[0]
poss_deflist = sorted(
defect_structure.get_sites_in_sphere(defect_site.coords, 0.1, include_index=True), key=lambda x: x[1])
defindex = poss_deflist[0][2]
defect_structure.remove_sites([defindex])
defect_structure.set_charge(self.charge)
return defect_structure
def get_multiplicity(self):
"""
Returns the multiplicity of a defect site within the structure (needed for concentration analysis)
and confirms that defect_site is a site in bulk_structure.
"""
sga = SpacegroupAnalyzer(self.bulk_structure)
periodic_struc = sga.get_symmetrized_structure()
poss_deflist = sorted(
periodic_struc.get_sites_in_sphere(self.site.coords, 0.1, include_index=True), key=lambda x: x[1])
if not len(poss_deflist):
raise ValueError("Site {} is not in bulk structure! Cannot create Vacancy object.".format(self.site))
else:
defindex = poss_deflist[0][2]
defect_site = self.bulk_structure[defindex]
equivalent_sites = periodic_struc.find_equivalent_sites(defect_site)
return len(equivalent_sites)
@property
def name(self):
"""
Returns a name for this defect
"""
return "Vac_{}_mult{}".format(self.site.specie, self.multiplicity)
class Substitution(Defect):
"""
Subclass of Defect to capture essential information for a single Substitution defect structure.
"""
@property # type: ignore
@lru_cache(1)
def defect_composition(self):
"""
Returns: Composition of defect.
"""
poss_deflist = sorted(
self.bulk_structure.get_sites_in_sphere(self.site.coords, 0.1, include_index=True), key=lambda x: x[1])
defindex = poss_deflist[0][2]
temp_comp = self.bulk_structure.composition.as_dict()
temp_comp[str(self.site.specie)] += 1
temp_comp[str(self.bulk_structure[defindex].specie)] -= 1
return Composition(temp_comp)
def generate_defect_structure(self, supercell=(1, 1, 1)):
"""
Returns Defective Substitution structure, decorated with charge.
If bulk structure had any site properties, all of these properties are
removed in the resulting defect structure.
Args:
supercell (int, [3x1], or [[]] (3x3)): supercell integer, vector, or scaling matrix
"""
defect_structure = Structure(self.bulk_structure.copy().lattice,
[site.specie for site in self.bulk_structure],
[site.frac_coords for site in self.bulk_structure],
to_unit_cell=True, coords_are_cartesian=False,
site_properties=None) # remove all site_properties
defect_structure.make_supercell(supercell)
# create a trivial defect structure to find where supercell transformation moves the defect
struct_for_defect_site = Structure(self.bulk_structure.copy().lattice,
[self.site.specie],
[self.site.frac_coords],
to_unit_cell=True, coords_are_cartesian=False)
struct_for_defect_site.make_supercell(supercell)
defect_site = struct_for_defect_site[0]
poss_deflist = sorted(
defect_structure.get_sites_in_sphere(defect_site.coords, 0.1, include_index=True), key=lambda x: x[1])
defindex = poss_deflist[0][2]
subsite = defect_structure.pop(defindex)
defect_structure.append(self.site.specie.symbol, subsite.coords, coords_are_cartesian=True,
properties=None)
defect_structure.set_charge(self.charge)
return defect_structure
def get_multiplicity(self):
"""
Returns the multiplicity of a defect site within the structure (needed for concentration analysis)
and confirms that defect_site is a site in bulk_structure.
"""
sga = SpacegroupAnalyzer(self.bulk_structure)
periodic_struc = sga.get_symmetrized_structure()
poss_deflist = sorted(
periodic_struc.get_sites_in_sphere(self.site.coords, 0.1, include_index=True), key=lambda x: x[1])
if not len(poss_deflist):
raise ValueError("Site {} is not in bulk structure! Cannot create Substitution object.".format(self.site))
else:
defindex = poss_deflist[0][2]
defect_site = self.bulk_structure[defindex]
equivalent_sites = periodic_struc.find_equivalent_sites(defect_site)
return len(equivalent_sites)
@property # type: ignore
@lru_cache(1)
def name(self):
"""
Returns a name for this defect
"""
poss_deflist = sorted(
self.bulk_structure.get_sites_in_sphere(self.site.coords, 0.1, include_index=True), key=lambda x: x[1])
defindex = poss_deflist[0][2]
return "Sub_{}_on_{}_mult{}".format(self.site.specie, self.bulk_structure[defindex].specie, self.multiplicity)
class Interstitial(Defect):
"""
Subclass of Defect to capture essential information for a single Interstitial defect structure.
"""
def __init__(self, structure, defect_site, charge=0., site_name='', multiplicity=None):
"""
        Initializes an interstitial defect.
Args:
structure: Pymatgen Structure without any defects
defect_site (Site): the site for the interstitial
charge: (int or float) defect charge
default is zero, meaning no change to NELECT after defect is created in the structure
(assuming use_structure_charge=True in vasp input set)
site_name: allows user to give a unique name to defect, since Wyckoff symbol/multiplicity
is sometimes insufficient to categorize the defect type.
default is no name beyond multiplicity.
multiplicity (int): multiplicity of defect within
the supercell can be supplied by user. if not
specified, then space group symmetry is used
                to generate the interstitial sublattice.
NOTE: multiplicity generation will not work for
interstitial complexes,
where multiplicity may depend on additional
factors (ex. orientation etc.)
If defect is not a complex, then this
process will yield the correct multiplicity,
provided that the defect does not undergo
significant relaxation.
"""
super().__init__(structure=structure, defect_site=defect_site, charge=charge)
self._multiplicity = multiplicity if multiplicity else self.get_multiplicity()
self.site_name = site_name
@property
def defect_composition(self):
"""
Returns: Defect composition.
"""
temp_comp = self.bulk_structure.composition.as_dict()
temp_comp[str(self.site.specie)] += 1
return Composition(temp_comp)
def generate_defect_structure(self, supercell=(1, 1, 1)):
"""
Returns Defective Interstitial structure, decorated with charge
If bulk structure had any site properties, all of these properties are
removed in the resulting defect structure
Args:
supercell (int, [3x1], or [[]] (3x3)): supercell integer, vector, or scaling matrix
"""
defect_structure = Structure(self.bulk_structure.copy().lattice,
[site.specie for site in self.bulk_structure],
[site.frac_coords for site in self.bulk_structure],
to_unit_cell=True, coords_are_cartesian=False,
site_properties=None) # remove all site_properties
defect_structure.make_supercell(supercell)
# create a trivial defect structure to find where supercell transformation moves the defect site
struct_for_defect_site = Structure(self.bulk_structure.copy().lattice,
[self.site.specie],
[self.site.frac_coords],
to_unit_cell=True, coords_are_cartesian=False)
struct_for_defect_site.make_supercell(supercell)
defect_site = struct_for_defect_site[0]
defect_structure.append(self.site.specie.symbol, defect_site.coords, coords_are_cartesian=True,
properties=None)
defect_structure.set_charge(self.charge)
return defect_structure
def get_multiplicity(self):
"""
Returns the multiplicity of a defect site within the structure (needed for concentration analysis)
"""
try:
d_structure = create_saturated_interstitial_structure(self)
except ValueError:
logger.debug('WARNING! Multiplicity was not able to be calculated adequately '
'for interstitials...setting this to 1 and skipping for now...')
return 1
sga = SpacegroupAnalyzer(d_structure)
periodic_struc = sga.get_symmetrized_structure()
poss_deflist = sorted(
periodic_struc.get_sites_in_sphere(self.site.coords, 0.1, include_index=True),
key=lambda x: x[1])
defindex = poss_deflist[0][2]
equivalent_sites = periodic_struc.find_equivalent_sites(periodic_struc[defindex])
return len(equivalent_sites)
@property
def name(self):
"""
Returns a name for this defect
"""
if self.site_name:
return "Int_{}_{}_mult{}".format(self.site.specie, self.site_name, self.multiplicity)
else:
return "Int_{}_mult{}".format(self.site.specie, self.multiplicity)
def create_saturated_interstitial_structure(interstitial_def, dist_tol=0.1):
"""
    This takes an Interstitial defect object and generates the
    sublattice for it based on the structure's space group.
    Useful for understanding the multiplicity of an interstitial
    defect in thermodynamic analysis.
    NOTE: if the interstitial undergoes a large relaxation, or the
    defect involves a complex, then there may be additional
    degrees of freedom that need to be considered for
    the multiplicity.
    Args:
        interstitial_def: the Interstitial defect object to generate the sublattice for
        dist_tol: distance tolerance used for the saturated structure,
            allowing for possibly overlapping sites
            but ensuring the space group is maintained
Returns:
Structure object decorated with interstitial site equivalents
"""
sga = SpacegroupAnalyzer(interstitial_def.bulk_structure.copy())
sg_ops = sga.get_symmetry_operations(cartesian=True)
# copy bulk structure to make saturated interstitial structure out of
# artificially lower distance_tolerance to allow for distinct interstitials
# with lower symmetry to be replicated - This is OK because one would never
    # actually use this structure for a practical calculation...
saturated_defect_struct = interstitial_def.bulk_structure.copy()
saturated_defect_struct.DISTANCE_TOLERANCE = dist_tol
for sgo in sg_ops:
new_interstit_coords = sgo.operate(interstitial_def.site.coords[:])
poss_new_site = PeriodicSite(
interstitial_def.site.specie,
new_interstit_coords,
saturated_defect_struct.lattice,
to_unit_cell=True,
coords_are_cartesian=True)
try:
# will raise value error if site already exists in structure
saturated_defect_struct.append(
poss_new_site.specie, poss_new_site.coords,
coords_are_cartesian=True, validate_proximity=True)
except ValueError:
pass
# do final space group analysis to make sure symmetry not lowered by saturating defect structure
saturated_sga = SpacegroupAnalyzer(saturated_defect_struct)
if saturated_sga.get_space_group_number() != sga.get_space_group_number():
raise ValueError("Warning! Interstitial sublattice generation "
"has changed space group symmetry. Recommend "
"reducing dist_tol and trying again...")
return saturated_defect_struct
class DefectEntry(MSONable):
"""
    A lightweight DefectEntry object containing key computed data
    for defect analysis.
"""
def __init__(self, defect, uncorrected_energy, corrections=None, parameters=None, entry_id=None):
"""
Args:
defect:
A Defect object from pymatgen.analysis.defects.core
uncorrected_energy (float): Energy of the defect entry. Usually the difference between
the final calculated energy for the defect supercell - the perfect
supercell energy
corrections (dict):
Dict of corrections for defect formation energy. All values will be summed and
added to the defect formation energy.
parameters (dict): An optional dict of calculation parameters and data to
use with correction schemes
(examples of parameter keys: supercell_size, axis_grid, bulk_planar_averages
defect_planar_averages )
entry_id (obj): An id to uniquely identify this defect, can be any MSONable
type
"""
self.defect = defect
self.uncorrected_energy = uncorrected_energy
self.corrections = corrections if corrections else {}
self.entry_id = entry_id
self.parameters = parameters if parameters else {}
@property
def bulk_structure(self):
"""
Returns: Structure object of bulk.
"""
return self.defect.bulk_structure
def as_dict(self):
"""
Json-serializable dict representation of DefectEntry
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"defect": self.defect.as_dict(),
"uncorrected_energy": self.uncorrected_energy,
"corrections": self.corrections,
"parameters": self.parameters,
"entry_id": self.entry_id}
return d
@classmethod
def from_dict(cls, d):
"""
Reconstitute a DefectEntry object from a dict representation created using
as_dict().
Args:
d (dict): dict representation of DefectEntry.
Returns:
DefectEntry object
"""
defect = MontyDecoder().process_decoded(d["defect"])
uncorrected_energy = d["uncorrected_energy"]
corrections = d.get("corrections", None)
parameters = d.get("parameters", None)
entry_id = d.get("entry_id", None)
return cls(defect, uncorrected_energy, corrections=corrections,
parameters=parameters, entry_id=entry_id)
@property
def site(self):
"""
Returns: Site of defect.
"""
return self.defect.site
@property
def multiplicity(self):
"""
Returns: Multiplicity of defect.
"""
return self.defect.multiplicity
@property
def charge(self):
"""
Returns: Charge of defect.
"""
return self.defect.charge
@property
def energy(self):
"""
Returns: *Corrected* energy of the entry
"""
return self.uncorrected_energy + np.sum(list(self.corrections.values()))
@property
def name(self):
"""
Returns: Defect name
"""
return self.defect.name
def copy(self):
"""
Convenience method to get a copy of the DefectEntry.
Returns:
A copy of the DefectEntry.
"""
defectentry_dict = self.as_dict()
return DefectEntry.from_dict(defectentry_dict)
def formation_energy(self, chemical_potentials=None, fermi_level=0):
"""
Compute the formation energy for a defect taking into account a given chemical potential and fermi_level
Args:
chemical_potentials (dict): Dictionary of elemental chemical potential values.
Keys are Element objects within the defect structure's composition.
Values are float numbers equal to the atomic chemical potential for that element.
fermi_level (float): Value corresponding to the electron chemical potential.
If "vbm" is supplied in parameters dict, then fermi_level is referenced to the VBM.
If "vbm" is NOT supplied in parameters dict, then fermi_level is referenced to the
calculation's absolute Kohn-Sham potential (and should include the vbm value provided
by a band structure calculation)
Returns:
Formation energy value (float)
"""
chemical_potentials = chemical_potentials if chemical_potentials else {}
chempot_correction = sum([
chem_pot * (self.bulk_structure.composition[el] - self.defect.defect_composition[el])
for el, chem_pot in chemical_potentials.items()
])
formation_energy = self.energy + chempot_correction
if "vbm" in self.parameters:
formation_energy += self.charge * (self.parameters["vbm"] + fermi_level)
else:
formation_energy += self.charge * fermi_level
return formation_energy
def defect_concentration(self, chemical_potentials, temperature=300, fermi_level=0.0):
"""
Compute the defect concentration for a temperature and Fermi level.
        Args:
            chemical_potentials (dict): Dictionary of elemental chemical potential values,
                as used in formation_energy()
            temperature (float): the temperature in K
            fermi_level (float): the Fermi level in eV (with respect to the VBM)
        Returns:
            defect concentration in cm^-3
"""
n = self.multiplicity * 1e24 / self.defect.bulk_structure.volume
conc = n * np.exp(-1.0 * self.formation_energy(chemical_potentials, fermi_level=fermi_level) /
(kb * temperature))
return conc
def __repr__(self):
"""
Human readable string representation of this entry
"""
output = [
"DefectEntry {} - {} - charge {}".format(self.entry_id, self.name, self.charge),
"Energy = {:.4f}".format(self.energy),
"Correction = {:.4f}".format(np.sum(list(self.corrections.values()))),
"Parameters:"
]
for k, v in self.parameters.items():
output.append("\t{} = {}".format(k, v))
return "\n".join(output)
def __str__(self):
return self.__repr__()
class DefectCorrection(MSONable):
"""
A Correction class modeled off the computed entry correction format
"""
@abstractmethod
def get_correction(self, entry):
"""
Returns correction for a single entry.
Args:
entry: A DefectEntry object.
Returns:
A single dictionary with the format
correction_name: energy_correction
Raises:
CompatibilityError if entry is not compatible.
"""
return
def correct_entry(self, entry):
"""
Corrects a single entry.
Args:
entry: A DefectEntry object.
Returns:
            A processed entry.
Raises:
CompatibilityError if entry is not compatible.
"""
        entry.corrections.update(self.get_correction(entry))
return entry
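if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the library API):
    # build a small CsCl-like cubic structure, create a charged Vacancy on its
    # first site and wrap it in a DefectEntry. The lattice constant, charge,
    # uncorrected energy and vbm/fermi values below are made-up numbers.
    bulk = Structure(4.0 * np.eye(3), ["Cs", "Cl"],
                     [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
    vac = Vacancy(bulk, bulk[0], charge=-1)
    entry = DefectEntry(vac, uncorrected_energy=1.5, parameters={"vbm": 0.0})
    print(vac.name, "multiplicity:", vac.multiplicity)
    print("formation energy at E_F = 0.5 eV:", entry.formation_energy(fermi_level=0.5))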
|
|
import json
import logging
import threading
from urllib.parse import urljoin, urlparse
from http.server import SimpleHTTPRequestHandler, HTTPServer
import ray.cloudpickle as cloudpickle
from ray.tune import TuneError
from ray.tune.suggest import BasicVariantGenerator
from ray._private.utils import binary_to_hex, hex_to_binary
logger = logging.getLogger(__name__)
try:
import requests # `requests` is not part of stdlib.
except ImportError:
requests = None
logger.exception(
"Couldn't import `requests` library. "
"Be sure to install it on the client side."
)
class TuneClient:
"""Client to interact with an ongoing Tune experiment.
Requires a TuneServer to have started running.
Attributes:
tune_address (str): Address of running TuneServer
port_forward (int): Port number of running TuneServer
"""
def __init__(self, tune_address, port_forward):
self._tune_address = tune_address
self._port_forward = port_forward
self._path = "http://{}:{}".format(tune_address, port_forward)
def get_all_trials(self, timeout=None):
"""Returns a list of all trials' information."""
response = requests.get(urljoin(self._path, "trials"), timeout=timeout)
return self._deserialize(response)
def get_trial(self, trial_id, timeout=None):
"""Returns trial information by trial_id."""
response = requests.get(
urljoin(self._path, "trials/{}".format(trial_id)), timeout=timeout
)
return self._deserialize(response)
def add_trial(self, name, specification):
"""Adds a trial by name and specification (dict)."""
payload = {"name": name, "spec": specification}
response = requests.post(urljoin(self._path, "trials"), json=payload)
return self._deserialize(response)
def stop_trial(self, trial_id):
"""Requests to stop trial by trial_id."""
response = requests.put(urljoin(self._path, "trials/{}".format(trial_id)))
return self._deserialize(response)
def stop_experiment(self):
"""Requests to stop the entire experiment."""
response = requests.put(urljoin(self._path, "stop_experiment"))
return self._deserialize(response)
@property
def server_address(self):
return self._tune_address
@property
def server_port(self):
return self._port_forward
def _load_trial_info(self, trial_info):
trial_info["config"] = cloudpickle.loads(hex_to_binary(trial_info["config"]))
trial_info["result"] = cloudpickle.loads(hex_to_binary(trial_info["result"]))
def _deserialize(self, response):
parsed = response.json()
if "trial" in parsed:
self._load_trial_info(parsed["trial"])
elif "trials" in parsed:
for trial_info in parsed["trials"]:
self._load_trial_info(trial_info)
return parsed
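# Illustrative usage sketch (commented out; assumes a TuneServer is already
# listening on localhost:4321 for a running experiment):
#
#   client = TuneClient("localhost", 4321)
#   for info in client.get_all_trials()["trials"]:
#       print(info["id"], info["status"])
#   client.stop_experiment()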
def RunnerHandler(runner):
class Handler(SimpleHTTPRequestHandler):
"""A Handler is a custom handler for TuneServer.
Handles all requests and responses coming into and from
the TuneServer.
"""
def _do_header(self, response_code=200, headers=None):
"""Sends the header portion of the HTTP response.
Parameters:
response_code (int): Standard HTTP response code
headers (list[tuples]): Standard HTTP response headers
"""
if headers is None:
headers = [("Content-type", "application/json")]
self.send_response(response_code)
for key, value in headers:
self.send_header(key, value)
self.end_headers()
def do_HEAD(self):
"""HTTP HEAD handler method."""
self._do_header()
def do_GET(self):
"""HTTP GET handler method."""
response_code = 200
message = ""
try:
result = self._get_trial_by_url(self.path)
resource = {}
if result:
if isinstance(result, list):
infos = [self._trial_info(t) for t in result]
resource["trials"] = infos
else:
resource["trial"] = self._trial_info(result)
message = json.dumps(resource)
except TuneError as e:
response_code = 404
message = str(e)
self._do_header(response_code=response_code)
self.wfile.write(message.encode())
def do_PUT(self):
"""HTTP PUT handler method."""
response_code = 200
message = ""
try:
resource = {}
if self.path.endswith("stop_experiment"):
runner.request_stop_experiment()
trials = list(runner.get_trials())
else:
trials = self._get_trial_by_url(self.path)
if trials:
if not isinstance(trials, list):
trials = [trials]
for t in trials:
runner.request_stop_trial(t)
resource["trials"] = [self._trial_info(t) for t in trials]
message = json.dumps(resource)
except TuneError as e:
response_code = 404
message = str(e)
self._do_header(response_code=response_code)
self.wfile.write(message.encode())
def do_POST(self):
"""HTTP POST handler method."""
response_code = 201
            content_len = int(self.headers.get("Content-Length", 0))
raw_body = self.rfile.read(content_len)
parsed_input = json.loads(raw_body.decode())
resource = self._add_trials(parsed_input["name"], parsed_input["spec"])
headers = [("Content-type", "application/json"), ("Location", "/trials/")]
self._do_header(response_code=response_code, headers=headers)
self.wfile.write(json.dumps(resource).encode())
def _trial_info(self, trial):
"""Returns trial information as JSON."""
if trial.last_result:
result = trial.last_result.copy()
else:
result = None
info_dict = {
"id": trial.trial_id,
"trainable_name": trial.trainable_name,
"config": binary_to_hex(cloudpickle.dumps(trial.config)),
"status": trial.status,
"result": binary_to_hex(cloudpickle.dumps(result)),
}
return info_dict
def _get_trial_by_url(self, url):
"""Parses url to get either all trials or trial by trial_id."""
parts = urlparse(url)
path = parts.path
if path == "/trials":
return list(runner.get_trials())
else:
trial_id = path.split("/")[-1]
return runner.get_trial(trial_id)
def _add_trials(self, name, spec):
"""Add trial by invoking TrialRunner."""
resource = {}
resource["trials"] = []
trial_generator = BasicVariantGenerator()
trial_generator.add_configurations({name: spec})
while not trial_generator.is_finished():
trial = trial_generator.next_trial()
if not trial:
break
runner.add_trial(trial)
resource["trials"].append(self._trial_info(trial))
return resource
return Handler
class TuneServer(threading.Thread):
"""A TuneServer is a thread that initializes and runs a HTTPServer.
The server handles requests from a TuneClient.
Attributes:
runner (TrialRunner): Runner that modifies and accesses trials.
port_forward (int): Port number of TuneServer.
"""
DEFAULT_PORT = 4321
def __init__(self, runner, port=None):
"""Initialize HTTPServer and serve forever by invoking self.run()"""
threading.Thread.__init__(self)
self._port = port if port else self.DEFAULT_PORT
address = ("localhost", self._port)
logger.info("Starting Tune Server...")
self._server = HTTPServer(address, RunnerHandler(runner))
self.daemon = True
self.start()
def run(self):
self._server.serve_forever()
def shutdown(self):
"""Shutdown the underlying server."""
self._server.shutdown()
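# Illustrative sketch: TuneServer is normally constructed by Tune itself with an
# active TrialRunner; instantiation starts the serving thread immediately.
#
#   server = TuneServer(runner, port=TuneServer.DEFAULT_PORT)  # `runner` is a TrialRunner
#   ...
#   server.shutdown()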
|
|
import os
import base64
from datetime import datetime
from xos.config import Config
from xos.logger import Logger, logging
from synchronizers.base.steps import *
from django.db.models import F, Q
from django.utils import timezone
from core.models import *
from django.db import reset_queries
import json
import time
import pdb
import traceback
logger = Logger(level=logging.INFO)
def f7(seq):
    """Remove duplicates from seq while preserving order."""
    seen = set()
    seen_add = seen.add
    return [x for x in seq if not (x in seen or seen_add(x))]
def elim_dups(backend_str):
    """Collapse duplicate components of a '/'-separated backend_status string."""
    strs = backend_str.split('/')
    strs = map(lambda x: x.split('(')[0], strs)
    strs2 = f7(strs)
    return '/'.join(strs2)
def deepgetattr(obj, attr):
    """Resolve a dotted attribute path, e.g. deepgetattr(o, 'a.b') == o.a.b."""
    return reduce(getattr, attr.split('.'), obj)
class InnocuousException(Exception):
pass
class FailedDependency(Exception):
pass
class SyncStep(object):
""" An XOS Sync step.
Attributes:
psmodel Model name the step synchronizes
dependencies list of names of models that must be synchronized first if the current model depends on them
"""
slow=False
    def get_prop(self, prop):
try:
sync_config_dir = Config().sync_config_dir
except:
sync_config_dir = '/etc/xos/sync'
        prop_config_path = os.path.join(sync_config_dir, self.name, prop)
return open(prop_config_path).read().rstrip()
def __init__(self, **args):
"""Initialize a sync step
Keyword arguments:
name -- Name of the step
provides -- XOS models sync'd by this step
"""
dependencies = []
self.driver = args.get('driver')
self.error_map = args.get('error_map')
try:
self.soft_deadline = int(self.get_prop('soft_deadline_seconds'))
except:
self.soft_deadline = 5 # 5 seconds
return
def fetch_pending(self, deletion=False):
# This is the most common implementation of fetch_pending
# Steps should override it if they have their own logic
# for figuring out what objects are outstanding.
main_obj = self.observes
if (not deletion):
objs = main_obj.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
else:
objs = main_obj.deleted_objects.all()
return objs
#return Sliver.objects.filter(ip=None)
def check_dependencies(self, obj, failed):
for dep in self.dependencies:
peer_name = dep[0].lower() + dep[1:] # django names are camelCased with the first letter lower
try:
peer_object = deepgetattr(obj, peer_name)
try:
peer_objects = peer_object.all()
except AttributeError:
peer_objects = [peer_object]
except:
peer_objects = []
if (hasattr(obj,'controller')):
try:
peer_objects = filter(lambda o:o.controller==obj.controller, peer_objects)
except AttributeError:
pass
if (failed in peer_objects):
if (obj.backend_status!=failed.backend_status):
obj.backend_status = failed.backend_status
obj.save(update_fields=['backend_status'])
raise FailedDependency("Failed dependency for %s:%s peer %s:%s failed %s:%s" % (obj.__class__.__name__, str(getattr(obj,"pk","no_pk")), peer_object.__class__.__name__, str(getattr(peer_object,"pk","no_pk")), failed.__class__.__name__, str(getattr(failed,"pk","no_pk"))))
def call(self, failed=[], deletion=False):
pending = self.fetch_pending(deletion)
for o in pending:
# another spot to clean up debug state
try:
reset_queries()
except:
# this shouldn't happen, but in case it does, catch it...
logger.log_exc("exception in reset_queries",extra=o.tologdict())
sync_failed = False
try:
backoff_disabled = Config().observer_backoff_disabled
except:
backoff_disabled = 0
try:
scratchpad = json.loads(o.backend_register)
if (scratchpad):
next_run = scratchpad['next_run']
if (not backoff_disabled and next_run>time.time()):
sync_failed = True
except:
logger.log_exc("Exception while loading scratchpad",extra=o.tologdict())
pass
if (not sync_failed):
try:
for f in failed:
self.check_dependencies(o,f) # Raises exception if failed
if (deletion):
self.delete_record(o)
o.delete(purge=True)
else:
self.sync_record(o)
o.enacted = timezone.now() # Is this the same timezone? XXX
scratchpad = {'next_run':0, 'exponent':0}
o.backend_register = json.dumps(scratchpad)
o.backend_status = "1 - OK"
o.save(update_fields=['enacted','backend_status','backend_register'])
except (InnocuousException,Exception) as e:
logger.log_exc("Syncstep caught exception",extra=o.tologdict())
force_error = False
try:
if (o.backend_status.startswith('2 - ')):
force_error = False # Already in error state
str_e = '%s/%s'%(o.backend_status[4:],str(e))
str_e = elim_dups(str_e)
else:
str_e = str(e)
except:
str_e = str(e)
if (not str_e):
str_e = 'Unknown'
try:
error = self.error_map.map(str_e)
except:
error = str_e
if isinstance(e, InnocuousException) and not force_error:
o.backend_status = '1 - %s'%error
else:
o.backend_status = '2 - %s'%error
cmd = 'wget -O /dev/null -q "http://xoslnprof.appspot.com/command?action=pushlog&node=1&log_path=/%s/%s"'%(self.__class__.__name__,error)
os.system(cmd)
try:
scratchpad = json.loads(o.backend_register)
scratchpad['exponent']
except:
logger.log_exc("Exception while updating scratchpad",extra=o.tologdict())
scratchpad = {'next_run':0, 'exponent':0}
# Second failure
if (scratchpad['exponent']):
delay = scratchpad['exponent'] * 600 # 10 minutes
if (delay<1440):
delay = 1440
scratchpad['next_run'] = time.time() + delay
scratchpad['exponent']+=1
o.backend_register = json.dumps(scratchpad)
# TOFIX:
# DatabaseError: value too long for type character varying(140)
if (o.pk):
try:
o.backend_status = o.backend_status[:1024]
o.save(update_fields=['backend_status','backend_register','updated'])
except:
print "Could not update backend status field!"
pass
sync_failed = True
if (sync_failed):
failed.append(o)
return failed
def sync_record(self, o):
return
def delete_record(self, o):
return
def __call__(self, **args):
return self.call(**args)
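# Illustrative sketch of a concrete step (model names are hypothetical): a real
# step points `observes` at the model class it synchronizes, optionally lists
# `dependencies`, and overrides sync_record()/delete_record() with the calls
# that push state to, or remove it from, the backend.
#
#   class SyncExampleObjects(SyncStep):
#       provides = [ExampleObject]
#       observes = ExampleObject
#       dependencies = []
#
#       def sync_record(self, o):
#           # push object `o` to the backend here
#           pass
#
#       def delete_record(self, o):
#           # clean up backend state for `o` here
#           pass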
|
|
# This file is part of the ISIS IBEX application.
# Copyright (C) 2017 Science & Technology Facilities Council.
# All rights reserved.
#
# This program is distributed in the hope that it will be useful.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution.
# EXCEPT AS EXPRESSLY SET FORTH IN THE ECLIPSE PUBLIC LICENSE V1.0, THE PROGRAM
# AND ACCOMPANYING MATERIALS ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND. See the Eclipse Public License v1.0 for more details.
#
# You should have received a copy of the Eclipse Public License v1.0
# along with this program; if not, you can obtain a copy from
# https://www.eclipse.org/org/documents/epl-v10.php or
# http://opensource.org/licenses/eclipse-1.0.php
"""
Module for creating a log file from a configuration and periodic data source.
"""
import os
from stat import S_IROTH, S_IRGRP, S_IREAD
from string import Formatter
from ArchiverAccess.periodic_data_generator import PeriodicDataGenerator
from server_common.utilities import print_and_log
FORMATTER_NOT_APPLIED_MESSAGE = " (formatter not applied: `{0}`)"
"""Message when a formatter can not be applied when writing a pv"""
class DataFileCreationError(Exception):
"""
Exception that is thrown if the data file could not be created.
"""
class TemplateReplacer(object):
"""
Code to replace templated values
"""
def __init__(self, pv_values, start_time=None, time=None):
"""
Args:
start_time (datetime.datetime): time used to replace templated "start_time"
            time (datetime.datetime): time used to replace templated "time", e.g. start of logging time for the log filename
pv_values: values of the pvs in order of keyword
"""
self._pv_values = pv_values
self._replacements = {}
if start_time is not None:
self._replacements["start_time"] = start_time.strftime("%Y-%m-%dT%H_%M_%S")
if time is not None:
time_as_string = time.strftime("%Y-%m-%dT%H:%M:%S")
milliseconds = time.microsecond // 1000
self._replacements["time"] = "%s.%03d" % (time_as_string, milliseconds)
def replace(self, template):
"""
Replace the values in the template with the pv values
Args:
template: template value to replace
Returns: line with values in
"""
try:
return template.format(*self._pv_values, **self._replacements)
except ValueError as ex:
            # the formatter could not be applied; rebuild the template without format specs
            template_no_format = ""
            for text, name, format_spec, conversion in Formatter().parse(template):
template_no_format += "{text}{{{name}}}".format(text=text, name=name)
if "Disconnected" not in self._pv_values and "Archive_Off" not in self._pv_values:
template_no_format += FORMATTER_NOT_APPLIED_MESSAGE.format(ex)
return template_no_format.format(*self._pv_values, **self._replacements)
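# Illustrative example (made-up values): positional "{0}", "{1}", ... slots are
# filled from pv_values, and the named "{start_time}"/"{time}" slots from the
# timestamps formatted in __init__.
#
#   from datetime import datetime
#   replacer = TemplateReplacer([1.23, "OK"], start_time=datetime(2017, 1, 2, 3, 4, 5))
#   replacer.replace("log_{start_time} value={0:.2f} status={1}")
#   # -> "log_2017-01-02T03_04_05 value=1.23 status=OK"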
def mkdir_for_file(filepath):
"""
    Make the directory tree for the file; do not error if it already exists.
Args:
filepath: path to create directory structure for
Returns: nothing
"""
abspath = os.path.abspath(os.path.dirname(filepath))
if not os.path.isdir(abspath):
os.makedirs(abspath)
def make_file_readonly_fn(filepath):
"""
Make file readonly.
Args:
filepath: path to file
Returns:
"""
os.chmod(filepath, S_IREAD | S_IRGRP | S_IROTH)
class DataFileCreatorFactory(object):
"""
Factory for creating a data file creator
"""
def create(self, config, archiver_data_source, filename_template, file_open_method=open,
mkdir_for_file_fn=mkdir_for_file, make_file_readonly=make_file_readonly_fn):
"""
Create an instance of a data file creator.
Args:
config(ArchiverAccess.archive_access_configuration.ArchiveAccessConfig):
configuration for the archive data file to create
archiver_data_source: archiver data source
filename_template: template for the filename
file_open_method: method to open a file
mkdir_for_file_fn: function for creating the directories needed
make_file_readonly: function to make a file readonly
Returns: ArchiveDataFileCreator
"""
return ArchiveDataFileCreator(config, archiver_data_source, filename_template,
file_open_method=file_open_method, mkdir_for_file_fn=mkdir_for_file_fn,
make_file_readonly=make_file_readonly)
class ArchiveDataFileCreator(object):
"""
Archive data file creator creates the log file based on the configuration.
"""
def __init__(self, config, archiver_data_source, filename_template, file_open_method=open,
mkdir_for_file_fn=mkdir_for_file, make_file_readonly=make_file_readonly_fn):
"""
Constructor
Args:
config(ArchiverAccess.archive_access_configuration.ArchiveAccessConfig):
configuration for the archive data file to create
archiver_data_source: archiver data source
filename_template: template for the filename
            file_open_method: method used to open a file
mkdir_for_file_fn: function for creating the directories needed
make_file_readonly: function to make a file readonly
"""
self._config = config
self._file_open_method = file_open_method
self._archiver_data_source = archiver_data_source
self._mkdir_for_file_fn = mkdir_for_file_fn
self._make_file_readonly_fn = make_file_readonly
self._filename = None
self._first_line_written = False
self._periodic_data_generator = None
self._filename_template = filename_template
def write_complete_file(self, time_period):
"""
Write the file to the file object.
Args:
time_period (ArchiverAccess.archive_time_period.ArchiveTimePeriod): time period
Raises DataFileCreationError: if there is a problem writing the log file
"""
self.write_file_header(time_period.start_time)
self.write_data_lines(time_period)
self.finish_log_file()
def finish_log_file(self):
"""
Perform any post write tasks on the log file, e.g. make it read only.
"""
try:
self._make_file_readonly_fn(self._filename)
except Exception as ex:
raise DataFileCreationError("Failed to make log file {filename} readonly. "
"Error is: '{exception}'"
.format(exception=ex, filename=self._filename))
def write_file_header(self, start_time):
"""
Write the file header to a newly created file
Args:
start_time: start time of logging
Raises DataFileCreationError: if there is a problem writing the log file
"""
try:
pv_names_in_header = self._config.pv_names_in_header
pv_values = self._archiver_data_source.initial_values(pv_names_in_header, start_time)
template_replacer = TemplateReplacer(pv_values, start_time=start_time)
self._filename = template_replacer.replace(self._filename_template)
print_and_log("Writing log file '{0}'".format(self._filename), src="ArchiverAccess")
self._mkdir_for_file_fn(self._filename)
with self._file_open_method(self._filename, mode="w") as f:
for header_template in self._config.header:
header_line = template_replacer.replace(header_template)
f.write("{0}\n".format(header_line))
f.write("{0}\n".format(self._config.column_headers))
self._first_line_written = False
self._periodic_data_generator = PeriodicDataGenerator(self._archiver_data_source)
except Exception as ex:
raise DataFileCreationError("Failed to write header in log file {filename} for start time {time}. "
"Error is: '{exception}'"
.format(time=start_time, exception=ex, filename=self._filename))
def write_data_lines(self, time_period):
"""
Append data lines to a file for the given time period. The first data line is appended only on the first call
to this.
Args:
time_period: the time period to generate data lines for
Raises DataFileCreationError: if there is a problem writing the log file
"""
try:
assert self._filename is not None, "Called write_data_lines before writing header."
with self._file_open_method(self._filename, mode="a") as f:
periodic_data = self._periodic_data_generator.get_generator(
self._config.pv_names_in_columns, time_period)
self._ignore_first_line_if_already_written(periodic_data)
for time, values in periodic_data:
table_template_replacer = TemplateReplacer(values, time=time)
table_line = table_template_replacer.replace(self._config.table_line)
f.write("{0}\n".format(table_line))
except Exception as ex:
raise DataFileCreationError("Failed to write lines in log file {filename} for time period {time_period}. "
"Error is: '{exception}'"
.format(time_period=time_period, exception=ex, filename=self._filename))
def _ignore_first_line_if_already_written(self, periodic_data):
"""
If this is the second call to this function then the first line will have been written as part of the output
from the previous call so skip it.
Args:
periodic_data: periodic data
"""
if self._first_line_written:
next(periodic_data)
else:
self._first_line_written = True
|
|
# PYTHON_ARGCOMPLETE_OK
"""cli.
Desc: Command-line tool for listing Python packages installed by setuptools,
package metadata, package dependencies, and querying The Cheese Shop
(PyPI) for Python package release information such as which installed
packages have updates available.
Author: Rob Cakebread <gentoodev a t gmail.com>
License : BSD (See COPYING)
"""
from __future__ import print_function
import argparse
import inspect
import os
import pkg_resources
import pprint
import re
import site
import struct
import subprocess
import sys
import webbrowser
if sys.version_info[0] == 2:
from httplib import HTTPException
from urllib import urlretrieve
from urlparse import urlparse
from xmlrpclib import Fault as XMLRPCFault
else:
from http.client import HTTPException
from urllib.request import urlretrieve
from urllib.parse import urlparse
from xmlrpc.client import Fault as XMLRPCFault
from distutils.sysconfig import get_python_lib
from yolk.metadata import get_metadata
from yolk import yolklib
from yolk.pypi import CheeseShop
from yolk.setuptools_support import get_download_uri, get_pkglist
from yolk.utils import run_command, command_successful
from yolk.__init__ import __version__ as VERSION
class YolkException(Exception):
"""Exception for communicating top-level error to user."""
class StdOut(object):
"""Filter stdout or stderr from specific modules So far this is just used
for pkg_resources."""
def __init__(self, stream, modulenames):
self.stdout = stream
# Modules to squelch
self.modulenames = modulenames
def __getattr__(self, attribute):
if attribute not in self.__dict__ or attribute == '__doc__':
return getattr(self.stdout, attribute)
return self.__dict__[attribute]
def flush(self):
"""Bug workaround for Python 3.2+: Exception AttributeError: 'flush'
in.
<yolk.cli.StdOut object...
"""
def write(self, inline):
"""Write a line to stdout if it isn't in a blacklist.
Try to get the name of the calling module to see if we want to
filter it. If there is no calling module, use current frame in
case there's a traceback before there is any calling module
"""
frame = inspect.currentframe().f_back
if frame:
mod = frame.f_globals.get('__name__')
else:
mod = sys._getframe(0).f_globals.get('__name__')
if mod not in self.modulenames:
self.stdout.write(inline)
def writelines(self, inline):
"""Write multiple lines."""
for line in inline:
self.write(line)
class Yolk(object):
"""Main class for yolk."""
def __init__(self):
# PyPI project name with proper case
self.project_name = ''
# PyPI project version
self.version = ''
# List of all versions not hidden on PyPI
self.all_versions = []
self.pkg_spec = None
self.options = None
# Squelch output from setuptools
# Add future offenders to this list.
shut_up = ['distutils.log']
sys.stdout = StdOut(sys.stdout, shut_up)
sys.stderr = StdOut(sys.stderr, shut_up)
self.pypi = None
def get_plugin(self, method):
"""Return plugin object if CLI option is activated and method exists.
@param method: name of plugin's method we're calling
@type method: string
@returns: list of plugins with `method`
"""
all_plugins = []
for entry_point in pkg_resources.iter_entry_points('yolk.plugins'):
plugin_obj = entry_point.load()
plugin = plugin_obj()
plugin.configure(self.options, None)
if plugin.enabled:
if not hasattr(plugin, method):
plugin = None
else:
all_plugins.append(plugin)
return all_plugins
def run(self):
"""Perform actions based on CLI options.
@returns: status code
"""
parser = setup_parser()
try:
import argcomplete
argcomplete.autocomplete(parser)
except ImportError:
pass
self.options = parser.parse_args()
pkg_spec = validate_pypi_opts(parser)
if not pkg_spec:
pkg_spec = self.options.pkg_spec
self.pkg_spec = pkg_spec
if self.options.fields:
self.options.fields = [s.strip().lower()
for s in self.options.fields.split(',')]
else:
self.options.fields = []
if (
not self.options.pypi_search and
len(sys.argv) == 1
):
parser.print_help()
return 2
# Options that depend on querying installed packages, not PyPI.
# We find the proper case for package names if they are installed,
# otherwise PyPI returns the correct case.
if (
self.options.show_deps or
self.options.show_all or
self.options.show_active or
self.options.show_non_active or
(self.options.show_updates and pkg_spec) or
self.options.upgrade
):
want_installed = True
else:
want_installed = False
# show_updates may or may not have a pkg_spec
if (
not want_installed or
self.options.show_updates or
self.options.upgrade
):
self.pypi = CheeseShop(self.options.debug)
# XXX: We should return 2 here if we couldn't create xmlrpc server
if pkg_spec:
(self.project_name,
self.version,
self.all_versions) = self.parse_pkg_ver(want_installed)
if want_installed and not self.project_name:
print(u'{} is not installed'.format(pkg_spec),
file=sys.stderr)
return 1
# I could prefix all these with 'cmd_' and the methods also
# and then iterate over the `options` dictionary keys...
commands = ['show_deps', 'query_metadata_pypi', 'fetch',
'versions_available', 'show_updates', 'upgrade',
'browse_website',
'show_download_links', 'pypi_search',
'show_pypi_changelog', 'show_pypi_releases',
'yolk_version', 'show_all',
'show_active', 'show_non_active', 'show_entry_map',
'show_entry_points']
# Run first command it finds, and only the first command, then return
# XXX: Check if more than one command was set in options and give
# error?
for action in commands:
if getattr(self.options, action):
return getattr(self, action)()
parser.print_help()
def show_active(self):
"""Show installed active packages."""
return self.show_distributions('active')
def show_non_active(self):
"""Show installed non-active packages."""
return self.show_distributions('nonactive')
def show_all(self):
"""Show all installed packages."""
return self.show_distributions('all')
def show_updates(self):
"""Check installed packages for available updates on PyPI.
        Uses self.project_name if a package spec was given; otherwise checks
        every installed package.
        @returns: 0
"""
if self.project_name:
pkg_list = [self.project_name]
else:
pkg_list = get_pkglist()
for (project_name, version, newest) in _updates(
pkg_list,
self.pypi,
user_installs_only=self.options.user):
print(u'{} {} ({})'.format(project_name,
version,
newest))
return 0
def upgrade(self):
"""Check installed packages for available updates on PyPI and upgrade.
        Uses self.project_name if a package spec was given; otherwise upgrades
        every installed package that has an available update.
        @returns: 0
"""
if self.project_name:
pkg_list = [self.project_name]
else:
pkg_list = get_pkglist()
names = [values[0]
for values in _updates(pkg_list,
self.pypi,
user_installs_only=self.options.user)]
if names:
subprocess.call(
[sys.executable, '-m', 'pip', 'install', '--upgrade'] +
(['--user'] if self.options.user else []) +
names)
return 0
def show_distributions(self, show):
"""Show list of installed activated OR non-activated packages.
@param show: type of pkgs to show (all, active or nonactive)
@type show: string
@returns: None or 2 if error
"""
# Search for any plugins with active CLI options with add_column()
# method.
plugins = self.get_plugin('add_column')
# Some locations show false positive for 'development' packages:
ignores = ['/UNIONFS', '/KNOPPIX.IMG']
# See http://cheeseshop.python.org/pypi/workingenv.py for details.
workingenv = os.environ.get('WORKING_ENV')
if workingenv:
ignores.append(workingenv)
results = None
for (dist, active) in yolklib.get_distributions(show,
self.project_name,
self.version):
metadata = get_metadata(dist)
for prefix in ignores:
if dist.location.startswith(prefix):
dist.location = dist.location.replace(prefix, '')
# Case-insensitive search because of Windows.
if dist.location.lower().startswith(get_python_lib().lower()):
develop = ''
else:
develop = dist.location
if metadata:
add_column_text = ''
for my_plugin in plugins:
# See if package is 'owned' by a package manager such as
# portage, apt, rpm etc.
add_column_text += my_plugin.add_column(dist) + ' '
self.print_metadata(metadata, develop, active, add_column_text)
else:
print(str(dist) + ' has no metadata')
results = True
if not results and self.project_name:
if self.version:
pkg_spec = '{}=={}'.format(self.project_name, self.version)
else:
pkg_spec = self.project_name
if show == 'all':
print(
u'There are no versions of {} installed'.format(pkg_spec),
file=sys.stderr)
else:
print(
u'There are no {} versions of {} installed'.format(
show, pkg_spec),
file=sys.stderr)
return 2
elif show == 'all' and results and self.options.fields:
print("Versions with '*' are non-active.")
print("Versions with '!' are deployed in development mode.")
def print_metadata(self, metadata, develop, active, installed_by):
"""Print out formatted metadata.
@param metadata: package's metadata
@type metadata: pkg_resources Distribution obj
@param develop: path to pkg if its deployed in development mode
@type develop: string
@param active: show if package is activated or not
@type active: boolean
@param installed_by: Shows if pkg was installed by a package manager
other than setuptools
@type installed_by: string
@returns: None
"""
show_metadata = self.options.metadata
version = metadata['Version']
# When showing all packages, note which are not active:
if active:
if self.options.fields:
active_status = ''
else:
active_status = 'active'
else:
if self.options.fields:
active_status = '*'
else:
active_status = 'non-active'
if develop:
if self.options.fields:
development_status = '! ({})'.format(develop)
else:
development_status = 'development ({})'.format(develop)
else:
development_status = installed_by
status = '{} {}'.format(active_status, development_status)
if self.options.fields:
print(
'{} ({}){} {}'.format(metadata['Name'], version, active_status,
development_status))
else:
# Need intelligent justification.
print(metadata['Name'].ljust(15) + ' - ' + version.ljust(12) +
' - ' + status)
if self.options.fields:
for field in metadata.keys():
if field.lower() in self.options.fields:
print(u' {}: {}'.format(field, metadata[field]))
print()
elif show_metadata:
for field in metadata.keys():
if field != 'Name' and field != 'Summary':
print(u' {}: {}'.format(field, metadata[field]))
def show_deps(self):
"""Show dependencies for package(s)
@returns: 0 - success 1 - No dependency info supplied
"""
pkgs = pkg_resources.Environment()
for pkg in pkgs[self.project_name]:
if not self.version:
print(pkg.project_name, pkg.version)
i = len(list(pkg._dep_map.values())[0])
if i:
while i:
if (
not self.version or
self.version and
pkg.version == self.version
):
if self.version and i == len(list(
pkg._dep_map.values())[0]):
print(pkg.project_name, pkg.version)
print(u' ' + str(list(
pkg._dep_map.values())[0][i - 1]))
i -= 1
else:
return 1
return 0
def show_pypi_changelog(self):
"""Show detailed PyPI ChangeLog for the last `hours`
@returns: 0 = success or 1 if failed to retrieve from XML-RPC server
"""
hours = self.options.show_pypi_changelog
if not hours.isdigit():
print('You must supply an integer',
file=sys.stderr)
return 1
try:
changelog = self.pypi.changelog(int(hours))
except XMLRPCFault as err_msg:
print(err_msg, file=sys.stderr)
print("Couldn't retrieve changelog", file=sys.stderr)
return 1
last_pkg = ''
for entry in changelog:
pkg = entry[0]
if pkg != last_pkg:
print(u'{} {}\n\t{}'.format(entry[0], entry[1], entry[3]))
last_pkg = pkg
else:
print(u'\t{}'.format(entry[3]))
return 0
def show_pypi_releases(self):
"""Show PyPI releases for the last number of `hours`
@returns: 0 = success or 1 if failed to retrieve from XML-RPC server
"""
try:
hours = int(self.options.show_pypi_releases)
except ValueError:
print('You must supply an integer', file=sys.stderr)
return 1
try:
latest_releases = self.pypi.updated_releases(hours)
except XMLRPCFault as err_msg:
print(err_msg, file=sys.stderr)
print("Couldn't retrieve latest releases.", file=sys.stderr)
return 1
for release in latest_releases:
print(u'{} {}'.format(release[0], release[1]))
return 0
def show_download_links(self):
"""Query PyPI for pkg download URI for a packge.
@returns: 0
"""
# In case they specify version as 'dev' instead of using -T svn,
# don't show three svn URI's
if self.options.file_type == 'all' and self.version == 'dev':
self.options.file_type = 'svn'
if self.options.file_type == 'svn':
version = 'dev'
else:
if self.version:
version = self.version
else:
version = self.all_versions[0]
if self.options.file_type == 'all':
# Search for source, egg, and svn.
self.print_download_uri(version, True)
self.print_download_uri(version, False)
self.print_download_uri('dev', True)
else:
if self.options.file_type == 'source':
source = True
else:
source = False
self.print_download_uri(version, source)
return 0
def print_download_uri(self, version, source):
"""@param version: version number or 'dev' for svn.
@type version: string
@param source: download source or egg
@type source: boolean
@returns: None
"""
if version == 'dev':
source = True
# Use setuptools monkey-patch to grab url.
url = get_download_uri(self.project_name, version, source,
self.options.pypi_index)
if url:
print(u'{}'.format(url))
def fetch(self):
"""Download a package.
@returns: 0 = success or 1 if failed download
"""
source = True
directory = '.'
if self.options.file_type == 'svn':
svn_uri = get_download_uri(self.project_name,
'dev', True)
if svn_uri:
directory = self.project_name + '_svn'
return self.fetch_svn(svn_uri, directory)
else:
print(
'No subversion repository found for {}'.format(
self.project_name),
file=sys.stderr)
return 1
elif self.options.file_type == 'source':
source = True
elif self.options.file_type == 'egg':
source = False
uri = get_download_uri(self.project_name, self.version, source)
if uri:
return self.fetch_uri(directory, uri)
else:
print(u'No {} URI found for package: {}'.format(
self.options.file_type, self.project_name))
return 1
def fetch_uri(self, directory, uri):
"""Use ``urllib.urlretrieve`` to download package to file in sandbox
dir.
@param directory: directory to download to
@type directory: string
@param uri: uri to download
@type uri: string
@returns: 0 = success or 1 for failed download
"""
filename = os.path.basename(urlparse(uri)[2])
if os.path.exists(filename):
print(u'File exists: ' + filename, file=sys.stderr)
return 1
try:
downloaded_filename, headers = urlretrieve(uri, filename)
except IOError as err_msg:
print(
'Error downloading package {} from URL {}'.format(
filename, uri),
file=sys.stderr)
print(str(err_msg), file=sys.stderr)
return 1
if 'text/html' in headers:
dfile = open(downloaded_filename)
if re.search('404 Not Found', ''.join(dfile.readlines())):
dfile.close()
print("'404 Not Found' error", file=sys.stderr)
return 1
dfile.close()
return 0
def fetch_svn(self, svn_uri, directory):
"""Fetch subversion repository.
@param svn_uri: subversion repository uri to check out
@type svn_uri: string
@param directory: directory to download to
@type directory: string
"""
if not command_successful(['svn', '--version']):
raise YolkException('Do you have subversion installed?')
if os.path.exists(directory):
raise YolkException(
'Checkout directory exists - {}'.format(directory))
try:
os.mkdir(directory)
except OSError as err_msg:
raise YolkException('' + str(err_msg))
cwd = os.path.realpath(os.curdir)
os.chdir(directory)
status, _ = run_command(['svn', 'checkout', svn_uri])
os.chdir(cwd)
def browse_website(self, browser=None):
"""Launch web browser at project's homepage.
@param browser: name of web browser to use
@type browser: string
@returns: 0 if homepage found, 1 if no homepage found
"""
if len(self.all_versions):
metadata = self.pypi.release_data(self.project_name,
self.all_versions[0])
if 'home_page' in metadata:
if browser == 'konqueror':
browser = webbrowser.Konqueror()
else:
browser = webbrowser.get()
browser.open(metadata['home_page'], 2)
return 0
print('No homepage URL found', file=sys.stderr)
return 1
def query_metadata_pypi(self):
"""Show pkg metadata queried from PyPI.
@returns: 0
"""
if self.version and self.version in self.all_versions:
metadata = self.pypi.release_data(self.project_name, self.version)
else:
# Give highest version
metadata = self.pypi.release_data(self.project_name,
self.all_versions[0])
if metadata:
if len(self.options.fields) == 1:
try:
print(metadata[self.options.fields[0]])
except KeyError:
pass
else:
for key in metadata.keys():
if (
not self.options.fields or
(self.options.fields and
key.lower() in self.options.fields)
):
print(u'{}: {}'.format(key, metadata[key]))
return 0
def versions_available(self):
"""Query PyPI for a particular version or all versions of a package.
@returns: 0 if version(s) found or 1 if none found
"""
if self.all_versions and self.version in self.all_versions:
print_pkg_versions(self.project_name, [self.version])
elif not self.version and self.all_versions:
print_pkg_versions(self.project_name, self.all_versions)
else:
if self.version:
print(
'No package found for version {}'.format(self.version),
file=sys.stderr)
else:
print(
'No package found for {}'.format(self.project_name),
file=sys.stderr)
return 1
return 0
def parse_search_spec(self, spec):
"""Parse search args and return spec dict for PyPI.
* Owwww, my eyes!. Re-write this.
@param spec: Cheese Shop package search spec
e.g.
name=Cheetah
license=ZPL
license=ZPL AND name=Cheetah
@type spec: string
@returns: tuple with spec and operator
"""
usage = """You can search PyPI by the following:
name
version
author
author_email
maintainer
maintainer_email
home_page
license
summary
description
keywords
platform
download_url
e.g. yolk -S name=Cheetah
yolk -S name=yolk AND license=PSF
"""
if not spec:
print(usage, file=sys.stderr)
return (None, None)
try:
spec = (' ').join(spec)
operator = 'AND'
first = second = ''
if ' AND ' in spec:
(first, second) = spec.split('AND')
elif ' OR ' in spec:
(first, second) = spec.split('OR')
operator = 'OR'
else:
first = spec
(key1, term1) = first.split('=')
key1 = key1.strip()
if second:
(key2, term2) = second.split('=')
key2 = key2.strip()
spec = {}
spec[key1] = term1
if second:
spec[key2] = term2
except:
print(usage, file=sys.stderr)
spec = operator = None
return (spec, operator)
def pypi_search(self):
"""Search PyPI by metadata keyword e.g.
yolk -S name=yolk AND license=GPL
@param spec: Cheese Shop search spec
@type spec: list of strings
spec examples:
["name=yolk"]
["license=GPL"]
["name=yolk", "AND", "license=GPL"]
@returns: 0 on success or 1 if mal-formed search spec
"""
spec = self.pkg_spec
# Add remaining cli arguments to options.pypi_search.
search_arg = self.options.pypi_search
spec.insert(0, search_arg.strip())
(spec, operator) = self.parse_search_spec(spec)
if not spec:
return 1
for pkg in self.pypi.search(spec, operator):
if pkg['summary']:
summary = pkg['summary'].encode('utf-8')
else:
summary = ''
print("""{} ({}):
{}
""".format(pkg['name'].encode('utf-8'), pkg['version'],
summary))
return 0
def show_entry_map(self):
"""Show entry map for a package.
@param dist: package
@param type: string
@returns: 0 for success or 1 if error
"""
pprinter = pprint.PrettyPrinter()
try:
entry_map = pkg_resources.get_entry_map(
self.options.show_entry_map)
if entry_map:
pprinter.pprint(entry_map)
except pkg_resources.DistributionNotFound:
print(
'Distribution not found: {}'.format(
self.options.show_entry_map),
file=sys.stderr)
return 1
return 0
def show_entry_points(self):
"""Show entry points for a module.
@returns: 0 for success or 1 if error
"""
found = False
for entry_point in pkg_resources.iter_entry_points(
self.options.show_entry_points):
found = True
try:
plugin = entry_point.load()
print(plugin.__module__)
print(u' {}'.format(entry_point))
if plugin.__doc__:
print(plugin.__doc__)
print()
except ImportError:
pass
if not found:
print(
'No entry points found for {}'.format(
self.options.show_entry_points),
file=sys.stderr)
return 1
return 0
def yolk_version(self):
"""Show yolk's version."""
print(u'yolk {}'.format(VERSION))
def parse_pkg_ver(self, want_installed):
"""Return tuple with project_name and version from CLI args If the user
gave the wrong case for the project name, this corrects it.
@param want_installed: whether package we want is installed or not
@type want_installed: boolean
@returns: tuple(project_name, version, all_versions)
"""
all_versions = []
arg_str = self.pkg_spec
if '==' not in arg_str:
# No version specified.
project_name = arg_str
version = None
else:
(project_name, version) = arg_str.split('==')
project_name = project_name.strip()
version = version.strip()
# Find proper case for package name.
if want_installed:
project_name = yolklib.case_sensitive_name(project_name)
else:
(project_name, all_versions) = self.pypi.query_versions_pypi(
project_name)
if not len(all_versions):
msg = "I'm afraid we have no '{}' at ".format(project_name)
msg += 'The Cheese Shop. A little Red Leicester, perhaps?'
print(msg, file=sys.stderr)
sys.exit(2)
return (project_name, version, all_versions)
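# Illustrative example (not part of the original module): a spec such as
# 'Paste==0.9' is split on '==' into project_name 'Paste' and version '0.9',
# while a bare 'Paste' leaves version as None; for PyPI queries all_versions
# is then filled in by query_versions_pypi().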
def setup_parser():
"""Setup the argparser.
@returns: parser.ArgumentParser
"""
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='store_true', dest='yolk_version',
default=False,
help='show yolk version and exit')
parser.add_argument('--debug', action='store_true',
default=False, help='show debugging information')
parser.add_argument('-q', '--quiet', action='store_true',
default=False, help='show less output')
parser.add_argument('pkg_spec', nargs='?')
group_local = parser.add_argument_group(
'Query installed Python packages',
'The following options show information about installed Python '
'packages. Activated packages are normal packages on sys.path that '
'can be imported. Non-activated packages need '
"'pkg_resources.require()' before they can be imported, such as "
"packages installed with 'easy_install --multi-version'. PKG_SPEC can "
'be either a package name or package name and version e.g. Paste==0.9')
group_local.add_argument(
'-l', '--list', action='store_true', dest='show_all', default=False,
help='list all Python packages installed by distutils or setuptools. '
'Use PKG_SPEC to narrow results')
group_local.add_argument(
'-a', '--activated', action='store_true',
dest='show_active', default=False,
help='list activated packages installed by distutils or setuptools. '
'Use PKG_SPEC to narrow results')
group_local.add_argument(
'-n', '--non-activated', action='store_true',
dest='show_non_active', default=False,
help='list non-activated packages installed by distutils or '
'setuptools. Use PKG_SPEC to narrow results')
group_local.add_argument(
'-m', '--metadata', action='store_true',
default=False,
help='show all metadata for packages installed by '
'setuptools (use with -l -a or -n)')
group_local.add_argument(
'-f', '--fields', action='store', default=False,
help='show specific metadata (comma-separated) fields; '
'use with -m or -M')
group_local.add_argument(
'-d', '--depends', action='store', dest='show_deps',
metavar='PKG_SPEC',
help='show dependencies for a package installed by '
'setuptools if they are available')
group_local.add_argument(
'--entry-points', action='store',
dest='show_entry_points', default=False,
help='list entry points for a module. e.g. --entry-points '
'nose.plugins',
metavar='MODULE')
group_local.add_argument(
'--entry-map', action='store',
dest='show_entry_map', default=False,
help='list entry map for a package. e.g. --entry-map yolk',
metavar='PACKAGE_NAME')
group_pypi = parser.add_argument_group(
'PyPI (Cheese Shop) options',
'The following options query the Python Package Index:')
group_pypi.add_argument(
'-C', '--changelog', action='store',
dest='show_pypi_changelog', metavar='HOURS',
default=False,
help='show detailed ChangeLog for PyPI for last n hours')
group_pypi.add_argument(
'-D', '--download-links', action='store',
metavar='PKG_SPEC', dest='show_download_links',
default=False,
help="show download URL's for package listed on PyPI. Use with -T to "
'specify egg, source etc')
group_pypi.add_argument(
'-F', '--fetch-package', action='store',
metavar='PKG_SPEC', dest='fetch',
default=False,
help='download package source or egg; you can specify a file type '
'with -T')
group_pypi.add_argument(
'-H', '--browse-homepage', action='store',
metavar='PKG_SPEC', dest='browse_website',
default=False,
help='launch web browser at home page for package')
group_pypi.add_argument('-I', '--pypi-index', action='store',
default=False,
help='specify PyPI mirror for package index')
group_pypi.add_argument('-L', '--latest-releases', action='store',
dest='show_pypi_releases', metavar='HOURS',
default=False,
help='show PyPI releases for last n hours')
group_pypi.add_argument(
'-M', '--query-metadata', action='store',
dest='query_metadata_pypi', default=False,
metavar='PKG_SPEC',
help='show metadata for a package listed on PyPI. Use -f to show '
'particular fields')
group_pypi.add_argument(
'-S', action='store', dest='pypi_search',
default=False,
help='search PyPI by spec and optional AND/OR operator',
metavar='SEARCH_SPEC <AND/OR SEARCH_SPEC>')
group_pypi.add_argument(
'-T', '--file-type', action='store', default='all',
help="You may specify 'source', 'egg', 'svn' or 'all' when using -D.")
group_pypi.add_argument('-U', '--show-updates', action='store_true',
default=False,
help='check PyPI for updates on package(s)')
group_pypi.add_argument('--upgrade', '--pip', action='store_true',
help='run pip command to upgrade outdated '
'packages; may be used with --user')
group_pypi.add_argument('--user', action='store_true',
help='run pip with --user; for use with --upgrade')
group_pypi.add_argument('-V', '--versions-available', action='store',
default=False, metavar='PKG_SPEC',
help='show available versions for given package '
'listed on PyPI')
return parser
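# Illustrative invocations of the parser built above (not part of the original
# module):
#
#     yolk -l                  # list all installed packages
#     yolk -U                  # check PyPI for updates to installed packages
#     yolk -M yolk -f license  # show only the 'license' field of PyPI metadata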
def print_pkg_versions(project_name, versions):
"""Print list of versions available for a package.
@returns: None
"""
for ver in versions:
print(u'{} {}'.format(project_name, ver))
def validate_pypi_opts(parser):
"""Check parse options that require pkg_spec.
@returns: pkg_spec
"""
options = parser.parse_args()
options_pkg_specs = [options.versions_available,
options.query_metadata_pypi,
options.show_download_links,
options.browse_website,
options.fetch,
options.show_deps,
]
for pkg_spec in options_pkg_specs:
if pkg_spec:
return pkg_spec
def _updates(names, pypi, user_installs_only):
"""Return updates."""
from multiprocessing.pool import ThreadPool
exception = None
def worker_function(pkg):
for (dist, active) in yolklib.get_distributions(
'all', pkg,
yolklib.get_highest_installed(pkg)):
if exception:
return
width = terminal_width()
if width:
print(u'\rChecking {}'.format(dist.project_name).ljust(width),
end='',
file=sys.stderr)
(project_name, versions) = pypi.query_versions_pypi(
dist.project_name)
return (pkg, dist, project_name, versions)
import multiprocessing
pool = ThreadPool(multiprocessing.cpu_count())
try:
results = pool.map(worker_function, names)
except IOError as _exception:
exception = _exception
print('\r', end='', file=sys.stderr)
if exception:
raise YolkException(exception)
for (pkg, dist, project_name, versions) in results:
try:
if (
user_installs_only and
not dist.location.startswith(site.getusersitepackages())
):
continue
except AttributeError:
# Probably inside a virtualenv.
pass
if versions:
# PyPI returns them in chronological order,
# but who knows if it's guaranteed in the API?
# Make sure we grab the highest version:
newest = yolklib.get_highest_version(versions)
if newest != dist.version:
# We may have a newer version than what PyPI knows about.
if (
pkg_resources.parse_version(dist.version) <
pkg_resources.parse_version(newest)
):
yield (project_name, dist.version, newest)
def terminal_width():
try:
import fcntl
import termios
return struct.unpack(
'HHHH',
fcntl.ioctl(sys.stderr.fileno(),
termios.TIOCGWINSZ,
struct.pack('HHHH', 0, 0, 0, 0)))[1]
except (ImportError, OSError):
# ImportError for non-Unix.
# OSError for non-TTYs.
return None
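# Illustrative behaviour (not part of the original module): on an interactive
# Unix terminal, terminal_width() returns the column count reported by the
# TIOCGWINSZ ioctl (e.g. 80); when stderr is not a TTY or fcntl/termios are
# unavailable it returns None and the progress line in _updates() is skipped.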
def main():
"""Let's do it."""
try:
my_yolk = Yolk()
my_yolk.run()
except (HTTPException, IOError, YolkException) as exception:
print(exception, file=sys.stderr)
return 1
except KeyboardInterrupt:
return 1
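# Illustrative entry point (an assumption; the original module may define it
# elsewhere):
#
#     if __name__ == '__main__':
#         sys.exit(main())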
|
|
# Copyright 2013 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fake_stat
CURRENT_VERSION = 'v1.18'
FAKE_CONTAINER_ID = '3cc2351ab11b'
FAKE_IMAGE_ID = 'e9aa60c60128'
FAKE_IMAGE_NAME = 'test_image'
FAKE_TARBALL_PATH = '/path/to/tarball'
FAKE_REPO_NAME = 'repo'
FAKE_TAG_NAME = 'tag'
FAKE_FILE_NAME = 'file'
FAKE_URL = 'myurl'
FAKE_PATH = '/path'
# Each method is prefixed with HTTP method (get, post...)
# for clarity and readability
def get_fake_raw_version():
status_code = 200
response = {
"ApiVersion": "1.18",
"GitCommit": "fake-commit",
"GoVersion": "go1.3.3",
"Version": "1.5.0"
}
return status_code, response
def get_fake_version():
status_code = 200
response = {'GoVersion': '1', 'Version': '1.1.1',
'GitCommit': 'deadbeef+CHANGES'}
return status_code, response
def get_fake_info():
status_code = 200
response = {'Containers': 1, 'Images': 1, 'Debug': False,
'MemoryLimit': False, 'SwapLimit': False,
'IPv4Forwarding': True}
return status_code, response
def get_fake_search():
status_code = 200
response = [{'Name': 'busybox', 'Description': 'Fake Description'}]
return status_code, response
def get_fake_images():
status_code = 200
response = [{
'Id': FAKE_IMAGE_ID,
'Created': '2 days ago',
'Repository': 'busybox',
'RepoTags': ['busybox:latest', 'busybox:1.0'],
}]
return status_code, response
def get_fake_image_history():
status_code = 200
response = [
{
"Id": "b750fe79269d",
"Created": 1364102658,
"CreatedBy": "/bin/bash"
},
{
"Id": "27cf78414709",
"Created": 1364068391,
"CreatedBy": ""
}
]
return status_code, response
def post_fake_import_image():
status_code = 200
response = 'Import messages...'
return status_code, response
def get_fake_containers():
status_code = 200
response = [{
'Id': FAKE_CONTAINER_ID,
'Image': 'busybox:latest',
'Created': '2 days ago',
'Command': 'true',
'Status': 'fake status'
}]
return status_code, response
def post_fake_start_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_resize_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_create_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def get_fake_inspect_container():
status_code = 200
response = {
'Id': FAKE_CONTAINER_ID,
'Config': {'Privileged': True},
'ID': FAKE_CONTAINER_ID,
'Image': 'busybox:latest',
"State": {
"Running": True,
"Pid": 0,
"ExitCode": 0,
"StartedAt": "2013-09-25T14:01:18.869545111+02:00",
"Ghost": False
},
"MacAddress": "02:42:ac:11:00:0a"
}
return status_code, response
def get_fake_inspect_image():
status_code = 200
response = {
'id': FAKE_IMAGE_ID,
'parent': "27cf784147099545",
'created': "2013-03-23T22:24:18.818426-07:00",
'container': FAKE_CONTAINER_ID,
'container_config':
{
"Hostname": "",
"User": "",
"Memory": 0,
"MemorySwap": 0,
"AttachStdin": False,
"AttachStdout": False,
"AttachStderr": False,
"PortSpecs": "",
"Tty": True,
"OpenStdin": True,
"StdinOnce": False,
"Env": "",
"Cmd": ["/bin/bash"],
"Dns": "",
"Image": "base",
"Volumes": "",
"VolumesFrom": "",
"WorkingDir": ""
},
'Size': 6823592
}
return status_code, response
def get_fake_port():
status_code = 200
response = {
'HostConfig': {
'Binds': None,
'ContainerIDFile': '',
'Links': None,
'LxcConf': None,
'PortBindings': {
'1111': None,
'1111/tcp': [{'HostIp': '127.0.0.1', 'HostPort': '4567'}],
'2222': None
},
'Privileged': False,
'PublishAllPorts': False
},
'NetworkSettings': {
'Bridge': 'docker0',
'PortMapping': None,
'Ports': {
'1111': None,
'1111/tcp': [{'HostIp': '127.0.0.1', 'HostPort': '4567'}],
'2222': None},
'MacAddress': '02:42:ac:11:00:0a'
}
}
return status_code, response
def get_fake_insert_image():
status_code = 200
response = {'StatusCode': 0}
return status_code, response
def get_fake_wait():
status_code = 200
response = {'StatusCode': 0}
return status_code, response
def get_fake_logs():
status_code = 200
response = (b'\x01\x00\x00\x00\x00\x00\x00\x11Flowering Nights\n'
b'\x01\x00\x00\x00\x00\x00\x00\x10(Sakuya Iyazoi)\n')
return status_code, response
def get_fake_diff():
status_code = 200
response = [{'Path': '/test', 'Kind': 1}]
return status_code, response
def get_fake_events():
status_code = 200
response = [{'status': 'stop', 'id': FAKE_CONTAINER_ID,
'from': FAKE_IMAGE_ID, 'time': 1423247867}]
return status_code, response
def get_fake_export():
status_code = 200
response = 'Byte Stream....'
return status_code, response
def post_fake_execute():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_execute_start():
status_code = 200
response = (b'\x01\x00\x00\x00\x00\x00\x00\x11bin\nboot\ndev\netc\n'
b'\x01\x00\x00\x00\x00\x00\x00\x12lib\nmnt\nproc\nroot\n'
b'\x01\x00\x00\x00\x00\x00\x00\x0csbin\nusr\nvar\n')
return status_code, response
def post_fake_stop_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_kill_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_pause_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_unpause_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_restart_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_rename_container():
status_code = 204
return status_code, None
def delete_fake_remove_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_image_create():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
def delete_fake_remove_image():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
def get_fake_get_image():
status_code = 200
response = 'Byte Stream....'
return status_code, response
def post_fake_load_image():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
def post_fake_commit():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_push():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
def post_fake_build_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_tag_image():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
def get_fake_stats():
status_code = 200
response = fake_stat.OBJ
return status_code, response
# Maps real api url to fake response callback
prefix = 'http+unix://var/run/docker.sock'
fake_responses = {
'{0}/version'.format(prefix):
get_fake_raw_version,
'{1}/{0}/version'.format(CURRENT_VERSION, prefix):
get_fake_version,
'{1}/{0}/info'.format(CURRENT_VERSION, prefix):
get_fake_info,
'{1}/{0}/images/search'.format(CURRENT_VERSION, prefix):
get_fake_search,
'{1}/{0}/images/json'.format(CURRENT_VERSION, prefix):
get_fake_images,
'{1}/{0}/images/test_image/history'.format(CURRENT_VERSION, prefix):
get_fake_image_history,
'{1}/{0}/images/create'.format(CURRENT_VERSION, prefix):
post_fake_import_image,
'{1}/{0}/containers/json'.format(CURRENT_VERSION, prefix):
get_fake_containers,
'{1}/{0}/containers/3cc2351ab11b/start'.format(CURRENT_VERSION, prefix):
post_fake_start_container,
'{1}/{0}/containers/3cc2351ab11b/resize'.format(CURRENT_VERSION, prefix):
post_fake_resize_container,
'{1}/{0}/containers/3cc2351ab11b/json'.format(CURRENT_VERSION, prefix):
get_fake_inspect_container,
'{1}/{0}/containers/3cc2351ab11b/rename'.format(CURRENT_VERSION, prefix):
post_fake_rename_container,
'{1}/{0}/images/e9aa60c60128/tag'.format(CURRENT_VERSION, prefix):
post_fake_tag_image,
'{1}/{0}/containers/3cc2351ab11b/wait'.format(CURRENT_VERSION, prefix):
get_fake_wait,
'{1}/{0}/containers/3cc2351ab11b/logs'.format(CURRENT_VERSION, prefix):
get_fake_logs,
'{1}/{0}/containers/3cc2351ab11b/changes'.format(CURRENT_VERSION, prefix):
get_fake_diff,
'{1}/{0}/containers/3cc2351ab11b/export'.format(CURRENT_VERSION, prefix):
get_fake_export,
'{1}/{0}/containers/3cc2351ab11b/exec'.format(CURRENT_VERSION, prefix):
post_fake_execute,
'{1}/{0}/exec/3cc2351ab11b/start'.format(CURRENT_VERSION, prefix):
post_fake_execute_start,
'{1}/{0}/containers/3cc2351ab11b/stats'.format(CURRENT_VERSION, prefix):
get_fake_stats,
'{1}/{0}/containers/3cc2351ab11b/stop'.format(CURRENT_VERSION, prefix):
post_fake_stop_container,
'{1}/{0}/containers/3cc2351ab11b/kill'.format(CURRENT_VERSION, prefix):
post_fake_kill_container,
'{1}/{0}/containers/3cc2351ab11b/pause'.format(CURRENT_VERSION, prefix):
post_fake_pause_container,
'{1}/{0}/containers/3cc2351ab11b/unpause'.format(CURRENT_VERSION, prefix):
post_fake_unpause_container,
'{1}/{0}/containers/3cc2351ab11b/json'.format(CURRENT_VERSION, prefix):
get_fake_port,
'{1}/{0}/containers/3cc2351ab11b/restart'.format(CURRENT_VERSION, prefix):
post_fake_restart_container,
'{1}/{0}/containers/3cc2351ab11b'.format(CURRENT_VERSION, prefix):
delete_fake_remove_container,
'{1}/{0}/images/create'.format(CURRENT_VERSION, prefix):
post_fake_image_create,
'{1}/{0}/images/e9aa60c60128'.format(CURRENT_VERSION, prefix):
delete_fake_remove_image,
'{1}/{0}/images/e9aa60c60128/get'.format(CURRENT_VERSION, prefix):
get_fake_get_image,
'{1}/{0}/images/load'.format(CURRENT_VERSION, prefix):
post_fake_load_image,
'{1}/{0}/images/test_image/json'.format(CURRENT_VERSION, prefix):
get_fake_inspect_image,
'{1}/{0}/images/test_image/insert'.format(CURRENT_VERSION, prefix):
get_fake_insert_image,
'{1}/{0}/images/test_image/push'.format(CURRENT_VERSION, prefix):
post_fake_push,
'{1}/{0}/commit'.format(CURRENT_VERSION, prefix):
post_fake_commit,
'{1}/{0}/containers/create'.format(CURRENT_VERSION, prefix):
post_fake_create_container,
'{1}/{0}/build'.format(CURRENT_VERSION, prefix):
post_fake_build_container,
'{1}/{0}/events'.format(CURRENT_VERSION, prefix):
get_fake_events
}
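# Illustrative usage (not part of the original module): a test double can
# dispatch on this table to simulate the Docker API, e.g.
#
#     url = '{1}/{0}/version'.format(CURRENT_VERSION, prefix)
#     status_code, payload = fake_responses[url]()
#     assert status_code == 200 and payload['Version'] == '1.1.1'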
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class RecommendedElasticPoolsOperations(object):
"""RecommendedElasticPoolsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The API version to use for the request. Constant value: "2014-04-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2014-04-01"
self.config = config
def get(
self, resource_group_name, server_name, recommended_elastic_pool_name, custom_headers=None, raw=False, **operation_config):
"""Gets a recommented elastic pool.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param recommended_elastic_pool_name: The name of the recommended
elastic pool to be retrieved.
:type recommended_elastic_pool_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`RecommendedElasticPool
<azure.mgmt.sql.models.RecommendedElasticPool>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/recommendedElasticPools/{recommendedElasticPoolName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'recommendedElasticPoolName': self._serialize.url("recommended_elastic_pool_name", recommended_elastic_pool_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RecommendedElasticPool', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
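# Illustrative usage (the 'sql_client' instance and names below are
# assumptions, not part of this generated file):
#
#     pool = sql_client.recommended_elastic_pools.get(
#         'my-resource-group', 'my-server', 'my-pool')
#     print(pool.name)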
def get_databases(
self, resource_group_name, server_name, recommended_elastic_pool_name, database_name, custom_headers=None, raw=False, **operation_config):
"""Gets a database inside of a recommented elastic pool.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param recommended_elastic_pool_name: The name of the elastic pool to
be retrieved.
:type recommended_elastic_pool_name: str
:param database_name: The name of the database to be retrieved.
:type database_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Database <azure.mgmt.sql.models.Database>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/recommendedElasticPools/{recommendedElasticPoolName}/databases/{databaseName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'recommendedElasticPoolName': self._serialize.url("recommended_elastic_pool_name", recommended_elastic_pool_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Database', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list(
self, resource_group_name, server_name, custom_headers=None, raw=False, **operation_config):
"""Returns recommended elastic pools.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`RecommendedElasticPoolPaged
<azure.mgmt.sql.models.RecommendedElasticPoolPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/recommendedElasticPools'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.RecommendedElasticPoolPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.RecommendedElasticPoolPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_databases(
self, resource_group_name, server_name, recommended_elastic_pool_name, custom_headers=None, raw=False, **operation_config):
"""Returns a list of databases inside a recommented elastic pool.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param recommended_elastic_pool_name: The name of the recommended
elastic pool to be retrieved.
:type recommended_elastic_pool_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`DatabasePaged <azure.mgmt.sql.models.DatabasePaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/recommendedElasticPools/{recommendedElasticPoolName}/databases'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'recommendedElasticPoolName': self._serialize.url("recommended_elastic_pool_name", recommended_elastic_pool_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.DatabasePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DatabasePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_metrics(
self, resource_group_name, server_name, recommended_elastic_pool_name, custom_headers=None, raw=False, **operation_config):
"""Returns a recommented elastic pool metrics.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param recommended_elastic_pool_name: The name of the recommended
elastic pool to be retrieved.
:type recommended_elastic_pool_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`RecommendedElasticPoolMetricPaged
<azure.mgmt.sql.models.RecommendedElasticPoolMetricPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/recommendedElasticPools/{recommendedElasticPoolName}/metrics'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'recommendedElasticPoolName': self._serialize.url("recommended_elastic_pool_name", recommended_elastic_pool_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.RecommendedElasticPoolMetricPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.RecommendedElasticPoolMetricPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Copyright (c) 2014-2019 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet backup features.
Test case is:
4 nodes. 1 2 and 3 send transactions between each other,
fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shutdown, and their wallets erased.
Then restore using the wallet.dat backup, and
confirm 1/2/3/4 balances are the same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from decimal import Decimal
import os
from random import randint
import shutil
from test_framework.test_framework import DigiByteTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
)
class WalletBackupTest(DigiByteTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
# Nodes 1, 2 and 3 are spenders; give them a keypool of 100.
self.extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
self.sync_all()
def one_send(self, from_node, to_address):
if (randint(1,2) == 1):
amount = Decimal(randint(1,10)) / Decimal(10)
self.nodes[from_node].sendtoaddress(to_address, amount)
def do_one_round(self):
a0 = self.nodes[0].getnewaddress()
a1 = self.nodes[1].getnewaddress()
a2 = self.nodes[2].getnewaddress()
self.one_send(0, a1)
self.one_send(0, a2)
self.one_send(1, a0)
self.one_send(1, a2)
self.one_send(2, a0)
self.one_send(2, a1)
# Have the miner (node3) mine a block.
# Must sync mempools before mining.
self.sync_mempools()
self.nodes[3].generate(1)
self.sync_blocks()
# As above, this mirrors the original bash test.
def start_three(self):
self.start_node(0)
self.start_node(1)
self.start_node(2)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
def stop_three(self):
self.stop_node(0)
self.stop_node(1)
self.stop_node(2)
def erase_three(self):
os.remove(os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', 'wallet.dat'))
os.remove(os.path.join(self.nodes[1].datadir, 'regtest', 'wallets', 'wallet.dat'))
os.remove(os.path.join(self.nodes[2].datadir, 'regtest', 'wallets', 'wallet.dat'))
def run_test(self):
self.log.info("Generating initial blockchain")
self.nodes[0].generate(1)
self.sync_blocks()
self.nodes[1].generate(1)
self.sync_blocks()
self.nodes[2].generate(1)
self.sync_blocks()
self.nodes[3].generate(100)
self.sync_blocks()
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 50)
assert_equal(self.nodes[3].getbalance(), 0)
self.log.info("Creating transactions")
# Five rounds of sending each other transactions.
for i in range(5):
self.do_one_round()
self.log.info("Backing up")
self.nodes[0].backupwallet(os.path.join(self.nodes[0].datadir, 'wallet.bak'))
self.nodes[0].dumpwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump'))
self.nodes[1].backupwallet(os.path.join(self.nodes[1].datadir, 'wallet.bak'))
self.nodes[1].dumpwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump'))
self.nodes[2].backupwallet(os.path.join(self.nodes[2].datadir, 'wallet.bak'))
self.nodes[2].dumpwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump'))
self.log.info("More transactions")
for i in range(5):
self.do_one_round()
# Generate 101 more blocks, so any fees paid mature
self.nodes[3].generate(101)
self.sync_all()
balance0 = self.nodes[0].getbalance()
balance1 = self.nodes[1].getbalance()
balance2 = self.nodes[2].getbalance()
balance3 = self.nodes[3].getbalance()
total = balance0 + balance1 + balance2 + balance3
# At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
# 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
assert_equal(total, 5700)
##
# Test restoring spender wallets from backups
##
self.log.info("Restoring using wallet.dat")
self.stop_three()
self.erase_three()
# Start node2 with no chain
shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'blocks'))
shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'chainstate'))
# Restore wallets from backup
shutil.copyfile(os.path.join(self.nodes[0].datadir, 'wallet.bak'), os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', 'wallet.dat'))
shutil.copyfile(os.path.join(self.nodes[1].datadir, 'wallet.bak'), os.path.join(self.nodes[1].datadir, 'regtest', 'wallets', 'wallet.dat'))
shutil.copyfile(os.path.join(self.nodes[2].datadir, 'wallet.bak'), os.path.join(self.nodes[2].datadir, 'regtest', 'wallets', 'wallet.dat'))
self.log.info("Re-starting nodes")
self.start_three()
self.sync_blocks()
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
self.log.info("Restoring using dumped wallet")
self.stop_three()
self.erase_three()
# Start node2 with no chain
shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'blocks'))
shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'chainstate'))
self.start_three()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
self.nodes[0].importwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump'))
self.nodes[1].importwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump'))
self.nodes[2].importwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump'))
self.sync_blocks()
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
# Backup to source wallet file must fail
sourcePaths = [
os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', 'wallet.dat'),
os.path.join(self.nodes[0].datadir, 'regtest', '.', 'wallets', 'wallet.dat'),
os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', ''),
os.path.join(self.nodes[0].datadir, 'regtest', 'wallets')]
for sourcePath in sourcePaths:
assert_raises_rpc_error(-4, "backup failed", self.nodes[0].backupwallet, sourcePath)
if __name__ == '__main__':
WalletBackupTest().main()
|
|
# ==============================================================================
# Copyright 2019 - Philip Paquette
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Model-Based Player
- Contains a class representing a player that chooses its moves by sampling,
conditioning each next move on the orders already selected
"""
import logging
import random
from tornado import gen
from diplomacy import Game
from diplomacy_research.models.state_space import extract_state_proto, extract_phase_history_proto, \
extract_possible_orders_proto
from diplomacy_research.players.player import Player
from diplomacy_research.utils.model import merge_dicts
# Constants
LOGGER = logging.getLogger(__name__)
class ModelBasedPlayer(Player):
""" ModelBased Player Class"""
def __init__(self, policy_adapter, player_seed=None, noise=None, temperature=None, schedule=None,
dropout_rate=None, use_beam=None, name=None):
""" Constructor
:param policy_adapter: The policy adapter (instance) to evaluate the action to select
:param player_seed: The seed to apply to the player to compute a deterministic mask.
:param noise: The sigma of the additional noise to apply to the intermediate layers.
:param temperature: The temperature to apply to the logits. (Defaults to schedule, otherwise uses 0.)
:param schedule: The temperature schedule to use. List of (prob, temperature)
e.g. [(0.75, 1.), (1., 0.)] means
- 1) 75% chance of using a temperature of 1
- 2) otherwise, a 100% chance of using a temperature of 0.
:param dropout_rate: The amount of dropout to apply to the inputs/outputs of the decoder.
:param use_beam: Boolean that indicates that we want to use a beam search.
:param name: Optional. The name of this player.
:type policy_adapter: diplomacy_research.models.policy.base_policy_adapter.BasePolicyAdapter
"""
# pylint: disable=too-many-arguments
Player.__init__(self, name)
self.policy_adapter = policy_adapter
self._player_seed = player_seed or 0
self._noise = noise or 0.
self._temperature = None
self._schedule = [(1., 0.)]
self._dropout_rate = dropout_rate or 0.
self._use_beam = use_beam
# Using a temperature of 1. if using beam search without a temperature
if use_beam and temperature is None:
temperature = 1.
# Use temperature if provided, otherwise use schedule, otherwise defaults to greedy
if temperature is not None:
self._temperature = temperature
self._schedule = [(1., temperature)]
elif schedule is not None:
self._schedule = schedule
# ---------- Properties -------------
@property
def is_trainable(self):
""" Returns a boolean that indicates if the player wants to be trained or not """
if self.policy_adapter is not None and self.policy_adapter.is_trainable:
return True
return False
@property
def temperature(self):
""" Getter - temperature """
if self._temperature is not None:
return self._temperature
# Otherwise, computing it from schedule
remaining = 1.
weighted_temp = 0.
for prob, temp in self._schedule:
weighted_temp += remaining * prob * temp
remaining -= max(0, remaining * prob)
return weighted_temp
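# Illustrative example (not part of the original module): with the schedule
# [(0.75, 1.), (1., 0.)] from the constructor docstring, the loop above
# accumulates 1.00 * 0.75 * 1. + 0.25 * 1. * 0. = 0.75, i.e. the expected
# temperature under that schedule.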
# ---------- Methods -------------
@gen.coroutine
def get_beam_orders(self, game, power_names, *, retry_on_failure=True, **kwargs):
""" Finds all the beams with their probabilities returned by the diverse beam search for the selected power(s)
Beams are ordered by score (highest first).
:param game: The game object
:param power_names: A list of power names we are playing, or alternatively a single power name.
:param retry_on_failure: Boolean that indicates to retry querying from the model if an error is encountered.
:param kwargs: Additional optional kwargs:
- player_seed: The seed to apply to the player to compute a deterministic mask.
- noise: The sigma of the additional noise to apply to the intermediate layers (i.e. sigma * epsilon)
- temperature: The temperature to apply to the logits. (Default to 0. for deterministic/greedy)
- dropout_rate: The amount of dropout to apply to the inputs/outputs of the decoder.
:return: 1) If power_names is a string, a tuple of beam orders, and of beam probabilities
2) If power_names is a list, a list of list which contains beam orders and beam probabilities
:type game: diplomacy.Game
"""
state_proto = extract_state_proto(game)
phase_history_proto = extract_phase_history_proto(game)
possible_orders_proto = extract_possible_orders_proto(game)
# Determining if we have a single or multiple powers
if not isinstance(power_names, list):
is_single_power = True
power_names = [power_names]
else:
is_single_power = False
# Getting beam orders
beam_orders_probs = yield [self.get_beam_orders_with_proto(state_proto,
power_name,
phase_history_proto,
possible_orders_proto,
retry_on_failure=retry_on_failure,
**kwargs) for power_name in power_names]
beam_orders_probs = beam_orders_probs[0] if is_single_power else beam_orders_probs
return beam_orders_probs
@gen.coroutine
def get_beam_orders_with_proto(self, state_proto, power_name, phase_history_proto, possible_orders_proto, **kwargs):
""" Finds all the beams with their probabilities returned by the diverse beam search for the selected power
Beams are ordered by score (highest first).
:param state_proto: A `.proto.game.State` representation of the state of the game.
:param power_name: The power name for which we want the orders and the state values
:param phase_history_proto: A list of `.proto.game.PhaseHistory`. This represents prev phases.
:param possible_orders_proto: A `proto.game.PossibleOrders` object representing possible order for each loc.
:param kwargs: Additional optional kwargs:
- player_seed: The seed to apply to the player to compute a deterministic mask.
- noise: The sigma of the additional noise to apply to the intermediate layers (i.e. sigma * epsilon)
- temperature: The temperature to apply to the logits. (Default to 0. for deterministic/greedy)
- dropout_rate: The amount of dropout to apply to the inputs/outputs of the decoder.
- retry_on_failure: Boolean that indicates to retry querying from the model if an error is encountered.
:return: A tuple consisting of
1) A list of beams (i.e. a list of selected orders for each beam)
2) A list of probability (the probability of selecting each beam)
"""
orderable_locs = self.get_orderable_locations(state_proto, power_name)
return (yield self.policy_adapter.get_beam_orders(orderable_locs,
state_proto,
power_name,
phase_history_proto,
possible_orders_proto,
**self._get_kwargs(**kwargs)))
@gen.coroutine
def get_orders_details_with_proto(self, state_proto, power_name, phase_history_proto, possible_orders_proto,
**kwargs):
""" Gets the orders (and the corresponding policy details) for the locs the power should play.
:param state_proto: A `.proto.game.State` representation of the state of the game.
:param power_name: The name of the power we are playing
:param phase_history_proto: A list of `.proto.game.PhaseHistory`. This represents prev phases.
:param possible_orders_proto: A `proto.game.PossibleOrders` object representing possible order for each loc.
:param kwargs: Additional optional kwargs:
- player_seed: If set. Override the player_seed to use for the model based player.
- noise: If set. Override the noise to use for the model based player.
- temperature: If set. Override the temperature to use for the model based player.
- dropout_rate: If set. Override the dropout_rate to use for the model based player.
- with_state_value: Boolean that indicates to also query the value function.
- use_beam: If set. Override the use_beam to use for the model based player.
- retry_on_failure: Boolean that indicates to retry querying from the model if an error is encountered.
:return: If with_state_value=False (default), a tuple consisting of:
1) The list of orders the power should play (e.g. ['A PAR H', 'A MAR - BUR', ...])
2) The policy details ==> {'locs', 'tokens', 'log_probs', 'draw_action', 'draw_prob'}
If with_state_value=True, a tuple consisting of:
1) The list of orders the power should play (e.g. ['A PAR H', 'A MAR - BUR', ...])
2) The policy details ==> {'locs', 'tokens', 'log_probs', 'draw_action', 'draw_prob'}
3) The state value for the given state
"""
orderable_locs = self.get_orderable_locations(state_proto, power_name)
return (yield self.policy_adapter.get_orders(orderable_locs,
state_proto,
power_name,
phase_history_proto,
possible_orders_proto,
**self._get_kwargs(**kwargs)))
@gen.coroutine
def get_state_value_with_proto(self, state_proto, power_name, phase_history_proto, possible_orders_proto=None,
**kwargs):
""" Calculates the player's value of the state of the game for a given power
:param state_proto: A `.proto.game.State` representation of the state of the game.
:param power_name: The power name for which we want to retrieve the value
:param phase_history_proto: A list of `.proto.game.PhaseHistory`. This represents prev phases.
:param possible_orders_proto: A `proto.game.PossibleOrders` object representing possible order for each loc.
:param kwargs: Additional optional kwargs:
- player_seed: If set. Override the player_seed to use for the model based player.
- noise: If set. Override the noise to use for the model based player.
- temperature: If set. Override the temperature to use for the model based player.
- dropout_rate: If set. Override the dropout_rate to use for the model based player.
- retry_on_failure: Boolean that indicates to retry querying from the model if an error is encountered.
:return: A float representing the value of the state of the game to the specified power
"""
# Trying to query actor-critic model for state-value
if self.policy_adapter and self.policy_adapter.has_value_model:
return (yield self.policy_adapter.get_state_value(state_proto,
power_name,
phase_history_proto,
possible_orders_proto,
**self._get_kwargs(**kwargs)))
# Otherwise, returning 0. with a warning
LOGGER.warning('There are no models available to query state value. Returning a value of 0.')
return 0.
@gen.coroutine
def get_opening_orders(self):
""" Returns a dictionary of power_name: [orders] for each power
The orders represent the opening orders that would have been submitted by the player
"""
game = Game()
state_proto = extract_state_proto(game)
phase_history_proto = extract_phase_history_proto(game)
possible_orders_proto = extract_possible_orders_proto(game)
# Retrieving all orders
# Using default player_seed, noise, temperature, and dropout_rate.
# power_orders is a list of tuples (orders, policy_details)
power_orders = yield [self.policy_adapter.get_orders(self.get_orderable_locations(state_proto, power_name),
state_proto,
power_name,
phase_history_proto,
possible_orders_proto,
retry_on_failure=False) for power_name in game.powers]
return {power_name: orders[0] for power_name, orders in zip(game.powers.keys(), power_orders)}
def _get_kwargs(self, player_seed=None, noise=None, temperature=None, dropout_rate=None, use_beam=None,
**other_kwargs):
""" Selects between the default value provided at initialization and the potential override in kwargs """
# Selecting temperature
if temperature is None:
for prob, temp in self._schedule:
if random.random() <= prob:
temperature = temp
break
else:
temperature = 0.
# Starting with player.kwargs, then overriding fields
kwargs = self.kwargs
if player_seed is not None:
kwargs['player_seed'] = player_seed
if noise is not None:
kwargs['noise'] = noise
kwargs['temperature'] = temperature
if dropout_rate is not None:
kwargs['dropout_rate'] = dropout_rate
# Setting use_beam
if use_beam is not None:
kwargs['use_beam'] = use_beam
elif self._use_beam is not None:
kwargs['use_beam'] = self._use_beam
# Merging with other kwargs and returning
return merge_dicts(kwargs, other_kwargs)
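# Illustrative construction (the 'policy_adapter' instance is hypothetical and
# this snippet is not part of the original module); get_opening_orders() must
# be yielded from inside a tornado coroutine:
#
#     player = ModelBasedPlayer(policy_adapter,
#                               schedule=[(0.75, 1.), (1., 0.)],
#                               use_beam=False,
#                               name='model_based')
#     opening_orders = yield player.get_opening_orders()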
|
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Fabrice Laporte.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the 'lastgenre' plugin."""
from __future__ import division, absolute_import, print_function
import unittest
from mock import Mock
from test import _common
from beetsplug import lastgenre
from beets import config
from test.helper import TestHelper
import six
class LastGenrePluginTest(unittest.TestCase, TestHelper):
def setUp(self):
self.setup_beets()
self.plugin = lastgenre.LastGenrePlugin()
def tearDown(self):
self.teardown_beets()
def _setup_config(self, whitelist=False, canonical=False, count=1):
config['lastgenre']['canonical'] = canonical
config['lastgenre']['count'] = count
if isinstance(whitelist, (bool, six.string_types)):
# Filename, default, or disabled.
config['lastgenre']['whitelist'] = whitelist
self.plugin.setup()
if not isinstance(whitelist, (bool, six.string_types)):
# Explicit list of genres.
self.plugin.whitelist = whitelist
def test_default(self):
"""Fetch genres with whitelist and c14n deactivated
"""
self._setup_config()
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
u'Delta Blues')
def test_c14n_only(self):
"""Default c14n tree funnels up to most common genre except for *wrong*
genres that stay unchanged.
"""
self._setup_config(canonical=True, count=99)
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
u'Blues')
self.assertEqual(self.plugin._resolve_genres(['iota blues']),
u'Iota Blues')
def test_whitelist_only(self):
"""Default whitelist rejects *wrong* (non existing) genres.
"""
self._setup_config(whitelist=True)
self.assertEqual(self.plugin._resolve_genres(['iota blues']),
u'')
def test_whitelist_c14n(self):
"""Default whitelist and c14n both activated result in all parents
genres being selected (from specific to common).
"""
self._setup_config(canonical=True, whitelist=True, count=99)
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
u'Delta Blues, Blues')
def test_whitelist_custom(self):
"""Keep only genres that are in the whitelist.
"""
self._setup_config(whitelist=set(['blues', 'rock', 'jazz']),
count=2)
self.assertEqual(self.plugin._resolve_genres(['pop', 'blues']),
u'Blues')
self._setup_config(canonical='', whitelist=set(['rock']))
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
u'')
def test_count(self):
"""Keep the n first genres, as we expect them to be sorted from more to
less popular.
"""
self._setup_config(whitelist=set(['blues', 'rock', 'jazz']),
count=2)
self.assertEqual(self.plugin._resolve_genres(
['jazz', 'pop', 'rock', 'blues']),
u'Jazz, Rock')
def test_count_c14n(self):
"""Keep the n first genres, after having applied c14n when necessary
"""
self._setup_config(whitelist=set(['blues', 'rock', 'jazz']),
canonical=True,
count=2)
        # thanks to c14n, 'blues' supersedes 'country blues' and takes the
# second slot
self.assertEqual(self.plugin._resolve_genres(
['jazz', 'pop', 'country blues', 'rock']),
u'Jazz, Blues')
def test_c14n_whitelist(self):
"""Genres first pass through c14n and are then filtered
"""
self._setup_config(canonical=True, whitelist=set(['rock']))
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
u'')
def test_empty_string_enables_canonical(self):
"""For backwards compatibility, setting the `canonical` option
to the empty string enables it using the default tree.
"""
self._setup_config(canonical='', count=99)
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
u'Blues')
def test_empty_string_enables_whitelist(self):
"""Again for backwards compatibility, setting the `whitelist`
option to the empty string enables the default set of genres.
"""
self._setup_config(whitelist='')
self.assertEqual(self.plugin._resolve_genres(['iota blues']),
u'')
def test_no_duplicate(self):
"""Remove duplicated genres.
"""
self._setup_config(count=99)
self.assertEqual(self.plugin._resolve_genres(['blues', 'blues']),
u'Blues')
def test_tags_for(self):
class MockPylastElem(object):
def __init__(self, name):
self.name = name
def get_name(self):
return self.name
class MockPylastObj(object):
def get_top_tags(self):
tag1 = Mock()
tag1.weight = 90
tag1.item = MockPylastElem(u'Pop')
tag2 = Mock()
tag2.weight = 40
tag2.item = MockPylastElem(u'Rap')
return [tag1, tag2]
plugin = lastgenre.LastGenrePlugin()
res = plugin._tags_for(MockPylastObj())
self.assertEqual(res, [u'pop', u'rap'])
res = plugin._tags_for(MockPylastObj(), min_weight=50)
self.assertEqual(res, [u'pop'])
def test_get_genre(self):
mock_genres = {'track': u'1', 'album': u'2', 'artist': u'3'}
def mock_fetch_track_genre(self, obj=None):
return mock_genres['track']
def mock_fetch_album_genre(self, obj):
return mock_genres['album']
def mock_fetch_artist_genre(self, obj):
return mock_genres['artist']
lastgenre.LastGenrePlugin.fetch_track_genre = mock_fetch_track_genre
lastgenre.LastGenrePlugin.fetch_album_genre = mock_fetch_album_genre
lastgenre.LastGenrePlugin.fetch_artist_genre = mock_fetch_artist_genre
self._setup_config(whitelist=False)
item = _common.item()
item.genre = mock_genres['track']
config['lastgenre'] = {'force': False}
res = self.plugin._get_genre(item)
self.assertEqual(res, (item.genre, u'keep'))
config['lastgenre'] = {'force': True, 'source': u'track'}
res = self.plugin._get_genre(item)
self.assertEqual(res, (mock_genres['track'], u'track'))
config['lastgenre'] = {'source': u'album'}
res = self.plugin._get_genre(item)
self.assertEqual(res, (mock_genres['album'], u'album'))
config['lastgenre'] = {'source': u'artist'}
res = self.plugin._get_genre(item)
self.assertEqual(res, (mock_genres['artist'], u'artist'))
mock_genres['artist'] = None
res = self.plugin._get_genre(item)
self.assertEqual(res, (item.genre, u'original'))
config['lastgenre'] = {'fallback': u'rap'}
item.genre = None
res = self.plugin._get_genre(item)
self.assertEqual(res, (config['lastgenre']['fallback'].get(),
u'fallback'))
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
|
import os
import re
import sys
import traceback
import pygmi
from pygmi.util import prop
from pygmi import monitor, client, curry, call, program_list, _
__all__ = ('keys', 'events', 'Match')
class Match(object):
"""
A class used for matching events based on simple patterns.
"""
def __init__(self, *args):
"""
Creates a new Match object based on arbitrary arguments
which constitute a match pattern. Each argument matches an
element of the original event. Arguments are matched based
on their type:
_: Matches anything
set: Matches any string equal to any of its elements
list: Matches any string equal to any of its elements
tuple: Matches any string equal to any of its elements
        Additionally, any object with a 'search' attribute matches if
        that callable attribute returns a true value when given the
        element in question as its first argument.
Any other object matches if it compares equal to the
element.
"""
self.args = args
self.matchers = []
for a in args:
if a is _:
a = lambda k: True
elif isinstance(a, basestring):
a = a.__eq__
elif isinstance(a, (list, tuple, set)):
a = curry(lambda ary, k: k in ary, a)
elif hasattr(a, 'search'):
a = a.search
else:
a = str(a).__eq__
self.matchers.append(a)
def match(self, string):
"""
        Splits 'string' on ASCII spaces and returns the resulting list
        if every element matches this object's pattern, or None otherwise.
"""
ary = string.split(' ', len(self.matchers))
if all(m(a) for m, a in zip(self.matchers, ary)):
return ary
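# Illustrative usage sketch (not part of the original module; the event and
# key names below are hypothetical): Match splits an event line on spaces and
# applies one matcher per token, returning the token list on success and
# None otherwise.
def _match_example():
    m = Match('Key', ('Mod4-j', 'Mod4-k'))
    assert m.match('Key Mod4-j') == ['Key', 'Mod4-j']
    assert m.match('Key Mod1-x') is None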
def flatten(items):
"""
Given an iterator which returns (key, value) pairs, returns a
new iterator of (k, value) pairs such that every list- or
tuple-valued key in the original sequence yields an individual
pair.
Example: flatten({(1, 2, 3): 'foo', 4: 'bar'}.items()) ->
      (1, 'foo'), (2, 'foo'), (3, 'foo'), (4, 'bar')
"""
for k, v in items:
if not isinstance(k, (list, tuple)):
k = k,
for key in k:
yield key, v
class Events(object):
"""
A class to handle events read from wmii's '/event' file.
"""
def __init__(self):
"""
Initializes the event handler
"""
self.events = {}
self.eventmatchers = {}
self.alive = True
def dispatch(self, event, args=''):
"""
        Dispatches an event to any matching event handlers.
The handler which specifically matches the event name will
be called first, followed by any handlers with a 'match'
method which matches the event name concatenated to the args
string.
Param event: The name of the event to dispatch.
Param args: The single arguments string for the event.
"""
try:
if event in self.events:
self.events[event](args)
for matcher, action in self.eventmatchers.iteritems():
ary = matcher.match(' '.join((event, args)))
if ary is not None:
action(*ary)
        except Exception:
            traceback.print_exc(file=sys.stderr)
def loop(self):
"""
Enters the event loop, reading lines from wmii's '/event'
and dispatching them, via #dispatch, to event handlers.
Continues so long as #alive is True.
"""
keys.mode = 'main'
for line in client.readlines('/event'):
if not self.alive:
break
self.dispatch(*line.split(' ', 1))
self.alive = False
def bind(self, items={}, **kwargs):
"""
Binds a number of event handlers for wmii events. Keyword
arguments other than 'items' are added to the 'items' dict.
Handlers are called by #loop when a matching line is read
from '/event'. Each handler is called with, as its sole
argument, the string read from /event with its first token
stripped.
Param items: A dict of action-handler pairs to bind. Passed
through pygmi.event.flatten. Keys with a 'match' method,
such as pygmi.event.Match objects or regular expressions,
are matched against the entire event string. Any other
object matches if it compares equal to the first token of
the event.
"""
kwargs.update(items)
for k, v in flatten(kwargs.iteritems()):
if hasattr(k, 'match'):
self.eventmatchers[k] = v
else:
self.events[k] = v
def event(self, fn):
"""
A decorator which binds its wrapped function, as via #bind,
for the event which matches its name.
"""
self.bind({fn.__name__: fn})
events = Events()
class Keys(object):
"""
A class to manage wmii key bindings.
"""
def __init__(self):
"""
Initializes the class and binds an event handler for the Key
event, as via pygmi.event.events.bind.
Takes no arguments.
"""
self.modes = {}
self.modelist = []
self._set_mode('main', False)
self.defs = {}
events.bind(Key=self.dispatch)
def _add_mode(self, mode):
if mode not in self.modes:
self.modes[mode] = {
'name': mode,
'desc': {},
'groups': [],
'keys': {},
'import': {},
}
self.modelist.append(mode)
def _set_mode(self, mode, execute=True):
self._add_mode(mode)
self._mode = mode
self._keys = dict((k % self.defs, v) for k, v in
self.modes[mode]['keys'].items() +
self.modes[mode]['import'].items());
if execute:
client.write('/keys', '\n'.join(self._keys.keys()) + '\n')
mode = property(lambda self: self._mode, _set_mode,
doc="The current mode for which to dispatch keys")
@prop(doc="Returns a short help text describing the bound keys in all modes")
def help(self):
return '\n\n'.join(
('Mode %s\n' % mode['name']) +
'\n\n'.join((' %s\n' % str(group or '')) +
'\n'.join(' %- 20s %s' % (key % self.defs,
mode['keys'][key].__doc__)
for key in mode['desc'][group])
for group in mode['groups'])
for mode in (self.modes[name]
for name in self.modelist))
def bind(self, mode='main', keys=(), import_={}):
"""
Binds a series of keys for the given 'mode'. Keys may be
specified as a dict or as a sequence of tuple values and
strings.
In the latter case, documentation may be interspersed with
key bindings. Any value in the sequence which is not a tuple
begins a new key group, with that value as a description.
A tuple with two values is considered a key-value pair,
where the value is the handler for the named key. A
three valued tuple is considered a key-description-value
tuple, with the same semantics as above.
Each key binding is interpolated with the values of
#defs, as if processed by (key % self.defs)
Param mode: The name of the mode for which to bind the keys.
Param keys: A sequence of keys to bind.
Param import_: A dict specifying keys which should be
imported from other modes, of the form
{ 'mode': ['key1', 'key2', ...] }
"""
self._add_mode(mode)
mode = self.modes[mode]
group = None
def add_desc(key, desc):
if group not in mode['desc']:
mode['desc'][group] = []
mode['groups'].append(group)
if key not in mode['desc'][group]:
mode['desc'][group].append(key);
if isinstance(keys, dict):
keys = keys.iteritems()
for obj in keys:
if isinstance(obj, tuple) and len(obj) in (2, 3):
if len(obj) == 2:
key, val = obj
desc = ''
elif len(obj) == 3:
key, desc, val = obj
mode['keys'][key] = val
add_desc(key, desc)
val.__doc__ = str(desc)
else:
group = obj
def wrap_import(mode, key):
return lambda k: self.modes[mode]['keys'][key](k)
for k, v in flatten((v, k) for k, v in import_.iteritems()):
mode['import'][k % self.defs] = wrap_import(v, k)
def dispatch(self, key):
"""
Dispatches a key event for the current mode.
Param key: The key spec for which to dispatch.
"""
mode = self.modes[self.mode]
if key in self._keys:
return self._keys[key](key)
keys = Keys()
class Actions(object):
"""
A class to represent user-callable actions. All methods without
leading underscores in their names are treated as callable actions.
"""
def __getattr__(self, name):
if name.startswith('_') or name.endswith('_'):
raise AttributeError()
if hasattr(self, name + '_'):
return getattr(self, name + '_')
def action(args=''):
cmd = pygmi.find_script(name)
if cmd:
call(pygmi.shell, '-c', '$* %s' % args, '--', cmd,
background=True)
return action
def _call(self, args):
"""
Calls a method named for the first token of 'args', with the
rest of the string as its first argument. If the method
doesn't exist, a trailing underscore is appended.
"""
a = args.split(' ', 1)
if a:
getattr(self, a[0])(*a[1:])
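    # For example (illustrative), _call('rehash extra args') invokes
    # self.rehash('extra args'); __getattr__ above falls back to a trailing
    # underscore variant or to an external script with that name.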
@prop(doc="Returns the names of the public methods callable as actions, with trailing underscores stripped.")
def _choices(self):
return sorted(
program_list(pygmi.confpath) +
[re.sub('_$', '', k) for k in dir(self)
if not re.match('^_', k) and callable(getattr(self, k))])
# vim:se sts=4 sw=4 et:
|
|
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests to cover the common module."""
__author__ = 'Joseph DiLallo'
import unittest
import warnings
from fakefs import fake_filesystem
from fakefs import fake_tempfile
import mock
import suds
import yaml
import googleads.common
import googleads.errors
import googleads.oauth2
class CommonTest(unittest.TestCase):
"""Tests for the googleads.common module."""
# A dictionary with all the required OAuth 2.0 keys
_OAUTH_DICT = {'client_id': 'a', 'client_secret': 'b', 'refresh_token': 'c'}
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem()
self.tempfile = fake_tempfile.FakeTempfileModule(self.filesystem)
self.fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
def _CreateYamlFile(self, key, values, add_oauth=True):
"""Return the filename of a yaml file created for testing."""
yaml_file = self.tempfile.NamedTemporaryFile(delete=False)
if add_oauth: values.update(self._OAUTH_DICT)
with self.fake_open(yaml_file.name, 'w') as yaml_handle:
yaml_handle.write(yaml.dump({key: values}))
return yaml_file.name
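  # For reference, _CreateYamlFile('woo', {'needed': 'd'}) produces a file
  # that looks roughly like this (illustrative):
  #   woo:
  #     client_id: a
  #     client_secret: b
  #     needed: d
  #     refresh_token: c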
def testLoadFromStorage_missingFile(self):
with mock.patch('googleads.common.open', self.fake_open, create=True):
self.assertRaises(
googleads.errors.GoogleAdsValueError,
googleads.common.LoadFromStorage,
'yaml_filename', 'woo', [], [])
def testLoadFromStorage_missingOAuthKey(self):
yaml_fname = self._CreateYamlFile('woo', {}, False)
with mock.patch('googleads.common.open', self.fake_open, create=True):
self.assertRaises(
googleads.errors.GoogleAdsValueError,
googleads.common.LoadFromStorage,
yaml_fname, 'woo', [], [])
def testLoadFromStorage_passesWithNoRequiredKeys(self):
yaml_fname = self._CreateYamlFile('woo', {})
with mock.patch('googleads.oauth2.GoogleRefreshTokenClient') as mock_client:
with mock.patch('googleads.common.open', self.fake_open, create=True):
rval = googleads.common.LoadFromStorage(yaml_fname, 'woo', [], [])
mock_client.assert_called_once_with('a', 'b', 'c', None)
self.assertEquals({'oauth2_client': mock_client.return_value}, rval)
def testLoadFromStorage_missingRequiredKey(self):
with mock.patch('googleads.common.open', self.fake_open, create=True):
# Both keys are missing.
yaml_fname = self._CreateYamlFile('two', {})
self.assertRaises(
googleads.errors.GoogleAdsValueError,
googleads.common.LoadFromStorage,
yaml_fname, 'two', ['needed', 'keys'], [])
# One key is missing.
yaml_fname = self._CreateYamlFile('three', {'needed': 'd'})
self.assertRaises(
googleads.errors.GoogleAdsValueError,
googleads.common.LoadFromStorage,
yaml_fname, 'three', ['needed', 'keys'], [])
def testLoadFromStorage(self):
# No optional keys present.
yaml_fname = self._CreateYamlFile('one', {'needed': 'd', 'keys': 'e',
'https_proxy': 'www.moo.cow'})
with mock.patch('googleads.oauth2.GoogleRefreshTokenClient') as mock_client:
with mock.patch('googleads.common.open', self.fake_open, create=True):
rval = googleads.common.LoadFromStorage(
yaml_fname, 'one', ['needed', 'keys'], ['other'])
mock_client.assert_called_once_with('a', 'b', 'c', 'www.moo.cow')
self.assertEquals({'oauth2_client': mock_client.return_value,
'needed': 'd', 'keys': 'e'}, rval)
# The optional key is present.
yaml_fname = self._CreateYamlFile('one', {'needed': 'd', 'keys': 'e',
'other': 'f'})
with mock.patch('googleads.oauth2.GoogleRefreshTokenClient') as mock_client:
with mock.patch('googleads.common.open', self.fake_open, create=True):
rval = googleads.common.LoadFromStorage(
yaml_fname, 'one', ['needed', 'keys'], ['other'])
mock_client.assert_called_once_with('a', 'b', 'c', None)
self.assertEquals({'oauth2_client': mock_client.return_value,
'needed': 'd', 'keys': 'e', 'other': 'f'}, rval)
def testLoadFromStorage_relativePath(self):
fake_os = fake_filesystem.FakeOsModule(self.filesystem)
yaml_contents = {'one': {'needed': 'd', 'keys': 'e'}}
yaml_contents['one'].update(self._OAUTH_DICT)
self.filesystem.CreateFile('/home/test/yaml/googleads.yaml',
contents=yaml.dump(yaml_contents))
fake_os.chdir('/home/test')
with mock.patch('googleads.oauth2.GoogleRefreshTokenClient') as mock_client:
with mock.patch('googleads.common.os', fake_os):
with mock.patch('googleads.common.open', self.fake_open, create=True):
rval = googleads.common.LoadFromStorage(
'yaml/googleads.yaml', 'one', ['needed', 'keys'], ['other'])
mock_client.assert_called_once_with('a', 'b', 'c', None)
self.assertEquals({'oauth2_client': mock_client.return_value,
'needed': 'd', 'keys': 'e'}, rval)
def testLoadFromStorage_warningWithUnrecognizedKey(self):
yaml_fname = self._CreateYamlFile('kval', {'Im': 'here', 'whats': 'this?'})
with mock.patch('googleads.oauth2.GoogleRefreshTokenClient') as mock_client:
with warnings.catch_warnings(record=True) as captured_warnings:
with mock.patch('googleads.common.open', self.fake_open, create=True):
rval = googleads.common.LoadFromStorage(
yaml_fname, 'kval', ['Im'], ['other'])
mock_client.assert_called_once_with('a', 'b', 'c', None)
self.assertEquals({'oauth2_client': mock_client.return_value,
'Im': 'here'}, rval)
self.assertEqual(len(captured_warnings), 1)
def testGenerateLibSig(self):
my_name = 'Joseph'
self.assertEquals(
' (%s, %s, %s)' % (my_name, googleads.common._COMMON_LIB_SIG,
googleads.common._PYTHON_VERSION),
googleads.common.GenerateLibSig(my_name))
def testPackForSuds(self):
factory = mock.Mock()
    # Test that anything other than a list, tuple, or dict passes right through.
self.assertEquals('input', googleads.common._PackForSuds('input', factory))
self.assertEquals(set([1]),
googleads.common._PackForSuds(set([1]), factory))
# Test that lists not containing dicts with xsi types return the same
# values, and test that the input list was not modified.
input_list = ['1', set([3]), {'moo': 'cow'}]
self.assertEquals(['1', set([3]), {'moo': 'cow'}],
googleads.common._PackForSuds(input_list, factory))
self.assertEquals(['1', set([3]), {'moo': 'cow'}], input_list)
# Test that dicts without xsi types return the same values, and test that
# the input dict was not modified
input_dict = {'1': 'moo', frozenset([2]): ['val']}
self.assertEquals({'1': 'moo', frozenset([2]): ['val']},
googleads.common._PackForSuds(input_dict, factory))
self.assertEquals({'1': 'moo', frozenset([2]): ['val']}, input_dict)
# Now it gets interesting... Test that a dictionary with xsi type gets
# changed into an object. Test that the input dict is unmodified.
input_dict = {'xsi_type': 'EliteCampaign', 'name': 'Sales', 'id': 123456,
'metadata': {'a': 'b'}}
rval = googleads.common._PackForSuds(input_dict, factory)
factory.create.assert_called_once_with('EliteCampaign')
self.assertEquals('Sales', rval.name)
self.assertEquals(123456, rval.id)
self.assertEquals({'a': 'b'}, rval.metadata)
self.assertEquals({'xsi_type': 'EliteCampaign', 'name': 'Sales',
'id': 123456, 'metadata': {'a': 'b'}}, input_dict)
# Test that this all works recursively. Nest dictionaries in dictionaries in
# lists in classes.
factory = mock.Mock()
factory.create.side_effect = [mock.Mock(), mock.Mock()]
input_list = [{'xsi_type': 'EliteCampaign', 'name': 'Sales', 'id': 123456,
'metadata': {'xsi_type': 'metadata', 'a': {'b': 'c'}}},
{'i do not have': 'a type'}]
rval = googleads.common._PackForSuds(input_list, factory)
factory.create.assert_any_call('EliteCampaign')
factory.create.assert_any_call('metadata')
self.assertIsInstance(rval, list)
self.assertEquals('Sales', rval[0].name)
self.assertEquals(123456, rval[0].id)
self.assertEquals({'b': 'c'}, rval[0].metadata.a)
self.assertEquals({'i do not have': 'a type'}, rval[1])
self.assertEquals(
[{'xsi_type': 'EliteCampaign', 'name': 'Sales', 'id': 123456,
'metadata': {'xsi_type': 'metadata', 'a': {'b': 'c'}}},
{'i do not have': 'a type'}], input_list)
def testPackForSuds_secondNamespace(self):
factory = mock.Mock()
factory.create.side_effect = [suds.TypeNotFound(''), mock.Mock()]
input_list = {'xsi_type': 'EliteCampaign', 'name': 'Sales'}
rval = googleads.common._PackForSuds(input_list, factory)
factory.create.assert_any_call('EliteCampaign')
factory.create.assert_any_call('ns0:EliteCampaign')
self.assertEquals('Sales', rval.name)
class SudsServiceProxyTest(unittest.TestCase):
"""Tests for the googleads.common.SudsServiceProxy class."""
def testSudsServiceProxy(self):
header_handler = mock.Mock()
port = mock.Mock()
port.methods = ('SoapMethod',)
services = mock.Mock()
services.ports = [port]
client = mock.Mock()
client.wsdl.services = [services]
suds_service_wrapper = googleads.common.SudsServiceProxy(
client, header_handler)
self.assertEquals(suds_service_wrapper.SoapMethod,
suds_service_wrapper._method_proxies['SoapMethod'])
self.assertEquals(suds_service_wrapper.NotSoapMethod,
client.service.NotSoapMethod)
with mock.patch('googleads.common._PackForSuds') as mock_pack_for_suds:
mock_pack_for_suds.return_value = 'modified_test'
suds_service_wrapper.SoapMethod('test')
mock_pack_for_suds.assert_called_once_with('test', client.factory)
client.service.SoapMethod.assert_called_once_with('modified_test')
header_handler.SetHeaders.assert_called_once_with(client)
class HeaderHandlerTest(unittest.TestCase):
"""Tests for the googleads.common.HeaderHeader class."""
def testSetHeaders(self):
"""For coverage."""
self.assertRaises(
NotImplementedError, googleads.common.HeaderHandler().SetHeaders,
mock.Mock())
class PrunerTest(unittest.TestCase):
"""Tests for the googleads.common.Pruner class."""
def testPruner_emptyBody(self):
context = mock.Mock()
body_node = mock.Mock()
soap_method_node = mock.Mock()
context.envelope = [None, body_node]
body_node.getChildren.return_value = [soap_method_node]
soap_method_node.isempty.return_value = True
googleads.common.Pruner().marshalled(context)
self.assertFalse(body_node.prune.called)
def testPruner_notEmptyBody(self):
context = mock.Mock()
body_node = mock.Mock()
soap_method_node = mock.Mock()
context.envelope = [None, body_node]
body_node.getChildren.return_value = [soap_method_node]
soap_method_node.isempty.return_value = False
googleads.common.Pruner().marshalled(context)
self.assertTrue(body_node.prune.called)
if __name__ == '__main__':
unittest.main()
|
|
import numpy as np
import scipy as sp
from scipy.fft import fft
from scipy.fftpack import fftshift
class Operation(object):
    def __init__(self):
self.name = "Empty Operation"
class LeftShift(Operation):
def __init__(self, shiftPoints):
self.name = "Left Shift"
self.shiftPoints = shiftPoints
def run(self, nmrData):
nmrData.allFid.append([nmrData.allFid[-1][k][self.shiftPoints:] for
k in range(len(nmrData.allFid[-1]))])
nmrData.fidTimeForLB = nmrData.fidTime[self.shiftPoints:]
class ZeroFill(Operation):
def __init__(self, totalPoints):
self.name = "Zero Filling"
self.totalPoints = totalPoints
def run(self, nmrData):
z = np.zeros(self.totalPoints)
nmrData.allFid.append([np.concatenate((k, z[:-len(k)])) for
k in nmrData.allFid[-1]])
class LineBroadening(Operation):
def __init__(self, lineBroadening):
self.name = "Exponential Linebroadening"
self.lineBroadening = lineBroadening
def run(self, nmrData):
print("LB: {} Hz".format(self.lineBroadening))
length = len(nmrData.allFid[-1][0])
nmrData.allFid.append(
            np.multiply(nmrData.allFid[-1][:],
                        np.exp(-nmrData.fidTimeForLB[:length]
                               * self.lineBroadening * np.pi)))
class FourierTransform(Operation):
def __init__(self):
self.name = "Fourier Transform"
def run(self, nmrData):
spectra = np.array([fftshift(fft(fid)) for fid in nmrData.allFid[-1]])
nmrData.allSpectra = []
nmrData.allSpectra.append(spectra)
nmrData.frequency = np.linspace(-nmrData.sweepWidthTD2/2,
nmrData.sweepWidthTD2/2,
len(nmrData.allFid[-1][0]))
class FourierTransform2D(Operation):
def __init__(self):
self.name = "2D Fourier Transform"
def run(self, nmrData):
spectra = np.array([fftshift(fft(nmrData.allFid[-1]))])
nmrData.allSpectra = []
nmrData.allSpectra.append(spectra)
nmrData.frequency = np.linspace(-nmrData.sweepWidthTD2/2,
nmrData.sweepWidthTD2/2,
len(nmrData.allFid[-1][0]))
nmrData.frequency1 = np.linspace(-nmrData.sweepWidthTD1/2,
nmrData.sweepWidthTD1/2,
nmrData.sizeTD1)
class Phase0D(Operation):
def __init__(self, phase, degree=True, domain="F"):
"""Zero Order Phase Correction
Arguments are nmrData and phase.
By defaullt the phase is in degree and the correction is applied in
the freuency domain."""
self.name = "Phase Zero Order"
self.phase = phase
self.degree = degree
self.domain = domain
def run(self, nmrData):
if self.degree:
phaseFactor = np.exp(-1j*float(self.phase)/180.*np.pi)
else:
phaseFactor = np.exp(-1j*self.phase)
if self.domain == "F":
spectra = [spec*phaseFactor for spec in nmrData.allSpectra[-1]]
nmrData.allSpectra.append(spectra)
elif self.domain == "T":
fids = [fid*phaseFactor for fid in nmrData.allFid[-1]]
nmrData.allFid.append(fids)
class Phase1D(Operation):
def __init__(self, value, pivot=0, scale="Hz", unit="radian"):
"""Zero Order Phase Correction
Arguments are nmrData and phase.
By defaullt the phase is in degree and the correction is applied in
the freuency domain."""
self.name = "Phase First Order"
self.value = value
self.pivot = pivot
self.unit = unit
self.scale = scale
def run(self, nmrData):
if self.unit == "radian":
self.phase = self.value
elif self.unit == "degree":
self.phase = self.value*np.pi/180
elif self.unit == "time":
self.phase = 2*np.pi*nmrData.frequency[-1]*self.value
print("Phase: ", self.phase)
phaseValues = np.linspace(-self.phase/2, self.phase/2,
num=len(nmrData.frequency))
if self.pivot != 0:
o = GetIndex(self.pivot, scale=self.scale)
i = o.run(nmrData)
phaseValues = phaseValues - phaseValues[i]
spectra = [spec*np.exp(-1j*phaseValues)
for spec in nmrData.allSpectra[-1]]
nmrData.allSpectra.append(spectra)
class GetIndex(Operation):
def __init__(self, value, scale = "Hz"):
"""Get Indices corresponding to the frequency or ppm Value."""
self.value = value
self.scale = scale
self.name = "Get Index"
def run(self, nmrData):
if self.scale == "Hz":
index = np.argmin(abs(nmrData.frequency - self.value))
elif self.scale == "ppm":
index = np.argmin(abs(nmrData.ppmScale - self.value))
return index
class GetIndices(Operation):
def __init__(self, values, scale="Hz"):
"""Get Indices corresponding to the frequency."""
self.values = values
self.scale = scale
self.name = "Get Indices"
def run(self, nmrData):
indexList = []
for v in self.values:
o = GetIndex(v, scale=self.scale)
indexList.append(o.run(nmrData))
return sorted(indexList)
class GetPartialSpectrum(Operation):
def __init__(self, index, start, stop, scale="Hz"):
"""
- `index`: selects fid out of a 2D experiment
- `start`: start value (frequency or ppm)
- `stop`: stop value
- `scale`: "Hz" or "ppm"
"""
self.index = index
self.start = start
self.stop = stop
self.scale = scale
self.name = "Get Partial Spectrum"
def run(self, nmrData):
o = GetIndices([self.start, self.stop], scale=self.scale)
values = o.run(nmrData)
return np.real(nmrData.allSpectra[-1][self.index][values[0]:values[1]])
class GetJoinedPartialSpectra(Operation):
def __init__(self, start, stop, scale="Hz", returnX=False):
self.start = start
self.stop = stop
self.scale = scale
self.returnX = returnX
self.name = "Get Joined Partial Spectra"
def run(self, nmrData):
spectra = []
o = GetPartialSpectrum(0, self.start, self.stop, scale=self.scale)
for index in range(nmrData.sizeTD1):
o.index = index
spectra.extend(o.run(nmrData))
if self.returnX:
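            # Map each concatenated point onto a fractional spectrum index:
            # points belonging to spectrum k fall in [k + 0.5, k + 1.5), so
            # every spectrum occupies one unit on the x axis.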
x = np.array(list(range(len(spectra)))) * nmrData.sizeTD1 / float(len(spectra)) + 0.5
return x, spectra
else:
return spectra
class GetSingleIntegral(Operation):
def __init__(self, index, start, stop, scale="Hz", part="real"):
"""This function integrates the real part between start and stop. standard scale is Hz
Arguments:
- `index`: index of the spectrum
- `start`: lower limit of integration
- `stop`: upper limit of integration
- `scale`: Hz or ppm
- `part`: real or magnitude
"""
self.index = index
self.start = start
self.stop = stop
self.scale = scale
self.part = part
self.name = "Get Single Integral"
def run(self, nmrData):
dx = 0
if self.scale == "Hz":
dx = np.abs(nmrData.frequency[1] - nmrData.frequency[0])
elif self.scale == "ppm":
dx = np.abs(nmrData.ppmScale[1] - nmrData.ppmScale[0])
else:
dx = 1
o = GetIndices([self.start, self.stop], scale=self.scale)
indices = o.run(nmrData)
i1 = indices[0]
i2 = indices[1]
if self.part == "real":
retVal = np.sum(np.real(nmrData.allSpectra[-1][self.index][i1:i2])) * dx
elif self.part == "magnitude":
retVal = np.sum(np.abs(nmrData.allSpectra[-1][self.index][i1:i2]))*dx
return retVal
class GetAllIntegrals(Operation):
def __init__(self, start, stop, scale="Hz", part="real"):
"""This function integrates the real part between start and stop.
Default scale is Hz.
Arguments:
- `start`: lower limit of integration
- `stop`: upper limit of integration
- `scale`: Hz or ppm
- `part`: real or magnitude
"""
self.start = start
self.stop = stop
self.scale = scale
self.part = part
self.name = "Get All Integrals"
def run(self, nmrData):
returnList = []
o = GetSingleIntegral(0, self.start, self.stop, scale=self.scale,
part=self.part)
for i in range(nmrData.sizeTD1):
o.index = i
returnList.append(o.run(nmrData))
return returnList
class GetPhase(Operation):
def __init__(self, index, start, stop, scale="Hz"):
"""This function returns the 0 order phase in degrees
that maximizes the integral in the specified range."""
self.index = index
self.start = start
self.stop = stop
self.scale = scale
self.name = "Get Single Phase"
def run(self, nmrData):
o = GetIndices([self.start, self.stop], scale=self.scale)
indices = o.run(nmrData)
i1 = indices[0]
i2 = indices[1]
phiTest = np.linspace(-180, 179, num=360)
integrals = np.zeros(np.size(phiTest))
for k in range(len(integrals)):
integrals[k] = np.sum(np.real(
nmrData.allSpectra[-1][self.index][i1:i2]
* np.exp(-1j*float(phiTest[k])/180.*np.pi)))
return phiTest[np.argmax(integrals)]
class GetAllPhases(Operation):
    def __init__(self, start, stop, scale="Hz", unwrap=False):
self.start = start
self.stop = stop
self.scale = scale
self.unwrap = unwrap
def run(self, nmrData):
pList = np.array([])
o = GetPhase(0, self.start, self.stop, scale=self.scale)
for i in range(nmrData.sizeTD1):
o.index = i
pList = np.append(pList, o.run(nmrData))
if self.unwrap:
return np.unwrap(pList/360*2*np.pi)*360/(2*np.pi)
else:
return pList
class SetPPMScale(Operation):
def __init__(self, offset=-1, ppmValue=-1, scale="offset"):
"""
SetPPMScale(self, offset, ppmValue, scale = 'offset')
this function constructs a chemical shift axis
for a given offset and corresponding ppmValue.
scale can be 'offset' or 'absolute'
        - offset is used for a signal frequency measured from the carrier,
        - absolute is used for the absolute signal frequency (in Hz).
        The 'absolute' option is useful when creating a ppm scale based on
        data from a different experiment with a different SFO1.
        - if no options are passed, the routine will parse the O1 value from
          the acqus file, assuming that it corresponds to 0 ppm
"""
self.offset = offset
self.ppmValue = ppmValue
self.scale = scale
self.name = "Set PPM Scale"
def run(self, nmrData):
if self.offset == self.ppmValue == -1:
# print("Setting PPM Scale automatically")
print("Setting PPM Scale automatically with offset from acqus: ",
nmrData.parDictionary["O1"])
self.offset = - nmrData.parDictionary["O1"]
self.ppmValue = 0
if self.scale == "offset":
freqRef = nmrData.carrier + self.offset
elif self.scale == "absolute":
freqRef = self.offset
f0 = freqRef/(1 + self.ppmValue*1e-6)
nmrData.ppmScale = (nmrData.frequency + nmrData.carrier - f0)/f0*1e6
class BaseLineCorrection(Operation):
def __init__(self, regionSet, degree, scale="Hz", applyLocally=False):
"""
Polynomial baseline correction.
        regionSet: list of intervals where the baseline is to be determined
degree: degree of the polynomial to be used.
scale: scale as used in the region specification, default is "Hz",
other option is "ppm"
applyLocally: if set to true, apply baseline correction only
within the outer limits of the region list.
"""
self.regionSet = regionSet
self.degree = degree
self.scale = scale
        self.applyLocally = applyLocally
self.name = "Baseline Correction"
def run(self, nmrData):
fidList = []
for k in range(len(nmrData.allSpectra[-1])):
xVals = []
yVals = []
indices = []
thisFid = []
for pair in self.regionSet:
o = GetIndices(pair, scale=self.scale)
                i1, i2 = o.run(nmrData)
                # Accumulate all region indices so that applyLocally can use
                # their overall min/max below.
                indices.extend([i1, i2])
                assert i1 != i2, """Empty Frequency Range -
                Frequency for Baseline Correction outside spectral range?"""
xVals.extend(nmrData.frequency[i1:i2])
yVals.extend(np.real(nmrData.allSpectra[-1][k][i1:i2]))
z = np.polyfit(xVals, yVals, self.degree)
p = np.poly1d(z)
self.p = p
if self.applyLocally:
thisFid = nmrData.allSpectra[-1][k]
thisFid[min(indices):max(indices)] -= (
p(nmrData.frequency[min(indices):max(indices)]))
else:
thisFid = nmrData.allSpectra[-1][k] - p(nmrData.frequency)
fidList.append(thisFid)
print("BaselineCorrection done. Polynomial: " + p.__repr__())
print("Length of nmrData.allSpectra before: ", len(nmrData.allSpectra))
nmrData.allSpectra.append(fidList)
print("Length of nmrData.allSpectra after: ", len(nmrData.allSpectra))
|
|
import numpy
import six
from chainer import cuda
from chainer import function
from chainer.utils import type_check
def _mat_ptrs(a):
"""Creates an array of pointers to matrices
Args:
a: A batch of matrices on GPU
Returns:
GPU array of pointers to matrices
"""
if a.shape[0] == 1:
return cuda.cupy.full((1,), a[0].data.ptr, dtype=numpy.uintp)
else:
stride = a[1].data.ptr - a[0].data.ptr
return cuda.cupy.arange(
a[0].data.ptr,
a[0].data.ptr + stride * a.shape[0],
stride,
dtype=numpy.uintp)
def _as_mat(x):
return x.reshape((len(x), 1)) if len(x.shape) == 1 else x
def _as_batch_mat(x):
return x.reshape((x.shape[0], x.shape[1], 1)) if len(x.shape) == 2 else x
def _matmul(a, b, transa=False, transb=False, transout=False):
a = _as_mat(a)
b = _as_mat(b)
if transa:
a = a.T
if transb:
b = b.T
if transout:
# (A B)^T = B^T A^T
a, b = b.T, a.T
return a.dot(b)
def _get_ld(a):
shape = a.shape[-2:]
strides = a.strides[-2:]
trans = numpy.argmin(strides)
return trans, int(max(shape[trans], max(strides) // a.itemsize))
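# Worked example (illustrative): for a C-contiguous float32 matrix of shape
# (n, m) the last two strides are (4 * m, 4), so argmin picks axis 1 and the
# leading dimension is m; for a Fortran-ordered matrix it picks axis 0 and
# the leading dimension is n. The returned flag tells the cuBLAS call below
# whether the matrix must be treated as transposed.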
def _batch_matmul_gpu(a, b, out, transa=False, transb=False, transout=False):
a = _as_batch_mat(a)
b = _as_batch_mat(b)
trans_axis = (0, 2, 1)
if transout:
out = out.transpose(trans_axis)
needtrans, _ = _get_ld(out)
if needtrans == 1:
# (A B)^T = B^T A^T
a, b = b, a
transa, transb = not transb, not transa
out = out.transpose(trans_axis)
if transa:
a = a.transpose(trans_axis)
if transb:
b = b.transpose(trans_axis)
transa, lda = _get_ld(a)
transb, ldb = _get_ld(b)
transout, ldout = _get_ld(out)
la, n, ka = a.shape
lb, kb, m = b.shape
assert ka == kb
assert transout == 0 or ldout == 1
assert out.shape == (la, n, m)
ap = _mat_ptrs(a)
bp = _mat_ptrs(b)
outp = _mat_ptrs(out)
cuda.cublas.sgemmBatched(
cuda.Device().cublas_handle,
transa,
transb,
n, m, ka, 1.0,
ap.data.ptr, lda,
bp.data.ptr, ldb,
0.0, outp.data.ptr, ldout, la)
def _check_ndim(in_type, lower=1, upper=2):
type_check.expect(
in_type.ndim >= lower,
in_type.ndim <= upper
)
def _convert_type(in_type, vector_ndim=1):
if in_type.ndim.eval() == vector_ndim:
in_type = type_check.Variable(
type_check.TypeInfo(in_type.shape.eval() + (1,),
in_type.dtype),
'%s(1-D array)' % in_type.name)
else:
in_type.name = '%s(2-D array)' % in_type.name
return in_type
def _get_check_index(trans, right, row_idx=0, col_idx=1):
if trans ^ right:
return row_idx
else:
return col_idx
class MatMul(function.Function):
def __init__(self, transa=False, transb=False):
self.transa = transa
self.transb = transb
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
a_type, b_type = in_types
type_check.expect(
a_type.dtype == numpy.float32,
b_type.dtype == numpy.float32
)
_check_ndim(a_type)
_check_ndim(b_type)
a_type = _convert_type(a_type)
b_type = _convert_type(b_type)
a_idx = _get_check_index(self.transa, False)
b_idx = _get_check_index(self.transb, True)
type_check.expect(
a_type.shape[a_idx] == b_type.shape[b_idx]
)
def forward(self, x):
return _matmul(x[0], x[1], transa=self.transa, transb=self.transb),
def backward(self, x, gy):
gx0 = _matmul(
gy[0], x[1], transb=not self.transb, transout=self.transa
).reshape(x[0].shape)
gx1 = _matmul(
x[0], gy[0], transa=not self.transa, transout=self.transb
).reshape(x[1].shape)
return gx0, gx1
def matmul(a, b, transa=False, transb=False):
"""Computes the matrix multiplication of two arrays.
Args:
a (Variable): The left operand of the matrix multiplication.
A 1-D array of shape (N,) is considered as an Nx1 matrix.
A 2-D array of shape (M, N) is considered as an MxN matrix.
b (Variable): The right operand of the matrix multiplication.
Its array is treated as a matrix in the same way as ``a``'s array.
transa (bool): If true, transpose a.
transb (bool): If true, transpose b.
Returns:
~chainer.Variable: The result of the matrix multiplication as a 2-D
array.
"""
return MatMul(transa=transa, transb=transb)(a, b)
class BatchMatMul(function.Function):
def __init__(self, transa=False, transb=False):
self.transa = transa
self.transb = transb
def _output_shape(self, a, b):
batch_size = a.shape[0]
a_mat_shape = _as_mat(a[0]).shape
b_mat_shape = _as_mat(b[0]).shape
m = a_mat_shape[1 if self.transa else 0]
n = b_mat_shape[0 if self.transb else 1]
return (batch_size, m, n)
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
a_type, b_type = in_types
type_check.expect(
a_type.dtype == numpy.float32,
b_type.dtype == numpy.float32
)
_check_ndim(a_type, lower=2, upper=3)
_check_ndim(b_type, lower=2, upper=3)
a_type = _convert_type(a_type, vector_ndim=2)
b_type = _convert_type(b_type, vector_ndim=2)
a_idx = _get_check_index(self.transa, False, row_idx=1, col_idx=2)
b_idx = _get_check_index(self.transb, True, row_idx=1, col_idx=2)
type_check.expect(
a_type.shape[a_idx] == b_type.shape[b_idx]
)
def forward_cpu(self, x):
a, b = x
batch_size = a.shape[0]
shape = self._output_shape(a, b)
ret_dtype = numpy.find_common_type([a.dtype, b.dtype], [])
ret = numpy.empty(shape, dtype=ret_dtype)
for i in six.moves.range(batch_size):
ret[i] = _matmul(
a[i], b[i], transa=self.transa, transb=self.transb)
return ret,
def backward_cpu(self, x, gy):
a, b = x
batch_size = a.shape[0]
ga = numpy.empty_like(a)
gb = numpy.empty_like(b)
for i in six.moves.range(batch_size):
ga[i] = _matmul(
gy[0][i], b[i], transb=not self.transb,
transout=self.transa).reshape(a[0].shape)
gb[i] = _matmul(
a[i], gy[0][i], transa=not self.transa,
transout=self.transb).reshape(b[0].shape)
return ga, gb
def forward_gpu(self, x):
a, b = x
shape = self._output_shape(a, b)
ret = cuda.zeros(shape)
_batch_matmul_gpu(
a, b, transa=self.transa, transb=self.transb, out=ret)
return ret,
def backward_gpu(self, x, gy):
a, b = x
batch_size = a.shape[0]
ga = cuda.cupy.empty((batch_size,) + _as_mat(a[0]).shape, a.dtype)
gb = cuda.cupy.empty((batch_size,) + _as_mat(b[0]).shape, a.dtype)
_batch_matmul_gpu(
gy[0], b, transb=not self.transb, transout=self.transa, out=ga)
_batch_matmul_gpu(
a, gy[0], transa=not self.transa, transout=self.transb, out=gb)
ga = ga.reshape(a.shape)
gb = gb.reshape(b.shape)
return ga, gb
def batch_matmul(a, b, transa=False, transb=False):
"""Computes the batch matrix multiplications of two sets of arrays.
Args:
a (Variable): The left operand of the batch matrix multiplications.
A 2-D array of shape (B, N,) is considered as B Nx1 matrices.
A 3-D array of shape (B, M, N) is considered as B MxN matrices.
b (Variable): The right operand of the batch matrix multiplications.
Its array is treated as matrices in the same way as ``a``'s array.
transa (bool): If true, transpose each matrix in a.
transb (bool): If true, transpose each matrix in b.
Returns:
~chainer.Variable: The result of the batch matrix multiplications as a
3-D array.
"""
return BatchMatMul(transa=transa, transb=transb)(a, b)
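# Illustrative usage sketch (float32 arrays wrapped in chainer.Variable, as
# required by check_type_forward above); not part of the original module.
def _example_usage():
    from chainer import Variable
    a = Variable(numpy.ones((2, 3), dtype=numpy.float32))
    b = Variable(numpy.ones((3, 4), dtype=numpy.float32))
    y = matmul(a, b)               # (2, 3) x (3, 4) -> (2, 4)
    a3 = Variable(numpy.ones((5, 2, 3), dtype=numpy.float32))
    b3 = Variable(numpy.ones((5, 3, 4), dtype=numpy.float32))
    y3 = batch_matmul(a3, b3)      # five independent (2, 3) x (3, 4) products
    return y, y3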
|
|
# Copyright 2014 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
import six
from os_win import constants
from os_win import exceptions
from os_win.tests.unit import test_base
from os_win.utils.io import ioutils
from os_win.utils.winapi import constants as w_const
from os_win.utils.winapi import wintypes
@ddt.ddt
class IOUtilsTestCase(test_base.BaseTestCase):
_autospec_classes = [
ioutils.win32utils.Win32Utils,
]
def setUp(self):
super(IOUtilsTestCase, self).setUp()
self._setup_lib_mocks()
self._ioutils = ioutils.IOUtils()
self._mock_run = self._ioutils._win32_utils.run_and_check_output
self._run_args = dict(kernel32_lib_func=True,
failure_exc=exceptions.Win32IOException,
eventlet_nonblocking_mode=False)
self.addCleanup(mock.patch.stopall)
def _setup_lib_mocks(self):
self._ctypes = mock.Mock()
# This is used in order to easily make assertions on the variables
# passed by reference.
self._ctypes.byref = lambda x: (x, "byref")
self._ctypes.c_wchar_p = lambda x: (x, "c_wchar_p")
mock.patch.multiple(ioutils,
ctypes=self._ctypes, kernel32=mock.DEFAULT,
create=True).start()
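    # With the byref/c_wchar_p lambdas above, e.g. ctypes.byref(handle) inside
    # ioutils returns the tuple (handle, "byref"), so the
    # assert_called_once_with checks below can compare arguments directly.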
def test_run_and_check_output(self):
ret_val = self._ioutils._run_and_check_output(
mock.sentinel.func, mock.sentinel.arg)
self._mock_run.assert_called_once_with(mock.sentinel.func,
mock.sentinel.arg,
**self._run_args)
self.assertEqual(self._mock_run.return_value, ret_val)
@ddt.data({},
{'inherit_handle': True},
{'sec_attr': mock.sentinel.sec_attr})
@ddt.unpack
@mock.patch.object(wintypes, 'HANDLE')
@mock.patch.object(wintypes, 'SECURITY_ATTRIBUTES')
def test_create_pipe(self, mock_sec_attr_cls, mock_handle_cls,
inherit_handle=False, sec_attr=None):
r, w = self._ioutils.create_pipe(
sec_attr, mock.sentinel.size, inherit_handle)
exp_sec_attr = None
if sec_attr:
exp_sec_attr = sec_attr
elif inherit_handle:
exp_sec_attr = mock_sec_attr_cls.return_value
self.assertEqual(mock_handle_cls.return_value.value, r)
self.assertEqual(mock_handle_cls.return_value.value, w)
self._mock_run.assert_called_once_with(
ioutils.kernel32.CreatePipe,
self._ctypes.byref(mock_handle_cls.return_value),
self._ctypes.byref(mock_handle_cls.return_value),
self._ctypes.byref(exp_sec_attr) if exp_sec_attr else None,
mock.sentinel.size,
**self._run_args)
if not sec_attr and exp_sec_attr:
self.assertEqual(inherit_handle, exp_sec_attr.bInheritHandle)
self.assertEqual(self._ctypes.sizeof.return_value,
exp_sec_attr.nLength)
self._ctypes.sizeof.assert_called_once_with(exp_sec_attr)
def test_wait_named_pipe(self):
fake_timeout_s = 10
self._ioutils.wait_named_pipe(mock.sentinel.pipe_name,
timeout=fake_timeout_s)
self._mock_run.assert_called_once_with(
ioutils.kernel32.WaitNamedPipeW,
self._ctypes.c_wchar_p(mock.sentinel.pipe_name),
fake_timeout_s * 1000,
**self._run_args)
def test_open(self):
handle = self._ioutils.open(mock.sentinel.path,
mock.sentinel.access,
mock.sentinel.share_mode,
mock.sentinel.create_disposition,
mock.sentinel.flags)
self._mock_run.assert_called_once_with(
ioutils.kernel32.CreateFileW,
self._ctypes.c_wchar_p(mock.sentinel.path),
mock.sentinel.access,
mock.sentinel.share_mode,
None,
mock.sentinel.create_disposition,
mock.sentinel.flags,
None,
error_ret_vals=[w_const.INVALID_HANDLE_VALUE],
**self._run_args)
self.assertEqual(self._mock_run.return_value, handle)
def test_cancel_io(self):
self._ioutils.cancel_io(mock.sentinel.handle,
mock.sentinel.overlapped_struct,
ignore_invalid_handle=True)
expected_ignored_err_codes = [w_const.ERROR_NOT_FOUND,
w_const.ERROR_INVALID_HANDLE]
self._mock_run.assert_called_once_with(
ioutils.kernel32.CancelIoEx,
mock.sentinel.handle,
self._ctypes.byref(mock.sentinel.overlapped_struct),
ignored_error_codes=expected_ignored_err_codes,
**self._run_args)
def test_close_handle(self):
self._ioutils.close_handle(mock.sentinel.handle)
self._mock_run.assert_called_once_with(ioutils.kernel32.CloseHandle,
mock.sentinel.handle,
**self._run_args)
def test_wait_io_completion(self):
self._ioutils._wait_io_completion(mock.sentinel.event)
self._mock_run.assert_called_once_with(
ioutils.kernel32.WaitForSingleObjectEx,
mock.sentinel.event,
ioutils.WAIT_INFINITE_TIMEOUT,
True,
error_ret_vals=[w_const.WAIT_FAILED],
**self._run_args)
def test_set_event(self):
self._ioutils.set_event(mock.sentinel.event)
self._mock_run.assert_called_once_with(ioutils.kernel32.SetEvent,
mock.sentinel.event,
**self._run_args)
def test_reset_event(self):
self._ioutils._reset_event(mock.sentinel.event)
self._mock_run.assert_called_once_with(ioutils.kernel32.ResetEvent,
mock.sentinel.event,
**self._run_args)
def test_create_event(self):
event = self._ioutils._create_event(mock.sentinel.event_attributes,
mock.sentinel.manual_reset,
mock.sentinel.initial_state,
mock.sentinel.name)
self._mock_run.assert_called_once_with(ioutils.kernel32.CreateEventW,
mock.sentinel.event_attributes,
mock.sentinel.manual_reset,
mock.sentinel.initial_state,
mock.sentinel.name,
error_ret_vals=[None],
**self._run_args)
self.assertEqual(self._mock_run.return_value, event)
@mock.patch.object(wintypes, 'LPOVERLAPPED', create=True)
@mock.patch.object(wintypes, 'LPOVERLAPPED_COMPLETION_ROUTINE',
lambda x: x, create=True)
@mock.patch.object(ioutils.IOUtils, 'set_event')
def test_get_completion_routine(self, mock_set_event,
mock_LPOVERLAPPED):
mock_callback = mock.Mock()
compl_routine = self._ioutils.get_completion_routine(mock_callback)
compl_routine(mock.sentinel.error_code,
mock.sentinel.num_bytes,
mock.sentinel.lpOverLapped)
self._ctypes.cast.assert_called_once_with(mock.sentinel.lpOverLapped,
wintypes.LPOVERLAPPED)
mock_overlapped_struct = self._ctypes.cast.return_value.contents
mock_set_event.assert_called_once_with(mock_overlapped_struct.hEvent)
mock_callback.assert_called_once_with(mock.sentinel.num_bytes)
@mock.patch.object(wintypes, 'OVERLAPPED', create=True)
@mock.patch.object(ioutils.IOUtils, '_create_event')
def test_get_new_overlapped_structure(self, mock_create_event,
mock_OVERLAPPED):
overlapped_struct = self._ioutils.get_new_overlapped_structure()
self.assertEqual(mock_OVERLAPPED.return_value, overlapped_struct)
self.assertEqual(mock_create_event.return_value,
overlapped_struct.hEvent)
@mock.patch.object(ioutils.IOUtils, '_reset_event')
@mock.patch.object(ioutils.IOUtils, '_wait_io_completion')
def test_read(self, mock_wait_io_completion, mock_reset_event):
mock_overlapped_struct = mock.Mock()
mock_event = mock_overlapped_struct.hEvent
self._ioutils.read(mock.sentinel.handle, mock.sentinel.buff,
mock.sentinel.num_bytes,
mock_overlapped_struct,
mock.sentinel.compl_routine)
mock_reset_event.assert_called_once_with(mock_event)
self._mock_run.assert_called_once_with(ioutils.kernel32.ReadFileEx,
mock.sentinel.handle,
mock.sentinel.buff,
mock.sentinel.num_bytes,
self._ctypes.byref(
mock_overlapped_struct),
mock.sentinel.compl_routine,
**self._run_args)
mock_wait_io_completion.assert_called_once_with(mock_event)
@mock.patch.object(wintypes, 'DWORD')
def test_read_file(self, mock_dword):
num_bytes_read = mock_dword.return_value
ret_val = self._ioutils.read_file(
mock.sentinel.handle,
mock.sentinel.buff,
mock.sentinel.num_bytes,
mock.sentinel.overlapped_struct)
self.assertEqual(num_bytes_read.value, ret_val)
self._mock_run.assert_called_once_with(
ioutils.kernel32.ReadFile,
mock.sentinel.handle,
mock.sentinel.buff,
mock.sentinel.num_bytes,
self._ctypes.byref(num_bytes_read),
self._ctypes.byref(mock.sentinel.overlapped_struct),
**self._run_args)
@mock.patch.object(ioutils.IOUtils, '_reset_event')
@mock.patch.object(ioutils.IOUtils, '_wait_io_completion')
def test_write(self, mock_wait_io_completion, mock_reset_event):
mock_overlapped_struct = mock.Mock()
mock_event = mock_overlapped_struct.hEvent
self._ioutils.write(mock.sentinel.handle, mock.sentinel.buff,
mock.sentinel.num_bytes,
mock_overlapped_struct,
mock.sentinel.compl_routine)
mock_reset_event.assert_called_once_with(mock_event)
self._mock_run.assert_called_once_with(ioutils.kernel32.WriteFileEx,
mock.sentinel.handle,
mock.sentinel.buff,
mock.sentinel.num_bytes,
self._ctypes.byref(
mock_overlapped_struct),
mock.sentinel.compl_routine,
**self._run_args)
mock_wait_io_completion.assert_called_once_with(mock_event)
@mock.patch.object(wintypes, 'DWORD')
def test_write_file(self, mock_dword):
num_bytes_written = mock_dword.return_value
ret_val = self._ioutils.write_file(
mock.sentinel.handle,
mock.sentinel.buff,
mock.sentinel.num_bytes,
mock.sentinel.overlapped_struct)
self.assertEqual(num_bytes_written.value, ret_val)
self._mock_run.assert_called_once_with(
ioutils.kernel32.WriteFile,
mock.sentinel.handle,
mock.sentinel.buff,
mock.sentinel.num_bytes,
self._ctypes.byref(num_bytes_written),
self._ctypes.byref(mock.sentinel.overlapped_struct),
**self._run_args)
def test_buffer_ops(self):
mock.patch.stopall()
fake_data = 'fake data'
buff = self._ioutils.get_buffer(len(fake_data), data=fake_data)
buff_data = self._ioutils.get_buffer_data(buff, len(fake_data))
self.assertEqual(six.b(fake_data), buff_data)
class IOQueueTestCase(test_base.BaseTestCase):
def setUp(self):
super(IOQueueTestCase, self).setUp()
self._mock_queue = mock.Mock()
queue_patcher = mock.patch.object(ioutils.Queue, 'Queue',
new=self._mock_queue)
queue_patcher.start()
self.addCleanup(queue_patcher.stop)
self._mock_client_connected = mock.Mock()
self._ioqueue = ioutils.IOQueue(self._mock_client_connected)
def test_get(self):
self._mock_client_connected.isSet.return_value = True
self._mock_queue.get.return_value = mock.sentinel.item
queue_item = self._ioqueue.get(timeout=mock.sentinel.timeout)
self._mock_queue.get.assert_called_once_with(
self._ioqueue, timeout=mock.sentinel.timeout)
self.assertEqual(mock.sentinel.item, queue_item)
def _test_get_timeout(self, continue_on_timeout=True):
self._mock_client_connected.isSet.side_effect = [True, True, False]
self._mock_queue.get.side_effect = ioutils.Queue.Empty
queue_item = self._ioqueue.get(timeout=mock.sentinel.timeout,
continue_on_timeout=continue_on_timeout)
expected_calls_number = 2 if continue_on_timeout else 1
self._mock_queue.get.assert_has_calls(
[mock.call(self._ioqueue, timeout=mock.sentinel.timeout)] *
expected_calls_number)
self.assertIsNone(queue_item)
def test_get_continue_on_timeout(self):
# Test that the queue blocks as long
# as the client connected event is set.
self._test_get_timeout()
def test_get_break_on_timeout(self):
self._test_get_timeout(continue_on_timeout=False)
def test_put(self):
self._mock_client_connected.isSet.side_effect = [True, True, False]
self._mock_queue.put.side_effect = ioutils.Queue.Full
self._ioqueue.put(mock.sentinel.item,
timeout=mock.sentinel.timeout)
self._mock_queue.put.assert_has_calls(
[mock.call(self._ioqueue, mock.sentinel.item,
timeout=mock.sentinel.timeout)] * 2)
@mock.patch.object(ioutils.IOQueue, 'get')
def _test_get_burst(self, mock_get,
exceeded_max_size=False):
fake_data = 'fake_data'
mock_get.side_effect = [fake_data, fake_data, None]
if exceeded_max_size:
max_size = 0
else:
max_size = constants.SERIAL_CONSOLE_BUFFER_SIZE
ret_val = self._ioqueue.get_burst(
timeout=mock.sentinel.timeout,
burst_timeout=mock.sentinel.burst_timeout,
max_size=max_size)
expected_calls = [mock.call(timeout=mock.sentinel.timeout)]
expected_ret_val = fake_data
if not exceeded_max_size:
expected_calls.append(
mock.call(timeout=mock.sentinel.burst_timeout,
continue_on_timeout=False))
expected_ret_val += fake_data
mock_get.assert_has_calls(expected_calls)
self.assertEqual(expected_ret_val, ret_val)
def test_get_burst(self):
self._test_get_burst()
def test_get_burst_exceeded_size(self):
self._test_get_burst(exceeded_max_size=True)
|
|
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import json
from mcfw.properties import bool_property, unicode_property, long_property, typed_property, unicode_list_property, \
long_list_property, float_property
from rogerthat.models.apps import LookAndFeelServiceRoles, AppLookAndFeel, ToolbarSettings, HomescreenSettings, \
NavigationItem, ColorSettings
from rogerthat.models.properties.app import AutoConnectedService
from rogerthat.models.properties.friend import FriendDetail
from rogerthat.models.properties.oauth import OAuthSettings
from rogerthat.rpc import users
from rogerthat.to import TO
from rogerthat.utils.app import get_human_user_from_app_user
from rogerthat.utils.crypto import sha256_hex
from rogerthat.utils.service import create_service_identity_user
class AppInfoTO(object):
id = unicode_property('0')
name = unicode_property('1')
ios_appstore_url = unicode_property('2')
android_playstore_url = unicode_property('3')
@staticmethod
def fromModel(model):
app = AppInfoTO()
app.id = model.app_id
app.name = model.name
app.ios_appstore_url = model.ios_appstore_web_uri
app.android_playstore_url = model.android_market_android_uri
return app
class AppQRTemplateTO(object):
key_name = unicode_property('1')
is_default = bool_property('2')
description = unicode_property('3')
body_color = unicode_property('4')
def __init__(self, key_name=None, is_default=False, description=None, body_color=None):
self.key_name = key_name
self.is_default = is_default
self.description = description
self.body_color = body_color
@classmethod
def from_model(cls, model, is_default=False):
"""
Args:
model (rogerthat.models.QRTemplate)
is_default (bool)
"""
rgb = u''.join([('%X' % c).rjust(2, '0') for c in model.body_color])
return cls(model.key().name(), is_default, model.description, rgb)
class CreateAppQRTemplateTO(AppQRTemplateTO):
file = unicode_property('5')
class AppTO(object):
id = unicode_property('0')
name = unicode_property('1')
type = long_property('2')
core_branding_hash = unicode_property('3')
facebook_app_id = long_property('4')
facebook_app_secret = unicode_property('5')
ios_app_id = unicode_property('6')
android_app_id = unicode_property('7')
creation_time = long_property('8')
auto_connected_services = typed_property('9', AutoConnectedService, True)
is_default = bool_property('10')
user_regex = unicode_property('11')
dashboard_email_address = unicode_property('12')
admin_services = unicode_list_property('13')
demo = bool_property('17')
beta = bool_property('18')
chat_enabled = bool_property('19')
mdp_client_id = unicode_property('20')
mdp_client_secret = unicode_property('21')
contact_email_address = unicode_property('22')
secure = bool_property('23')
owncloud_base_uri = unicode_property('24')
owncloud_admin_username = unicode_property('25')
owncloud_admin_password = unicode_property('26')
main_service = unicode_property('27')
embedded_apps = unicode_list_property('28')
@classmethod
def from_model(cls, model):
"""
Args:
model (rogerthat.models.App)
"""
app = cls()
app.id = model.app_id
app.name = model.name
app.type = model.type
app.main_service = model.main_service
app.core_branding_hash = model.core_branding_hash
app.facebook_app_id = model.facebook_app_id
app.facebook_app_secret = model.facebook_app_secret
app.ios_app_id = model.ios_app_id
app.android_app_id = model.android_app_id
app.creation_time = model.creation_time
if model.auto_connected_services:
app.auto_connected_services = list(model.auto_connected_services)
else:
app.auto_connected_services = []
app.is_default = model.is_default
app.user_regex = model.user_regex
app.dashboard_email_address = model.dashboard_email_address
app.admin_services = model.admin_services
app.demo = model.demo
app.beta = model.beta
app.secure = model.secure
app.chat_enabled = model.chat_enabled
app.mdp_client_id = model.mdp_client_id
app.mdp_client_secret = model.mdp_client_secret
app.contact_email_address = model.contact_email_address
app.owncloud_base_uri = model.owncloud_base_uri
app.owncloud_admin_username = model.owncloud_admin_username
app.owncloud_admin_password = model.owncloud_admin_password
app.embedded_apps = model.embedded_apps if model.embedded_apps else []
return app
class CreateAppTO(object):
app_id = unicode_property('1')
name = unicode_property('2')
type = long_property('3')
dashboard_email_address = unicode_property('4')
auto_added_services = unicode_list_property('5', default=[])
def __init__(self, _id=None, name=None, _type=None, auto_added_services=None):
self.app_id = _id
self.name = name
self.type = _type
if auto_added_services:
self.auto_added_services = auto_added_services
class AppUserRelationTO(object):
email = unicode_property('1')
name = unicode_property('2')
type = unicode_property('3') # human / application
def __init__(self, email, name, type_):
self.email = email
self.name = name
self.type = type_
class AppUserTO(object):
email = unicode_property('1')
name = unicode_property('2')
relations = typed_property('3', AppUserRelationTO, True)
def __init__(self, user_profile, friendMap):
self.email = get_human_user_from_app_user(user_profile.user).email()
self.name = user_profile.name
self.relations = list()
if friendMap:
for f in friendMap.friendDetails:
if f.existence != FriendDetail.FRIEND_EXISTENCE_ACTIVE:
continue
self.relations.append(AppUserRelationTO(f.email, f.name,
u"human" if f.type == FriendDetail.TYPE_USER else u"application"))
class AppUserListResultTO(object):
cursor = unicode_property('1')
users = typed_property('2', AppUserTO, True)
class AppSettingsTO(object):
wifi_only_downloads = bool_property('1')
background_fetch_timestamps = long_list_property('2')
oauth = typed_property('3', OAuthSettings, False)
birthday_message_enabled = bool_property('4')
birthday_message = unicode_property('5')
def __init__(self, wifi_only_downloads=None, background_fetch_timestamps=None, oauth=None,
birthday_message_enabled=False, birthday_message=None):
if background_fetch_timestamps is None:
background_fetch_timestamps = []
self.wifi_only_downloads = wifi_only_downloads
self.background_fetch_timestamps = background_fetch_timestamps
self.oauth = oauth
self.birthday_message_enabled = birthday_message_enabled
self.birthday_message = birthday_message
@classmethod
def from_model(cls, model):
"""
Args:
model (rogerthat.models.AppSettings)
"""
return cls(model.wifi_only_downloads, model.background_fetch_timestamps, model.oauth,
model.birthday_message_enabled, model.birthday_message)
# This object is sent to the phones
class AppAssetTO(object):
kind = unicode_property('1')
url = unicode_property('2')
scale_x = float_property('3')
def __init__(self, kind=None, url=None, scale_x=0.0):
self.kind = kind
self.url = url
self.scale_x = scale_x
# This object is used for managing app assets
class AppAssetFullTO(AppAssetTO):
id = unicode_property('9')
app_ids = unicode_list_property('10')
content_type = unicode_property('11')
is_default = bool_property('12')
def __init__(self, key=None, kind=None, url=None, scale_x=None, app_ids=None, uploaded_on=None, modified_on=None,
content_type=None, is_default=False):
super(AppAssetFullTO, self).__init__(kind, url, scale_x)
self.id = unicode(key)
self.app_ids = app_ids
self.uploaded_on = uploaded_on
self.modified_on = modified_on
self.content_type = content_type
self.is_default = is_default
@classmethod
def from_model(cls, asset):
"""
Args:
asset (rogerthat.models.apps.AppAsset)
"""
return cls(asset.key.id(), asset.asset_type, asset.serving_url, asset.scale_x, asset.app_ids, asset.uploaded_on,
asset.modified_on, asset.content_type, asset.is_default)
class DefaultBrandingTO(object):
id = unicode_property('1')
branding = unicode_property('2')
app_ids = unicode_list_property('3')
branding_type = unicode_property('4')
is_default = bool_property('5')
def __init__(self, key=None, branding=None, app_ids=None, branding_type=None, is_default=False):
self.id = unicode(key)
self.branding = branding
self.app_ids = app_ids
self.branding_type = branding_type
self.is_default = is_default
@classmethod
def from_model(cls, model):
"""
Args:
model (rogerthat.models.apps.DefaultBranding)
"""
return cls(model.key.id(), model.branding, model.app_ids, model.branding_type, model.is_default)
class NavigationItemTO(object):
# for these types the 'action' needs to be hashed when sent to the user
HASHED_ACTION_TYPES = ('action', 'click')
action_type = unicode_property('1') # null, action, click, cordova
# None means opening an activity
# action means listing all services with that action and opening that action when clicked
# click means clicking on a service menu item (linked to service_email).
# If service_email is None -> the main service email is used
# (action and click should be the hashed tag of the service menu item)
action = unicode_property('2') # news, messages, ...
icon = unicode_property('3') # font-awesome icon name
icon_color = unicode_property('4')
text = unicode_property('5') # translation key
    # deprecated, should be included in params instead
collapse = bool_property('6', default=False)
service_email = unicode_property('7')
# json string, KeyValueTO will only support string values
params = unicode_property('8', default=None)
def __init__(self, action_type=None, action=None, icon=None, icon_color=None, text=None, collapse=False,
service_email=None, params=None):
self.action_type = action_type
self.action = action
self.icon = icon
self.icon_color = icon_color
self.text = text
self.collapse = collapse
self.service_email = service_email
self.params = params
@classmethod
def from_model(cls, model):
"""
Args:
model (rogerthat.models.apps.NavigationItem)
"""
collapse = model.collapse or model.params.get('collapse', False) if model.params else False
return cls(model.action_type, model.action, model.icon, model.icon_color, model.text, collapse,
model.service_email, unicode(json.dumps(model.params or {})))
def to_model(self):
return NavigationItem(
action_type=self.action_type,
action=self.action,
icon=self.icon,
icon_color=self.icon_color,
text=self.text,
service_email=self.service_email,
params=json.loads(self.params) if self.params else None
)
class ColorSettingsTO(object):
primary_color = unicode_property('1')
primary_color_dark = unicode_property('2')
# Unused but released in iOS in some apps so we have to keep this
secondary_color = unicode_property('3', default=None)
primary_icon_color = unicode_property('4')
tint_color = unicode_property('5')
def __init__(self, primary_color=None, primary_color_dark=None, primary_icon_color=None,
tint_color=None):
self.primary_color = primary_color
self.primary_color_dark = primary_color_dark
self.secondary_color = None
self.primary_icon_color = primary_icon_color
self.tint_color = tint_color
@classmethod
def from_model(cls, model):
"""
Args:
model (rogerthat.models.apps.ColorSettings)
"""
return cls(model.primary_color, model.primary_color_dark, model.primary_icon_color, model.tint_color)
def to_model(self):
return ColorSettings(
primary_color=self.primary_color,
primary_color_dark=self.primary_color_dark,
primary_icon_color=self.primary_icon_color,
tint_color=self.tint_color
)
class HomeScreenSettingsTO(object):
STYLE_NEWS = u'news'
STYLE_MESSAGES = u'messages'
color = unicode_property('1')
items = typed_property('2', NavigationItemTO, True)
style = unicode_property('3')
header_image_url = unicode_property('4')
def __init__(self, color=None, items=None, style=None, header_image_url=None):
self.color = color
self.items = items if items else []
self.style = style
self.header_image_url = header_image_url
@classmethod
def from_model(cls, model):
"""
Args:
model (rogerthat.models.apps.HomescreenSettings)
"""
return cls(model.color, [NavigationItemTO.from_model(item) for item in model.items], model.style,
model.header_image_url)
def to_model(self):
return HomescreenSettings(
color=self.color,
items=[item.to_model() for item in self.items],
style=self.style,
header_image_url=self.header_image_url
)
class ToolbarSettingsTO(object):
items = typed_property('1', NavigationItemTO, True) # type: list of NavigationItemTO
def __init__(self, items=None):
self.items = items if items else []
@classmethod
def from_model(cls, model):
"""
Args:
model (rogerthat.models.apps.ToolbarSettings)
"""
return cls([NavigationItemTO.from_model(item) for item in model.items])
def to_model(self):
return ToolbarSettings(
items=[item.to_model() for item in self.items]
)
class LookAndFeelTO(object):
colors = typed_property('1', ColorSettingsTO, False)
homescreen = typed_property('2', HomeScreenSettingsTO, False)
toolbar = typed_property('3', ToolbarSettingsTO, False)
def __init__(self, colors=None, homescreen=None, toolbar=None):
self.colors = colors
self.homescreen = homescreen
self.toolbar = toolbar
@classmethod
def from_model(cls, model):
"""
Args:
model (rogerthat.models.apps.AppLookAndFeel)
Returns:
"""
colors = ColorSettingsTO.from_model(model.colors)
homescreen = HomeScreenSettingsTO.from_model(model.homescreen)
toolbar = ToolbarSettingsTO.from_model(model.toolbar)
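        # 'action' and 'click' items reference service menu items by hashed tag,
        # so hash the raw action value before it is sent to the client.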
for ni in homescreen.items + toolbar.items:
if ni.action_type in NavigationItemTO.HASHED_ACTION_TYPES:
ni.action = sha256_hex(ni.action).decode('utf8')
return cls(colors, homescreen, toolbar)
class LookAndFeelServiceRolesTO(object):
role_ids = long_list_property('1')
service_email = unicode_property('2')
service_identity = unicode_property('3')
def __init__(self, role_ids=None, service_email=None, service_identity=None):
self.role_ids = role_ids if role_ids else []
self.service_email = service_email
self.service_identity = service_identity
def to_model(self):
service_identity_user = create_service_identity_user(users.User(self.service_email), self.service_identity)
return LookAndFeelServiceRoles(
role_ids=self.role_ids,
service_email=service_identity_user.email()
)
@classmethod
def from_model(cls, model):
"""
Args:
model (LookAndFeelServiceRoles)
"""
        service_user, service_identity = model.service_identity_tuple
return cls(model.role_ids, service_user.email(), service_identity)
class AppLookAndFeelTO(LookAndFeelTO):
id = long_property('50')
app_id = unicode_property('51')
roles = typed_property('52', LookAndFeelServiceRolesTO, True)
def __init__(self, role_id=None, colors=None, homescreen=None, toolbar=None, app_id=None, roles=None):
self.id = role_id
self.app_id = app_id
self.roles = roles if roles else []
super(AppLookAndFeelTO, self).__init__(colors, homescreen, toolbar)
@classmethod
def from_model(cls, model):
"""
Args:
model (AppLookAndFeel)
"""
colors = ColorSettingsTO.from_model(model.colors)
homescreen = HomeScreenSettingsTO.from_model(model.homescreen)
toolbar = ToolbarSettingsTO.from_model(model.toolbar)
app_id = model.app_id
roles = [LookAndFeelServiceRolesTO.from_model(role) for role in model.roles] if model.roles else []
return cls(model.id, colors, homescreen, toolbar, app_id, roles)
def to_model(self):
return AppLookAndFeel(
app_id=self.app_id,
colors=self.colors.to_model(),
homescreen=self.homescreen.to_model(),
toolbar=self.toolbar.to_model(),
roles=[role.to_model() for role in self.roles]
)
class PutLoyaltyUserResultTO(object):
url = unicode_property('1')
email = unicode_property('2')
app_id = unicode_property('3')
class GetAppAssetRequestTO(object):
kind = unicode_property('1')
class GetAppAssetResponseTO(AppAssetTO):
pass
class UpdateAppAssetRequestTO(AppAssetTO):
pass
class UpdateAppAssetResponseTO(object):
pass
class UpdateLookAndFeelResponseTO(object):
pass
class UpdateLookAndFeelRequestTO(object):
look_and_feel = typed_property('1', LookAndFeelTO, False)
def __init__(self, look_and_feel=None):
self.look_and_feel = look_and_feel
class AppTranslationTO(object):
key = unicode_property('key')
value = unicode_property('value')
def __init__(self, key, value):
self.key = key
self.value = value
class CreateEmbeddedApplicationTO(TO):
name = unicode_property('name')
file = unicode_property('file')
tags = unicode_list_property('tags')
url_regexes = unicode_list_property('url_regexes', default=[])
class UpdateEmbeddedApplicationTO(CreateEmbeddedApplicationTO):
pass
# For requests to the app
class EmbeddedAppTO(TO):
name = unicode_property('name')
serving_url = unicode_property('serving_url')
version = long_property('version')
url_regexes = unicode_list_property('url_regexes', default=[])
class GetEmbeddedAppsResponseTO(TO):
embedded_apps = typed_property('embedded_apps', EmbeddedAppTO, True)
class GetEmbeddedAppsRequestTO(TO):
pass
class GetEmbeddedAppResponseTO(EmbeddedAppTO):
pass
class GetEmbeddedAppRequestTO(TO):
name = unicode_property('name')
class UpdateEmbeddedAppRequestTO(EmbeddedAppTO):
pass
class UpdateEmbeddedAppResponseTO(TO):
pass
class UpdateEmbeddedAppsRequestTO(GetEmbeddedAppsResponseTO):
pass
class UpdateEmbeddedAppsResponseTO(TO):
pass
|
|
# Licensed under a 3-clause BSD style license - see PYFITS.rst
"""
Convenience functions
=====================
The functions in this module provide shortcuts for some of the most basic
operations on FITS files, such as reading and updating the header. They are
included directly in the 'astropy.io.fits' namespace so that they can be used
like::
astropy.io.fits.getheader(...)
These functions are primarily for convenience when working with FITS files in
the command-line interpreter. If performing several operations on the same
file, such as in a script, it is better to *not* use these functions, as each
one must open and re-parse the file. In such cases it is better to use
:func:`astropy.io.fits.open` and work directly with the
:class:`astropy.io.fits.HDUList` object and underlying HDU objects.
Several of the convenience functions, such as `getheader` and `getdata`, support
special arguments for selecting which extension HDU to use when working with a
multi-extension FITS file. There are a few supported argument formats for
selecting the extension. See the documentation for `getdata` for an
explanation of all the different formats.
.. warning::
All arguments to convenience functions other than the filename that are
*not* for selecting the extension HDU should be passed in as keyword
arguments. This is to avoid ambiguity and conflicts with the
extension arguments. For example, to set NAXIS=1 on the Primary HDU:
Wrong::
astropy.io.fits.setval('myimage.fits', 'NAXIS', 1)
The above example will try to set the NAXIS value on the first extension
HDU to blank. That is, the argument '1' is assumed to specify an extension
HDU.
Right::
astropy.io.fits.setval('myimage.fits', 'NAXIS', value=1)
This will set the NAXIS keyword to 1 on the primary HDU (the default). To
specify the first extension HDU use::
astropy.io.fits.setval('myimage.fits', 'NAXIS', value=1, ext=1)
This complexity arises out of the attempt to simultaneously support
multiple argument formats that were used in past versions of PyFITS.
Unfortunately, it is not possible to support all formats without
introducing some ambiguity. A future Astropy release may standardize
around a single format and officially deprecate the other formats.
"""
import operator
import os
import warnings
import numpy as np
from .diff import FITSDiff, HDUDiff
from .file import FILE_MODES, _File
from .hdu.base import _BaseHDU, _ValidHDU
from .hdu.hdulist import fitsopen, HDUList
from .hdu.image import PrimaryHDU, ImageHDU
from .hdu.table import BinTableHDU
from .header import Header
from .util import fileobj_closed, fileobj_name, fileobj_mode, _is_int
from ...units import Unit
from ...units.format.fits import UnitScaleError
from ...units import Quantity
from ...extern import six
from ...extern.six import string_types
from ...utils.exceptions import AstropyUserWarning
from ...utils.decorators import deprecated_renamed_argument
__all__ = ['getheader', 'getdata', 'getval', 'setval', 'delval', 'writeto',
'append', 'update', 'info', 'tabledump', 'tableload',
'table_to_hdu', 'printdiff']
def getheader(filename, *args, **kwargs):
"""
Get the header from an extension of a FITS file.
Parameters
----------
filename : file path, file object, or file like object
File to get header from. If an opened file object, its mode
        must be one of the following: rb, rb+, or ab+.
ext, extname, extver
The rest of the arguments are for extension specification. See the
`getdata` documentation for explanations/examples.
kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
Returns
-------
header : `Header` object
"""
mode, closed = _get_file_mode(filename)
hdulist, extidx = _getext(filename, mode, *args, **kwargs)
try:
hdu = hdulist[extidx]
header = hdu.header
finally:
hdulist.close(closed=closed)
return header
def getdata(filename, *args, **kwargs):
"""
Get the data from an extension of a FITS file (and optionally the
header).
Parameters
----------
filename : file path, file object, or file like object
File to get data from. If opened, mode must be one of the
        following: rb, rb+, or ab+.
ext
The rest of the arguments are for extension specification.
They are flexible and are best illustrated by examples.
No extra arguments implies the primary header::
getdata('in.fits')
By extension number::
getdata('in.fits', 0) # the primary header
getdata('in.fits', 2) # the second extension
getdata('in.fits', ext=2) # the second extension
By name, i.e., ``EXTNAME`` value (if unique)::
getdata('in.fits', 'sci')
getdata('in.fits', extname='sci') # equivalent
        Note that ``EXTNAME`` values are not case sensitive.
        By combination of ``EXTNAME`` and ``EXTVER`` as separate
arguments or as a tuple::
getdata('in.fits', 'sci', 2) # EXTNAME='SCI' & EXTVER=2
getdata('in.fits', extname='sci', extver=2) # equivalent
getdata('in.fits', ('sci', 2)) # equivalent
Ambiguous or conflicting specifications will raise an exception::
getdata('in.fits', ext=('sci',1), extname='err', extver=2)
header : bool, optional
If `True`, return the data and the header of the specified HDU as a
tuple.
lower, upper : bool, optional
If ``lower`` or ``upper`` are `True`, the field names in the
returned data object will be converted to lower or upper case,
respectively.
view : ndarray, optional
When given, the data will be returned wrapped in the given ndarray
subclass by calling::
data.view(view)
kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
Returns
-------
array : array, record array or groups data object
Type depends on the type of the extension being referenced.
If the optional keyword ``header`` is set to `True`, this
function will return a (``data``, ``header``) tuple.
"""
mode, closed = _get_file_mode(filename)
header = kwargs.pop('header', None)
lower = kwargs.pop('lower', None)
upper = kwargs.pop('upper', None)
view = kwargs.pop('view', None)
hdulist, extidx = _getext(filename, mode, *args, **kwargs)
try:
hdu = hdulist[extidx]
data = hdu.data
if data is None and extidx == 0:
try:
hdu = hdulist[1]
data = hdu.data
except IndexError:
raise IndexError('No data in this HDU.')
if data is None:
raise IndexError('No data in this HDU.')
if header:
hdr = hdu.header
finally:
hdulist.close(closed=closed)
# Change case of names if requested
trans = None
if lower:
trans = operator.methodcaller('lower')
elif upper:
trans = operator.methodcaller('upper')
if trans:
if data.dtype.names is None:
# this data does not have fields
return
if data.dtype.descr[0][0] == '':
# this data does not have fields
return
data.dtype.names = [trans(n) for n in data.dtype.names]
# allow different views into the underlying ndarray. Keep the original
# view just in case there is a problem
if isinstance(view, type) and issubclass(view, np.ndarray):
data = data.view(view)
if header:
return data, hdr
else:
return data
def getval(filename, keyword, *args, **kwargs):
"""
Get a keyword's value from a header in a FITS file.
Parameters
----------
filename : file path, file object, or file like object
Name of the FITS file, or file object (if opened, mode must be
        one of the following: rb, rb+, or ab+).
keyword : str
Keyword name
ext, extname, extver
The rest of the arguments are for extension specification.
See `getdata` for explanations/examples.
kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
        *Note:* This function automatically specifies ``do_not_scale_image_data=True``
        when opening the file so that values can be retrieved from the
unmodified header.
Returns
-------
keyword value : str, int, or float
"""
if 'do_not_scale_image_data' not in kwargs:
kwargs['do_not_scale_image_data'] = True
hdr = getheader(filename, *args, **kwargs)
return hdr[keyword]
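# Usage sketch for getval (file name and keyword are illustrative only):
#     naxis = getval('myimage.fits', 'NAXIS')          # primary HDU
#     naxis1 = getval('myimage.fits', 'NAXIS', ext=1)  # first extension HDU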
def setval(filename, keyword, *args, **kwargs):
"""
Set a keyword's value from a header in a FITS file.
    If the keyword already exists, its value/comment will be updated.
If it does not exist, a new card will be created and it will be
placed before or after the specified location. If no ``before`` or
``after`` is specified, it will be appended at the end.
When updating more than one keyword in a file, this convenience
function is a much less efficient approach compared with opening
the file for update, modifying the header, and closing the file.
Parameters
----------
filename : file path, file object, or file like object
        Name of the FITS file, or file object. If opened, mode must be update
(rb+). An opened file object or `~gzip.GzipFile` object will be closed
upon return.
keyword : str
Keyword name
value : str, int, float, optional
Keyword value (default: `None`, meaning don't modify)
comment : str, optional
Keyword comment, (default: `None`, meaning don't modify)
before : str, int, optional
Name of the keyword, or index of the card before which the new card
will be placed. The argument ``before`` takes precedence over
``after`` if both are specified (default: `None`).
after : str, int, optional
Name of the keyword, or index of the card after which the new card will
be placed. (default: `None`).
savecomment : bool, optional
When `True`, preserve the current comment for an existing keyword. The
argument ``savecomment`` takes precedence over ``comment`` if both
specified. If ``comment`` is not specified then the current comment
will automatically be preserved (default: `False`).
ext, extname, extver
The rest of the arguments are for extension specification.
See `getdata` for explanations/examples.
kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
        *Note:* This function automatically specifies ``do_not_scale_image_data=True``
        when opening the file so that values can be retrieved from the
unmodified header.
"""
if 'do_not_scale_image_data' not in kwargs:
kwargs['do_not_scale_image_data'] = True
value = kwargs.pop('value', None)
comment = kwargs.pop('comment', None)
before = kwargs.pop('before', None)
after = kwargs.pop('after', None)
savecomment = kwargs.pop('savecomment', False)
closed = fileobj_closed(filename)
hdulist, extidx = _getext(filename, 'update', *args, **kwargs)
try:
if keyword in hdulist[extidx].header and savecomment:
comment = None
hdulist[extidx].header.set(keyword, value, comment, before, after)
finally:
hdulist.close(closed=closed)
def delval(filename, keyword, *args, **kwargs):
"""
Delete all instances of keyword from a header in a FITS file.
Parameters
----------
filename : file path, file object, or file like object
        Name of the FITS file, or file object. If opened, mode must be update
(rb+). An opened file object or `~gzip.GzipFile` object will be closed
upon return.
keyword : str, int
Keyword name or index
ext, extname, extver
The rest of the arguments are for extension specification.
See `getdata` for explanations/examples.
kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
        *Note:* This function automatically specifies ``do_not_scale_image_data=True``
        when opening the file so that values can be retrieved from the
unmodified header.
"""
if 'do_not_scale_image_data' not in kwargs:
kwargs['do_not_scale_image_data'] = True
closed = fileobj_closed(filename)
hdulist, extidx = _getext(filename, 'update', *args, **kwargs)
try:
del hdulist[extidx].header[keyword]
finally:
hdulist.close(closed=closed)
@deprecated_renamed_argument('clobber', 'overwrite', '2.0')
def writeto(filename, data, header=None, output_verify='exception',
overwrite=False, checksum=False):
"""
Create a new FITS file using the supplied data/header.
Parameters
----------
filename : file path, file object, or file like object
File to write to. If opened, must be opened in a writeable binary
mode such as 'wb' or 'ab+'.
data : array, record array, or groups data object
data to write to the new file
header : `Header` object, optional
the header associated with ``data``. If `None`, a header
of the appropriate type is created for the supplied data. This
argument is optional.
output_verify : str
Output verification option. Must be one of ``"fix"``, ``"silentfix"``,
``"ignore"``, ``"warn"``, or ``"exception"``. May also be any
combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``,
        ``"+warn"``, or ``"+exception"`` (e.g. ``"fix+warn"``). See :ref:`verify`
for more info.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` (``IOError`` for Python 2) if ``False`` and the
output file exists. Default is ``False``.
.. versionchanged:: 1.3
``overwrite`` replaces the deprecated ``clobber`` argument.
checksum : bool, optional
If `True`, adds both ``DATASUM`` and ``CHECKSUM`` cards to the
headers of all HDU's written to the file.
"""
hdu = _makehdu(data, header)
if hdu.is_image and not isinstance(hdu, PrimaryHDU):
hdu = PrimaryHDU(data, header=header)
hdu.writeto(filename, overwrite=overwrite, output_verify=output_verify,
checksum=checksum)
def table_to_hdu(table):
"""
Convert an `~astropy.table.Table` object to a FITS
`~astropy.io.fits.BinTableHDU`.
Parameters
----------
table : astropy.table.Table
The table to convert.
Returns
-------
table_hdu : `~astropy.io.fits.BinTableHDU`
The FITS binary table HDU.
"""
# Avoid circular imports
from .connect import is_column_keyword, REMOVE_KEYWORDS
# Not all tables with mixin columns are supported
if table.has_mixin_columns:
# Import is done here, in order to avoid it at build time as erfa is not
# yet available then.
from ...table.column import BaseColumn
# Only those columns which are instances of BaseColumn or Quantity can be written
unsupported_cols = table.columns.not_isinstance((BaseColumn, Quantity))
if unsupported_cols:
unsupported_names = [col.info.name for col in unsupported_cols]
raise ValueError('cannot write table with mixin column(s) {0}'
.format(unsupported_names))
# Create a new HDU object
if table.masked:
        # Float columns' default mask value needs to be NaN
for column in six.itervalues(table.columns):
fill_value = column.get_fill_value()
if column.dtype.kind == 'f' and np.allclose(fill_value, 1e20):
column.set_fill_value(np.nan)
table_hdu = BinTableHDU.from_columns(np.array(table.filled()))
for col in table_hdu.columns:
# Binary FITS tables support TNULL *only* for integer data columns
# TODO: Determine a schema for handling non-integer masked columns
# in FITS (if at all possible)
int_formats = ('B', 'I', 'J', 'K')
if not (col.format in int_formats or
col.format.p_format in int_formats):
continue
# The astype is necessary because if the string column is less
# than one character, the fill value will be N/A by default which
# is too long, and so no values will get masked.
fill_value = table[col.name].get_fill_value()
col.null = fill_value.astype(table[col.name].dtype)
else:
table_hdu = BinTableHDU.from_columns(np.array(table.filled()))
# Set units for output HDU
for col in table_hdu.columns:
unit = table[col.name].unit
if unit is not None:
try:
col.unit = unit.to_string(format='fits')
except UnitScaleError:
scale = unit.scale
raise UnitScaleError(
"The column '{0}' could not be stored in FITS format "
"because it has a scale '({1})' that "
"is not recognized by the FITS standard. Either scale "
"the data or change the units.".format(col.name, str(scale)))
except ValueError:
warnings.warn(
"The unit '{0}' could not be saved to FITS format".format(
unit.to_string()), AstropyUserWarning)
# Try creating a Unit to issue a warning if the unit is not FITS compliant
Unit(col.unit, format='fits', parse_strict='warn')
for key, value in table.meta.items():
if is_column_keyword(key.upper()) or key.upper() in REMOVE_KEYWORDS:
warnings.warn(
"Meta-data keyword {0} will be ignored since it conflicts "
"with a FITS reserved keyword".format(key), AstropyUserWarning)
# Convert to FITS format
if key == 'comments':
key = 'comment'
if isinstance(value, list):
for item in value:
try:
table_hdu.header.append((key, item))
except ValueError:
warnings.warn(
"Attribute `{0}` of type {1} cannot be added to "
"FITS Header - skipping".format(key, type(value)),
AstropyUserWarning)
else:
try:
table_hdu.header[key] = value
except ValueError:
warnings.warn(
"Attribute `{0}` of type {1} cannot be added to FITS "
"Header - skipping".format(key, type(value)),
AstropyUserWarning)
return table_hdu
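# Usage sketch for table_to_hdu (column names and file name are illustrative
# only; assumes an `astropy.table.Table` built elsewhere):
#     from astropy.table import Table
#     t = Table({'wavelength': [1.0, 2.0], 'flux': [3.0, 4.0]})
#     hdu = table_to_hdu(t)
#     hdu.writeto('example_table.fits', overwrite=True)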
def append(filename, data, header=None, checksum=False, verify=True, **kwargs):
"""
Append the header/data to FITS file if filename exists, create if not.
If only ``data`` is supplied, a minimal header is created.
Parameters
----------
filename : file path, file object, or file like object
File to write to. If opened, must be opened for update (rb+) unless it
is a new file, then it must be opened for append (ab+). A file or
`~gzip.GzipFile` object opened for update will be closed after return.
data : array, table, or group data object
the new data used for appending
header : `Header` object, optional
The header associated with ``data``. If `None`, an appropriate header
will be created for the data object supplied.
checksum : bool, optional
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards to the header
of the HDU when written to the file.
verify : bool, optional
When `True`, the existing FITS file will be read in to verify it for
correctness before appending. When `False`, content is simply appended
to the end of the file. Setting ``verify`` to `False` can be much
faster.
kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
"""
name, closed, noexist_or_empty = _stat_filename_or_fileobj(filename)
if noexist_or_empty:
#
        # The input file or file like object either doesn't exist or is
# empty. Use the writeto convenience function to write the
# output to the empty object.
#
writeto(filename, data, header, checksum=checksum, **kwargs)
else:
hdu = _makehdu(data, header)
if isinstance(hdu, PrimaryHDU):
hdu = ImageHDU(data, header)
if verify or not closed:
f = fitsopen(filename, mode='append')
try:
f.append(hdu)
# Set a flag in the HDU so that only this HDU gets a checksum
# when writing the file.
hdu._output_checksum = checksum
finally:
f.close(closed=closed)
else:
f = _File(filename, mode='append')
try:
hdu._output_checksum = checksum
hdu._writeto(f)
finally:
f.close()
def update(filename, data, *args, **kwargs):
"""
Update the specified extension with the input data/header.
Parameters
----------
filename : file path, file object, or file like object
File to update. If opened, mode must be update (rb+). An opened file
object or `~gzip.GzipFile` object will be closed upon return.
data : array, table, or group data object
the new data used for updating
header : `Header` object, optional
The header associated with ``data``. If `None`, an appropriate header
will be created for the data object supplied.
ext, extname, extver
The rest of the arguments are flexible: the 3rd argument can be the
header associated with the data. If the 3rd argument is not a
`Header`, it (and other positional arguments) are assumed to be the
extension specification(s). Header and extension specs can also be
keyword arguments. For example::
update(file, dat, hdr, 'sci') # update the 'sci' extension
update(file, dat, 3) # update the 3rd extension
update(file, dat, hdr, 3) # update the 3rd extension
update(file, dat, 'sci', 2) # update the 2nd SCI extension
update(file, dat, 3, header=hdr) # update the 3rd extension
update(file, dat, header=hdr, ext=5) # update the 5th extension
kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
"""
# The arguments to this function are a bit trickier to deal with than others
# in this module, since the documentation has promised that the header
# argument can be an optional positional argument.
if args and isinstance(args[0], Header):
header = args[0]
args = args[1:]
else:
header = None
# The header can also be a keyword argument--if both are provided the
# keyword takes precedence
header = kwargs.pop('header', header)
new_hdu = _makehdu(data, header)
closed = fileobj_closed(filename)
hdulist, _ext = _getext(filename, 'update', *args, **kwargs)
try:
hdulist[_ext] = new_hdu
finally:
hdulist.close(closed=closed)
def info(filename, output=None, **kwargs):
"""
Print the summary information on a FITS file.
This includes the name, type, length of header, data shape and type
for each extension.
Parameters
----------
filename : file path, file object, or file like object
FITS file to obtain info from. If opened, mode must be one of
the following: rb, rb+, or ab+ (i.e. the file must be readable).
output : file, bool, optional
A file-like object to write the output to. If ``False``, does not
output to a file and instead returns a list of tuples representing the
HDU info. Writes to ``sys.stdout`` by default.
kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function sets ``ignore_missing_end=True`` by default.
"""
mode, closed = _get_file_mode(filename, default='readonly')
# Set the default value for the ignore_missing_end parameter
if 'ignore_missing_end' not in kwargs:
kwargs['ignore_missing_end'] = True
f = fitsopen(filename, mode=mode, **kwargs)
try:
ret = f.info(output=output)
finally:
if closed:
f.close()
return ret
def printdiff(inputa, inputb, *args, **kwargs):
"""
Compare two parts of a FITS file, including entire FITS files,
FITS `HDUList` objects and FITS ``HDU`` objects.
Parameters
----------
inputa : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputb``.
inputb : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputa``.
ext, extname, extver
Additional positional arguments are for extension specification if your
inputs are string filenames (will not work if
``inputa`` and ``inputb`` are ``HDU`` objects or `HDUList` objects).
They are flexible and are best illustrated by examples. In addition
to using these arguments positionally you can directly call the
keyword parameters ``ext``, ``extname``.
By extension number::
printdiff('inA.fits', 'inB.fits', 0) # the primary HDU
printdiff('inA.fits', 'inB.fits', 2) # the second extension
printdiff('inA.fits', 'inB.fits', ext=2) # the second extension
By name, i.e., ``EXTNAME`` value (if unique). ``EXTNAME`` values are
        not case sensitive::
printdiff('inA.fits', 'inB.fits', 'sci')
printdiff('inA.fits', 'inB.fits', extname='sci') # equivalent
By combination of ``EXTNAME`` and ``EXTVER`` as separate
arguments or as a tuple::
printdiff('inA.fits', 'inB.fits', 'sci', 2) # EXTNAME='SCI'
# & EXTVER=2
printdiff('inA.fits', 'inB.fits', extname='sci', extver=2)
# equivalent
printdiff('inA.fits', 'inB.fits', ('sci', 2)) # equivalent
Ambiguous or conflicting specifications will raise an exception::
printdiff('inA.fits', 'inB.fits',
ext=('sci', 1), extname='err', extver=2)
kwargs
Any additional keyword arguments to be passed to
`~astropy.io.fits.FITSDiff`.
Notes
-----
    The primary use for the `printdiff` function is to allow a quick printout
    of a FITS difference report; it writes to ``sys.stdout``.
To save the diff report to a file please use `~astropy.io.fits.FITSDiff`
directly.
"""
# Pop extension keywords
extension = {key: kwargs.pop(key) for key in ['ext', 'extname', 'extver']
if key in kwargs}
has_extensions = args or extension
if isinstance(inputa, string_types) and has_extensions:
# Use handy _getext to interpret any ext keywords, but
# will need to close a if fails
modea, closeda = _get_file_mode(inputa)
modeb, closedb = _get_file_mode(inputb)
hdulista, extidxa = _getext(inputa, modea, *args, **extension)
# Have to close a if b doesn't make it
try:
hdulistb, extidxb = _getext(inputb, modeb, *args, **extension)
except Exception:
hdulista.close(closed=closeda)
raise
try:
hdua = hdulista[extidxa]
hdub = hdulistb[extidxb]
# See below print for note
print(HDUDiff(hdua, hdub, **kwargs).report())
finally:
hdulista.close(closed=closeda)
hdulistb.close(closed=closedb)
# If input is not a string, can feed HDU objects or HDUList directly,
# but can't currently handle extensions
elif isinstance(inputa, _ValidHDU) and has_extensions:
raise ValueError("Cannot use extension keywords when providing an "
"HDU object.")
elif isinstance(inputa, _ValidHDU) and not has_extensions:
print(HDUDiff(inputa, inputb, **kwargs).report())
elif isinstance(inputa, HDUList) and has_extensions:
raise NotImplementedError("Extension specification with HDUList "
"objects not implemented.")
# This function is EXCLUSIVELY for printing the diff report to screen
# in a one-liner call, hence the use of print instead of logging
else:
print(FITSDiff(inputa, inputb, **kwargs).report())
@deprecated_renamed_argument('clobber', 'overwrite', '2.0')
def tabledump(filename, datafile=None, cdfile=None, hfile=None, ext=1,
overwrite=False):
"""
Dump a table HDU to a file in ASCII format. The table may be
dumped in three separate files, one containing column definitions,
one containing header parameters, and one for table data.
Parameters
----------
filename : file path, file object or file-like object
Input fits file.
datafile : file path, file object or file-like object, optional
Output data file. The default is the root name of the input
fits file appended with an underscore, followed by the
extension number (ext), followed by the extension ``.txt``.
cdfile : file path, file object or file-like object, optional
Output column definitions file. The default is `None`,
no column definitions output is produced.
hfile : file path, file object or file-like object, optional
Output header parameters file. The default is `None`,
no header parameters output is produced.
ext : int
The number of the extension containing the table HDU to be
dumped.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` (``IOError`` for Python 2) if ``False`` and the
output file exists. Default is ``False``.
.. versionchanged:: 1.3
``overwrite`` replaces the deprecated ``clobber`` argument.
Notes
-----
    The primary use for the `tabledump` function is to allow editing of the
    table data and parameters in a standard text editor. The
`tableload` function can be used to reassemble the table from the
three ASCII files.
"""
# allow file object to already be opened in any of the valid modes
# and leave the file in the same state (opened or closed) as when
# the function was called
mode, closed = _get_file_mode(filename, default='readonly')
f = fitsopen(filename, mode=mode)
# Create the default data file name if one was not provided
try:
if not datafile:
# TODO: Really need to provide a better way to access the name of
# any files underlying an HDU
root, tail = os.path.splitext(f._HDUList__file.name)
datafile = root + '_' + repr(ext) + '.txt'
# Dump the data from the HDU to the files
f[ext].dump(datafile, cdfile, hfile, overwrite)
finally:
if closed:
f.close()
if isinstance(tabledump.__doc__, string_types):
tabledump.__doc__ += BinTableHDU._tdump_file_format.replace('\n', '\n ')
def tableload(datafile, cdfile, hfile=None):
"""
Create a table from the input ASCII files. The input is from up
to three separate files, one containing column definitions, one
containing header parameters, and one containing column data. The
header parameters file is not required. When the header
parameters file is absent a minimal header is constructed.
Parameters
----------
datafile : file path, file object or file-like object
Input data file containing the table data in ASCII format.
cdfile : file path, file object or file-like object
Input column definition file containing the names, formats,
display formats, physical units, multidimensional array
dimensions, undefined values, scale factors, and offsets
associated with the columns in the table.
hfile : file path, file object or file-like object, optional
Input parameter definition file containing the header
parameter definitions to be associated with the table.
If `None`, a minimal header is constructed.
Notes
-----
    The primary use for the `tableload` function is to allow the input of
    table data and parameters from ASCII files that were edited in a standard
    text editor. The tabledump function can be used to create the
initial ASCII files.
"""
return BinTableHDU.load(datafile, cdfile, hfile, replace=True)
if isinstance(tableload.__doc__, string_types):
tableload.__doc__ += BinTableHDU._tdump_file_format.replace('\n', '\n ')
def _getext(filename, mode, *args, **kwargs):
"""
Open the input file, return the `HDUList` and the extension.
This supports several different styles of extension selection. See the
:func:`getdata()` documentation for the different possibilities.
"""
ext = kwargs.pop('ext', None)
extname = kwargs.pop('extname', None)
extver = kwargs.pop('extver', None)
    err_msg = ('Redundant/conflicting extension argument(s): {}'.format(
{'args': args, 'ext': ext, 'extname': extname,
'extver': extver}))
# This code would be much simpler if just one way of specifying an
# extension were picked. But now we need to support all possible ways for
# the time being.
if len(args) == 1:
# Must be either an extension number, an extension name, or an
# (extname, extver) tuple
if _is_int(args[0]) or (isinstance(ext, tuple) and len(ext) == 2):
if ext is not None or extname is not None or extver is not None:
raise TypeError(err_msg)
ext = args[0]
elif isinstance(args[0], string_types):
# The first arg is an extension name; it could still be valid
# to provide an extver kwarg
if ext is not None or extname is not None:
raise TypeError(err_msg)
extname = args[0]
else:
# Take whatever we have as the ext argument; we'll validate it
# below
ext = args[0]
elif len(args) == 2:
# Must be an extname and extver
if ext is not None or extname is not None or extver is not None:
raise TypeError(err_msg)
extname = args[0]
extver = args[1]
elif len(args) > 2:
raise TypeError('Too many positional arguments.')
if (ext is not None and
not (_is_int(ext) or
(isinstance(ext, tuple) and len(ext) == 2 and
isinstance(ext[0], string_types) and _is_int(ext[1])))):
raise ValueError(
'The ext keyword must be either an extension number '
'(zero-indexed) or a (extname, extver) tuple.')
if extname is not None and not isinstance(extname, string_types):
raise ValueError('The extname argument must be a string.')
if extver is not None and not _is_int(extver):
raise ValueError('The extver argument must be an integer.')
if ext is None and extname is None and extver is None:
ext = 0
elif ext is not None and (extname is not None or extver is not None):
raise TypeError(err_msg)
elif extname:
if extver:
ext = (extname, extver)
else:
ext = (extname, 1)
elif extver and extname is None:
raise TypeError('extver alone cannot specify an extension.')
hdulist = fitsopen(filename, mode=mode, **kwargs)
return hdulist, ext
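# Illustrative resolutions performed by _getext (file name is hypothetical; the
# forms mirror the getdata() docstring):
#     _getext('in.fits', 'readonly', 2)              -> ext == 2
#     _getext('in.fits', 'readonly', 'sci', 2)       -> ext == ('sci', 2)
#     _getext('in.fits', 'readonly', extname='sci')  -> ext == ('sci', 1)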
def _makehdu(data, header):
if header is None:
header = Header()
hdu = _BaseHDU(data, header)
if hdu.__class__ in (_BaseHDU, _ValidHDU):
# The HDU type was unrecognized, possibly due to a
# nonexistent/incomplete header
if ((isinstance(data, np.ndarray) and data.dtype.fields is not None) or
isinstance(data, np.recarray)):
hdu = BinTableHDU(data, header=header)
elif isinstance(data, np.ndarray):
hdu = ImageHDU(data, header=header)
else:
raise KeyError('Data must be a numpy array.')
return hdu
def _stat_filename_or_fileobj(filename):
closed = fileobj_closed(filename)
name = fileobj_name(filename) or ''
try:
loc = filename.tell()
except AttributeError:
loc = 0
noexist_or_empty = ((name and
(not os.path.exists(name) or
(os.path.getsize(name) == 0)))
or (not name and loc == 0))
return name, closed, noexist_or_empty
def _get_file_mode(filename, default='readonly'):
"""
    Allow file object to already be opened in any of the valid modes and
    leave the file in the same state (opened or closed) as when
the function was called.
"""
mode = default
closed = fileobj_closed(filename)
fmode = fileobj_mode(filename)
if fmode is not None:
mode = FILE_MODES.get(fmode)
if mode is None:
raise IOError(
"File mode of the input file object ({!r}) cannot be used to "
"read/write FITS files.".format(fmode))
return mode, closed
|
|
"""The test for the data filter sensor platform."""
from datetime import timedelta
import unittest
from unittest.mock import patch
from homeassistant.components.filter.sensor import (
LowPassFilter, OutlierFilter, ThrottleFilter, TimeSMAFilter,
RangeFilter, TimeThrottleFilter)
import homeassistant.util.dt as dt_util
from homeassistant.setup import setup_component
import homeassistant.core as ha
from tests.common import (get_test_home_assistant, assert_setup_component,
init_recorder_component)
class TestFilterSensor(unittest.TestCase):
"""Test the Data Filter sensor."""
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
raw_values = [20, 19, 18, 21, 22, 0]
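        # Six raw readings, converted below into states spaced one minute
        # apart; the individual filter tests feed them through in order.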
self.values = []
timestamp = dt_util.utcnow()
for val in raw_values:
self.values.append(ha.State('sensor.test_monitored',
val, last_updated=timestamp))
timestamp += timedelta(minutes=1)
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def init_recorder(self):
"""Initialize the recorder."""
init_recorder_component(self.hass)
self.hass.start()
def test_setup_fail(self):
"""Test if filter doesn't exist."""
config = {
'sensor': {
'platform': 'filter',
'entity_id': 'sensor.test_monitored',
'filters': [{'filter': 'nonexisting'}]
}
}
with assert_setup_component(0):
assert setup_component(self.hass, 'sensor', config)
def test_chain(self):
"""Test if filter chaining works."""
self.init_recorder()
config = {
'history': {
},
'sensor': {
'platform': 'filter',
'name': 'test',
'entity_id': 'sensor.test_monitored',
'filters': [{
'filter': 'outlier',
'window_size': 10,
'radius': 4.0
}, {
'filter': 'lowpass',
'time_constant': 10,
'precision': 2
}, {
'filter': 'throttle',
'window_size': 1
}]
}
}
t_0 = dt_util.utcnow() - timedelta(minutes=1)
t_1 = dt_util.utcnow() - timedelta(minutes=2)
t_2 = dt_util.utcnow() - timedelta(minutes=3)
fake_states = {
'sensor.test_monitored': [
ha.State('sensor.test_monitored', 18.0, last_changed=t_0),
ha.State('sensor.test_monitored', 19.0, last_changed=t_1),
ha.State('sensor.test_monitored', 18.2, last_changed=t_2),
]
}
with patch('homeassistant.components.history.'
'state_changes_during_period', return_value=fake_states):
with patch('homeassistant.components.history.'
'get_last_state_changes', return_value=fake_states):
with assert_setup_component(1, 'sensor'):
assert setup_component(self.hass, 'sensor', config)
for value in self.values:
self.hass.states.set(
config['sensor']['entity_id'], value.state)
self.hass.block_till_done()
state = self.hass.states.get('sensor.test')
assert '17.05' == state.state
def test_outlier(self):
"""Test if outlier filter works."""
filt = OutlierFilter(window_size=3,
precision=2,
entity=None,
radius=4.0)
for state in self.values:
filtered = filt.filter_state(state)
assert 21 == filtered.state
def test_outlier_step(self):
"""
Test step-change handling in outlier.
Test if outlier filter handles long-running step-changes correctly.
It should converge to no longer filter once just over half the
window_size is occupied by the new post step-change values.
"""
filt = OutlierFilter(window_size=3,
precision=2,
entity=None,
radius=1.1)
self.values[-1].state = 22
for state in self.values:
filtered = filt.filter_state(state)
assert 22 == filtered.state
def test_initial_outlier(self):
"""Test issue #13363."""
filt = OutlierFilter(window_size=3,
precision=2,
entity=None,
radius=4.0)
out = ha.State('sensor.test_monitored', 4000)
for state in [out]+self.values:
filtered = filt.filter_state(state)
assert 21 == filtered.state
def test_lowpass(self):
"""Test if lowpass filter works."""
filt = LowPassFilter(window_size=10,
precision=2,
entity=None,
time_constant=10)
for state in self.values:
filtered = filt.filter_state(state)
assert 18.05 == filtered.state
def test_range(self):
"""Test if range filter works."""
lower = 10
upper = 20
filt = RangeFilter(entity=None,
lower_bound=lower,
upper_bound=upper)
for unf_state in self.values:
unf = float(unf_state.state)
filtered = filt.filter_state(unf_state)
if unf < lower:
assert lower == filtered.state
elif unf > upper:
assert upper == filtered.state
else:
assert unf == filtered.state
def test_range_zero(self):
"""Test if range filter works with zeroes as bounds."""
lower = 0
upper = 0
filt = RangeFilter(entity=None,
lower_bound=lower,
upper_bound=upper)
for unf_state in self.values:
unf = float(unf_state.state)
filtered = filt.filter_state(unf_state)
if unf < lower:
assert lower == filtered.state
elif unf > upper:
assert upper == filtered.state
else:
assert unf == filtered.state
def test_throttle(self):
"""Test if lowpass filter works."""
filt = ThrottleFilter(window_size=3,
precision=2,
entity=None)
filtered = []
for state in self.values:
new_state = filt.filter_state(state)
if not filt.skip_processing:
filtered.append(new_state)
assert [20, 21] == [f.state for f in filtered]
def test_time_throttle(self):
"""Test if lowpass filter works."""
filt = TimeThrottleFilter(window_size=timedelta(minutes=2),
precision=2,
entity=None)
filtered = []
for state in self.values:
new_state = filt.filter_state(state)
if not filt.skip_processing:
filtered.append(new_state)
assert [20, 18, 22] == [f.state for f in filtered]
def test_time_sma(self):
"""Test if time_sma filter works."""
filt = TimeSMAFilter(window_size=timedelta(minutes=2),
precision=2,
entity=None,
type='last')
for state in self.values:
filtered = filt.filter_state(state)
assert 21.5 == filtered.state
|
|
# -*- coding: utf-8 -*-
"""
Contains class that orchestrates processing
"""
import re
import json
from os import listdir, stat, rename
from os.path import join, expanduser, isfile
from collections import OrderedDict
import tracktotrip as tt
from tracktotrip.utils import pairwise, estimate_meters_to_deg
from tracktotrip.location import infer_location
from tracktotrip.classifier import Classifier
from tracktotrip.learn_trip import learn_trip, complete_trip
from tracktotrip.transportation_mode import learn_transportation_mode, classify
from processmysteps import db
from .life import Life
from .default_config import CONFIG
def inside(to_find, modes):
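    # Return the first element of to_find whose lowercase form is in modes,
    # or None when there is no match.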
for elm in to_find:
if elm.lower() in modes:
return elm.lower()
return None
def gte_time(small, big):
if small.hour < big.hour:
return True
elif small.hour == big.hour and small.minute <= big.minute:
return True
else:
return False
def is_time_between(lower, time, upper):
return gte_time(lower, time) and gte_time(time, upper)
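# Note: gte_time and is_time_between compare only the hour and minute fields,
# so seconds and dates are ignored when matching a point to a LIFE span.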
def find_index_point(track, time):
for j, segment in enumerate(track.segments):
i = 0
for p_a, p_b in pairwise(segment.points):
if is_time_between(p_a.time, time, p_b.time):
return (j, i)
i = i + 1
return None, None
def apply_transportation_mode_to(track, life_content, transportation_modes):
life = Life()
life.from_string(life_content.encode('utf8').split('\n'))
for segment in track.segments:
segment.transportation_modes = []
for day in life.days:
for span in day.spans:
has = inside(span.tags, transportation_modes)
if has:
start_time = db.span_date_to_datetime(span.day, span.start)
end_time = db.span_date_to_datetime(span.day, span.end)
start_segment, start_index = find_index_point(track, start_time)
end_segment, end_index = find_index_point(track, end_time)
if start_segment is not None:
if end_index is None or end_segment != start_segment:
end_index = len(track.segments[start_segment].points) - 1
track.segments[start_segment].transportation_modes.append({
'label': has,
'from': start_index,
'to': end_index
})
def save_to_file(path, content, mode="w"):
""" Saves content to file
Args:
path (str): filepath, including filename
content (str): content to write to file
mode (str, optional): mode to write, defaults to w
"""
with open(path, mode) as dest_file:
dest_file.write(content.encode('utf-8'))
TIME_RX = re.compile(r'\<time\>([^\<]+)\<\/time\>')
def predict_start_date(filename):
""" Predicts the start date of a GPX file
Reads the first valid date, by matching TIME_RX regular expression
Args:
filename (str): file path
Returns:
:obj:`datetime.datetime`
"""
with open(filename, 'r') as opened_file:
result = TIME_RX.search(opened_file.read())
return tt.utils.isostr_to_datetime(result.group(1))
def file_details(base_path, filepath):
""" Returns file details
Example:
>>> file_details('/users/username/tracks/', '25072016.gpx')
{
'name': '25072016.gpx',
'path': '/users/username/tracks/25072016.gpx',
'size': 39083,
'start': <datetime.datetime>,
'date': '2016-07-25t07:40:52z'
}
Args:
base_path (str): Base path
        filepath (str): Filename, relative to base_path
Returns:
:obj:`dict`: See example
"""
complete_path = join(base_path, filepath)
(_, _, _, _, _, _, size, _, _, _) = stat(complete_path)
date = predict_start_date(complete_path)
return {
'name': filepath,
'path': complete_path,
'size': size,
'start': date,
'date': date.date().isoformat()
}
def update_dict(target, updater):
""" Updates a dictionary, keeping the same structure
Args:
target (:obj:`dict`): dictionary to update
updater (:obj:`dict`): dictionary with the new information
"""
target_keys = target.keys()
for key in updater.keys():
if key in target_keys:
if isinstance(target[key], dict):
update_dict(target[key], updater[key])
else:
target[key] = updater[key]
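# Example: update_dict({'a': {'b': 1, 'c': 2}}, {'a': {'b': 9}, 'x': 0})
# leaves the target as {'a': {'b': 9, 'c': 2}}; keys that are missing from the
# target (such as 'x') are ignored, preserving the original structure.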
class Step(object):
""" Step enumeration
"""
preview = 0
adjust = 1
annotate = 2
done = -1
_len = 3
@staticmethod
def next(current):
""" Advances from one step to the next
Args:
current (int): Current step
Returns:
int: next step
"""
return (current + 1) % Step._len
@staticmethod
def prev(current):
""" Backs one step
Args:
current (int): Current step
Returns:
int: previous step
"""
return (current - 1) % Step._len
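# Example: Step.next(Step.preview) == Step.adjust, and Step.next(Step.annotate)
# wraps back to Step.preview; Step.done (-1) is never produced by next()/prev()
# and is set directly by ProcessingManager.reset().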
class ProcessingManager(object):
""" Manages the processing phases
Arguments:
queue: Array of strings, with the files to be
processed. Doesn't include the current file
currentFile: String with the current file being
processed
history: Array of TrackToTrip.Track. Must always
have length greater or equal to ONE. The
last element is the current state of the system
INPUT_PATH: String with the path to the input folder
BACKUP_PATH: String with the path to the backup folder
OUTPUT_PATH: String with the path to the output folder
LIFE_PATH: String with the path to the LIFE output
folder
"""
def __init__(self, config_file):
self.config = dict(CONFIG)
if config_file and isfile(expanduser(config_file)):
with open(expanduser(config_file), 'r') as config_file:
config = json.loads(config_file.read())
update_dict(self.config, config)
clf_path = self.config['transportation']['classifier_path']
if clf_path:
self.clf = Classifier.load_from_file(open(expanduser(clf_path), 'rb'))
else:
self.clf = Classifier()
self.is_bulk_processing = False
self.queue = {}
self.life_queue = []
self.current_step = None
self.history = []
self.current_day = None
self.reset()
def list_gpxs(self):
""" Lists gpx files from the input path, and some details
Result is sorted by start date
See `file_details`
Returns:
:obj:`list` of :obj:`dict`
"""
if not self.config['input_path']:
return []
input_path = expanduser(self.config['input_path'])
files = listdir(input_path)
files = [f for f in files if f.split('.')[-1] == 'gpx']
files = [file_details(input_path, f) for f in files]
files = sorted(files, key=lambda f: f['date'])
return files
def list_lifes(self):
""" Lists life files from the input path, and some details
Returns:
:obj:`list` of :obj:`dict`
"""
if not self.config['input_path']:
return []
input_path = expanduser(self.config['input_path'])
files = listdir(input_path)
files = [f for f in files if f.split('.')[-1] == 'life']
return files
def reset(self):
""" Resets all variables and computes the first step
Returns:
:obj:`ProcessingManager`: self
"""
queue = self.list_gpxs()
if len(queue) > 0:
self.current_step = Step.preview
self.load_days()
else:
self.queue = {}
self.current_day = None
self.current_step = Step.done
self.history = []
return self
def change_day(self, day):
""" Changes current day, and computes first step
Args:
day (:obj:`datetime.date`): Only loads if it's an existing key in queue
"""
if day in self.queue.keys():
key_to_use = day
gpxs_to_use = self.queue[key_to_use]
gpxs_to_use = [tt.Track.from_gpx(gpx['path'])[0] for gpx in gpxs_to_use]
self.current_day = key_to_use
segs = []
for gpx in gpxs_to_use:
segs.extend(gpx.segments)
track = tt.Track('', segments=segs)
track.name = track.generate_name(self.config['trip_name_format'])
self.history = [track]
self.current_step = Step.preview
else:
raise TypeError('Cannot find any track for day: %s' % day)
def reload_queue(self):
""" Reloads the current queue, filling it with the current file's details existing
in the input folder
"""
queue = {}
gpxs = self.list_gpxs()
lifes = self.list_lifes()
for gpx in gpxs:
day = gpx['date']
if day in queue:
queue[day].append(gpx)
else:
queue[day] = [gpx]
self.queue = OrderedDict(sorted(queue.items()))
self.life_queue = lifes
def next_day(self, delete=True):
""" Advances a day (to next existing one)
Args:
delete (bool, optional): True to delete day from queue, NOT from input folder.
Defaults to true
"""
if delete:
del self.queue[self.current_day]
existing_days = list(self.queue.keys())
if self.current_day in existing_days:
index = existing_days.index(self.current_day)
next_day = index if len(existing_days) > index + 1 else 0
existing_days.remove(self.current_day)
else:
next_day = 0
if len(existing_days) > 0:
self.change_day(existing_days[next_day])
else:
self.reset()
def load_days(self):
""" Reloads queue and sets the current day as the oldest one
"""
self.reload_queue()
self.next_day(delete=False)
def restore(self):
""" Backs down a pass
"""
if self.current_step != Step.done and self.current_step != Step.preview:
self.current_step = Step.prev(self.current_step)
self.history.pop()
def process(self, data):
""" Processes the current step
Args:
data (:obj:`dict`): JSON payload received from the client
Returns:
:obj:`tracktotrip.Track`
"""
step = self.current_step
if 'changes' in data.keys():
changes = data['changes']
else:
changes = []
if 'LIFE' in data.keys():
life = data['LIFE']
else:
life = ''
if len(changes) > 0:
track = tt.Track.from_json(data['track'])
self.history[-1] = track
track = self.current_track().copy()
if step == Step.preview:
result = self.preview_to_adjust(track)#, changes)
elif step == Step.adjust:
result = self.adjust_to_annotate(track)
elif step == Step.annotate:
if not life or len(life) == 0:
life = track.to_life()
return self.annotate_to_next(track, life)
else:
return None
if result:
self.current_step = Step.next(self.current_step)
self.history.append(result)
return result
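    # Illustrative sketch (assumed shape, derived from the checks above) of the
    # JSON payload that `process` expects from the client:
    #
    #   {
    #       "changes": [...],   # user edits; when non-empty, "track" must hold
    #       "track": {...},     #   a tracktotrip.Track serialised with to_json()
    #       "LIFE": "..."       # LIFE annotation text, used in the annotate step
    #   }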
def bulk_process(self):
""" Starts bulk processing all GPXs queued
"""
self.is_bulk_processing = True
        lifes = [open(expanduser(join(self.config['input_path'], f)), 'r').read() for f in self.life_queue]
lifes = u'\n'.join(lifes)
while len(self.queue.values()) > 0:
# preview -> adjust
self.process({'changes': [], 'LIFE': ''})
# adjust -> annotate
self.process({'changes': [], 'LIFE': ''})
# annotate -> store
self.process({'changes': [], 'LIFE': lifes})
self.is_bulk_processing = False
def preview_to_adjust(self, track):
""" Processes a track so that it becomes a trip
More information in `tracktotrip.Track`'s `to_trip` method
Args:
track (:obj:`tracktotrip.Track`)
Returns:
:obj:`tracktotrip.Track`
"""
config = self.config
if not track.name or len(track.name) == 0:
track.name = track.generate_name(config['trip_name_format'])
track.timezone(timezone=float(config['default_timezone']))
track = track.to_trip(
smooth=config['smoothing']['use'],
smooth_strategy=config['smoothing']['algorithm'],
smooth_noise=config['smoothing']['noise'],
seg=config['segmentation']['use'],
seg_eps=config['segmentation']['epsilon'],
seg_min_time=config['segmentation']['min_time'],
simplify=config['simplification']['use'],
simplify_max_dist_error=config['simplification']['max_dist_error'],
simplify_max_speed_error=config['simplification']['max_speed_error']
)
return track
def adjust_to_annotate(self, track):
""" Extracts location and transportation modes
Args:
track (:obj:`tracktotrip.Track`)
Returns:
:obj:`tracktotrip.Track`
"""
config = self.config
c_loc = config['location']
conn, cur = self.db_connect()
def get_locations(point, radius):
""" Gets locations within a radius of a point
See `db.query_locations`
Args:
point (:obj:`tracktotrip.Point`)
radius (float): Radius, in meters
Returns:
:obj:`list` of (str, ?, ?)
"""
if cur:
return db.query_locations(cur, point.lat, point.lon, radius)
else:
return []
track.infer_location(
get_locations,
max_distance=c_loc['max_distance'],
google_key=c_loc['google_key'],
foursquare_client_id=c_loc['foursquare_client_id'],
foursquare_client_secret=c_loc['foursquare_client_secret'],
limit=c_loc['limit']
)
track.infer_transportation_mode(
self.clf,
config['transportation']['min_time']
)
db.dispose(conn, cur)
return track
def db_connect(self):
""" Creates a connection with the database
Use `db.dispose` to commit and close cursor and connection
Returns:
(psycopg2.connection, psycopg2.cursor): Both are None if the connection is invalid
"""
dbc = self.config['db']
conn = db.connect_db(dbc['host'], dbc['name'], dbc['user'], dbc['port'], dbc['pass'])
if conn:
return conn, conn.cursor()
else:
return None, None
def annotate_to_next(self, track, life):
""" Stores the track and dequeues another track to be
processed.
Moves the current GPX file from the input path to the
backup path, creates a LIFE file in the life path
and creates a trip entry in the database. Finally the
trip is exported as a GPX file to the output path.
Args:
            track (:obj:`tracktotrip.Track`)
            life (str): LIFE annotation for the track
"""
if not track.name or len(track.name) == 0:
track.name = track.generate_name(self.config['trip_name_format'])
# Export trip to GPX
if self.config['output_path']:
save_to_file(join(expanduser(self.config['output_path']), track.name), track.to_gpx())
# if not self.is_bulk_processing:
# apply_transportation_mode_to(track, life, set(self.clf.labels.classes_))
# learn_transportation_mode(track, self.clf)
# with open(self.config['transportation']['classifier_path'], 'w') as classifier_file:
# self.clf.save_to_file(classifier_file)
# To LIFE
if self.config['life_path']:
name = '.'.join(track.name.split('.')[:-1])
save_to_file(join(expanduser(self.config['life_path']), name), life)
if self.config['life_all']:
life_all_file = expanduser(self.config['life_all'])
else:
life_all_file = join(expanduser(self.config['life_path']), 'all.life')
save_to_file(life_all_file, "\n\n%s" % life, mode='a+')
conn, cur = self.db_connect()
if conn and cur:
db.load_from_segments_annotated(
cur,
self.current_track(),
life,
self.config['location']['max_distance'],
self.config['location']['min_samples']
)
def insert_can_trip(can_trip, mother_trip_id):
""" Insert a cannonical trip into the database
See `db.insert_canonical_trip`
Args:
can_trip (:obj:`tracktotrip.Segment`): Canonical trip
mother_trip_id (int): Id of the trip that originated the canonical
representation
Returns:
int: Canonical trip id
"""
return db.insert_canonical_trip(cur, can_trip, mother_trip_id)
def update_can_trip(can_id, trip, mother_trip_id):
""" Updates a cannonical trip on the database
See `db.update_canonical_trip`
Args:
can_id (int): Canonical trip id
trip (:obj:`tracktotrip.Segment`): Canonical trip
mother_trip_id (int): Id of the trip that originated the canonical
representation
"""
db.update_canonical_trip(cur, can_id, trip, mother_trip_id)
trips_ids = []
for trip in track.segments:
# To database
trip_id = db.insert_segment(
cur,
trip,
self.config['location']['max_distance'],
self.config['location']['min_samples']
)
trips_ids.append(trip_id)
d_latlon = estimate_meters_to_deg(self.config['location']['max_distance'])
# Build/learn canonical trip
canonical_trips = db.match_canonical_trip(cur, trip, d_latlon)
print "canonical_trips # = %d" % len(canonical_trips)
learn_trip(
trip,
trip_id,
canonical_trips,
insert_can_trip,
update_can_trip,
self.config['simplification']['eps'],
d_latlon
)
# db.insertStays(cur, trip, trips_ids, life)
db.dispose(conn, cur)
# Backup
if self.config['backup_path']:
for gpx in self.queue[self.current_day]:
from_path = gpx['path']
to_path = join(expanduser(self.config['backup_path']), gpx['name'])
rename(from_path, to_path)
self.next_day()
self.current_step = Step.preview
return self.current_track()
def current_track(self):
""" Gets the current trip/track
It includes all trips/tracks of the day
Returns:
:obj:`tracktotrip.Track` or None
"""
if self.current_step is Step.done:
return None
elif len(self.history) > 0:
return self.history[-1]
else:
return None
def current_state(self):
""" Gets the current processing/server state
Returns:
:obj:`dict`
"""
current = self.current_track()
return {
'step': self.current_step,
'queue': list(self.queue.items()),
'track': current.to_json() if current else None,
'life': current.to_life() if current and self.current_step is Step.annotate else '',
'currentDay': self.current_day,
'lifeQueue': self.life_queue
}
def complete_trip(self, from_point, to_point):
""" Generates possible ways to complete a set of trips
Possible completions are only generated between start and end of each pair of
trips (ordered by the starting time)
Args:
            from_point (:obj:`tracktotrip.Point`): start point of the gap to complete
            to_point (:obj:`tracktotrip.Point`): end point of the gap to complete
Returns:
:obj:`tracktotrip.Track`
"""
distance = estimate_meters_to_deg(self.config['location']['max_distance']) * 2
b_box = (
min(from_point.lat, to_point.lat) - distance,
min(from_point.lon, to_point.lon) - distance,
max(from_point.lat, to_point.lat) + distance,
max(from_point.lon, to_point.lon) + distance
)
canonical_trips = []
conn, cur = self.db_connect()
if conn and cur:
# get matching canonical trips, based on bounding box
canonical_trips = db.match_canonical_trip_bounds(cur, b_box)
print(len(canonical_trips))
db.dispose(conn, cur)
return complete_trip(canonical_trips, from_point, to_point, self.config['location']['max_distance'])
def load_life(self, content):
""" Adds LIFE content to the database
See `db.load_from_life`
Args:
            content (str): LIFE formatted string
"""
conn, cur = self.db_connect()
if conn and cur:
db.load_from_segments_annotated(
cur,
tt.Track('', []),
content,
self.config['location']['max_distance'],
self.config['location']['min_samples']
)
db.dispose(conn, cur)
def update_config(self, new_config):
update_dict(self.config, new_config)
if self.current_step is Step.done:
self.load_days()
def location_suggestion(self, point):
c_loc = self.config['location']
conn, cur = self.db_connect()
def get_locations(point, radius):
""" Gets locations within a radius of a point
See `db.query_locations`
Args:
point (:obj:`tracktotrip.Point`)
radius (float): Radius, in meters
Returns:
:obj:`list` of (str, ?, ?)
"""
if cur:
return db.query_locations(cur, point.lat, point.lon, radius)
else:
return []
locs = infer_location(
point,
get_locations,
max_distance=c_loc['max_distance'],
google_key=c_loc['google_key'],
foursquare_client_id=c_loc['foursquare_client_id'],
foursquare_client_secret=c_loc['foursquare_client_secret'],
limit=c_loc['limit']
)
db.dispose(conn, cur)
return locs.to_json()
def get_canonical_trips(self):
conn, cur = self.db_connect()
result = []
if conn and cur:
result = db.get_canonical_trips(cur)
for val in result:
val['points'] = val['points'].to_json()
val['points']['id'] = val['id']
db.dispose(conn, cur)
return [r['points'] for r in result]
def get_canonical_locations(self):
conn, cur = self.db_connect()
result = []
if conn and cur:
result = db.get_canonical_locations(cur)
for val in result:
val['points'] = val['points'].to_json()
val['points']['label'] = val['label']
db.dispose(conn, cur)
return [r['points'] for r in result]
def get_transportation_suggestions(self, points):
segment = tt.Segment(points).compute_metrics()
points = segment.points
modes = classify(self.clf, points, self.config['transportation']['min_time'])
return modes['classification']
def remove_day(self, day):
if day in self.queue.keys():
if day == self.current_day:
self.next_day()
else:
del self.queue[day]
|
|
import json
import logging
from testfixtures import LogCapture
from twisted.internet import defer
from twisted.trial.unittest import TestCase
from scrapy.http import Request
from scrapy.crawler import CrawlerRunner
from scrapy.utils.python import to_unicode
from tests.spiders import FollowAllSpider, DelaySpider, SimpleSpider, \
BrokenStartRequestsSpider, SingleRequestSpider, DuplicateStartRequestsSpider
from tests.mockserver import MockServer
class CrawlTestCase(TestCase):
def setUp(self):
self.mockserver = MockServer()
self.mockserver.__enter__()
self.runner = CrawlerRunner()
def tearDown(self):
self.mockserver.__exit__(None, None, None)
@defer.inlineCallbacks
def test_follow_all(self):
crawler = self.runner.create_crawler(FollowAllSpider)
yield crawler.crawl()
self.assertEqual(len(crawler.spider.urls_visited), 11) # 10 + start_url
@defer.inlineCallbacks
def test_delay(self):
# short to long delays
yield self._test_delay(0.2, False)
yield self._test_delay(1, False)
# randoms
yield self._test_delay(0.2, True)
yield self._test_delay(1, True)
@defer.inlineCallbacks
def _test_delay(self, delay, randomize):
settings = {"DOWNLOAD_DELAY": delay, 'RANDOMIZE_DOWNLOAD_DELAY': randomize}
crawler = CrawlerRunner(settings).create_crawler(FollowAllSpider)
yield crawler.crawl(maxlatency=delay * 2)
t = crawler.spider.times
totaltime = t[-1] - t[0]
avgd = totaltime / (len(t) - 1)
tolerance = 0.6 if randomize else 0.2
self.assertTrue(avgd > delay * (1 - tolerance),
"download delay too small: %s" % avgd)
@defer.inlineCallbacks
def test_timeout_success(self):
crawler = self.runner.create_crawler(DelaySpider)
yield crawler.crawl(n=0.5)
self.assertTrue(crawler.spider.t1 > 0)
self.assertTrue(crawler.spider.t2 > 0)
self.assertTrue(crawler.spider.t2 > crawler.spider.t1)
@defer.inlineCallbacks
def test_timeout_failure(self):
crawler = CrawlerRunner({"DOWNLOAD_TIMEOUT": 0.35}).create_crawler(DelaySpider)
yield crawler.crawl(n=0.5)
self.assertTrue(crawler.spider.t1 > 0)
self.assertTrue(crawler.spider.t2 == 0)
self.assertTrue(crawler.spider.t2_err > 0)
self.assertTrue(crawler.spider.t2_err > crawler.spider.t1)
# server hangs after receiving response headers
yield crawler.crawl(n=0.5, b=1)
self.assertTrue(crawler.spider.t1 > 0)
self.assertTrue(crawler.spider.t2 == 0)
self.assertTrue(crawler.spider.t2_err > 0)
self.assertTrue(crawler.spider.t2_err > crawler.spider.t1)
@defer.inlineCallbacks
def test_retry_503(self):
crawler = self.runner.create_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("http://localhost:8998/status?n=503")
self._assert_retried(l)
@defer.inlineCallbacks
def test_retry_conn_failed(self):
crawler = self.runner.create_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("http://localhost:65432/status?n=503")
self._assert_retried(l)
@defer.inlineCallbacks
def test_retry_dns_error(self):
crawler = self.runner.create_crawler(SimpleSpider)
with LogCapture() as l:
# try to fetch the homepage of a non-existent domain
yield crawler.crawl("http://dns.resolution.invalid./")
self._assert_retried(l)
@defer.inlineCallbacks
def test_start_requests_bug_before_yield(self):
with LogCapture('scrapy', level=logging.ERROR) as l:
crawler = self.runner.create_crawler(BrokenStartRequestsSpider)
yield crawler.crawl(fail_before_yield=1)
self.assertEqual(len(l.records), 1)
record = l.records[0]
self.assertIsNotNone(record.exc_info)
self.assertIs(record.exc_info[0], ZeroDivisionError)
@defer.inlineCallbacks
def test_start_requests_bug_yielding(self):
with LogCapture('scrapy', level=logging.ERROR) as l:
crawler = self.runner.create_crawler(BrokenStartRequestsSpider)
yield crawler.crawl(fail_yielding=1)
self.assertEqual(len(l.records), 1)
record = l.records[0]
self.assertIsNotNone(record.exc_info)
self.assertIs(record.exc_info[0], ZeroDivisionError)
@defer.inlineCallbacks
def test_start_requests_lazyness(self):
settings = {"CONCURRENT_REQUESTS": 1}
crawler = CrawlerRunner(settings).create_crawler(BrokenStartRequestsSpider)
yield crawler.crawl()
#self.assertTrue(False, crawler.spider.seedsseen)
#self.assertTrue(crawler.spider.seedsseen.index(None) < crawler.spider.seedsseen.index(99),
# crawler.spider.seedsseen)
@defer.inlineCallbacks
def test_start_requests_dupes(self):
settings = {"CONCURRENT_REQUESTS": 1}
crawler = CrawlerRunner(settings).create_crawler(DuplicateStartRequestsSpider)
yield crawler.crawl(dont_filter=True, distinct_urls=2, dupe_factor=3)
self.assertEqual(crawler.spider.visited, 6)
yield crawler.crawl(dont_filter=False, distinct_urls=3, dupe_factor=4)
self.assertEqual(crawler.spider.visited, 3)
@defer.inlineCallbacks
def test_unbounded_response(self):
# Completeness of responses without Content-Length or Transfer-Encoding
        # cannot be determined, so we treat them as valid but flag them as "partial"
from six.moves.urllib.parse import urlencode
query = urlencode({'raw': '''\
HTTP/1.1 200 OK
Server: Apache-Coyote/1.1
X-Powered-By: Servlet 2.4; JBoss-4.2.3.GA (build: SVNTag=JBoss_4_2_3_GA date=200807181417)/JBossWeb-2.0
Set-Cookie: JSESSIONID=08515F572832D0E659FD2B0D8031D75F; Path=/
Pragma: no-cache
Expires: Thu, 01 Jan 1970 00:00:00 GMT
Cache-Control: no-cache
Cache-Control: no-store
Content-Type: text/html;charset=UTF-8
Content-Language: en
Date: Tue, 27 Aug 2013 13:05:05 GMT
Connection: close
foo body
with multiples lines
'''})
crawler = self.runner.create_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("http://localhost:8998/raw?{0}".format(query))
self.assertEqual(str(l).count("Got response 200"), 1)
@defer.inlineCallbacks
def test_retry_conn_lost(self):
# connection lost after receiving data
crawler = self.runner.create_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("http://localhost:8998/drop?abort=0")
self._assert_retried(l)
@defer.inlineCallbacks
def test_retry_conn_aborted(self):
# connection lost before receiving data
crawler = self.runner.create_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("http://localhost:8998/drop?abort=1")
self._assert_retried(l)
def _assert_retried(self, log):
self.assertEqual(str(log).count("Retrying"), 2)
self.assertEqual(str(log).count("Gave up retrying"), 1)
@defer.inlineCallbacks
def test_referer_header(self):
"""Referer header is set by RefererMiddleware unless it is already set"""
req0 = Request('http://localhost:8998/echo?headers=1&body=0', dont_filter=1)
req1 = req0.replace()
req2 = req0.replace(headers={'Referer': None})
req3 = req0.replace(headers={'Referer': 'http://example.com'})
req0.meta['next'] = req1
req1.meta['next'] = req2
req2.meta['next'] = req3
crawler = self.runner.create_crawler(SingleRequestSpider)
yield crawler.crawl(seed=req0)
# basic asserts in case of weird communication errors
self.assertIn('responses', crawler.spider.meta)
self.assertNotIn('failures', crawler.spider.meta)
        # start requests don't set the Referer header
        echo0 = json.loads(to_unicode(crawler.spider.meta['responses'][0].body))
self.assertNotIn('Referer', echo0['headers'])
# following request sets Referer to start request url
echo1 = json.loads(to_unicode(crawler.spider.meta['responses'][1].body))
self.assertEqual(echo1['headers'].get('Referer'), [req0.url])
# next request avoids Referer header
echo2 = json.loads(to_unicode(crawler.spider.meta['responses'][2].body))
self.assertNotIn('Referer', echo2['headers'])
# last request explicitly sets a Referer header
echo3 = json.loads(to_unicode(crawler.spider.meta['responses'][3].body))
self.assertEqual(echo3['headers'].get('Referer'), ['http://example.com'])
@defer.inlineCallbacks
def test_engine_status(self):
from scrapy.utils.engine import get_engine_status
est = []
def cb(response):
est.append(get_engine_status(crawler.engine))
crawler = self.runner.create_crawler(SingleRequestSpider)
yield crawler.crawl(seed='http://localhost:8998/', callback_func=cb)
self.assertEqual(len(est), 1, est)
s = dict(est[0])
self.assertEqual(s['engine.spider.name'], crawler.spider.name)
self.assertEqual(s['len(engine.scraper.slot.active)'], 1)
@defer.inlineCallbacks
def test_graceful_crawl_error_handling(self):
"""
Test whether errors happening anywhere in Crawler.crawl() are properly
reported (and not somehow swallowed) after a graceful engine shutdown.
The errors should not come from within Scrapy's core but from within
spiders/middlewares/etc., e.g. raised in Spider.start_requests(),
SpiderMiddleware.process_start_requests(), etc.
"""
class TestError(Exception):
pass
class FaultySpider(SimpleSpider):
def start_requests(self):
raise TestError
crawler = self.runner.create_crawler(FaultySpider)
yield self.assertFailure(crawler.crawl(), TestError)
self.assertFalse(crawler.crawling)
@defer.inlineCallbacks
def test_open_spider_error_on_faulty_pipeline(self):
settings = {
"ITEM_PIPELINES": {
"tests.pipelines.ZeroDivisionErrorPipeline": 300,
}
}
crawler = CrawlerRunner(settings).create_crawler(SimpleSpider)
yield self.assertFailure(
self.runner.crawl(crawler, "http://localhost:8998/status?n=200"),
ZeroDivisionError)
self.assertFalse(crawler.crawling)
@defer.inlineCallbacks
def test_crawlerrunner_accepts_crawler(self):
crawler = self.runner.create_crawler(SimpleSpider)
with LogCapture() as log:
yield self.runner.crawl(crawler, "http://localhost:8998/status?n=200")
self.assertIn("Got response 200", str(log))
@defer.inlineCallbacks
def test_crawl_multiple(self):
self.runner.crawl(SimpleSpider, "http://localhost:8998/status?n=200")
self.runner.crawl(SimpleSpider, "http://localhost:8998/status?n=503")
with LogCapture() as log:
yield self.runner.join()
self._assert_retried(log)
self.assertIn("Got response 200", str(log))
|
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting SHA values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
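# Illustrative sketch (assumed file contents): bitcoin.conf has no section
# headers, so FakeSecHead injects a fake "[all]" section before handing the
# stream to SafeConfigParser, and inline '#' comments are stripped. A file like
#
#   rpcuser=alice
#   rpcpassword=secret  # comment dropped by FakeSecHead.readline
#   testnet=1
#
# is returned as {'rpcuser': 'alice', 'rpcpassword': 'secret', 'testnet': '1'}.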
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19332 if testnet else 9332
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
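# Worked example (illustrative, values assumed): with
#   inputs = [{"txid": "a", "vout": 0, "amount": Decimal("0.4")},
#             {"txid": "b", "vout": 1, "amount": Decimal("0.8")}]
# select_coins(Decimal("1.0"), inputs) greedily takes both inputs and returns
#   ([{"txid": "a", "vout": 0}, {"txid": "b", "vout": 1}], Decimal("0.2"))
# i.e. the selected outpoints plus the change left over (have - needed).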
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f SHA available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        fee = total_in - total_out  # fee actually paid by this transaction
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
|
from hearthbreaker.cards.base import MinionCard
from hearthbreaker.cards.heroes import Jaraxxus
from hearthbreaker.cards.weapons.warlock import BloodFury
from hearthbreaker.constants import CHARACTER_CLASS, CARD_RARITY, MINION_TYPE
from hearthbreaker.game_objects import Minion
from hearthbreaker.tags.action import Summon, Kill, Damage, Discard, DestroyManaCrystal, Give, Equip, \
Remove, Heal, ReplaceHeroWithMinion
from hearthbreaker.tags.base import Effect, Aura, Deathrattle, Battlecry, Buff, ActionTag
from hearthbreaker.tags.card_source import HandSource
from hearthbreaker.tags.condition import IsType, MinionCountIs, Not, OwnersTurn, IsHero, And, Adjacent, IsMinion
from hearthbreaker.tags.event import TurnEnded, CharacterDamaged, DidDamage, Damaged
from hearthbreaker.tags.selector import MinionSelector, PlayerSelector, \
SelfSelector, BothPlayer, HeroSelector, CharacterSelector, RandomPicker, Attribute, EventValue, CardSelector, \
FriendlyPlayer
from hearthbreaker.tags.status import ChangeHealth, ManaChange, ChangeAttack, Immune
class FlameImp(MinionCard):
def __init__(self):
super().__init__("Flame Imp", 1, CHARACTER_CLASS.WARLOCK, CARD_RARITY.COMMON, minion_type=MINION_TYPE.DEMON,
battlecry=Battlecry(Damage(3), HeroSelector()))
def create_minion(self, player):
return Minion(3, 2)
class PitLord(MinionCard):
def __init__(self):
super().__init__("Pit Lord", 4, CHARACTER_CLASS.WARLOCK, CARD_RARITY.EPIC, minion_type=MINION_TYPE.DEMON,
battlecry=Battlecry(Damage(5), HeroSelector()))
def create_minion(self, player):
return Minion(5, 6)
class Voidwalker(MinionCard):
def __init__(self):
super().__init__("Voidwalker", 1, CHARACTER_CLASS.WARLOCK, CARD_RARITY.FREE, minion_type=MINION_TYPE.DEMON)
def create_minion(self, player):
return Minion(1, 3, taunt=True)
class DreadInfernal(MinionCard):
def __init__(self):
super().__init__("Dread Infernal", 6, CHARACTER_CLASS.WARLOCK, CARD_RARITY.COMMON,
minion_type=MINION_TYPE.DEMON,
battlecry=Battlecry(Damage(1), CharacterSelector(players=BothPlayer())))
def create_minion(self, player):
return Minion(6, 6)
class Felguard(MinionCard):
def __init__(self):
super().__init__("Felguard", 3, CHARACTER_CLASS.WARLOCK, CARD_RARITY.RARE, minion_type=MINION_TYPE.DEMON,
battlecry=Battlecry(DestroyManaCrystal(), PlayerSelector()))
def create_minion(self, player):
return Minion(3, 5, taunt=True)
class Doomguard(MinionCard):
def __init__(self):
super().__init__("Doomguard", 5, CHARACTER_CLASS.WARLOCK, CARD_RARITY.RARE, minion_type=MINION_TYPE.DEMON,
battlecry=Battlecry(Discard(amount=2), PlayerSelector()))
def create_minion(self, player):
return Minion(5, 7, charge=True)
class Succubus(MinionCard):
def __init__(self):
super().__init__("Succubus", 2, CHARACTER_CLASS.WARLOCK, CARD_RARITY.FREE, minion_type=MINION_TYPE.DEMON,
battlecry=Battlecry(Discard(), PlayerSelector()))
def create_minion(self, player):
return Minion(4, 3)
class SummoningPortal(MinionCard):
def __init__(self):
super().__init__("Summoning Portal", 4, CHARACTER_CLASS.WARLOCK, CARD_RARITY.COMMON)
def create_minion(self, player):
return Minion(0, 4, auras=[Aura(ManaChange(-2, 1, minimum=1), CardSelector(condition=IsMinion()))])
class BloodImp(MinionCard):
def __init__(self):
super().__init__("Blood Imp", 1, CHARACTER_CLASS.WARLOCK, CARD_RARITY.COMMON, minion_type=MINION_TYPE.DEMON)
def create_minion(self, player):
return Minion(0, 1, stealth=True,
effects=[Effect(TurnEnded(), ActionTag(Give(ChangeHealth(1)),
MinionSelector(picker=RandomPicker())))])
class LordJaraxxus(MinionCard):
def __init__(self):
super().__init__("Lord Jaraxxus", 9, CHARACTER_CLASS.WARLOCK, CARD_RARITY.LEGENDARY,
minion_type=MINION_TYPE.DEMON,
battlecry=(Battlecry(ReplaceHeroWithMinion(Jaraxxus()), HeroSelector()),
Battlecry(Remove(), SelfSelector()),
Battlecry(Equip(BloodFury()), PlayerSelector())))
def create_minion(self, player):
return Minion(3, 15)
class Infernal(MinionCard):
def __init__(self):
super().__init__("Infernal", 6, CHARACTER_CLASS.WARLOCK, CARD_RARITY.COMMON, False,
minion_type=MINION_TYPE.DEMON)
def create_minion(self, player):
return Minion(6, 6)
class VoidTerror(MinionCard):
def __init__(self):
super().__init__("Void Terror", 3, CHARACTER_CLASS.WARLOCK, CARD_RARITY.RARE, minion_type=MINION_TYPE.DEMON,
battlecry=(Battlecry(
Give([Buff(ChangeHealth(Attribute("health", MinionSelector(Adjacent())))),
Buff(ChangeAttack(Attribute("attack", MinionSelector(Adjacent()))))]),
SelfSelector()), Battlecry(Kill(), MinionSelector(Adjacent()))))
def create_minion(self, player):
return Minion(3, 3)
class Voidcaller(MinionCard):
def __init__(self):
super().__init__("Voidcaller", 4, CHARACTER_CLASS.WARLOCK, CARD_RARITY.COMMON, minion_type=MINION_TYPE.DEMON)
def create_minion(self, player):
return Minion(3, 4, deathrattle=Deathrattle(Summon(HandSource(FriendlyPlayer(), [IsType(MINION_TYPE.DEMON)])),
PlayerSelector()))
class AnimaGolem(MinionCard):
def __init__(self):
super().__init__("Anima Golem", 6, CHARACTER_CLASS.WARLOCK, CARD_RARITY.EPIC, minion_type=MINION_TYPE.MECH)
def create_minion(self, player):
return Minion(9, 9, effects=[Effect(TurnEnded(MinionCountIs(1), BothPlayer()),
ActionTag(Kill(), SelfSelector()))])
class Imp(MinionCard):
def __init__(self):
super().__init__("Imp", 1, CHARACTER_CLASS.WARLOCK, CARD_RARITY.COMMON, False, minion_type=MINION_TYPE.DEMON,
ref_name="Imp (warlock)")
def create_minion(self, player):
return Minion(1, 1)
class WorthlessImp(MinionCard):
def __init__(self):
super().__init__("Worthless Imp", 1, CHARACTER_CLASS.WARLOCK, CARD_RARITY.COMMON, False, MINION_TYPE.DEMON)
def create_minion(self, p):
return Minion(1, 1)
class FelCannon(MinionCard):
def __init__(self):
super().__init__("Fel Cannon", 4, CHARACTER_CLASS.WARLOCK, CARD_RARITY.RARE, minion_type=MINION_TYPE.MECH)
def create_minion(self, player):
return Minion(3, 5, effects=[Effect(TurnEnded(), ActionTag(Damage(2),
MinionSelector(Not(IsType(MINION_TYPE.MECH, True)),
BothPlayer(), RandomPicker())))])
class MalGanis(MinionCard):
def __init__(self):
super().__init__("Mal'Ganis", 9, CHARACTER_CLASS.WARLOCK, CARD_RARITY.LEGENDARY, minion_type=MINION_TYPE.DEMON)
def create_minion(self, player):
return Minion(9, 7, auras=[Aura(ChangeHealth(2), MinionSelector(IsType(MINION_TYPE.DEMON))),
Aura(ChangeAttack(2), MinionSelector(IsType(MINION_TYPE.DEMON))),
Aura(Immune(), HeroSelector())])
class FloatingWatcher(MinionCard):
def __init__(self):
super().__init__("Floating Watcher", 5, CHARACTER_CLASS.WARLOCK, CARD_RARITY.COMMON,
minion_type=MINION_TYPE.DEMON)
def create_minion(self, player):
return Minion(4, 4, effects=[Effect(CharacterDamaged(And(IsHero(), OwnersTurn())),
ActionTag(Give([Buff(ChangeAttack(2)), Buff(ChangeHealth(2))]),
SelfSelector()))])
class MistressOfPain(MinionCard):
def __init__(self):
super().__init__("Mistress of Pain", 2, CHARACTER_CLASS.WARLOCK, CARD_RARITY.RARE,
minion_type=MINION_TYPE.DEMON)
def create_minion(self, player):
return Minion(1, 4, effects=[Effect(DidDamage(), ActionTag(Heal(EventValue()), HeroSelector()))])
class ImpGangBoss(MinionCard):
def __init__(self):
super().__init__("Imp Gang Boss", 3, CHARACTER_CLASS.WARLOCK, CARD_RARITY.COMMON, minion_type=MINION_TYPE.DEMON)
def create_minion(self, player):
return Minion(2, 4, effects=[Effect(Damaged(), ActionTag(Summon(Imp()), PlayerSelector()))])
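# Illustrative reading (not part of the original module): each card couples its
# cost/class/rarity header with tag objects, e.g. FlameImp above is a 1-mana 3/2
# demon whose Battlecry(Damage(3), HeroSelector()) damages its own hero, while
# ImpGangBoss summons an Imp through Effect(Damaged(), ActionTag(Summon(Imp()),
# PlayerSelector())) whenever it takes damage.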
|
|
import sys
import petsc4py
petsc4py.init(sys.argv)
# from scipy.io import savemat, loadmat
# from src.ref_solution import *
# import warnings
# from memory_profiler import profile
# from time import time
import pickle
from src.myio import *
# from src.objComposite import *
from src.StokesFlowMethod import *
from src.geo import *
from src import stokes_flow as sf
from codeStore.helix_common import AtBtCt, AtBtCt_full, AtBtCt_multiObj
def get_problem_kwargs(**main_kwargs):
problem_kwargs = get_solver_kwargs()
OptDB = PETSc.Options()
fileHandle = OptDB.getString('f', 'dumbAtBtCt')
OptDB.setValue('f', fileHandle)
problem_kwargs['fileHandle'] = fileHandle
dumb_d = OptDB.getReal('dumb_d', 5)
problem_kwargs['dumb_d'] = dumb_d
dumb_theta = OptDB.getReal('dumb_theta', np.pi / 3)
problem_kwargs['dumb_theta'] = dumb_theta
kwargs_list = (get_sphere_kwargs(), get_forcefree_kwargs(), main_kwargs,)
for t_kwargs in kwargs_list:
for key in t_kwargs:
problem_kwargs[key] = t_kwargs[key]
dumb_rs2_fct = OptDB.getReal('dumb_rs2_fct', 1)
problem_kwargs['dumb_rs2_fct'] = dumb_rs2_fct
dumb_ds2_fct = OptDB.getReal('dumb_ds2_fct', 1)
problem_kwargs['dumb_ds2_fct'] = dumb_ds2_fct
return problem_kwargs
def print_case_info(**problem_kwargs):
fileHandle = problem_kwargs['fileHandle']
print_solver_info(**problem_kwargs)
print_forcefree_info(**problem_kwargs)
print_sphere_info(fileHandle, **problem_kwargs)
dumb_d = problem_kwargs['dumb_d']
dumb_theta = problem_kwargs['dumb_theta']
dumb_rs2_fct = problem_kwargs['dumb_rs2_fct']
dumb_ds2_fct = problem_kwargs['dumb_ds2_fct']
PETSc.Sys.Print(' dumb_d: %f, dumb_theta: %f' % (dumb_d, dumb_theta))
PETSc.Sys.Print(' dumb_rs2_fct: %f, dumb_ds2_fct: %f' % (dumb_rs2_fct, dumb_ds2_fct))
return True
def main_resistanceMatrix(**main_kwargs):
# OptDB = PETSc.Options()
main_kwargs['zoom_factor'] = 1
problem_kwargs = get_problem_kwargs(**main_kwargs)
matrix_method = problem_kwargs['matrix_method']
fileHandle = problem_kwargs['fileHandle']
# pickProblem = problem_kwargs['pickProblem']
print_case_info(**problem_kwargs)
dumb_d = problem_kwargs['dumb_d']
dumb_theta = problem_kwargs['dumb_theta']
ds = problem_kwargs['ds']
rs = problem_kwargs['rs']
dumb_rs2_fct = problem_kwargs['dumb_rs2_fct']
dumb_ds2_fct = problem_kwargs['dumb_ds2_fct']
sphere_geo0 = sphere_geo()
sphere_geo0.create_delta(ds, rs)
# sphere_geo1 = sphere_geo0.copy()
sphere_geo1 = sphere_geo()
sphere_geo1.create_delta(ds * dumb_ds2_fct, rs * dumb_rs2_fct)
sphere_geo0.move(np.array((0, 0, dumb_d / 2)))
sphere_geo1.move(np.array((0, 0, -dumb_d / 2)))
dumb_geo = base_geo()
dumb_geo.combine([sphere_geo0, sphere_geo1], origin=np.zeros(3), geo_norm=np.array((0, 0, 1)))
dumb_geo.node_rotation(norm=np.array((1, 0, 0)), theta=dumb_theta)
dumb_obj = sf.StokesFlowObj()
dumb_obj.set_data(dumb_geo, dumb_geo, 'dumb')
problem = sf.problem_dic[matrix_method](**problem_kwargs)
problem.add_obj(dumb_obj)
problem.print_info()
problem.create_matrix()
# PETSc.Sys.Print(problem.get_obj_list()[0].get_u_nodes()[:10])
# PETSc.Sys.Print(problem.get_M()[:5, :5])
# PETSc.Sys.Print(helicoid_center)
At, Bt1, Bt2, Ct = AtBtCt_full(problem, save_vtk=False, pick_M=False, print_each=False,
center=np.zeros(3), save_name=fileHandle)
PETSc.Sys.Print(At)
PETSc.Sys.Print(At[2, 2] - At[0, 0])
return True
def main_multiObj(**main_kwargs):
# OptDB = PETSc.Options()
main_kwargs['zoom_factor'] = 1
problem_kwargs = get_problem_kwargs(**main_kwargs)
matrix_method = problem_kwargs['matrix_method']
fileHandle = problem_kwargs['fileHandle']
# pickProblem = problem_kwargs['pickProblem']
print_case_info(**problem_kwargs)
dumb_d = problem_kwargs['dumb_d']
dumb_theta = problem_kwargs['dumb_theta']
ds = problem_kwargs['ds']
rs = problem_kwargs['rs']
dumb_rs2_fct = problem_kwargs['dumb_rs2_fct']
dumb_ds2_fct = problem_kwargs['dumb_ds2_fct']
sphere_geo0 = sphere_geo()
sphere_geo0.create_delta(ds, rs)
# sphere_geo1 = sphere_geo0.copy()
sphere_geo1 = sphere_geo()
sphere_geo1.create_delta(ds * dumb_ds2_fct, rs * dumb_rs2_fct)
sphere_geo0.move(np.array((0, 0, dumb_d / 2)))
sphere_geo1.move(np.array((0, 0, -dumb_d / 2)))
sphere_geo0.node_rotation(norm=np.array((1, 0, 0)), theta=dumb_theta,
rotation_origin=np.zeros(3))
sphere_geo1.node_rotation(norm=np.array((1, 0, 0)), theta=dumb_theta,
rotation_origin=np.zeros(3))
sphere_obj0 = sf.StokesFlowObj()
sphere_obj0.set_data(sphere_geo0, sphere_geo0, 'sphere_obj0')
sphere_obj1 = sf.StokesFlowObj()
    sphere_obj1.set_data(sphere_geo1, sphere_geo1, 'sphere_obj1')
problem = sf.problem_dic[matrix_method](**problem_kwargs)
problem.add_obj(sphere_obj0)
problem.add_obj(sphere_obj1)
problem.print_info()
problem.create_matrix()
# PETSc.Sys.Print(problem.get_obj_list()[0].get_u_nodes()[:10])
# PETSc.Sys.Print(problem.get_M()[:5, :5])
# PETSc.Sys.Print(helicoid_center)
AtBtCt_multiObj(problem, save_vtk=False, pick_M=False, save_name=fileHandle,
uNormFct=1, wNormFct=1, uwNormFct=1, )
return True
def main_multi_axis(**main_kwargs):
    # given velocity; the two sphere radii may differ,
    # and the dumbbell moves at unit speed along its center line.
# OptDB = PETSc.Options()
main_kwargs['zoom_factor'] = 1
problem_kwargs = get_problem_kwargs(**main_kwargs)
matrix_method = problem_kwargs['matrix_method']
fileHandle = problem_kwargs['fileHandle']
# pickProblem = problem_kwargs['pickProblem']
print_case_info(**problem_kwargs)
dumb_u = np.array((0, 0, 1, 0, 0, 0))
dumb_center = np.zeros(3)
dumb_d = problem_kwargs['dumb_d']
dumb_theta = problem_kwargs['dumb_theta']
ds = problem_kwargs['ds']
rs = problem_kwargs['rs']
dumb_rs2_fct = problem_kwargs['dumb_rs2_fct']
dumb_ds2_fct = problem_kwargs['dumb_ds2_fct']
sphere_geo0 = sphere_geo()
sphere_geo0.create_delta(ds, rs)
# sphere_geo1 = sphere_geo0.copy()
sphere_geo1 = sphere_geo()
sphere_geo1.create_delta(ds * dumb_ds2_fct, rs * dumb_rs2_fct)
sphere_geo0.move(np.array((0, 0, dumb_d / 2)))
sphere_geo1.move(np.array((0, 0, -dumb_d / 2)))
sphere_geo0.node_rotation(norm=np.array((1, 0, 0)), theta=dumb_theta,
rotation_origin=dumb_center)
sphere_geo1.node_rotation(norm=np.array((1, 0, 0)), theta=dumb_theta,
rotation_origin=dumb_center)
sphere_obj0 = sf.StokesFlowObj()
sphere_obj0.set_data(sphere_geo0, sphere_geo0, 'sphere_obj0')
sphere_obj1 = sf.StokesFlowObj()
sphere_obj1.set_data(sphere_geo1, sphere_geo1, 'sphere_obj1')
sphere_obj0.set_rigid_velocity(dumb_u, center=dumb_center)
sphere_obj1.set_rigid_velocity(dumb_u, center=dumb_center)
problem = sf.problem_dic[matrix_method](**problem_kwargs)
problem.add_obj(sphere_obj0)
problem.add_obj(sphere_obj1)
problem.print_info()
problem.create_matrix()
problem.solve()
for tobj in problem.get_obj_list():
center = tobj.get_u_geo().get_center()
tFT = tobj.get_total_force(center=center)
tF = tFT[:3]
tT = tFT[3:]
PETSc.Sys.Print('--->>%s, tF: %s' % (tobj.get_name(), str(tF)))
PETSc.Sys.Print('--->>%s, tT: %s' % (tobj.get_name(), str(tT)))
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
save_name = check_file_extension(fileHandle, '.pickle')
tpickle = [problem_kwargs,
[tobj.get_total_force(center=center) for tobj in problem.get_obj_list()]]
if rank == 0:
with open(save_name, 'wb') as output:
pickle.dump(tpickle, output, protocol=4)
print('save force on each sphere to %s' % save_name)
return True
if __name__ == '__main__':
    # code results are wrong.
OptDB = PETSc.Options()
# pythonmpi helicoid.py -sm lg_rs -legendre_m 3 -legendre_k 2 -epsilon 3 -ffweight 2 -main_fun_noIter 1 -vortexStrength 1 -helicoid_r1 1 -helicoid_r2 0.3 -helicoid_ds 0.03
# if OptDB.getBool('main_fun_noIter', False):
# OptDB.setValue('main_fun', False)
# main_fun_noIter()
if OptDB.getBool('main_resistanceMatrix', False):
OptDB.setValue('main_fun', False)
main_resistanceMatrix()
if OptDB.getBool('main_multiObj', False):
OptDB.setValue('main_fun', False)
main_multiObj()
if OptDB.getBool('main_multi_axis', False):
OptDB.setValue('main_fun', False)
main_multi_axis()
# if OptDB.getBool('main_fun', True):
# main_fun()
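    # Illustrative invocation (the script name and MPI launcher are assumptions;
    # the -main_* and -dumb_* flags are the ones read above):
    #   mpirun -n 4 python this_script.py -main_resistanceMatrix 1 \
    #       -f dumbAtBtCt -dumb_d 5 -dumb_theta 1.047 -dumb_rs2_fct 1 -dumb_ds2_fct 1
    # Each -main_* switch also sets 'main_fun' to False before running its routine.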
|
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
import models
import presubmit_util
# Model definitions for rappor.xml content
_SUMMARY_TYPE = models.TextNodeType('summary')
_NOISE_VALUES_TYPE = models.ObjectNodeType('noise-values',
float_attributes=[
'fake-prob',
'fake-one-prob',
'one-coin-prob',
'zero-coin-prob',
])
_NOISE_LEVEL_TYPE = models.ObjectNodeType('noise-level',
extra_newlines=(1, 1, 1),
string_attributes=['name'],
children=[
models.ChildType('summary', _SUMMARY_TYPE, False),
models.ChildType('values', _NOISE_VALUES_TYPE, False),
])
_NOISE_LEVELS_TYPE = models.ObjectNodeType('noise-levels',
extra_newlines=(1, 1, 1),
dont_indent=True,
children=[
models.ChildType('types', _NOISE_LEVEL_TYPE, True),
])
_PARAMETERS_TYPE = models.ObjectNodeType('parameters',
int_attributes=[
'num-cohorts',
'bytes',
'hash-functions',
],
# Remove probabilities once all parsers process noise levels.
float_attributes=[
'fake-prob',
'fake-one-prob',
'one-coin-prob',
'zero-coin-prob',
],
string_attributes=[
'reporting-level',
'noise-level',
])
_RAPPOR_PARAMETERS_TYPE = models.ObjectNodeType('rappor-parameters',
extra_newlines=(1, 1, 1),
string_attributes=['name'],
children=[
models.ChildType('summary', _SUMMARY_TYPE, False),
models.ChildType('parameters', _PARAMETERS_TYPE, False),
])
_RAPPOR_PARAMETERS_TYPES_TYPE = models.ObjectNodeType('rappor-parameter-types',
extra_newlines=(1, 1, 1),
dont_indent=True,
children=[
models.ChildType('types', _RAPPOR_PARAMETERS_TYPE, True),
])
_OWNER_TYPE = models.TextNodeType('owner', single_line=True)
_STRING_FIELD_TYPE = models.ObjectNodeType('string-field',
extra_newlines=(1, 1, 0),
string_attributes=['name'],
children=[
models.ChildType('summary', _SUMMARY_TYPE, False),
])
_FLAG_TYPE = models.TextNodeType('flag', single_line=True)
_FLAGS_FIELD_TYPE = models.ObjectNodeType('flags-field',
extra_newlines=(1, 1, 0),
string_attributes=['name', 'noise-level'],
children=[
models.ChildType('flags', _FLAG_TYPE, True),
models.ChildType('summary', _SUMMARY_TYPE, False),
])
_UINT64_FIELD_TYPE = models.ObjectNodeType('uint64-field',
extra_newlines=(1, 1, 0),
string_attributes=['name', 'noise-level'],
children=[
models.ChildType('summary', _SUMMARY_TYPE, False),
])
_RAPPOR_METRIC_TYPE = models.ObjectNodeType('rappor-metric',
extra_newlines=(1, 1, 1),
string_attributes=['name', 'type'],
children=[
models.ChildType('owners', _OWNER_TYPE, True),
models.ChildType('summary', _SUMMARY_TYPE, False),
models.ChildType('strings', _STRING_FIELD_TYPE, True),
models.ChildType('flags', _FLAGS_FIELD_TYPE, True),
models.ChildType('uint64', _UINT64_FIELD_TYPE, True),
])
_RAPPOR_METRICS_TYPE = models.ObjectNodeType('rappor-metrics',
extra_newlines=(1, 1, 1),
dont_indent=True,
children=[
models.ChildType('metrics', _RAPPOR_METRIC_TYPE, True),
])
_RAPPOR_CONFIGURATION_TYPE = models.ObjectNodeType('rappor-configuration',
extra_newlines=(1, 1, 1),
dont_indent=True,
children=[
models.ChildType('noiseLevels', _NOISE_LEVELS_TYPE, False),
models.ChildType('parameterTypes', _RAPPOR_PARAMETERS_TYPES_TYPE, False),
models.ChildType('metrics', _RAPPOR_METRICS_TYPE, False),
])
RAPPOR_XML_TYPE = models.DocumentType(_RAPPOR_CONFIGURATION_TYPE)
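# Illustrative sketch (assumed content) of a minimal rappor.xml accepted by the
# node types above; attribute values are placeholders, not real defaults:
#
#   <rappor-configuration>
#     <noise-levels>
#       <noise-level name="NORMAL">
#         <summary>...</summary>
#         <noise-values fake-prob="0.5" fake-one-prob="0.5"
#                       one-coin-prob="0.75" zero-coin-prob="0.25"/>
#       </noise-level>
#     </noise-levels>
#     <rappor-parameter-types>
#       <rappor-parameters name="SOME_RAPPOR_TYPE">
#         <summary>...</summary>
#         <parameters num-cohorts="128" bytes="16" hash-functions="2"
#                     reporting-level="COARSE" noise-level="NORMAL"/>
#       </rappor-parameters>
#     </rappor-parameter-types>
#     <rappor-metrics>
#       <rappor-metric name="Some.Metric" type="SOME_RAPPOR_TYPE">
#         <owner>someone@chromium.org</owner>
#         <summary>...</summary>
#       </rappor-metric>
#     </rappor-metrics>
#   </rappor-configuration>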
def GetTypeNames(config):
return set(p['name'] for p in config['parameterTypes']['types'])
def GetMissingOwnerErrors(metrics):
"""Check that all of the metrics have owners.
Args:
metrics: A list of rappor metric description objects.
Returns:
A list of errors about metrics missing owners.
"""
missing_owners = [m for m in metrics if not m['owners']]
return ['Rappor metric "%s" is missing an owner.' % metric['name']
for metric in missing_owners]
def GetInvalidTypeErrors(type_names, metrics):
"""Check that all of the metrics have valid types.
Args:
type_names: The set of valid type names.
metrics: A list of rappor metric description objects.
Returns:
A list of errors about metrics with invalid_types.
"""
invalid_types = [m for m in metrics if m['type'] not in type_names]
return ['Rappor metric "%s" has invalid type "%s"' % (
metric['name'], metric['type'])
for metric in invalid_types]
def GetErrors(config):
"""Check that rappor.xml passes some basic validation checks.
Args:
config: The parsed rappor.xml contents.
Returns:
A list of validation errors.
"""
metrics = config['metrics']['metrics']
type_names = GetTypeNames(config)
return (GetMissingOwnerErrors(metrics) or
GetInvalidTypeErrors(type_names, metrics))
def Cleanup(config):
"""Preform cleanup on description contents, such as sorting metrics.
Args:
config: The parsed rappor.xml contents.
"""
types = config['parameterTypes']['types']
types.sort(key=lambda x: x['name'])
metrics = config['metrics']['metrics']
metrics.sort(key=lambda x: x['name'])
def UpdateXML(original_xml):
"""Parse the original xml and return a pretty printed version.
Args:
original_xml: A string containing the original xml file contents.
Returns:
A Pretty printed xml string.
"""
comments, config = RAPPOR_XML_TYPE.Parse(original_xml)
errors = GetErrors(config)
if errors:
for error in errors:
logging.error("%s", error)
return None
Cleanup(config)
return RAPPOR_XML_TYPE.PrettyPrint(comments, config)
def main(argv):
presubmit_util.DoPresubmitMain(argv, 'rappor.xml', 'rappor.old.xml',
'pretty_print.py', UpdateXML)
if '__main__' == __name__:
sys.exit(main(sys.argv))
|
|
# -*- coding: utf-8 -*-
"""Random agreement Fuzzy Pattern Classifier method.
The module structure is the following:
- The "RandomAgreementFuzzyPatternClassifier" implements the model learning using the [1] algorithm.
References:
[1] Davidsen, 2014.
"""
import logging
import numpy as np
import scipy.stats as stats
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_array
import fylearn.fuzzylogic as fl
def agreement_t_test(a, b):
""" Check agreement based on means of two samples, using the t-statistic. """
means1 = np.nanmean(a, 0)
t1, p1 = stats.ttest_1samp(b, means1)
# t2, p2 = stats.ttest_1samp(a, means2)
    # select agreeing features (p < 0.05)
return p1 < 0.05
def fuzzify_partitions(p):
def fuzzify_p(A):
R = np.zeros((A.shape[0], A.shape[1] * p))
cmin, cmax = np.nanmin(A, 0), np.nanmax(A, 0)
psize = (cmax - cmin) / (p - 1)
mus = []
# iterate features
for i in range(A.shape[1]):
# iterate partitions
mu_i = []
offset = cmin[i]
for j in range(p):
f = fl.TriangularSet(offset - psize[i], offset, offset + psize[i])
R[:, (i * p) + j] = f(A[:, i])
mu_i.append(f)
offset += psize[i]
mus.append(mu_i)
return p, R, mus
return fuzzify_p
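# Worked example (illustrative): fuzzify_partitions(3) applied to one feature
# spanning [0, 10] uses psize = (10 - 0) / (3 - 1) = 5 and builds the triangular
# sets (-5, 0, 5), (0, 5, 10) and (5, 10, 15), i.e. partitions centred on the
# minimum, midpoint and maximum of the feature; R then holds every sample's
# membership in each of the 3 partitions.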
def fuzzify_mean(A):
# output for fuzzified values
R = np.zeros((A.shape[0], A.shape[1] * 3))
cmin, cmax, cmean = np.nanmin(A, 0), np.nanmax(A, 0), np.nanmean(A, 0)
left = np.array([cmin - (cmax - cmin), cmin, cmax]).T
middle = np.array([cmin, cmean, cmax]).T
right = np.array([cmin, cmax, cmax + (cmax - cmin)]).T
mus = []
for i in range(A.shape[1]):
f_l = fl.TriangularSet(*left[i])
f_m = fl.TriangularSet(*middle[i])
f_r = fl.TriangularSet(*right[i])
R[:,(i*3)] = f_l(A[:,i])
R[:,(i*3)+1] = f_m(A[:,i])
R[:,(i*3)+2] = f_r(A[:,i])
mus.extend([(i, f_l), (i, f_m), (i, f_r)])
return 3, R, mus
def agreement_fuzzy(aggregation, A, B):
"""
Calculate agreement between two samples.
A : First sample
B : Second sample
"""
# avg values of samples (column wise)
S_A, S_B = np.nanmean(A, 0), np.nanmean(B, 0)
d = 1.0 - ((S_A - S_B) ** 2)
a = aggregation(d)
return a, d
def agreement_hamming(p, X, a, b):
d = np.abs(X[a, :] - X[b, :])
f = int(X.shape[1] / p)
E = np.zeros(f)
for i in range(f):
E[i] = np.sum(d[(i * p):(i * p) + p])
return 1.0 - ((1.0 / p) * E)
def triangular_factory(*args):
return fl.TriangularSet(args[0], args[1], args[2])
def pi_factory(*args):
return fl.PiSet(a=args[0], r=args[1], b=args[2], m=2.0)
def build_memberships(X, factory):
mins = np.nanmin(X, 0)
maxs = np.nanmax(X, 0)
means = np.nanmean(X, 0)
return [ (i, factory(means[i] - ((maxs[i] - mins[i]) / 2.0),
means[i], means[i] + ((maxs[i] - mins[i]) / 2.0))) for i in range(X.shape[1]) ]
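# Worked example (illustrative): for one feature with min=0, mean=4 and max=10,
# build_memberships(X, triangular_factory) yields [(0, TriangularSet(-1, 4, 9))],
# a triangle centred on the feature mean with half-width (max - min) / 2.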
def agreement_pruning(X, proto, n_features, rs):
if len(proto) <= n_features: # nothing to prune.
return proto
# prune from random samples
for S in X[rs.choice(len(X), len(proto) - n_features)]:
y = np.array([p(S[idx]) for idx, p in proto ]) # evaluate sample using the prototype
worst = np.argsort(y) # find worst
del proto[worst[0]] # filter worst
# print "proto-after", proto
return proto
def build_for_class(X, max_samples, n_features, rs, factory):
# construct wanted number of prototypes
max_no = max(max_samples, len(X))
sample_idx = rs.permutation(max_no) % len(X)
    # construct memberships for all features based on the sample
    proto = build_memberships(X[sample_idx], factory)
return agreement_pruning(X, proto, n_features, rs)
def build_for_class_multi(X, max_samples, n_features, rs, factory, n_protos):
protos = []
for p in range(n_protos):
# construct wanted number of prototypes
max_no = max(max_samples, len(X))
sample_idx = rs.permutation(max_no) % len(X)
# construct memberships for all features based on the sample
proto = build_memberships(X[sample_idx], factory)
# perform pruning
proto = agreement_pruning(X, proto, n_features, rs)
# add to list of protos for the class
protos.append(proto)
return protos
def _predict(prototypes, aggregation, classes, X, n_features):
Mus = np.zeros((X.shape[0], n_features))
R = np.zeros((X.shape[0], len(classes))) # holds output for each class
attribute_idxs = range(n_features)
# class_idx has class_prototypes membership functions
for class_idx, class_prototypes in prototypes.items():
for i in attribute_idxs:
fidx, cp = class_prototypes[i]
Mus[:, i] = cp(X[:, fidx])
R[:, class_idx] = aggregation(Mus)
return classes.take(np.argmax(R, 1))
def _predict_multi(prototypes, aggregation, classes, X, n_features):
Mus = np.zeros(X.shape) # holds output per prototype
R = np.zeros((X.shape[0], len(classes))) # holds output for each class
feature_nos = range(n_features) # index for agreements
# class_idx has class_prototypes membership functions
for class_idx, class_prototypes in prototypes.items():
C = np.zeros((X.shape[0], len(class_prototypes)))
for j, cp in enumerate(class_prototypes):
for i in feature_nos:
f_idx, mu_f = cp[i]
Mus[:, i] = mu_f(X[:, f_idx])
C[:, j] = aggregation(Mus)
R[:, class_idx] = np.max(C, 1)
return classes.take(np.argmax(R, 1))
logger = logging.getLogger("rafpc")
class RandomAgreementFuzzyPatternClassifier(BaseEstimator, ClassifierMixin):
def get_params(self, deep=False):
return {"n_protos": self.n_protos,
"n_features": self.n_features,
"max_samples": self.max_samples,
"epsilon": self.epsilon,
"aggregation": self.aggregation,
"membership_factory": self.membership_factory,
"random_state": self.random_state}
def set_params(self, **kwargs):
for key, value in kwargs.items():
            setattr(self, key, value)
return self
def __init__(self, n_protos=5, n_features=None,
max_samples=100, epsilon=0.95,
aggregation=fl.mean, membership_factory=triangular_factory,
random_state=None):
"""
Initialize the classifier
Parameters:
-----------
n_protos : the number of prototypes to keep for each class.
n_features : the number of features to include in each prototype.
None means use all features.
max_samples : the number of samples to draw in finding agreement.
        epsilon : the minimum agreement needed before elimination.
aggregation : The aggregation to use for inference.
membership_factory : the factory to create membership functions.
random_state : The random state to use for drawing samples.
None means no specific random state.
"""
self.n_protos = n_protos
self.n_features = n_features
self.max_samples = max_samples
self.epsilon = epsilon
self.aggregation = aggregation
self.membership_factory = membership_factory
self.random_state = random_state
def fit(self, X, y):
# get random
rs = check_random_state(self.random_state)
X = check_array(X)
self.classes_, y = np.unique(y, return_inverse=True)
if np.nan in self.classes_:
raise Exception("NaN not supported in class values")
# fuzzify data
# p, X_fuzzy, mu_s = fuzzify_mean(X)
        # n_features not set: require agreement on all features
if self.n_features is None:
self.n_features = X.shape[1]
if self.n_features > X.shape[1]:
self.n_features = X.shape[1]
# raise Exception("n_features must be <= number features in X")
# build membership functions for each feature for each class
self.protos_ = {}
for class_idx, class_value in enumerate(self.classes_):
X_class = X[y == class_idx]
# create protos from n_protos most agreeing
self.protos_[class_idx] = \
build_for_class_multi(X_class, self.max_samples,
self.n_features, rs, self.membership_factory,
self.n_protos)
return self
def predict(self, X):
"""
Predict outputs given examples.
Parameters:
-----------
X : the examples to predict (array or matrix)
Returns:
--------
y_pred : Predicted values for each row in matrix.
"""
        if getattr(self, "protos_", None) is None:
            raise Exception("Prototypes not initialized. Perform a fit first.")
X = check_array(X)
# predict
return _predict_multi(self.protos_, self.aggregation, self.classes_, X, self.n_features)
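

# Hedged usage sketch (added for illustration; not part of the original
# module). It assumes numpy, the class above and the module's default
# membership/aggregation helpers are importable; the data and parameter
# values below are made up.
if __name__ == "__main__":
    import numpy as np
    rng = np.random.RandomState(0)
    X_demo = rng.rand(60, 4)                      # 60 samples, 4 features
    y_demo = rng.randint(0, 2, 60)                # two class labels
    clf = RandomAgreementFuzzyPatternClassifier(n_protos=3, random_state=0)
    clf.fit(X_demo, y_demo)                       # build fuzzy prototypes per class
    print(clf.predict(X_demo[:5]))                # predicted labels for five samples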
|
|
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, print_function, division,
absolute_import)
import asyncore
import asynchat
import socket
class Request(object):
def __init__(self, command, *args):
self.command = command
self.args = args
@property
def arg(self):
return None if not self.args else self.args[0]
@property
def message_nr(self):
arg = self.arg
return None if arg is None else int(arg)
@property
def has_args(self):
return self.args is not None and len(self.args) > 0
@property
def merged_arg(self):
if self.args is None:
return None
return ' '.join(self.args)
class Response(object):
def __init__(self, status, lines=None):
self.status = status
self.lines = lines
@classmethod
def error(cls, lines=None):
return cls('-ERR', lines)
@classmethod
def ok(cls, lines=None):
return cls('+OK', lines)
@classmethod
def ok_extra(cls, msg, lines=None):
return cls('+OK %s' % msg, lines)
class Pop3Exception(Exception):
pass
def file_to_lines(path):
with open(path, 'rb') as f:
last = None
new_line = True
line = []
while True:
current = f.read(1)
            if current == b'':
                # EOF: flush any remaining, unterminated line
                if line:
                    yield ''.join(line)
                return
line.append(current)
if new_line:
new_line = False
if current == b'.':
# EOT indicator is .\r\n -- stuff extra dot at new line
line.append(b'.')
            if last == b'\r' and current == b'\n':
# Done with this line; pass it without \r\n
yield ''.join(line[:-2])
line = []
last = current
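# Hedged note (added): the extra dot inserted above implements POP3 byte
# stuffing, so a stored line that begins with '.' (e.g. '.hidden') is emitted
# as '..hidden' and cannot be confused with the '.\r\n' end-of-response marker
# that respond() appends below.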
class Session(asynchat.async_chat):
def __init__(self, sock, store, user, password):
asynchat.async_chat.__init__(self, sock)
self.store = store
self.user = user
self.password = password
self.buffer = []
self.set_terminator('\r\n')
def collect_incoming_data(self, data):
self.buffer.append(data)
def clear_buffer(self):
raw_request = ''.join(self.buffer).strip()
self.buffer = []
return raw_request
def found_terminator(self):
request_parts = self.clear_buffer().split()
request = Request(request_parts[0], *request_parts[1:])
handler = getattr(self, 'do_%s' % request.command.lower(), None)
response = None
if handler and callable(handler):
try:
response = handler(request)
except Pop3Exception:
response = Response.error()
if not response:
response = Response.error()
self.respond(response)
def do_noop(self, request):
return Response.ok()
def do_user(self, request):
if not self.user:
# Anything will do when no user was set
return Response.ok()
if self.user == request.merged_arg:
return Response.ok()
raise Pop3Exception()
def do_pass(self, request):
# TODO Begin transaction
if not self.password:
# Anything will do when no password was set
return Response.ok()
if self.password == request.merged_arg:
return Response.ok()
raise Pop3Exception()
def do_quit(self, request):
# TODO Commit transaction
if self.store.delete_marked_messages():
return Response.ok()
raise Pop3Exception()
def do_retr(self, request):
m = self.store.get(request.message_nr)
if not m:
raise Pop3Exception()
return Response.ok(file_to_lines(m.path))
def do_capa(self, request):
capabilities = [n for n in dir(self)
if n.startswith('do_') and callable(getattr(self, n))]
return Response.ok([n[3:].upper() for n in capabilities])
def do_stat(self, request):
msg = '%d %d' % (len(self.store), self.store.total_byte_size)
return Response.ok_extra(msg)
def do_rset(self, request):
m = self.store.get(request.message_nr, include_deleted=True)
if m:
m.deleted = False
return Response.ok()
raise Pop3Exception()
def do_dele(self, request):
m = self.store.get(request.message_nr)
if m:
m.deleted = True
return Response.ok()
raise Pop3Exception()
def do_list(self, request):
if not request.has_args:
return Response.ok(['%d %d' % (m.nr, m.size) for m in self.store])
m = self.store.get(request.message_nr)
if m:
return Response.ok_extra('%d %d' % (m.nr, m.size))
raise Pop3Exception()
def do_uidl(self, request):
if not request.has_args:
return Response.ok(['%d %s' % (m.nr, m.uid) for m in self.store])
m = self.store.get(request.message_nr)
if m:
return Response.ok_extra('%d %s' % (m.nr, m.uid))
raise Pop3Exception()
def respond(self, response):
        self.push(('%s\r\n' % response.status).encode('ascii'))
        if response.lines:
            for line in response.lines:
                self.push(('%s\r\n' % line).encode('ascii'))
            self.push(b'.\r\n')
class Server(asyncore.dispatcher):
def __init__(self, address, store, user, password):
asyncore.dispatcher.__init__(self)
self.store = store
self.user = user
self.password = password
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind(address)
self.listen(5)
def handle_accept(self):
pair = self.accept()
if pair is not None:
# TODO Handle disconnect
sock, addr = pair
sock.send(b'+OK Welcome to Flying Rat\r\n')
Session(sock, self.store, self.user, self.password)
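

# Hedged usage sketch (added for illustration; not part of the original
# module). `DummyStore` is a made-up stand-in for a real message store; only
# the methods the Session/Server classes above call are stubbed out, and the
# port number is arbitrary.
if __name__ == '__main__':
    class DummyStore(object):
        total_byte_size = 0

        def __len__(self):
            return 0

        def __iter__(self):
            return iter([])

        def get(self, nr, include_deleted=False):
            return None

        def delete_marked_messages(self):
            return True

    Server(('127.0.0.1', 1110), DummyStore(), user=None, password=None)
    asyncore.loop()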
|
|
# -*- coding: utf-8 -*-
"""
sphinx.ext.graphviz
~~~~~~~~~~~~~~~~~~~
Allow graphviz-formatted graphs to be included in Sphinx-generated
documents inline.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import codecs
import posixpath
from os import path
from subprocess import Popen, PIPE
try:
from hashlib import sha1 as sha
except ImportError:
from sha import sha
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.errors import SphinxError
from sphinx.locale import _
from sphinx.util.osutil import ensuredir, ENOENT, EPIPE, EINVAL
from sphinx.util.compat import Directive
mapname_re = re.compile(r'<map id="(.*?)"')
class GraphvizError(SphinxError):
category = 'Graphviz error'
class graphviz(nodes.General, nodes.Element):
pass
class Graphviz(Directive):
"""
Directive to insert arbitrary dot markup.
"""
has_content = True
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = False
option_spec = {
'alt': directives.unchanged,
'inline': directives.flag,
'caption': directives.unchanged,
}
def run(self):
if self.arguments:
document = self.state.document
if self.content:
return [document.reporter.warning(
'Graphviz directive cannot have both content and '
'a filename argument', line=self.lineno)]
env = self.state.document.settings.env
rel_filename, filename = env.relfn2path(self.arguments[0])
env.note_dependency(rel_filename)
try:
fp = codecs.open(filename, 'r', 'utf-8')
try:
dotcode = fp.read()
finally:
fp.close()
except (IOError, OSError):
return [document.reporter.warning(
'External Graphviz file %r not found or reading '
'it failed' % filename, line=self.lineno)]
else:
dotcode = '\n'.join(self.content)
if not dotcode.strip():
return [self.state_machine.reporter.warning(
'Ignoring "graphviz" directive without content.',
line=self.lineno)]
node = graphviz()
node['code'] = dotcode
node['options'] = []
if 'alt' in self.options:
node['alt'] = self.options['alt']
if 'caption' in self.options:
node['caption'] = self.options['caption']
node['inline'] = 'inline' in self.options
return [node]
class GraphvizSimple(Directive):
"""
Directive to insert arbitrary dot markup.
"""
has_content = True
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'alt': directives.unchanged,
'inline': directives.flag,
'caption': directives.unchanged,
}
def run(self):
node = graphviz()
node['code'] = '%s %s {\n%s\n}\n' % \
(self.name, self.arguments[0], '\n'.join(self.content))
node['options'] = []
if 'alt' in self.options:
node['alt'] = self.options['alt']
if 'caption' in self.options:
node['caption'] = self.options['caption']
node['inline'] = 'inline' in self.options
return [node]
def render_dot(self, code, options, format, prefix='graphviz'):
"""Render graphviz code into a PNG or PDF output file."""
hashkey = (code + str(options) + \
str(self.builder.config.graphviz_dot) + \
str(self.builder.config.graphviz_dot_args)
).encode('utf-8')
fname = '%s-%s.%s' % (prefix, sha(hashkey).hexdigest(), format)
if hasattr(self.builder, 'imgpath'):
# HTML
relfn = posixpath.join(self.builder.imgpath, fname)
outfn = path.join(self.builder.outdir, '_images', fname)
else:
# LaTeX
relfn = fname
outfn = path.join(self.builder.outdir, fname)
if path.isfile(outfn):
return relfn, outfn
if hasattr(self.builder, '_graphviz_warned_dot') or \
hasattr(self.builder, '_graphviz_warned_ps2pdf'):
return None, None
ensuredir(path.dirname(outfn))
# graphviz expects UTF-8 by default
if isinstance(code, unicode):
code = code.encode('utf-8')
dot_args = [self.builder.config.graphviz_dot]
dot_args.extend(self.builder.config.graphviz_dot_args)
dot_args.extend(options)
dot_args.extend(['-T' + format, '-o' + outfn])
if format == 'png':
dot_args.extend(['-Tcmapx', '-o%s.map' % outfn])
try:
p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE)
except OSError, err:
if err.errno != ENOENT: # No such file or directory
raise
self.builder.warn('dot command %r cannot be run (needed for graphviz '
'output), check the graphviz_dot setting' %
self.builder.config.graphviz_dot)
self.builder._graphviz_warned_dot = True
return None, None
try:
# Graphviz may close standard input when an error occurs,
# resulting in a broken pipe on communicate()
stdout, stderr = p.communicate(code)
except (OSError, IOError), err:
if err.errno not in (EPIPE, EINVAL):
raise
# in this case, read the standard output and standard error streams
# directly, to get the error message(s)
stdout, stderr = p.stdout.read(), p.stderr.read()
p.wait()
if p.returncode != 0:
raise GraphvizError('dot exited with error:\n[stderr]\n%s\n'
'[stdout]\n%s' % (stderr, stdout))
if not path.isfile(outfn):
raise GraphvizError('dot did not produce an output file:\n[stderr]\n%s\n'
'[stdout]\n%s' % (stderr, stdout))
return relfn, outfn
def render_dot_html(self, node, code, options, prefix='graphviz',
imgcls=None, alt=None):
format = self.builder.config.graphviz_output_format
try:
if format not in ('png', 'svg'):
raise GraphvizError("graphviz_output_format must be one of 'png', "
"'svg', but is %r" % format)
fname, outfn = render_dot(self, code, options, format, prefix)
except GraphvizError, exc:
self.builder.warn('dot code %r: ' % code + str(exc))
raise nodes.SkipNode
inline = node.get('inline', False)
if inline:
wrapper = 'span'
else:
wrapper = 'p'
self.body.append(self.starttag(node, wrapper, CLASS='graphviz'))
if fname is None:
self.body.append(self.encode(code))
else:
if alt is None:
alt = node.get('alt', self.encode(code).strip())
imgcss = imgcls and 'class="%s"' % imgcls or ''
if format == 'svg':
svgtag = '<img src="%s" alt="%s" %s/>\n' % (fname, alt, imgcss)
self.body.append(svgtag)
else:
mapfile = open(outfn + '.map', 'rb')
try:
imgmap = mapfile.readlines()
finally:
mapfile.close()
if len(imgmap) == 2:
# nothing in image map (the lines are <map> and </map>)
self.body.append('<img src="%s" alt="%s" %s/>\n' %
(fname, alt, imgcss))
else:
# has a map: get the name of the map and connect the parts
mapname = mapname_re.match(imgmap[0].decode('utf-8')).group(1)
self.body.append('<img src="%s" alt="%s" usemap="#%s" %s/>\n' %
(fname, alt, mapname, imgcss))
self.body.extend([item.decode('utf-8') for item in imgmap])
if node.get('caption') and not inline:
self.body.append('</p>\n<p class="caption">')
self.body.append(self.encode(node['caption']))
self.body.append('</%s>\n' % wrapper)
raise nodes.SkipNode
def html_visit_graphviz(self, node):
render_dot_html(self, node, node['code'], node['options'])
def render_dot_latex(self, node, code, options, prefix='graphviz'):
try:
fname, outfn = render_dot(self, code, options, 'pdf', prefix)
except GraphvizError, exc:
self.builder.warn('dot code %r: ' % code + str(exc))
raise nodes.SkipNode
inline = node.get('inline', False)
if inline:
para_separator = ''
else:
para_separator = '\n'
if fname is not None:
caption = node.get('caption')
# XXX add ids from previous target node
if caption and not inline:
self.body.append('\n\\begin{figure}[h!]')
self.body.append('\n\\begin{center}')
self.body.append('\n\\caption{%s}' % self.encode(caption))
self.body.append('\n\\includegraphics{%s}' % fname)
self.body.append('\n\\end{center}')
self.body.append('\n\\end{figure}\n')
else:
self.body.append('%s\\includegraphics{%s}%s' %
(para_separator, fname, para_separator))
raise nodes.SkipNode
def latex_visit_graphviz(self, node):
render_dot_latex(self, node, node['code'], node['options'])
def render_dot_texinfo(self, node, code, options, prefix='graphviz'):
try:
fname, outfn = render_dot(self, code, options, 'png', prefix)
except GraphvizError, exc:
self.builder.warn('dot code %r: ' % code + str(exc))
raise nodes.SkipNode
if fname is not None:
self.body.append('\n\n@float\n')
caption = node.get('caption')
if caption:
self.body.append('@caption{%s}\n' % self.escape_arg(caption))
self.body.append('@image{%s,,,[graphviz],png}\n'
'@end float\n\n' % fname[:-4])
raise nodes.SkipNode
def texinfo_visit_graphviz(self, node):
render_dot_texinfo(self, node, node['code'], node['options'])
def text_visit_graphviz(self, node):
if 'alt' in node.attributes:
self.add_text(_('[graph: %s]') % node['alt'])
else:
self.add_text(_('[graph]'))
raise nodes.SkipNode
def man_visit_graphviz(self, node):
if 'alt' in node.attributes:
self.body.append(_('[graph: %s]') % node['alt'])
else:
self.body.append(_('[graph]'))
raise nodes.SkipNode
def setup(app):
app.add_node(graphviz,
html=(html_visit_graphviz, None),
latex=(latex_visit_graphviz, None),
texinfo=(texinfo_visit_graphviz, None),
text=(text_visit_graphviz, None),
man=(man_visit_graphviz, None))
app.add_directive('graphviz', Graphviz)
app.add_directive('graph', GraphvizSimple)
app.add_directive('digraph', GraphvizSimple)
app.add_config_value('graphviz_dot', 'dot', 'html')
app.add_config_value('graphviz_dot_args', [], 'html')
app.add_config_value('graphviz_output_format', 'png', 'html')
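# Hedged usage sketch (added for illustration; not part of the original
# extension). After adding this extension to the `extensions` list in conf.py,
# a reST document can embed graphs with the directives registered above, e.g.:
#
#   .. graphviz::
#
#      digraph example {
#          a -> b;
#      }
#
#   .. digraph:: example
#
#      a -> b;
#
# The rendered format follows the `graphviz_output_format` config value
# ('png' by default, 'svg' also supported for HTML output).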
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.importer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.framework import device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
def _unknown_shape(op):
return [tensor_shape.unknown_shape() for _ in op.outputs]
# NOTE(cwhipkey): Dummy shape registration for ops used in the tests, since they
# don't have C++ op registrations on which to attach C++ shape fns.
ops.RegisterShape("If")(_unknown_shape)
ops.RegisterShape("Iff")(_unknown_shape)
ops.RegisterShape("Ii")(_unknown_shape)
ops.RegisterShape("Iif")(_unknown_shape)
ops.RegisterShape("Iii")(_unknown_shape)
ops.RegisterShape("In")(_unknown_shape)
ops.RegisterShape("Iri")(_unknown_shape)
ops.RegisterShape("None")(_unknown_shape)
ops.RegisterShape("Of")(_unknown_shape)
ops.RegisterShape("Oi")(_unknown_shape)
ops.RegisterShape("Oif")(_unknown_shape)
ops.RegisterShape("Oii")(_unknown_shape)
ops.RegisterShape("OpWithDefaultAttr")(_unknown_shape)
ops.RegisterShape("OpWithFutureDefaultAttr")(_unknown_shape)
ops.RegisterShape("Or")(_unknown_shape)
ops.RegisterShape("Otl")(_unknown_shape)
ops.RegisterShape("Unary")(_unknown_shape)
_op_list = op_def_pb2.OpList()
text_format.Merge("""
op {
name: 'None'
}
op {
name: 'Oi'
output_arg { name: 'a' type: DT_INT32 }
}
op {
name: 'Or'
output_arg { name: 'a' type: DT_INT32 is_ref: true }
}
op {
name: 'Of'
output_arg { name: 'a' type: DT_FLOAT }
}
op {
name: 'Ii'
input_arg { name: 'a' type: DT_INT32 }
}
op {
name: 'If'
input_arg { name: 'a' type: DT_FLOAT }
}
op {
name: 'Oii'
output_arg { name: 'a' type: DT_INT32 }
output_arg { name: 'b' type: DT_INT32 }
}
op {
name: 'Oif'
output_arg { name: 'a' type: DT_INT32 }
output_arg { name: 'b' type: DT_FLOAT }
}
op {
name: 'Iii'
input_arg { name: 'a' type: DT_INT32 }
input_arg { name: 'b' type: DT_INT32 }
}
op {
name: 'Iff'
input_arg { name: 'a' type: DT_FLOAT }
input_arg { name: 'b' type: DT_FLOAT }
}
op {
name: 'Iif'
input_arg { name: 'a' type: DT_INT32 }
input_arg { name: 'b' type: DT_FLOAT }
}
op {
name: 'Iri'
input_arg { name: 'a' type: DT_INT32 is_ref: true }
input_arg { name: 'b' type: DT_INT32 }
}
op {
name: 'In'
input_arg { name: 'a' number_attr: 'N' type_attr: 'T' }
attr { name: 'N' type: 'int' minimum: 1 }
attr { name: 'T' type: 'type' }
}
op {
name: 'Otl'
output_arg { name: 'a' type_list_attr: 't' }
attr { name: 'T' type: 'list(type)' minimum: 1 }
}
op {
name: 'Unary'
input_arg { name: 'a' type_attr: 'T' }
output_arg { name: 'b' type_attr: 'T' }
attr { name: 'T' type: 'type' }
}
op {
name: 'OpWithDefaultAttr'
output_arg { name: 'a' type: DT_INT32 }
attr { name: 'default_float' type: 'float' default_value { f: 123.0 } }
}
op {
name: 'OpWithFutureDefaultAttr'
}
""", _op_list)
op_def_registry.register_op_list(_op_list)
# NOTE(mrry): Dummy shape registrations for ops used in the tests.
for op_def in _op_list.op:
tf.RegisterShape(op_def.name)(None)
class ImportGraphDefTest(tf.test.TestCase):
def _MakeGraphDef(self, text, producer=tf.GRAPH_DEF_VERSION,
min_consumer=tf.GRAPH_DEF_VERSION_MIN_CONSUMER):
text = "versions: { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, text)
ret = tf.GraphDef()
text_format.Merge(text, ret)
return ret
def testBasic(self):
with tf.Graph().as_default():
a, b, c, d = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oif' }
node { name: 'B' op: 'Otl'
attr { key: 't'
value { list { type: DT_INT32 type: DT_FLOAT } } } }
node { name: 'C' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_FLOAT } }
input: 'A:1' input: 'B:1' }
"""),
return_elements=["A", "B", "C", "D"],
name="import")
# Assert that the import process creates distinct tensors.
self.assertNotEqual(a.outputs[0].name, a.outputs[1].name)
self.assertNotEqual(b.outputs[0].name, b.outputs[1].name)
self.assertNotEqual(a.outputs[0].name, b.outputs[0].name)
self.assertNotEqual(a.outputs[0].name, b.outputs[1].name)
self.assertNotEqual(a.outputs[1].name, b.outputs[0].name)
self.assertNotEqual(a.outputs[1].name, b.outputs[1].name)
# Assert that the ops are connected according to the GraphDef topology.
self.assertEqual(c.inputs[0], a.outputs[0])
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], b.outputs[1])
# Check the types of the returned ops and tensors.
self.assertEqual(a.type, "Oif")
self.assertEqual(b.type, "Otl")
self.assertEqual(c.type, "In")
self.assertEqual(d.type, "In")
self.assertEqual(a.outputs[0].dtype, tf.int32)
self.assertEqual(a.outputs[1].dtype, tf.float32)
self.assertEqual(b.outputs[0].dtype, tf.int32)
self.assertEqual(b.outputs[1].dtype, tf.float32)
# Check the names of the returned ops.
self.assertEqual(a.name, "import/A")
self.assertEqual(b.name, "import/B")
self.assertEqual(c.name, "import/C")
self.assertEqual(d.name, "import/D")
# Check that the op_def is still available.
self.assertNotEqual(None, a.op_def)
def testInputMap(self):
with tf.Graph().as_default():
feed_a_0 = tf.constant(0, dtype=tf.int32)
feed_b_1 = tf.constant(1, dtype=tf.int32)
a, b, c, d = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oii' }
node { name: 'B' op: 'Oii' }
node { name: 'C' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:1' input: 'B:1' }
"""),
input_map={"A:0": feed_a_0, "B:1": feed_b_1},
return_elements=["A", "B", "C", "D"])
self.assertEqual(c.inputs[0], feed_a_0)
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], feed_b_1)
def testInputMapBytes(self):
with tf.Graph().as_default():
feed_a_0 = tf.constant(0, dtype=tf.int32)
feed_b_1 = tf.constant(1, dtype=tf.int32)
a, b, c, d = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oii' }
node { name: 'B' op: 'Oii' }
node { name: 'C' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:1' input: 'B:1' }
"""),
input_map={b"A:0": feed_a_0, b"B:1": feed_b_1},
return_elements=[b"A", b"B", b"C", b"D"])
self.assertEqual(c.inputs[0], feed_a_0)
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], feed_b_1)
def testInputMapUnicode(self):
with tf.Graph().as_default():
feed_a_0 = tf.constant(0, dtype=tf.int32)
feed_b_1 = tf.constant(1, dtype=tf.int32)
a, b, c, d = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oii' }
node { name: 'B' op: 'Oii' }
node { name: 'C' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:1' input: 'B:1' }
"""),
input_map={u"A:0": feed_a_0, u"B:1": feed_b_1},
return_elements=[u"A", u"B", u"C", u"D"])
self.assertEqual(c.inputs[0], feed_a_0)
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], feed_b_1)
def testImplicitZerothOutput(self):
with tf.Graph().as_default():
a, b = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oii' }
node { name: 'B' op: 'Ii' input: 'A' }
"""),
return_elements=["A", "B"])
self.assertEqual(b.inputs[0], a.outputs[0])
def testInputMapImplicitZerothOutput(self):
with tf.Graph().as_default():
feed_a_0 = tf.constant(0, dtype=tf.int32)
b, = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oii' }
node { name: 'B' op: 'Ii' input: 'A:0' }
"""),
input_map={"A": feed_a_0},
return_elements=["B"])
self.assertEqual(b.inputs[0], feed_a_0)
def testWithControlDependency(self):
with tf.Graph().as_default():
a, b = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
node { name: 'B' op: 'None' input: '^A' }
"""),
return_elements=["A", "B"])
self.assertEqual(b.control_inputs, [a])
def testWithRefs(self):
with tf.Graph().as_default():
a, b, c, d = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Or' }
node { name: 'B' op: 'Oi' }
node { name: 'C' op: 'Iii' input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'Iri' input: 'A:0' input: 'B:0' }
"""),
return_elements=["A", "B", "C", "D"])
self.assertEqual(c.inputs[0], a.outputs[0])
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[0])
self.assertEqual(d.inputs[1], b.outputs[0])
self.assertEqual(a.outputs[0].dtype, dtypes.int32_ref)
self.assertEqual(c._input_dtypes, [tf.int32, tf.int32])
self.assertEqual(c.outputs, [])
self.assertEqual(d._input_dtypes,
[dtypes.int32_ref, tf.int32])
self.assertEqual(d.outputs, [])
def testCyclic(self):
with tf.Graph().as_default():
a, b = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Unary'
attr { key: 'T' value { type: DT_INT32 } } input: 'B:0' }
node { name: 'B' op: 'Unary'
attr { key: 'T' value { type: DT_INT32 } } input: 'A:0' }
"""),
return_elements=["A", "B"])
self.assertEqual(a.inputs[0], b.outputs[0])
self.assertEqual(b.inputs[0], a.outputs[0])
def testTypeMismatchInGraphDef(self):
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oi' }
node { name: 'B' op: 'If' input: 'A:0' }
"""))
self.assertTrue(
"Cannot convert a tensor of type int32 to an input of type float" in
str(e.exception))
def testShapeWhitelist(self):
# Barrier's shape is an output vector of 2, but the
# graph says it's a scalar. This is currently whitelisted.
with tf.Graph().as_default():
_ = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Barrier'
attr { key: '_output_shapes'
value { list { shape { } } } } }
"""),
return_elements=["A"],
name="import")
def testShapeWhitelistViolation(self):
# L2 loss produces a scalar shape, but the graph
# has the wrong shape, so raise an error.
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
_ = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Of' }
node { name: 'B' op: 'L2Loss'
input: 'A:0'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: '_output_shapes'
value { list { shape { dim { size: 43 } } } } } }
"""),
return_elements=["B"],
name="import")
self.assertTrue(
"Shapes () and (43,) are not compatible" in str(e.exception))
def testInvalidSignatureTooManyInputsInGraphDef(self):
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oi' }
node { name: 'B' op: 'None' input: 'A:0' }
"""))
self.assertTrue("More inputs specified ('A:0') than the op expects" in
str(e.exception))
def testInvalidSignatureNotEnoughInputsInGraphDef(self):
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oi' }
node { name: 'B' op: 'Iif' input: 'A:0' }
"""))
self.assertTrue("Input types mismatch (expected 'int32, float32' but "
"got 'int32')" in str(e.exception))
def testMissingInputOpInGraphDef(self):
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'If' input: 'A:0' }
"""))
self.assertTrue("Input tensor 'A:0' not found" in str(e.exception))
def testMissingInputOpInGraphDefButAppearsInInputMap(self):
with tf.Graph().as_default():
feed_a_0 = tf.constant(5.0)
b, = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'If' input: 'A:0' }
"""),
input_map={"A:0": feed_a_0},
return_elements=["B"])
self.assertEqual(b.inputs[0], feed_a_0)
def testMissingInputTensorInGraphDef(self):
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Of' }
node { name: 'B' op: 'If' input: 'A:1' }
"""))
self.assertTrue("Input tensor 'A:1' not found" in str(e.exception))
def testMissingControlInputInGraphDef(self):
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'None' input: '^A' }
"""))
self.assertTrue("Control input '^A' not found" in str(e.exception))
def testInvalidTensorNameOutputIndexInGraphDef(self):
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'None' input: 'A:B' }
"""))
self.assertEqual("Cannot convert 'A:B' to a tensor name.",
str(e.exception))
def testInvalidTensorNameInGraphDef(self):
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'None' input: 'A:B:0' }
"""))
self.assertEqual("Cannot convert 'A:B:0' to a tensor name.",
str(e.exception))
def testMissingReturnOperation(self):
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
return_elements=["B"])
self.assertTrue("return_element 'B' not found in graph_def." in
str(e.exception))
def testMissingReturnTensor(self):
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oi' }
"""),
return_elements=["A:1"])
self.assertTrue("return_element 'A:1' not found in graph_def." in
str(e.exception))
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oi' }
"""),
return_elements=["B:0"])
self.assertTrue("return_element 'B:0' not found in graph_def." in
str(e.exception))
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oi' }
"""),
return_elements=["A:B:0"])
self.assertTrue("return_element 'A:B:0' not found in graph_def." in
str(e.exception))
def testMissingInputMap(self):
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
input_map={"B:0": tf.constant(5.0)})
self.assertTrue("not found in graph_def: [B:0]" in str(e.exception))
def testInputMapTypeMismatch(self):
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oi' }
node { name: 'B' op: 'Ii' input: 'A:0' }
"""),
input_map={"A:0": tf.constant(5.0)})
self.assertTrue(
"Cannot convert a tensor of type float32 to an input of type int32."
in str(e.exception))
def testNoReturns(self):
with tf.Graph().as_default() as g:
ret = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""))
self.assertEqual(ret, None)
a = g.get_operation_by_name("import/A")
self.assertEqual(a.type, "None")
def testOverrideNamePrefix(self):
with tf.Graph().as_default():
a, = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
return_elements=["A"], name="imported_graph")
self.assertEqual(a.name, "imported_graph/A")
def testNamePrefixColocationAttrs(self):
original_graph_def = self._MakeGraphDef("""
node { name: 'A' op: 'None' }
node { name: 'B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }""")
with tf.Graph().as_default():
b, = tf.import_graph_def(original_graph_def,
return_elements=["B"], name="imported_graph")
self.assertProtoEqualsVersion("""
node { name: 'imported_graph/A' op: 'None' }
node { name: 'imported_graph/B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@imported_graph/A' } }
} }""", b.graph.as_graph_def())
def testNamePrefixColocationAttrsMultipleImport(self):
original_graph_def = self._MakeGraphDef("""
node { name: 'A' op: 'None' }
node { name: 'B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }""")
with tf.Graph().as_default():
b, = tf.import_graph_def(original_graph_def,
return_elements=["B"], name="")
_, = tf.import_graph_def(original_graph_def,
return_elements=["B"], name="")
self.assertProtoEqualsVersion("""
node { name: 'A' op: 'None' }
node { name: 'B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }
node { name: 'A_1' op: 'None' }
node { name: 'B_1' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A_1' } }
} }""", b.graph.as_graph_def())
def testNamePrefixColocationAttrsNotFound(self):
original_graph_def = self._MakeGraphDef("""
node { name: 'B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }""")
with tf.Graph().as_default():
with self.assertRaisesRegexp(ValueError, "does not exist during import"):
tf.import_graph_def(original_graph_def,
return_elements=["B"], name="imported_graph")
def testEmptyGraph(self):
with tf.Graph().as_default() as g:
init_version = g.version
tf.import_graph_def(self._MakeGraphDef(""))
self.assertEqual(init_version, g.version)
def testInvalidInputForGraphDef(self):
with tf.Graph().as_default():
with self.assertRaises(TypeError) as e:
tf.import_graph_def("")
self.assertEqual(
"graph_def must be a GraphDef proto.", str(e.exception))
def testInvalidInputForInputMap(self):
with tf.Graph().as_default():
with self.assertRaises(TypeError) as e:
tf.import_graph_def(self._MakeGraphDef(""),
input_map=[tf.constant(5.0)])
self.assertEqual("input_map must be a dictionary mapping strings to "
"Tensor objects.", str(e.exception))
with self.assertRaises(ValueError) as e:
tf.import_graph_def(self._MakeGraphDef(""),
input_map={"a:0": tf.constant(5.0)},
name="")
self.assertEqual("tf.import_graph_def() requires a non-empty `name` "
"if `input_map` is used.", str(e.exception))
def testInvalidInputForReturnOperations(self):
with tf.Graph().as_default():
with self.assertRaises(TypeError) as e:
tf.import_graph_def(self._MakeGraphDef(""), return_elements=[7])
self.assertEqual(
"return_elements must be a list of strings.", str(e.exception))
def testWithExtensionAndAttr(self):
with tf.Graph().as_default() as g:
c = tf.constant(5.0, dtype=tf.float32, name="c")
tf.stack([c, c], name="pack")
gdef = g.as_graph_def()
with self.test_session():
pack, = tf.import_graph_def(gdef, return_elements=["pack"])
self.assertAllEqual(pack.outputs[0].eval(), [5.0, 5.0])
def testWithDevice(self):
with tf.Graph().as_default() as g:
# No device.
a = tf.constant(3.0, name="a")
with tf.device("/cpu:0"):
b = tf.constant(4.0, name="b")
with tf.device("/job:worker"):
c = tf.constant(5.0, name="c")
gdef = g.as_graph_def()
with tf.Graph().as_default():
a2, b2, c2 = tf.import_graph_def(
gdef, return_elements=["a", "b", "c"])
self.assertEqual(a.device, a2.device)
self.assertEqual(b.device, b2.device)
self.assertEqual(c.device, c2.device)
with tf.Graph().as_default():
with tf.device(device.merge_device("/task:0")):
a3, b3, c3 = tf.import_graph_def(
gdef, return_elements=["a", "b", "c"])
self.assertEqual("/task:0", a3.device)
self.assertEqual("/task:0/device:CPU:0", b3.device) # canonicalized.
self.assertEqual(c.device + "/task:0", c3.device)
with tf.Graph().as_default():
with tf.device(device.merge_device("/job:ps")):
a4, b4, c4 = tf.import_graph_def(
gdef, return_elements=["a", "b", "c"])
self.assertEqual("/job:ps", a4.device)
self.assertEqual("/job:ps/device:CPU:0", b4.device) # canonicalized.
self.assertEqual(c.device, c4.device) # worker overrides ps.
with tf.Graph().as_default():
with tf.device(device.merge_device("/gpu:0")):
a5, b5, c5 = tf.import_graph_def(
gdef, return_elements=["a", "b", "c"])
self.assertEqual("/device:GPU:0", a5.device)
self.assertEqual("/device:CPU:0", b5.device) # cpu overrides gpu.
self.assertEqual(c.device + "/device:GPU:0", c5.device)
def testWithDeviceFunctionDependingOnInputs(self):
with tf.Graph().as_default() as g:
with tf.device("/job:ps"):
v = tf.Variable(1.0)
unused_assign_op = v.assign(2.0)
unused_assign_2_op = v.assign(3.0)
unused_add_t = v + v
gdef = g.as_graph_def()
# We'll use the following device function to observe ops with two inputs.
ops_with_two_inputs = []
def input_counter(op):
if any(in_t.dtype._is_ref_dtype for in_t in op.inputs): # pylint: disable=protected-access
ops_with_two_inputs.append(op)
return ""
with tf.Graph().as_default() as g:
with tf.device(input_counter):
tf.import_graph_def(gdef)
# We expect to see the initializer, two assign operations, and the add op.
self.assertEqual(4, len(ops_with_two_inputs))
def testGradient(self):
with tf.Graph().as_default() as g:
inputs = tf.placeholder(tf.float32, shape=[None, 100], name="input")
weights = tf.placeholder(tf.float32, shape=[100, 10], name="weights")
biases = tf.placeholder(tf.float32, shape=[10], name="biases")
activations = tf.nn.relu(tf.matmul(inputs, weights) + biases,
name="activations")
loss = tf.reduce_mean(activations, name="loss")
gdef = g.as_graph_def()
with tf.Graph().as_default() as g:
input_placeholder = tf.placeholder(tf.float32, shape=[32, 100])
weights_var = tf.Variable(tf.truncated_normal([100, 10]), name="weights")
biases_var = tf.Variable(tf.zeros([10]), name="biases")
activations, loss = tf.import_graph_def(
gdef,
input_map={"input:0": input_placeholder,
"weights:0": weights_var,
"biases:0": biases_var},
return_elements=["activations:0", "loss:0"])
self.assertEqual([32, 10], activations.get_shape())
self.assertEqual([], loss.get_shape())
weights_grad, biases_grad = tf.gradients(loss, [weights_var, biases_var])
self.assertEqual([100, 10], weights_grad.get_shape())
self.assertEqual([10], biases_grad.get_shape())
def testLargeGraph(self):
with self.test_session():
      # The default message byte limit is 64M. Ours is 2G with a warning at 512MB.
# Adding a 130M entries float32 tensor should exceed the warning, but not
# the hard limit.
input_shape = [130, 1000, 1000]
tensor_input = np.ones(input_shape, dtype=np.float32)
t = tf.constant(tensor_input, shape=input_shape)
g = tf.identity(t)
g.eval()
def testVersion(self):
v0 = tf.GRAPH_DEF_VERSION_MIN_CONSUMER
v2 = tf.GRAPH_DEF_VERSION
v1 = (v0 + v2) // 2
for producer in v0, v1, v2:
for min_consumer in v0, v1, v2:
with tf.Graph().as_default():
a, = tf.import_graph_def(
self._MakeGraphDef("node { name: 'A' op: 'Oii' }",
producer=producer, min_consumer=min_consumer),
return_elements=["A"])
self.assertEqual(a.graph.graph_def_versions.producer, producer)
self.assertEqual(a.graph.graph_def_versions.min_consumer,
min_consumer)
def testVersionLow(self):
with tf.Graph().as_default() as g:
pat = (r"GraphDef producer version -1 below min producer %d supported "
r"by TensorFlow \S+\. Please regenerate your graph.$" %
tf.GRAPH_DEF_VERSION_MIN_PRODUCER)
tf.import_graph_def(self._MakeGraphDef("", producer=-1))
x = tf.constant(7) # Need at least one op to get a C++ graph generated
with self.test_session(graph=g) as sess:
with self.assertRaisesRegexp(Exception, pat):
sess.run(x)
def testVersionHigh(self):
with tf.Graph().as_default() as g:
pat = (r"GraphDef min consumer version %d above current version %d "
r"for TensorFlow \S+\. Please upgrade TensorFlow\.$" %
(1 << 30, tf.GRAPH_DEF_VERSION))
tf.import_graph_def(self._MakeGraphDef("", min_consumer=1 << 30))
x = tf.constant(7) # Need at least one op to get a C++ graph generated
with self.test_session(graph=g) as sess:
with self.assertRaisesRegexp(Exception, pat):
sess.run(x)
def testDefaultAttrsAdded(self):
with tf.Graph().as_default():
a = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'OpWithDefaultAttr' }
"""),
return_elements=["A"])
self.assertEqual(123.0, a[0].get_attr("default_float"))
def testDefaultAttrsRemoved(self):
producer_op_list = op_def_pb2.OpList()
text_format.Merge("""
op {
name: 'OpWithFutureDefaultAttr'
attr { name: 'default_int' type: 'int' default_value { i: 456 } }
}
""", producer_op_list)
# Attr only in producer_op_list with default value gets removed.
with tf.Graph().as_default():
a = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'OpWithFutureDefaultAttr'
attr { key: 'default_int' value { i: 456 } } }
"""),
return_elements=["A"], producer_op_list=producer_op_list)
with self.assertRaisesRegexp(ValueError, "No attr named 'default_int'"):
a[0].get_attr("default_int")
# Attr only in producer_op_list with non-default value is preserved.
with tf.Graph().as_default():
a = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'OpWithFutureDefaultAttr'
attr { key: 'default_int' value { i: 987 } } }
"""),
return_elements=["A"], producer_op_list=producer_op_list)
self.assertEqual(987, a[0].get_attr("default_int"))
if __name__ == "__main__":
tf.test.main()
|
|
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Object (blob) Storage benchmark tests.
There are two categories of tests here: 1) tests based on CLI tools, and 2)
tests that use APIs to access the storage provider.
For 1), we aim to simulate a typical use case of a common user of a storage
provider: uploading and downloading a set of files of different sizes from/to
a local directory.
For 2), we aim to measure the performance of a storage provider more directly
by accessing it via APIs. Here are the main scenarios covered in this
category:
a: Single byte object upload and download, measures latency.
b: List-after-write and list-after-update consistency measurement.
c: Single stream large object upload and download, measures throughput.
Documentation: https://goto.google.com/perfkitbenchmarker-storage
"""
import json
import logging
import os
import posixpath
import re
import threading
import time
import numpy as np
from perfkitbenchmarker import providers
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import object_storage_service
from perfkitbenchmarker import sample
from perfkitbenchmarker import units
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.gcp import gcs
from perfkitbenchmarker.sample import PercentileCalculator # noqa
flags.DEFINE_enum('storage', providers.GCP,
[providers.GCP, providers.AWS,
providers.AZURE, providers.OPENSTACK],
'storage provider (GCP/AZURE/AWS/OPENSTACK) to use.')
flags.DEFINE_string('object_storage_region', None,
'Storage region for object storage benchmark.')
flags.DEFINE_string('object_storage_gcs_multiregion', None,
'Storage multiregion for GCS in object storage benchmark.')
flags.DEFINE_string('object_storage_storage_class', None,
'Storage class to use in object storage benchmark.')
flags.DEFINE_enum('object_storage_scenario', 'all',
['all', 'cli', 'api_data', 'api_namespace',
'api_multistream'],
'select all, or one particular scenario to run: \n'
'ALL: runs all scenarios. This is the default. \n'
'cli: runs the command line only scenario. \n'
'api_data: runs API based benchmarking for data paths. \n'
'api_namespace: runs API based benchmarking for namespace '
'operations. \n'
'api_multistream: runs API-based benchmarking with multiple '
'upload/download streams.')
flags.DEFINE_enum('cli_test_size', 'normal',
['normal', 'large'],
'size of the cli tests. Normal means a mixture of various \n'
'object sizes up to 32MiB (see '
'data/cloud-storage-workload.sh). \n'
'Large means all objects are of at least 1GiB.')
flags.DEFINE_integer('object_storage_multistream_objects_per_stream', 1000,
'Number of objects to send and/or receive per stream. '
'Only applies to the api_multistream scenario.',
lower_bound=1)
flag_util.DEFINE_yaml('object_storage_object_sizes', '1KB',
'Size of objects to send and/or receive. Only applies to '
'the api_multistream scenario. Examples: 1KB, '
'{1KB: 50%, 10KB: 50%}')
flags.DEFINE_integer('object_storage_streams_per_vm', 10,
'Number of independent streams per VM. Only applies to '
'the api_multistream scenario.',
lower_bound=1)
flags.DEFINE_integer('object_storage_list_consistency_iterations', 200,
'Number of iterations to perform for the api_namespace '
'list consistency benchmark. This flag is mainly for '
'regression testing in the benchmarks. Reduce the number '
'to shorten the execution time of the api_namespace '
'scenario. However, to get useful metrics from the '
'api_namespace scenario, a high number of iterations '
'should be used (>=200).')
flags.DEFINE_enum('object_storage_object_naming_scheme', 'sequential_by_stream',
['sequential_by_stream',
'approximately_sequential'],
'How objects will be named. Only applies to the '
'api_multistream benchmark. '
'sequential_by_stream: object names from each stream '
'will be sequential, but different streams will have '
'different name prefixes. '
'approximately_sequential: object names from all '
'streams will roughly increase together.')
FLAGS = flags.FLAGS
# Use a scratch disk here to simulate what most users would do when they
# use CLI tools to interact with the storage provider.
BENCHMARK_INFO = {'name': 'object_storage_service',
'description':
'Object/blob storage service benchmarks. Specify '
'--object_storage_scenario '
'to select a set of sub-benchmarks to run. default is all.',
'scratch_disk': True,
'num_machines': 1}
BENCHMARK_NAME = 'object_storage_service'
BENCHMARK_CONFIG = """
object_storage_service:
description: >
Object/blob storage service benchmarks. Specify
--object_storage_scenario
to select a set of sub-benchmarks to run. default is all.
vm_groups:
default:
vm_spec: *default_single_core
disk_spec: *default_500_gb
vm_count: null
"""
DATA_FILE = 'cloud-storage-workload.sh'
# size of all data used in the CLI tests.
DATA_SIZE_IN_BYTES = 256.1 * 1024 * 1024
DATA_SIZE_IN_MBITS = 8 * DATA_SIZE_IN_BYTES / 1000 / 1000
LARGE_DATA_SIZE_IN_BYTES = 3 * 1024 * 1024 * 1024
LARGE_DATA_SIZE_IN_MBITS = 8 * LARGE_DATA_SIZE_IN_BYTES / 1000 / 1000
API_TEST_SCRIPT = 'object_storage_api_tests.py'
API_TEST_SCRIPTS_DIR = 'object_storage_api_test_scripts'
# Files that will be sent to the remote VM for API tests.
API_TEST_SCRIPT_FILES = ['object_storage_api_tests.py',
'object_storage_interface.py',
'azure_flags.py',
's3_flags.py']
# Various constants to name the result metrics.
THROUGHPUT_UNIT = 'Mbps'
LATENCY_UNIT = 'seconds'
NA_UNIT = 'na'
PERCENTILES_LIST = ['p0.1', 'p1', 'p5', 'p10', 'p50', 'p90', 'p95', 'p99',
'p99.9', 'average', 'stddev']
UPLOAD_THROUGHPUT_VIA_CLI = 'upload throughput via cli Mbps'
DOWNLOAD_THROUGHPUT_VIA_CLI = 'download throughput via cli Mbps'
CLI_TEST_ITERATION_COUNT = 100
LARGE_CLI_TEST_ITERATION_COUNT = 20
CLI_TEST_FAILURE_TOLERANCE = 0.05
# Azure does not parallelize operations in its CLI tools. The uploads or
# downloads of 100 test files have to run sequentially, so each iteration takes
# a very long time; we therefore run only 3 iterations.
CLI_TEST_ITERATION_COUNT_AZURE = 3
SINGLE_STREAM_THROUGHPUT = 'single stream %s throughput Mbps'
ONE_BYTE_LATENCY = 'one byte %s latency'
LIST_CONSISTENCY_SCENARIOS = ['list-after-write', 'list-after-update']
LIST_CONSISTENCY_PERCENTAGE = 'consistency percentage'
LIST_INCONSISTENCY_WINDOW = 'inconsistency window'
LIST_LATENCY = 'latency'
CONTENT_REMOVAL_RETRY_LIMIT = 5
# Sometimes, even when a bucket is completely empty, the service provider
# refuses to remove the bucket with a "BucketNotEmpty" error for up to an hour
# afterwards. We keep retrying until we reach that one-hour limit, and this
# wait is necessary for some providers.
BUCKET_REMOVAL_RETRY_LIMIT = 120
RETRY_WAIT_INTERVAL_SECONDS = 30
# GCS has special region handling until we can remove it :(
DEFAULT_GCS_MULTIREGION = 'us'
# Keys for flag names and metadata values
OBJECT_STORAGE_REGION = 'object_storage_region'
REGIONAL_BUCKET_LOCATION = 'regional_bucket_location'
OBJECT_STORAGE_GCS_MULTIREGION = 'object_storage_gcs_multiregion'
GCS_MULTIREGION_LOCATION = 'gcs_multiregion_location'
DEFAULT = 'default'
# This accounts for the overhead of running RemoteCommand() on a VM.
MULTISTREAM_DELAY_PER_VM = 5.0 * units.second
# We wait this long for each stream. Note that this is multiplied by
# the number of streams per VM, not the total number of streams.
MULTISTREAM_DELAY_PER_STREAM = 0.1 * units.second
# And add a constant factor for PKB-side processing
MULTISTREAM_DELAY_CONSTANT = 10.0 * units.second
# The multistream write benchmark writes a file in the VM's /tmp with
# the objects it has written, which is used by the multistream read
# benchmark. This is the filename.
OBJECTS_WRITTEN_FILE = 'pkb-objects-written'
# If the gap between different stream starts and ends is above a
# certain proportion of the total time, we log a warning because we
# are throwing out a lot of information. We also put the warning in
# the sample metadata.
MULTISTREAM_STREAM_GAP_THRESHOLD = 0.2
# The API test script uses different names for providers than this
# script :(
STORAGE_TO_API_SCRIPT_DICT = {
providers.GCP: 'GCS',
providers.AWS: 'S3',
providers.AZURE: 'AZURE'}
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
# Raised when we fail to remove a bucket or its content after many retries.
# TODO: add a new class of error "ObjectStorageError" to errors.py and remove
# this one.
class BucketRemovalError(Exception):
pass
class NotEnoughResultsError(Exception):
pass
def _JsonStringToPercentileResults(results, json_input, metric_name,
metric_unit, metadata):
"""This function parses a percentile result string in Json format.
Args:
results: The final result set to put result in.
json_input: The input in Json format about percentiles.
metric_name: Name of the metric.
metric_unit: Unit of the metric.
metadata: The metadata to be included.
"""
result = json.loads(json_input)
for percentile in PERCENTILES_LIST:
results.append(sample.Sample(
('%s %s') % (metric_name, percentile),
float(result[percentile]),
metric_unit,
metadata))
def _GetClientLibVersion(vm, library_name):
""" This function returns the version of client lib installed on a vm.
Args:
vm: the VM to get the client lib version from.
library_name: the name of the client lib.
Returns:
The version string of the client.
"""
version, _ = vm.RemoteCommand('pip show %s |grep Version' % library_name)
logging.info('%s client lib version is: %s', library_name, version)
return version
def MultiThreadStartDelay(num_vms, threads_per_vm):
"""Find how long in the future we can simultaneously start threads on VMs.
Args:
num_vms: number of VMs to start threads on.
threads_per_vm: number of threads to start on each VM.
Returns:
A units.Quantity of time such that if we want to start
threads_per_vm threads on num_vms VMs, we can start the threads
sequentially, tell each of them to sleep for this number of
seconds, and we expect that we will be able to start the last
thread before the delay has finished.
"""
return (
MULTISTREAM_DELAY_CONSTANT +
MULTISTREAM_DELAY_PER_VM * num_vms +
MULTISTREAM_DELAY_PER_STREAM * threads_per_vm)
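# Worked example (added as a sketch): with the constants defined above
# (10 s constant + 5 s per VM + 0.1 s per stream), 2 VMs with 10 streams each
# give a start delay of 10 + 2 * 5 + 10 * 0.1 = 21 seconds.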
def _ProcessMultiStreamResults(start_times, latencies, sizes, operation,
all_sizes, results, metadata=None):
"""Read and process results from the api_multistream worker process.
Results will be reported per-object size and combined for all
objects.
Args:
start_times: a list of numpy arrays. Operation start times, as
POSIX timestamps.
latencies: a list of numpy arrays. Operation durations, in seconds.
sizes: a list of numpy arrays. Object sizes used in each
operation, in bytes.
operation: 'upload' or 'download'. The operation the results are from.
all_sizes: a sequence of integers. all object sizes in the
distribution used, in bytes.
results: a list to append Sample objects to.
metadata: dict. Base sample metadata
"""
num_streams = FLAGS.object_storage_streams_per_vm * FLAGS.num_vms
assert len(start_times) == num_streams
assert len(latencies) == num_streams
assert len(sizes) == num_streams
if metadata is None:
metadata = {}
metadata['num_streams'] = num_streams
metadata['objects_per_stream'] = (
FLAGS.object_storage_multistream_objects_per_stream)
num_records = sum((len(start_time) for start_time in start_times))
logging.info('Processing %s total operation records', num_records)
stop_times = [start_time + latency
for start_time, latency in zip(start_times, latencies)]
last_start_time = max((start_time[0] for start_time in start_times))
first_stop_time = min((stop_time[-1] for stop_time in stop_times))
# Compute how well our synchronization worked
first_start_time = min((start_time[0] for start_time in start_times))
last_stop_time = max((stop_time[-1] for stop_time in stop_times))
start_gap = last_start_time - first_start_time
stop_gap = last_stop_time - first_stop_time
if ((start_gap + stop_gap) / (last_stop_time - first_start_time) <
MULTISTREAM_STREAM_GAP_THRESHOLD):
logging.info(
'First stream started %s seconds before last stream started', start_gap)
logging.info(
'Last stream ended %s seconds after first stream ended', stop_gap)
else:
logging.warning(
'Difference between first and last stream start/end times was %s and '
'%s, which is more than %s of the benchmark time %s.',
start_gap, stop_gap, MULTISTREAM_STREAM_GAP_THRESHOLD,
(last_stop_time - first_start_time))
metadata['stream_gap_above_threshold'] = True
# Find the indexes in each stream where all streams are active,
# following Python's [inclusive, exclusive) index convention.
active_start_indexes = []
for start_time in start_times:
for i in xrange(len(start_time)):
if start_time[i] >= last_start_time:
active_start_indexes.append(i)
break
active_stop_indexes = []
for stop_time in stop_times:
for i in xrange(len(stop_time) - 1, -1, -1):
if stop_time[i] <= first_stop_time:
active_stop_indexes.append(i + 1)
break
active_latencies = [
latencies[i][active_start_indexes[i]:active_stop_indexes[i]]
for i in xrange(num_streams)]
active_sizes = [
sizes[i][active_start_indexes[i]:active_stop_indexes[i]]
for i in xrange(num_streams)]
all_active_latencies = np.concatenate(active_latencies)
all_active_sizes = np.concatenate(active_sizes)
# Don't publish the full distribution in the metadata because doing
# so might break regexp-based parsers that assume that all metadata
# values are simple Python objects. However, do add an
# 'object_size_B' metadata field even for the full results because
# searching metadata is easier when all records with the same metric
# name have the same set of metadata fields.
distribution_metadata = metadata.copy()
distribution_metadata['object_size_B'] = 'distribution'
latency_prefix = 'Multi-stream %s latency' % operation
logging.info('Processing %s multi-stream %s results for the full '
'distribution.', len(all_active_latencies), operation)
_AppendPercentilesToResults(
results,
all_active_latencies,
latency_prefix,
LATENCY_UNIT,
distribution_metadata)
# Publish by-size and full-distribution stats even if there's only
# one size in the distribution, because it simplifies postprocessing
# of results.
for size in all_sizes:
this_size_metadata = metadata.copy()
this_size_metadata['object_size_B'] = size
logging.info('Processing multi-stream %s results for object size %s',
operation, size)
_AppendPercentilesToResults(
results,
all_active_latencies[all_active_sizes == size],
latency_prefix,
LATENCY_UNIT,
this_size_metadata)
# Throughput metrics
total_active_times = [np.sum(latency) for latency in active_latencies]
active_durations = [stop_times[i][active_stop_indexes[i] - 1] -
start_times[i][active_start_indexes[i]]
for i in xrange(num_streams)]
total_active_sizes = [np.sum(size) for size in active_sizes]
# 'net throughput (with gap)' is computed by taking the throughput
# for each stream (total # of bytes transmitted / (stop_time -
# start_time)) and then adding the per-stream throughputs. 'net
# throughput' is the same, but replacing (stop_time - start_time)
# with the sum of all of the operation latencies for that thread, so
# we only divide by the time that stream was actually transmitting.
results.append(sample.Sample(
'Multi-stream ' + operation + ' net throughput',
np.sum((size / active_time * 8
for size, active_time
in zip(total_active_sizes, total_active_times))),
'bit / second', metadata=distribution_metadata))
results.append(sample.Sample(
'Multi-stream ' + operation + ' net throughput (with gap)',
np.sum((size / duration * 8
for size, duration in zip(total_active_sizes, active_durations))),
'bit / second', metadata=distribution_metadata))
results.append(sample.Sample(
'Multi-stream ' + operation + ' net throughput (simplified)',
sum([np.sum(size) for size in sizes]) /
(last_stop_time - first_start_time) * 8,
'bit / second', metadata=distribution_metadata))
# QPS metrics
results.append(sample.Sample(
'Multi-stream ' + operation + ' QPS (any stream active)',
num_records / (last_stop_time - first_start_time), 'operation / second',
metadata=distribution_metadata))
results.append(sample.Sample(
'Multi-stream ' + operation + ' QPS (all streams active)',
len(all_active_latencies) / (first_stop_time - last_start_time),
'operation / second', metadata=distribution_metadata))
# Statistics about benchmarking overhead
gap_time = sum((active_duration - active_time
for active_duration, active_time
in zip(active_durations, total_active_times)))
results.append(sample.Sample(
'Multi-stream ' + operation + ' total gap time',
gap_time, 'second', metadata=distribution_metadata))
results.append(sample.Sample(
'Multi-stream ' + operation + ' gap time proportion',
gap_time / (first_stop_time - last_start_time) * 100.0,
'percent', metadata=distribution_metadata))
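# Illustrative sketch (not part of the benchmark; toy numbers only): shows how
# the two per-stream throughput definitions computed above differ. 'net
# throughput' divides by the time a stream spent transmitting, while 'net
# throughput (with gap)' divides by the stream's wall-clock active window.
def _ExampleNetThroughputDefinitions():
  """Returns (net_throughput, net_throughput_with_gap) in bits per second."""
  total_bytes = 1000.0       # bytes transmitted while all streams were active
  sum_of_latencies = 2.0     # seconds actually spent transmitting
  wall_clock_duration = 2.5  # first start to last stop, including idle gaps
  net = total_bytes / sum_of_latencies * 8
  net_with_gap = total_bytes / wall_clock_duration * 8
  return net, net_with_gap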
def _DistributionToBackendFormat(dist):
"""Convert an object size distribution to the format needed by the backend.
Args:
dist: a distribution, given as a dictionary mapping size to
frequency. Size will be a string with a quantity and a
unit. Frequency will be a percentage, including a '%'
character. dist may also be a string, in which case it represents
a single object size which applies to 100% of objects.
Returns:
A dictionary giving an object size distribution. Sizes will be
integers representing bytes. Frequencies will be floating-point
numbers in [0,100], representing percentages.
Raises:
ValueError if dist is not a valid distribution.
"""
if isinstance(dist, dict):
val = {flag_util.StringToBytes(size):
flag_util.StringToRawPercent(frequency)
for size, frequency in dist.iteritems()}
else:
# We allow compact notation for point distributions. For instance,
# '1KB' is an abbreviation for '{1KB: 100%}'.
val = {flag_util.StringToBytes(dist): 100.0}
# I'm requiring exact addition to 100, which can always be satisfied
# with integer percentages. If we want to allow general decimal
# percentages, all we have to do is replace this equality check with
# approximate equality.
if sum(val.itervalues()) != 100.0:
raise ValueError("Frequencies in %s don't add to 100%%!" % dist)
return val
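# Illustrative sketch (assumed example values, not used by the benchmark): the
# conversion above maps a flag-style distribution to backend format. A dict
# like {'1KB': '90%', '10KB': '10%'} becomes a mapping from sizes in bytes
# (whatever flag_util.StringToBytes returns for each string) to float
# percentages, and a bare string such as '1KB' is shorthand for a point
# distribution covering 100% of objects.
def _ExampleDistributionConversion():
  point = _DistributionToBackendFormat('1KB')
  mixed = _DistributionToBackendFormat({'1KB': '90%', '10KB': '10%'})
  return point, mixed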
class APIScriptCommandBuilder(object):
"""Builds command lines for the API test script.
Attributes:
test_script_path: the path to the API test script on the remote machine.
storage: the storage provider to use, in the format expected by
the test script.
service: the ObjectStorageService object corresponding to the
storage provider.
"""
def __init__(self, test_script_path, storage, service):
self.test_script_path = test_script_path
self.storage = storage
self.service = service
def BuildCommand(self, args):
"""Build a command string for the API test script.
Args:
args: a list of strings. These will become space-separated
arguments to the test script.
Returns:
A string that can be passed to vm.RemoteCommand.
"""
cmd_parts = [
self.test_script_path,
'--storage_provider=%s' % self.storage
] + args + self.service.APIScriptArgs()
if FLAGS.object_storage_storage_class is not None:
cmd_parts += ['--object_storage_class',
FLAGS.object_storage_storage_class]
return ' '.join(cmd_parts)
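# Illustrative usage sketch (hypothetical stub service and paths): BuildCommand
# simply joins the script path, the storage provider flag, the caller's
# arguments, any service-specific arguments, and the optional storage class.
class _ExampleNoOpService(object):
  """Stub standing in for an ObjectStorageService in the sketch below."""

  def APIScriptArgs(self):
    return []


def _ExampleBuildCommand():
  builder = APIScriptCommandBuilder(
      '/scratch/run/api_test.py', 'GCS', _ExampleNoOpService())
  # Yields something like:
  # '/scratch/run/api_test.py --storage_provider=GCS --bucket=b
  #  --scenario=OneByteRW'
  return builder.BuildCommand(['--bucket=b', '--scenario=OneByteRW'])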
class UnsupportedProviderCommandBuilder(APIScriptCommandBuilder):
"""A dummy command builder for unsupported providers.
When a provider isn't supported by the API test script yet, we
create this command builder for them. It will let us run the CLI
benchmark on that provider, but if the user tries to run an API
benchmark, it will throw an error.
Attributes:
provider: the name of the unsupported provider.
"""
def __init__(self, provider):
self.provider = provider
def BuildCommand(self, args):
raise NotImplementedError('API tests are not supported on provider %s.' %
self.provider)
def OneByteRWBenchmark(results, metadata, vm, command_builder,
service, bucket_name, regional_bucket_name):
"""A benchmark for small object latency.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vm: the VM to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: an ObjectStorageService.
bucket_name: the primary bucket to benchmark.
regional_bucket_name: the secondary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
buckets = [bucket_name]
if regional_bucket_name is not None:
buckets.append(regional_bucket_name)
for bucket in buckets:
one_byte_rw_cmd = command_builder.BuildCommand([
'--bucket=%s' % bucket,
'--scenario=OneByteRW'])
_, raw_result = vm.RemoteCommand(one_byte_rw_cmd)
logging.info('OneByteRW raw result is %s', raw_result)
for up_and_down in ['upload', 'download']:
search_string = 'One byte %s - (.*)' % up_and_down
result_string = re.findall(search_string, raw_result)
sample_name = ONE_BYTE_LATENCY % up_and_down
if bucket == regional_bucket_name:
sample_name = 'regional %s' % sample_name
if len(result_string) > 0:
_JsonStringToPercentileResults(results,
result_string[0],
sample_name,
LATENCY_UNIT,
metadata)
else:
raise ValueError('Unexpected test outcome from OneByteRW api test: '
'%s.' % raw_result)
def SingleStreamThroughputBenchmark(results, metadata, vm, command_builder,
service, bucket_name, regional_bucket_name):
"""A benchmark for large object throughput.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vm: the VM to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: an ObjectStorageService.
bucket_name: the primary bucket to benchmark.
regional_bucket_name: the secondary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
single_stream_throughput_cmd = command_builder.BuildCommand([
'--bucket=%s' % bucket_name,
'--scenario=SingleStreamThroughput'])
_, raw_result = vm.RemoteCommand(single_stream_throughput_cmd)
logging.info('SingleStreamThroughput raw result is %s', raw_result)
for up_and_down in ['upload', 'download']:
search_string = 'Single stream %s throughput in Bps: (.*)' % up_and_down
result_string = re.findall(search_string, raw_result)
sample_name = SINGLE_STREAM_THROUGHPUT % up_and_down
if not result_string:
raise ValueError('Unexpected test outcome from '
'SingleStreamThroughput api test: %s.' % raw_result)
# Convert bytes per second to megabits per second. We use 10^6
# (not 2^20) bits per megabit to be consistent with network
# bandwidth conventions.
result = json.loads(result_string[0])
for percentile in PERCENTILES_LIST:
results.append(sample.Sample(
('%s %s') % (sample_name, percentile),
8 * float(result[percentile]) / 1000 / 1000,
THROUGHPUT_UNIT,
metadata))
def ListConsistencyBenchmark(results, metadata, vm, command_builder,
service, bucket_name, regional_bucket_name):
"""A benchmark for bucket list consistency.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vm: the VM to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: an ObjectStorageService.
bucket_name: the primary bucket to benchmark.
regional_bucket_name: the secondary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
list_consistency_cmd = command_builder.BuildCommand([
'--bucket=%s' % bucket_name,
'--iterations=%d' % FLAGS.object_storage_list_consistency_iterations,
'--scenario=ListConsistency'])
_, raw_result = vm.RemoteCommand(list_consistency_cmd)
logging.info('ListConsistency raw result is %s', raw_result)
for scenario in LIST_CONSISTENCY_SCENARIOS:
metric_name = '%s %s' % (scenario, LIST_CONSISTENCY_PERCENTAGE)
search_string = '%s: (.*)' % metric_name
result_string = re.findall(search_string, raw_result)
if not result_string:
raise ValueError(
'Cannot get percentage from ListConsistency test.')
results.append(sample.Sample(
metric_name,
(float)(result_string[0]),
NA_UNIT,
metadata))
# Parse the list inconsistency window if there is any.
metric_name = '%s %s' % (scenario, LIST_INCONSISTENCY_WINDOW)
search_string = '%s: (.*)' % metric_name
result_string = re.findall(search_string, raw_result)
_JsonStringToPercentileResults(results,
result_string[0],
metric_name,
LATENCY_UNIT,
metadata)
# Also report the list latency. These latencies are from the lists
# that were consistent.
metric_name = '%s %s' % (scenario, LIST_LATENCY)
search_string = '%s: (.*)' % metric_name
result_string = re.findall(search_string, raw_result)
_JsonStringToPercentileResults(results,
result_string[0],
metric_name,
LATENCY_UNIT,
metadata)
def LoadWorkerOutput(output):
"""Load output from worker processes to our internal format.
Args:
output: list of strings. The stdouts of all worker processes.
Returns:
A tuple of start_time, latency, size. Each of these is a list of
numpy arrays, one array per worker process. start_time[i],
latency[i], and size[i] together form a table giving the start
time, latency, and size (bytes transmitted or received) of all
send/receive operations for worker i.
start_time holds POSIX timestamps, stored as np.float64. latency
holds times in seconds, stored as np.float64. size holds sizes in
bytes, stored as np.int64.
Example:
start_time[i] latency[i] size[i]
------------- ---------- -------
0.0 0.5 100
1.0 0.7 200
2.3 0.3 100
Raises:
AssertionError, if an individual worker's input includes
overlapping operations, or operations that don't move forward in
time, or if the input list isn't in stream number order.
"""
start_times = []
latencies = []
sizes = []
prev_stream_num = None
for worker_out in output:
json_out = json.loads(worker_out)
num_records = len(json_out)
assert (prev_stream_num is None or
json_out[0]['stream_num'] == prev_stream_num + 1)
prev_stream_num = json_out[0]['stream_num']
start_time = np.zeros([num_records], dtype=np.float64)
latency = np.zeros([num_records], dtype=np.float64)
size = np.zeros([num_records], dtype=np.int64)
prev_start = None
prev_latency = None
for i in xrange(num_records):
start_time[i] = json_out[i]['start_time']
latency[i] = json_out[i]['latency']
size[i] = json_out[i]['size']
assert i == 0 or start_time[i] >= (prev_start + prev_latency)
prev_start = start_time[i]
prev_latency = latency[i]
start_times.append(start_time)
latencies.append(latency)
sizes.append(size)
return start_times, latencies, sizes
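# Illustrative sketch (toy worker output, assumed values): each worker's stdout
# is a JSON list of records with 'stream_num', 'start_time', 'latency' and
# 'size' keys, which LoadWorkerOutput turns into per-stream numpy arrays.
def _ExampleLoadWorkerOutput():
  worker_stdout = ('[{"stream_num": 0, "start_time": 0.0,'
                   ' "latency": 0.5, "size": 100},'
                   ' {"stream_num": 0, "start_time": 1.0,'
                   ' "latency": 0.7, "size": 200}]')
  start_times, latencies, sizes = LoadWorkerOutput([worker_stdout])
  return start_times, latencies, sizes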
def MultiStreamRWBenchmark(results, metadata, vms, command_builder,
service, bucket_name, regional_bucket_name):
"""A benchmark for multi-stream latency and throughput.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: an ObjectStorageService.
bucket_name: the primary bucket to benchmark.
regional_bucket_name: the secondary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
logging.info('Starting multi-stream write test on %s VMs.', len(vms))
objects_written_file = posixpath.join(vm_util.VM_TMP_DIR,
OBJECTS_WRITTEN_FILE)
size_distribution = _DistributionToBackendFormat(
FLAGS.object_storage_object_sizes)
logging.info('Distribution %s, backend format %s.',
FLAGS.object_storage_object_sizes, size_distribution)
streams_per_vm = FLAGS.object_storage_streams_per_vm
num_streams = streams_per_vm * len(vms)
def StartMultiStreamProcess(cmd_args, proc_idx, out_array):
vm_idx = proc_idx // streams_per_vm
logging.info('Running on VM %s.', vm_idx)
cmd = command_builder.BuildCommand(
cmd_args + ['--stream_num_start=%s' % proc_idx])
out, _ = vms[vm_idx].RobustRemoteCommand(cmd, should_log=True)
out_array[proc_idx] = out
def RunMultiStreamProcesses(command):
output = [None] * num_streams
# Each process has a thread managing it.
threads = [
threading.Thread(target=StartMultiStreamProcess,
args=(command, i, output))
for i in xrange(num_streams)]
for thread in threads:
thread.start()
logging.info('Started %s processes.', num_streams)
for thread in threads:
thread.join()
logging.info('All processes complete.')
return output
write_start_time = (
time.time() +
MultiThreadStartDelay(FLAGS.num_vms, streams_per_vm).m_as('second'))
logging.info('Write start time is %s', write_start_time)
multi_stream_write_args = [
'--bucket=%s' % bucket_name,
'--objects_per_stream=%s' % (
FLAGS.object_storage_multistream_objects_per_stream),
'--object_sizes="%s"' % size_distribution,
'--num_streams=1',
'--start_time=%s' % write_start_time,
'--object_naming_scheme=%s' % FLAGS.object_storage_object_naming_scheme,
'--objects_written_file=%s' % objects_written_file,
'--scenario=MultiStreamWrite']
write_out = RunMultiStreamProcesses(multi_stream_write_args)
start_times, latencies, sizes = LoadWorkerOutput(write_out)
_ProcessMultiStreamResults(start_times, latencies, sizes, 'upload',
size_distribution.iterkeys(), results,
metadata=metadata)
logging.info('Finished multi-stream write test. Starting multi-stream '
'read test.')
read_start_time = (
time.time() +
MultiThreadStartDelay(FLAGS.num_vms, streams_per_vm).m_as('second'))
logging.info('Read start time is %s', read_start_time)
multi_stream_read_args = [
'--bucket=%s' % bucket_name,
'--objects_per_stream=%s' % (
FLAGS.object_storage_multistream_objects_per_stream),
'--num_streams=1',
'--start_time=%s' % read_start_time,
'--objects_written_file=%s' % objects_written_file,
'--scenario=MultiStreamRead']
try:
read_out = RunMultiStreamProcesses(multi_stream_read_args)
start_times, latencies, sizes = LoadWorkerOutput(read_out)
_ProcessMultiStreamResults(start_times, latencies, sizes, 'download',
size_distribution.iterkeys(), results,
metadata=metadata)
except Exception as ex:
logging.info('MultiStreamRead test failed with exception %s. Still '
'recording write data.', ex)
logging.info('Finished multi-stream read test.')
def CheckPrerequisites():
"""Verifies that the required resources are present.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
data.ResourcePath(DATA_FILE)
def _AppendPercentilesToResults(output_results, input_results, metric_name,
metric_unit, metadata):
# PercentileCalculator will (correctly) raise an exception on empty
# input, but an empty input list makes semantic sense here.
if len(input_results) == 0:
return
percentiles = PercentileCalculator(input_results)
for percentile in PERCENTILES_LIST:
output_results.append(sample.Sample(('%s %s') % (metric_name, percentile),
percentiles[percentile],
metric_unit,
metadata))
def CLIThroughputBenchmark(output_results, metadata, vm, command_builder,
service, bucket, regional_bucket):
"""A benchmark for CLI tool throughput.
Uploads and downloads a set of files between a local directory and the
bucket using the provider's CLI tools, and records the observed throughput.
Args:
output_results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vm: the VM to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: an ObjectStorageService.
bucket: the primary bucket to benchmark.
regional_bucket: the secondary bucket to benchmark.
Raises:
NotEnoughResultsError: if we failed too many times to upload or download.
"""
data_directory = '%s/run/data' % vm.GetScratchDir()
download_directory = '%s/run/temp' % vm.GetScratchDir()
# The real solution to the iteration count issue is dynamically
# choosing the number of iterations based on how long they
# take. This will work for now, though.
if FLAGS.storage == providers.AZURE:
iteration_count = CLI_TEST_ITERATION_COUNT_AZURE
elif FLAGS.cli_test_size == 'normal':
iteration_count = CLI_TEST_ITERATION_COUNT
else:
iteration_count = LARGE_CLI_TEST_ITERATION_COUNT
# The CLI-based tests require some provisioning on the VM first.
vm.RemoteCommand(
'cd %s/run/; bash cloud-storage-workload.sh %s' % (vm.GetScratchDir(),
FLAGS.cli_test_size))
# CLI tool based tests.
cli_upload_results = []
cli_download_results = []
if FLAGS.cli_test_size == 'normal':
data_size_in_mbits = DATA_SIZE_IN_MBITS
file_names = ['file-%s.dat' % i for i in range(100)]
else:
data_size_in_mbits = LARGE_DATA_SIZE_IN_MBITS
file_names = ['file_large_3gib.dat']
for _ in range(iteration_count):
try:
service.EmptyBucket(bucket)
except Exception:
pass
try:
_, res = service.CLIUploadDirectory(vm, data_directory,
file_names, bucket)
except errors.VirtualMachine.RemoteCommandError:
logging.info('Failed to upload; skipping this iteration.')
continue
throughput = data_size_in_mbits / vm_util.ParseTimeCommandResult(res)
logging.info('cli upload throughput %f', throughput)
cli_upload_results.append(throughput)
try:
vm.RemoveFile(posixpath.join(download_directory, '*'))
except Exception:
pass
try:
_, res = service.CLIDownloadBucket(vm, bucket,
file_names, download_directory)
except errors.VirtualMachine.RemoteCommandError:
logging.info('Failed to download; skipping this iteration.')
continue
throughput = data_size_in_mbits / vm_util.ParseTimeCommandResult(res)
logging.info('cli download throughput %f', throughput)
cli_download_results.append(throughput)
expected_successes = iteration_count * (1 - CLI_TEST_FAILURE_TOLERANCE)
if (len(cli_download_results) < expected_successes or
len(cli_upload_results) < expected_successes):
raise NotEnoughResultsError('Failed to complete the required number of '
'iterations.')
# Report various percentiles.
metrics_prefix = ''
if FLAGS.cli_test_size != 'normal':
metrics_prefix = '%s ' % FLAGS.cli_test_size
_AppendPercentilesToResults(output_results,
cli_upload_results,
'%s%s' % (metrics_prefix,
UPLOAD_THROUGHPUT_VIA_CLI),
THROUGHPUT_UNIT,
metadata)
_AppendPercentilesToResults(output_results,
cli_download_results,
'%s%s' % (metrics_prefix,
DOWNLOAD_THROUGHPUT_VIA_CLI),
THROUGHPUT_UNIT,
metadata)
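# Illustrative sketch (assumed numbers only): the CLI throughput recorded above
# is the payload size in megabits divided by the wall-clock seconds reported by
# the remote 'time' invocation for the upload or download.
def _ExampleCliThroughput():
  data_size_in_mbits = 800.0  # assumed payload size
  elapsed_seconds = 10.0      # assumed result of parsing the 'time' output
  return data_size_in_mbits / elapsed_seconds  # 80 megabits / second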
def PrepareVM(vm, service):
vm.Install('pip')
vm.RemoteCommand('sudo pip install python-gflags==2.0')
vm.RemoteCommand('sudo pip install pyyaml')
vm.Install('openssl')
# Prepare data on vm, create a run directory on scratch drive, and add
# permission.
scratch_dir = vm.GetScratchDir()
vm.RemoteCommand('sudo mkdir -p %s/run/' % scratch_dir)
vm.RemoteCommand('sudo chmod 777 %s/run/' % scratch_dir)
vm.RemoteCommand('sudo mkdir -p %s/run/temp/' % scratch_dir)
vm.RemoteCommand('sudo chmod 777 %s/run/temp/' % scratch_dir)
file_path = data.ResourcePath(DATA_FILE)
vm.PushFile(file_path, '%s/run/' % scratch_dir)
for file_name in API_TEST_SCRIPT_FILES + service.APIScriptFiles():
path = data.ResourcePath(os.path.join(API_TEST_SCRIPTS_DIR, file_name))
logging.info('Uploading %s to %s', path, vm)
vm.PushFile(path, '%s/run/' % scratch_dir)
def CleanupVM(vm):
vm.RemoteCommand('/usr/bin/yes | sudo pip uninstall python-gflags')
vm.RemoteCommand('rm -rf %s/run/' % vm.GetScratchDir())
objects_written_file = posixpath.join(vm_util.VM_TMP_DIR,
OBJECTS_WRITTEN_FILE)
vm.RemoteCommand('rm -f %s' % objects_written_file)
def Prepare(benchmark_spec):
"""Prepare vm with cloud provider tool and prepare vm with data file.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
providers.LoadProvider(FLAGS.storage)
service = object_storage_service.GetObjectStorageClass(FLAGS.storage)()
service.PrepareService(FLAGS.object_storage_region)
vms = benchmark_spec.vms
for vm in vms:
PrepareVM(vm, service)
service.PrepareVM(vm)
# Always clean up server-side state, even if an exception occurs.
benchmark_spec.always_call_cleanup = True
# Make the bucket(s)
bucket_name = 'pkb%s' % FLAGS.run_uri
if FLAGS.storage != 'GCP':
service.MakeBucket(bucket_name)
buckets = [bucket_name]
else:
# TODO(nlavine): make GCP bucket name handling match other
# providers. Leaving it inconsistent for now to match previous
# behavior, but should change it after a reasonable deprecation
# period.
multiregional_service = gcs.GoogleCloudStorageService()
multiregional_service.PrepareService(FLAGS.object_storage_gcs_multiregion
or DEFAULT_GCS_MULTIREGION)
multiregional_service.MakeBucket(bucket_name)
region = FLAGS.object_storage_region or gcs.DEFAULT_GCP_REGION
regional_bucket_name = 'pkb%s-%s' % (FLAGS.run_uri, region)
regional_service = gcs.GoogleCloudStorageService()
regional_service.PrepareService(region)
regional_service.MakeBucket(regional_bucket_name)
buckets = [bucket_name, regional_bucket_name]
# Save the service and the buckets for later
benchmark_spec.service = service
benchmark_spec.buckets = buckets
def Run(benchmark_spec):
"""Run storage benchmark and publish results.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
Total throughput in the form of tuple. The tuple contains
the sample metric (string), value (float), unit (string).
"""
logging.info('Start benchmarking object storage service, '
'scenario is %s, storage provider is %s.',
FLAGS.object_storage_scenario, FLAGS.storage)
service = benchmark_spec.service
buckets = benchmark_spec.buckets
metadata = {'storage provider': FLAGS.storage}
vms = benchmark_spec.vms
if FLAGS[OBJECT_STORAGE_REGION].present:
metadata[REGIONAL_BUCKET_LOCATION] = FLAGS.object_storage_region
else:
metadata[REGIONAL_BUCKET_LOCATION] = DEFAULT
if FLAGS[OBJECT_STORAGE_GCS_MULTIREGION].present:
metadata[GCS_MULTIREGION_LOCATION] = FLAGS.object_storage_gcs_multiregion
else:
metadata[GCS_MULTIREGION_LOCATION] = DEFAULT
metadata.update(service.Metadata(vms[0]))
results = []
test_script_path = '%s/run/%s' % (vms[0].GetScratchDir(), API_TEST_SCRIPT)
try:
command_builder = APIScriptCommandBuilder(
test_script_path, STORAGE_TO_API_SCRIPT_DICT[FLAGS.storage], service)
except KeyError:
command_builder = UnsupportedProviderCommandBuilder(FLAGS.storage)
regional_bucket_name = buckets[1] if len(buckets) == 2 else None
for name, benchmark in [('cli', CLIThroughputBenchmark),
('api_data', OneByteRWBenchmark),
('api_data', SingleStreamThroughputBenchmark),
('api_namespace', ListConsistencyBenchmark)]:
if FLAGS.object_storage_scenario in {name, 'all'}:
benchmark(results, metadata, vms[0], command_builder,
service, buckets[0], regional_bucket_name)
# MultiStreamRW is the only benchmark that supports multiple VMs, so
# it has a slightly different calling convention than the others.
if FLAGS.object_storage_scenario in {'api_multistream', 'all'}:
MultiStreamRWBenchmark(results, metadata, vms, command_builder,
service, buckets[0], regional_bucket_name)
return results
def Cleanup(benchmark_spec):
"""Clean up storage bucket/container and clean up vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
service = benchmark_spec.service
buckets = benchmark_spec.buckets
vms = benchmark_spec.vms
for vm in vms:
service.CleanupVM(vm)
CleanupVM(vm)
for bucket in buckets:
service.DeleteBucket(bucket)
service.CleanupService()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from ..describe import Description, autoDescribeRoute
from ..rest import Resource, RestException, filtermodel, setResponseHeader, setContentDisposition
from girder.utility import ziputil
from girder.constants import AccessType, TokenScope
from girder.api import access
class Item(Resource):
def __init__(self):
super(Item, self).__init__()
self.resourceName = 'item'
self.route('DELETE', (':id',), self.deleteItem)
self.route('GET', (), self.find)
self.route('GET', (':id',), self.getItem)
self.route('GET', (':id', 'files'), self.getFiles)
self.route('GET', (':id', 'download'), self.download)
self.route('GET', (':id', 'rootpath'), self.rootpath)
self.route('POST', (), self.createItem)
self.route('PUT', (':id',), self.updateItem)
self.route('POST', (':id', 'copy'), self.copyItem)
self.route('PUT', (':id', 'metadata'), self.setMetadata)
self.route('DELETE', (':id', 'metadata'), self.deleteMetadata)
@access.public(scope=TokenScope.DATA_READ)
@filtermodel(model='item')
@autoDescribeRoute(
Description('List or search for items.')
.responseClass('Item', array=True)
.param('folderId', 'Pass this to list all items in a folder.',
required=False)
.param('text', 'Pass this to perform a full text search for items.',
required=False)
.param('name', 'Pass to lookup an item by exact name match. Must '
'pass folderId as well when using this.', required=False)
.pagingParams(defaultSort='lowerName')
.errorResponse()
.errorResponse('Read access was denied on the parent folder.', 403)
)
def find(self, folderId, text, name, limit, offset, sort):
"""
Get a list of items with given search parameters. Currently accepted
search modes are:
1. Searching by folderId, with optional additional filtering by the name
field (exact match) or using full text search within a single parent
folder. Pass a "name" parameter or "text" parameter to invoke these
additional filters.
2. Searching with full text search across all items in the system.
Simply pass a "text" parameter for this mode.
"""
user = self.getCurrentUser()
if folderId:
folder = self.model('folder').load(
id=folderId, user=user, level=AccessType.READ, exc=True)
filters = {}
if text:
filters['$text'] = {
'$search': text
}
if name:
filters['name'] = name
return list(self.model('folder').childItems(
folder=folder, limit=limit, offset=offset, sort=sort, filters=filters))
elif text is not None:
return list(self.model('item').textSearch(
text, user=user, limit=limit, offset=offset, sort=sort))
else:
raise RestException('Invalid search mode.')
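    # Illustrative request sketch (hypothetical IDs, assuming Girder's standard
    # /api/v1 prefix): listing items in a folder, optionally filtered by name,
    # corresponds to
    #   GET /api/v1/item?folderId=<folder id>&name=report.txt
    # while a global full-text search is
    #   GET /api/v1/item?text=climate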
@access.public(scope=TokenScope.DATA_READ)
@filtermodel(model='item')
@autoDescribeRoute(
Description('Get an item by ID.')
.responseClass('Item')
.modelParam('id', model='item', level=AccessType.READ)
.errorResponse('ID was invalid.')
.errorResponse('Read access was denied for the item.', 403)
)
def getItem(self, item):
return item
@access.user(scope=TokenScope.DATA_WRITE)
@filtermodel(model='item')
@autoDescribeRoute(
Description('Create a new item.')
.responseClass('Item')
.modelParam('folderId', 'The ID of the parent folder.',
level=AccessType.WRITE, paramType='query')
.param('name', 'Name for the item.', strip=True)
.param('description', 'Description for the item.', required=False,
default='', strip=True)
.param('reuseExisting', 'Return existing item (by name) if it exists.',
required=False, dataType='boolean', default=False)
.jsonParam('metadata', 'A JSON object containing the metadata keys to add',
paramType='form', requireObject=True, required=False)
.errorResponse()
.errorResponse('Write access was denied on the parent folder.', 403)
)
def createItem(self, folder, name, description, reuseExisting, metadata):
newItem = self.model('item').createItem(
folder=folder, name=name, creator=self.getCurrentUser(), description=description,
reuseExisting=reuseExisting)
if metadata:
newItem = self.model('item').setMetadata(newItem, metadata)
return newItem
@access.user(scope=TokenScope.DATA_WRITE)
@filtermodel(model='item')
@autoDescribeRoute(
Description('Edit an item or move it to another folder.')
.responseClass('Item')
.modelParam('id', model='item', level=AccessType.WRITE)
.param('name', 'Name for the item.', required=False, strip=True)
.param('description', 'Description for the item.', required=False)
.modelParam('folderId', 'Pass this to move the item to a new folder.',
required=False, paramType='query', level=AccessType.WRITE)
.jsonParam('metadata', 'A JSON object containing the metadata keys to add',
paramType='form', requireObject=True, required=False)
.errorResponse('ID was invalid.')
.errorResponse('Write access was denied for the item or folder.', 403)
)
def updateItem(self, item, name, description, folder, metadata):
if name is not None:
item['name'] = name
if description is not None:
item['description'] = description
self.model('item').updateItem(item)
if folder and folder['_id'] != item['folderId']:
self.model('item').move(item, folder)
if metadata:
item = self.model('item').setMetadata(item, metadata)
return item
@access.user(scope=TokenScope.DATA_WRITE)
@filtermodel(model='item')
@autoDescribeRoute(
Description('Set metadata fields on an item.')
.responseClass('Item')
.notes('Set metadata fields to null in order to delete them.')
.modelParam('id', model='item', level=AccessType.WRITE)
.jsonParam('metadata', 'A JSON object containing the metadata keys to add',
paramType='body', requireObject=True)
.param('allowNull', 'Whether "null" is allowed as a metadata value.', required=False,
dataType='boolean', default=False)
.errorResponse(('ID was invalid.',
'Invalid JSON passed in request body.',
'Metadata key name was invalid.'))
.errorResponse('Write access was denied for the item.', 403)
)
def setMetadata(self, item, metadata, allowNull):
return self.model('item').setMetadata(item, metadata, allowNull=allowNull)
@access.user(scope=TokenScope.DATA_WRITE)
@filtermodel('item')
@autoDescribeRoute(
Description('Delete metadata fields on an item.')
.responseClass('Item')
.modelParam('id', model='item', level=AccessType.WRITE)
.jsonParam(
'fields', 'A JSON list containing the metadata fields to delete',
paramType='body', schema={
'type': 'array',
'items': {
'type': 'string'
}
}
)
.errorResponse(('ID was invalid.',
'Invalid JSON passed in request body.',
'Metadata key name was invalid.'))
.errorResponse('Write access was denied for the item.', 403)
)
def deleteMetadata(self, item, fields):
return self.model('item').deleteMetadata(item, fields)
def _downloadMultifileItem(self, item, user):
setResponseHeader('Content-Type', 'application/zip')
setContentDisposition(item['name'] + '.zip')
def stream():
zip = ziputil.ZipGenerator(item['name'])
for (path, file) in self.model('item').fileList(item, subpath=False):
for data in zip.addFile(file, path):
yield data
yield zip.footer()
return stream
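    # Note: the handler returns the generator function itself rather than its
    # output, so the zip archive can be streamed to the client chunk-by-chunk
    # as ziputil yields data instead of being assembled in memory first.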
@access.public(scope=TokenScope.DATA_READ)
@filtermodel(model='file')
@autoDescribeRoute(
Description('Get the files within an item.')
.responseClass('File', array=True)
.modelParam('id', model='item', level=AccessType.READ)
.pagingParams(defaultSort='name')
.errorResponse('ID was invalid.')
.errorResponse('Read access was denied for the item.', 403)
)
def getFiles(self, item, limit, offset, sort):
return list(self.model('item').childFiles(
item=item, limit=limit, offset=offset, sort=sort))
@access.cookie
@access.public(scope=TokenScope.DATA_READ)
@autoDescribeRoute(
Description('Download the contents of an item.')
.modelParam('id', model='item', level=AccessType.READ)
.param('offset', 'Byte offset into the file.', dataType='int',
required=False, default=0)
.param('format', 'If unspecified, items with one file are downloaded '
'as that file, and other items are downloaded as a zip '
'archive. If \'zip\', a zip archive is always sent.',
required=False)
.param('contentDisposition', 'Specify the Content-Disposition response '
'header disposition-type value, only applied for single file '
'items.', required=False, enum=['inline', 'attachment'],
default='attachment')
.param('extraParameters', 'Arbitrary data to send along with the '
'download request, only applied for single file '
'items.', required=False)
# Single-file items could produce other content types, too.
.produces(['application/zip', 'application/octet-stream'])
.errorResponse('ID was invalid.')
.errorResponse('Read access was denied for the item.', 403)
)
def download(self, item, offset, format, contentDisposition, extraParameters):
user = self.getCurrentUser()
files = list(self.model('item').childFiles(item=item, limit=2))
if format not in (None, '', 'zip'):
raise RestException('Unsupported format: %s.' % format)
if len(files) == 1 and format != 'zip':
if contentDisposition not in {None, 'inline', 'attachment'}:
raise RestException(
'Unallowed contentDisposition type "%s".' % contentDisposition)
return self.model('file').download(
files[0], offset, contentDisposition=contentDisposition,
extraParameters=extraParameters)
else:
return self._downloadMultifileItem(item, user)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Delete an item by ID.')
.modelParam('id', model='item', level=AccessType.WRITE)
.errorResponse('ID was invalid.')
.errorResponse('Write access was denied for the item.', 403)
)
def deleteItem(self, item):
self.model('item').remove(item)
return {'message': 'Deleted item %s.' % item['name']}
@access.public(scope=TokenScope.DATA_READ)
@autoDescribeRoute(
Description('Get the path to the root of the item\'s hierarchy.')
.modelParam('id', model='item', level=AccessType.READ)
.errorResponse('ID was invalid.')
.errorResponse('Read access was denied for the item.', 403)
)
def rootpath(self, item):
return self.model('item').parentsToRoot(item, self.getCurrentUser())
@access.user(scope=TokenScope.DATA_WRITE)
@filtermodel(model='item')
@autoDescribeRoute(
Description('Copy an item.')
.notes('If no folderId parameter is specified, creates a copy of the item in '
'its current containing folder.')
.responseClass('Item')
.modelParam('id', 'The ID of the original item.', model='item', level=AccessType.READ)
.modelParam('folderId', 'The ID of the parent folder.', required=False,
level=AccessType.WRITE)
.param('name', 'Name for the new item.', required=False, strip=True)
.param('description', 'Description for the new item.', required=False, strip=True)
.errorResponse(('A parameter was invalid.',
'ID was invalid.'))
.errorResponse('Read access was denied on the original item.\n\n'
'Write access was denied on the parent folder.', 403)
)
def copyItem(self, item, folder, name, description):
user = self.getCurrentUser()
if folder is None:
folder = self.model('folder').load(
id=item['folderId'], user=user, level=AccessType.WRITE, exc=True)
return self.model('item').copyItem(
item, creator=user, name=name, folder=folder, description=description)
|
|
# Authors:
# Trevor Perrin
# Dave Baggett (Arcode Corporation) - cleanup handling of constants
# Yngve Pettersen (ported by Paul Sokolovsky) - TLS 1.2
#
# See the LICENSE file for legal information regarding use of this file.
"""Class for setting handshake parameters."""
from .constants import CertificateType
from .utils import cryptomath
from .utils import cipherfactory
CIPHER_NAMES = ["aes128gcm", "rc4", "aes256", "aes128", "3des"]
MAC_NAMES = ["sha", "sha256", "aead"] # Don't allow "md5" by default.
ALL_MAC_NAMES = MAC_NAMES + ["md5"]
KEY_EXCHANGE_NAMES = ["rsa", "dhe_rsa", "srp_sha", "srp_sha_rsa", "dh_anon"]
CIPHER_IMPLEMENTATIONS = ["openssl", "pycrypto", "python"]
CERTIFICATE_TYPES = ["x509"]
TLS_INTOLERANCE_TYPES = ["alert", "close", "reset"]
class HandshakeSettings(object):
"""This class encapsulates various parameters that can be used with
a TLS handshake.
@sort: minKeySize, maxKeySize, cipherNames, macNames, certificateTypes,
minVersion, maxVersion
@type minKeySize: int
@ivar minKeySize: The minimum bit length for asymmetric keys.
If the other party tries to use SRP, RSA, or Diffie-Hellman
parameters smaller than this length, an alert will be
signalled. The default is 1023.
@type maxKeySize: int
@ivar maxKeySize: The maximum bit length for asymmetric keys.
If the other party tries to use SRP, RSA, or Diffie-Hellman
parameters larger than this length, an alert will be signalled.
The default is 8193.
@type cipherNames: list
@ivar cipherNames: The allowed ciphers.
The allowed values in this list are 'aes128gcm', 'aes256', 'aes128',
'3des', and 'rc4'. If these settings are used with a client handshake, they
determine the order of the ciphersuites offered in the ClientHello
message.
If these settings are used with a server handshake, the server will
choose whichever ciphersuite matches the earliest entry in this
list.
NOTE: If '3des' is used in this list, but TLS Lite can't find an
add-on library that supports 3DES, then '3des' will be silently
removed.
The default value is ['aes128gcm', 'rc4', 'aes256', 'aes128', '3des'].
@type macNames: list
@ivar macNames: The allowed MAC algorithms.
The allowed values in this list are 'sha', 'sha256', 'aead', and 'md5'.
The default value is ['sha', 'sha256', 'aead'] ('md5' is excluded by default).
@type certificateTypes: list
@ivar certificateTypes: The allowed certificate types.
The only allowed certificate type is 'x509'. This list is only used with a
client handshake. The client will advertise to the server which certificate
types are supported, and will check that the server uses one of the
appropriate types.
@type minVersion: tuple
@ivar minVersion: The minimum allowed SSL/TLS version.
This variable can be set to (3,0) for SSL 3.0, (3,1) for TLS 1.0, (3,2) for
TLS 1.1, or (3,3) for TLS 1.2. If the other party wishes to use a lower
version, a protocol_version alert will be signalled. The default is (3,1).
@type maxVersion: tuple
@ivar maxVersion: The maximum allowed SSL/TLS version.
This variable can be set to (3,0) for SSL 3.0, (3,1) for TLS 1.0, (3,2) for
TLS 1.1, or (3,3) for TLS 1.2. If the other party wishes to use a higher
version, a protocol_version alert will be signalled. The default is (3,3).
(WARNING: Some servers may (improperly) reject clients which offer support
for TLS 1.1. In this case, try lowering maxVersion to (3,1)).
@type tlsIntolerant: tuple
@ivar tlsIntolerant: The TLS ClientHello version which the server
simulates intolerance of.
If tlsIntolerant is not None, the server will simulate TLS version
intolerance by aborting the handshake in response to all TLS versions
tlsIntolerant or higher.
@type tlsIntoleranceType: str
@ivar tlsIntoleranceType: How the server should react when simulating TLS
intolerance.
The allowed values are "alert" (return a fatal handshake_failure alert),
"close" (abruptly close the connection), and "reset" (send a TCP reset).
@type useExperimentalTackExtension: bool
@ivar useExperimentalTackExtension: Whether to enable TACK support.
Note that TACK support is not standardized by IETF and uses a temporary
TLS Extension number, so should NOT be used in production software.
"""
def __init__(self):
self.minKeySize = 1023
self.maxKeySize = 8193
self.cipherNames = CIPHER_NAMES
self.macNames = MAC_NAMES
self.keyExchangeNames = KEY_EXCHANGE_NAMES
self.cipherImplementations = CIPHER_IMPLEMENTATIONS
self.certificateTypes = CERTIFICATE_TYPES
self.minVersion = (3,1)
self.maxVersion = (3,3)
self.tlsIntolerant = None
self.tlsIntoleranceType = 'alert'
self.useExperimentalTackExtension = False
# Validates the min/max fields, and certificateTypes
# Filters out unsupported cipherNames and cipherImplementations
def _filter(self):
other = HandshakeSettings()
other.minKeySize = self.minKeySize
other.maxKeySize = self.maxKeySize
other.cipherNames = self.cipherNames
other.macNames = self.macNames
other.keyExchangeNames = self.keyExchangeNames
other.cipherImplementations = self.cipherImplementations
other.certificateTypes = self.certificateTypes
other.minVersion = self.minVersion
other.maxVersion = self.maxVersion
other.tlsIntolerant = self.tlsIntolerant
other.tlsIntoleranceType = self.tlsIntoleranceType
if not cipherfactory.tripleDESPresent:
other.cipherNames = [e for e in self.cipherNames if e != "3des"]
if len(other.cipherNames)==0:
raise ValueError("No supported ciphers")
if len(other.certificateTypes)==0:
raise ValueError("No supported certificate types")
if not cryptomath.m2cryptoLoaded:
other.cipherImplementations = \
[e for e in other.cipherImplementations if e != "openssl"]
if not cryptomath.pycryptoLoaded:
other.cipherImplementations = \
[e for e in other.cipherImplementations if e != "pycrypto"]
if len(other.cipherImplementations)==0:
raise ValueError("No supported cipher implementations")
if other.minKeySize<512:
raise ValueError("minKeySize too small")
if other.minKeySize>16384:
raise ValueError("minKeySize too large")
if other.maxKeySize<512:
raise ValueError("maxKeySize too small")
if other.maxKeySize>16384:
raise ValueError("maxKeySize too large")
for s in other.cipherNames:
if s not in CIPHER_NAMES:
raise ValueError("Unknown cipher name: '%s'" % s)
for s in other.macNames:
if s not in ALL_MAC_NAMES:
raise ValueError("Unknown MAC name: '%s'" % s)
for s in other.keyExchangeNames:
if s not in KEY_EXCHANGE_NAMES:
raise ValueError("Unknown key exchange name: '%s'" % s)
for s in other.cipherImplementations:
if s not in CIPHER_IMPLEMENTATIONS:
raise ValueError("Unknown cipher implementation: '%s'" % s)
for s in other.certificateTypes:
if s not in CERTIFICATE_TYPES:
raise ValueError("Unknown certificate type: '%s'" % s)
if other.tlsIntoleranceType not in TLS_INTOLERANCE_TYPES:
raise ValueError(
"Unknown TLS intolerance type: '%s'" % other.tlsIntoleranceType)
if other.minVersion > other.maxVersion:
raise ValueError("Versions set incorrectly")
if not other.minVersion in ((3,0), (3,1), (3,2), (3,3)):
raise ValueError("minVersion set incorrectly")
if not other.maxVersion in ((3,0), (3,1), (3,2), (3,3)):
raise ValueError("maxVersion set incorrectly")
return other
def _getCertificateTypes(self):
l = []
for ct in self.certificateTypes:
if ct == "x509":
l.append(CertificateType.x509)
else:
raise AssertionError()
return l
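# Illustrative usage sketch (assumed values, not part of tlslite): restrict a
# handshake to TLS 1.2 with AES-only ciphers. _filter() is what the library
# applies internally to validate and prune the settings before use.
def _exampleSettings():
    settings = HandshakeSettings()
    settings.minVersion = (3, 3)
    settings.maxVersion = (3, 3)
    settings.cipherNames = ["aes128gcm", "aes256", "aes128"]
    return settings._filter()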
|
|
#!/usr/bin/env python
"""
Copyright (c) 2021 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import itertools
import logging
import os
import cocotb_test.simulator
import pytest
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge
from cocotb.regression import TestFactory
from cocotbext.axi import AxiStreamBus, AxiStreamFrame, AxiStreamSource, AxiStreamSink
from cocotbext.axi.stream import define_stream
StatusBus, StatusTransaction, StatusSource, StatusSink, StatusMonitor = define_stream("Status",
signals=["frame_pad", "frame_truncate", "frame_length", "frame_original_length", "valid"],
optional_signals=["ready"]
)
class TB(object):
def __init__(self, dut):
self.dut = dut
self.log = logging.getLogger("cocotb.tb")
self.log.setLevel(logging.DEBUG)
cocotb.fork(Clock(dut.clk, 10, units="ns").start())
self.source = AxiStreamSource(AxiStreamBus.from_prefix(dut, "s_axis"), dut.clk, dut.rst)
self.sink = AxiStreamSink(AxiStreamBus.from_prefix(dut, "m_axis"), dut.clk, dut.rst)
# Status
self.status_sink = StatusSink(StatusBus.from_prefix(dut, "status"), dut.clk, dut.rst)
self.dut.length_min.setimmediatevalue(0)
self.dut.length_max.setimmediatevalue(2048)
def set_idle_generator(self, generator=None):
if generator:
self.source.set_pause_generator(generator())
def set_backpressure_generator(self, generator=None):
if generator:
self.sink.set_pause_generator(generator())
async def reset(self):
self.dut.rst.setimmediatevalue(0)
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
self.dut.rst <= 1
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
self.dut.rst <= 0
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
async def run_test(dut, payload_lengths=None, payload_data=None, idle_inserter=None, backpressure_inserter=None):
tb = TB(dut)
data_width = len(tb.source.bus.tkeep)
byte_width = data_width // 8
id_count = 2**len(tb.source.bus.tid)
cur_id = 1
await tb.reset()
tb.set_idle_generator(idle_inserter)
tb.set_backpressure_generator(backpressure_inserter)
for length_max in range(1, byte_width*2+2):
for length_min in range(0, length_max+1):
tb.log.info("length_min %d, length_max %d", length_min, length_max)
await RisingEdge(dut.clk)
tb.dut.length_min <= length_min
tb.dut.length_max <= length_max
await RisingEdge(dut.clk)
test_frames = []
for test_data in [payload_data(x) for x in payload_lengths()]:
test_frame = AxiStreamFrame(test_data, tid=cur_id, tdest=cur_id)
test_frames.append(test_frame)
await tb.source.send(test_frame)
cur_id = (cur_id + 1) % id_count
for test_frame in test_frames:
rx_frame = await tb.sink.recv()
len_rx = len(rx_frame.tdata)
len_test = len(test_frame.tdata)
len_min = min(len_rx, len_test)
assert len_rx >= length_min
assert len_rx <= length_max
assert rx_frame.tdata[:len_min] == test_frame.tdata[:len_min]
assert rx_frame.tid == test_frame.tid
assert rx_frame.tdest == test_frame.tdest
assert not rx_frame.tuser
status = await tb.status_sink.recv()
tb.log.info("Status: %s", status)
assert status.frame_pad == int(len_test < length_min)
assert status.frame_truncate == int(len_test > length_max)
assert status.frame_length == len_rx
assert status.frame_original_length == len_test
assert tb.sink.empty()
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
async def run_test_tuser_assert(dut):
tb = TB(dut)
await tb.reset()
test_data = bytearray(itertools.islice(itertools.cycle(range(256)), 32))
test_frame = AxiStreamFrame(test_data, tuser=1)
await tb.source.send(test_frame)
rx_frame = await tb.sink.recv()
assert rx_frame.tdata == test_data
assert rx_frame.tuser
assert tb.sink.empty()
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
def cycle_pause():
return itertools.cycle([1, 1, 1, 0])
def size_list():
data_width = len(cocotb.top.m_axis_tdata)
byte_width = data_width // 8
return list(range(1, byte_width*4+1))+[512]+[1]*64
def incrementing_payload(length):
return bytearray(itertools.islice(itertools.cycle(range(256)), length))
if cocotb.SIM_NAME:
factory = TestFactory(run_test)
factory.add_option("payload_lengths", [size_list])
factory.add_option("payload_data", [incrementing_payload])
factory.add_option("idle_inserter", [None, cycle_pause])
factory.add_option("backpressure_inserter", [None, cycle_pause])
factory.generate_tests()
for test in [run_test_tuser_assert]:
factory = TestFactory(test)
factory.generate_tests()
# cocotb-test
tests_dir = os.path.dirname(__file__)
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
@pytest.mark.parametrize("data_width", [8, 16, 32])
def test_axis_frame_length_adjust(request, data_width):
dut = "axis_frame_length_adjust"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = dut
verilog_sources = [
os.path.join(rtl_dir, f"{dut}.v"),
]
parameters = {}
parameters['DATA_WIDTH'] = data_width
parameters['KEEP_ENABLE'] = int(parameters['DATA_WIDTH'] > 8)
parameters['KEEP_WIDTH'] = parameters['DATA_WIDTH'] // 8
parameters['ID_ENABLE'] = 1
parameters['ID_WIDTH'] = 8
parameters['DEST_ENABLE'] = 1
parameters['DEST_WIDTH'] = 8
parameters['USER_ENABLE'] = 1
parameters['USER_WIDTH'] = 1
parameters['LEN_WIDTH'] = 16
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
sim_build = os.path.join(tests_dir, "sim_build",
request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.SqlDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests import sql_dataset_test_base
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class SqlDatasetTest(sql_dataset_test_base.SqlDatasetTestBase):
# Test that SqlDataset can read from a database table.
def testReadResultSet(self):
for _ in range(2): # Run twice to verify statelessness of db operations.
dataset = self._createSqlDataset(
query="SELECT first_name, last_name, motto FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string),
num_repeats=2)
self.assertDatasetProduces(
dataset,
expected_output=[(b"John", b"Doe", b"Hi!"),
(b"Jane", b"Moe", b"Hi again!")] * 2,
num_test_iterations=2)
# Test that SqlDataset works on a join query.
def testReadResultSetJoinQuery(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT students.first_name, state, motto FROM students "
"INNER JOIN people "
"ON students.first_name = people.first_name "
"AND students.last_name = people.last_name",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
self.assertEqual((b"John", b"California", b"Hi!"),
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that SqlDataset can read a database entry with a null-terminator
# in the middle of the text and place the entry in a `string` tensor.
def testReadResultSetNullTerminator(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, favorite_nonsense_word "
"FROM students ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
self.assertEqual((b"John", b"Doe", b"n\0nsense"), self.evaluate(get_next()))
self.assertEqual((b"Jane", b"Moe", b"nonsense\0"),
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that SqlDataset works when used on two different queries.
# Because the output types of the dataset must be determined at graph-creation
# time, the two queries must have the same number and types of columns.
def testReadResultSetReuseSqlDataset(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, motto FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
self.assertEqual((b"John", b"Doe", b"Hi!"), self.evaluate(get_next()))
self.assertEqual((b"Jane", b"Moe", b"Hi again!"), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, state FROM people "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
self.assertEqual((b"John", b"Doe", b"California"),
self.evaluate(get_next()))
self.assertEqual((b"Benjamin", b"Franklin", b"Pennsylvania"),
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that an `OutOfRangeError` is raised on the first call to
# `get_next()` if the result set is empty.
def testReadEmptyResultSet(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, motto FROM students "
"WHERE first_name = 'Nonexistent'",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that an error is raised when `driver_name` is invalid.
def testReadResultSetWithInvalidDriverName(self):
with self.assertRaises(errors.InvalidArgumentError):
dataset = self._createSqlDataset(
driver_name="sqlfake",
query="SELECT first_name, last_name, motto FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string))
self.assertDatasetProduces(dataset, expected_output=[])
# Test that an error is raised when a column name in `query` is nonexistent
def testReadResultSetWithInvalidColumnName(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, fake_column FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
with self.assertRaises(errors.UnknownError):
self.evaluate(get_next())
# Test that an error is raised when there is a syntax error in `query`.
def testReadResultSetOfQueryWithSyntaxError(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELEmispellECT first_name, last_name, motto FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
with self.assertRaises(errors.UnknownError):
self.evaluate(get_next())
# Test that an error is raised when the number of columns in `query`
# does not match the length of `output_types`.
def testReadResultSetWithMismatchBetweenColumnsAndOutputTypes(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
# Test that no results are returned when `query` is an insert query rather
# than a select query. In particular, the error refers to the number of
# output types passed to the op not matching the number of columns in the
# result set of the query (namely, 0 for an insert statement).
def testReadResultSetOfInsertQuery(self):
get_next = self.getNext(
self._createSqlDataset(
query="INSERT INTO students (first_name, last_name, motto) "
"VALUES ('Foo', 'Bar', 'Baz'), ('Fizz', 'Buzz', 'Fizzbuzz')",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer from a SQLite database table and
# place it in an `int8` tensor.
def testReadResultSetInt8(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int8)))
self.assertEqual((b"John", 9), self.evaluate(get_next()))
self.assertEqual((b"Jane", 127), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a negative or 0-valued integer from a
# SQLite database table and place it in an `int8` tensor.
def testReadResultSetInt8NegativeAndZero(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, income, favorite_negative_number "
"FROM students "
"WHERE first_name = 'John' ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int8, dtypes.int8)))
self.assertEqual((b"John", 0, -2), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a large (positive or negative) integer from
# a SQLite database table and place it in an `int8` tensor.
def testReadResultSetInt8MaxValues(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT desk_number, favorite_negative_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.int8, dtypes.int8)))
self.assertEqual((9, -2), self.evaluate(get_next()))
# Max and min values of int8
self.assertEqual((127, -128), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer from a SQLite database table and
# place it in an `int16` tensor.
def testReadResultSetInt16(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int16)))
self.assertEqual((b"John", 9), self.evaluate(get_next()))
self.assertEqual((b"Jane", 127), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a negative or 0-valued integer from a
# SQLite database table and place it in an `int16` tensor.
def testReadResultSetInt16NegativeAndZero(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, income, favorite_negative_number "
"FROM students "
"WHERE first_name = 'John' ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int16, dtypes.int16)))
self.assertEqual((b"John", 0, -2), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a large (positive or negative) integer from
# a SQLite database table and place it in an `int16` tensor.
def testReadResultSetInt16MaxValues(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, favorite_medium_sized_number "
"FROM students ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int16)))
# Max value of int16
self.assertEqual((b"John", 32767), self.evaluate(get_next()))
# Min value of int16
self.assertEqual((b"Jane", -32768), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer from a SQLite database table and
# place it in an `int32` tensor.
def testReadResultSetInt32(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int32)))
    self.assertEqual((b"John", 9), self.evaluate(get_next()))
    self.assertEqual((b"Jane", 127), self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
# Test that `SqlDataset` can read a negative or 0-valued integer from a
# SQLite database table and place it in an `int32` tensor.
def testReadResultSetInt32NegativeAndZero(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, income FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int32)))
self.assertEqual((b"John", 0), self.evaluate(get_next()))
self.assertEqual((b"Jane", -20000), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a large (positive or negative) integer from
# a SQLite database table and place it in an `int32` tensor.
def testReadResultSetInt32MaxValues(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, favorite_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int32)))
# Max value of int32
self.assertEqual((b"John", 2147483647), self.evaluate(get_next()))
# Min value of int32
self.assertEqual((b"Jane", -2147483648), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a numeric `varchar` from a SQLite database
# table and place it in an `int32` tensor.
def testReadResultSetInt32VarCharColumnAsInt(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, school_id FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int32)))
self.assertEqual((b"John", 123), self.evaluate(get_next()))
self.assertEqual((b"Jane", 1000), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer from a SQLite database table
# and place it in an `int64` tensor.
def testReadResultSetInt64(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int64)))
self.assertEqual((b"John", 9), self.evaluate(get_next()))
self.assertEqual((b"Jane", 127), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a negative or 0-valued integer from a
# SQLite database table and place it in an `int64` tensor.
def testReadResultSetInt64NegativeAndZero(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, income FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int64)))
self.assertEqual((b"John", 0), self.evaluate(get_next()))
self.assertEqual((b"Jane", -20000), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a large (positive or negative) integer from
# a SQLite database table and place it in an `int64` tensor.
def testReadResultSetInt64MaxValues(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, favorite_big_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int64)))
# Max value of int64
self.assertEqual((b"John", 9223372036854775807), self.evaluate(get_next()))
# Min value of int64
self.assertEqual((b"Jane", -9223372036854775808), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer from a SQLite database table and
# place it in a `uint8` tensor.
def testReadResultSetUInt8(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.uint8)))
self.assertEqual((b"John", 9), self.evaluate(get_next()))
self.assertEqual((b"Jane", 127), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read the minimum and maximum uint8 values from a
# SQLite database table and place them in `uint8` tensors.
def testReadResultSetUInt8MinAndMaxValues(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, brownie_points FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.uint8)))
# Min value of uint8
self.assertEqual((b"John", 0), self.evaluate(get_next()))
# Max value of uint8
self.assertEqual((b"Jane", 255), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer from a SQLite database table
# and place it in a `uint16` tensor.
def testReadResultSetUInt16(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.uint16)))
self.assertEqual((b"John", 9), self.evaluate(get_next()))
self.assertEqual((b"Jane", 127), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read the minimum and maximum uint16 values from a
# SQLite database table and place them in `uint16` tensors.
def testReadResultSetUInt16MinAndMaxValues(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, account_balance FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.uint16)))
# Min value of uint16
self.assertEqual((b"John", 0), self.evaluate(get_next()))
# Max value of uint16
self.assertEqual((b"Jane", 65535), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a 0-valued and 1-valued integer from a
  # SQLite database table and place them as `False` and `True` respectively
# in `bool` tensors.
def testReadResultSetBool(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, registration_complete FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.bool)))
self.assertEqual((b"John", True), self.evaluate(get_next()))
self.assertEqual((b"Jane", False), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer that is not 0-valued or 1-valued
# from a SQLite database table and place it as `True` in a `bool` tensor.
def testReadResultSetBoolNotZeroOrOne(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, favorite_medium_sized_number "
"FROM students ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.bool)))
self.assertEqual((b"John", True), self.evaluate(get_next()))
self.assertEqual((b"Jane", True), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a float from a SQLite database table
# and place it in a `float64` tensor.
def testReadResultSetFloat64(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, victories FROM townspeople "
"ORDER BY first_name",
output_types=(dtypes.string, dtypes.string, dtypes.float64)))
self.assertEqual((b"George", b"Washington", 20.0),
self.evaluate(get_next()))
self.assertEqual((b"John", b"Adams", -19.95), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a float from a SQLite database table beyond
# the precision of 64-bit IEEE, without throwing an error. Test that
# `SqlDataset` identifies such a value as equal to itself.
def testReadResultSetFloat64OverlyPrecise(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, accolades FROM townspeople "
"ORDER BY first_name",
output_types=(dtypes.string, dtypes.string, dtypes.float64)))
self.assertEqual(
(b"George", b"Washington",
1331241.321342132321324589798264627463827647382647382643874),
self.evaluate(get_next()))
self.assertEqual(
(b"John", b"Adams",
1331241321342132321324589798264627463827647382647382643874.0),
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a float from a SQLite database table,
# representing the largest integer representable as a 64-bit IEEE float
# such that the previous integer is also representable as a 64-bit IEEE float.
# Test that `SqlDataset` can distinguish these two numbers.
def testReadResultSetFloat64LargestConsecutiveWholeNumbersNotEqual(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, triumphs FROM townspeople "
"ORDER BY first_name",
output_types=(dtypes.string, dtypes.string, dtypes.float64)))
self.assertNotEqual((b"George", b"Washington", 9007199254740992.0),
self.evaluate(get_next()))
self.assertNotEqual((b"John", b"Adams", 9007199254740991.0),
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
  # Test that `SqlDataset` can stop correctly when combined with `batch`.
def testReadResultSetWithBatchStop(self):
dataset = self._createSqlDataset(
query="SELECT * FROM data", output_types=(dtypes.int32))
dataset = dataset.map(lambda x: array_ops.identity(x))
get_next = self.getNext(dataset.batch(2))
self.assertAllEqual(self.evaluate(get_next()), [0, 1])
self.assertAllEqual(self.evaluate(get_next()), [2])
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
if __name__ == "__main__":
test.main()
|
|
import unittest
from mock import patch
from quickbooks.exceptions import QuickbooksException, SevereException
from quickbooks import client
from quickbooks.objects.salesreceipt import SalesReceipt
class ClientTest(unittest.TestCase):
def setUp(self):
"""
Use a consistent set of defaults.
"""
client.QuickBooks(
sandbox=True,
consumer_key="update_consumer_key",
consumer_secret="update_consumer_secret",
access_token="update_access_token",
access_token_secret="update_access_token_secret",
company_id="update_company_id",
callback_url="update_callback_url"
)
def tearDown(self):
client.QuickBooks.enable_global()
self.qb_client = client.QuickBooks()
self.qb_client._drop()
def test_client_new(self):
self.qb_client = client.QuickBooks(
sandbox=False,
consumer_key="consumer_key",
consumer_secret="consumer_secret",
access_token="access_token",
access_token_secret="access_token_secret",
company_id="company_id",
callback_url="callback_url",
verbose=True,
minorversion=4
)
self.assertEquals(self.qb_client.sandbox, False)
self.assertEquals(self.qb_client.consumer_key, "consumer_key")
self.assertEquals(self.qb_client.consumer_secret, "consumer_secret")
self.assertEquals(self.qb_client.access_token, "access_token")
self.assertEquals(self.qb_client.access_token_secret, "access_token_secret")
self.assertEquals(self.qb_client.company_id, "company_id")
self.assertEquals(self.qb_client.callback_url, "callback_url")
self.assertEquals(self.qb_client.minorversion, 4)
def test_client_updated(self):
self.qb_client = client.QuickBooks(
sandbox=False,
consumer_key="consumer_key",
consumer_secret="consumer_secret",
access_token="access_token",
access_token_secret="access_token_secret",
company_id="company_id",
callback_url="callback_url",
)
self.qb_client2 = client.QuickBooks(
sandbox=True,
consumer_key="update_consumer_key",
consumer_secret="update_consumer_secret",
access_token="update_access_token",
access_token_secret="update_access_token_secret",
company_id="update_company_id",
callback_url="update_callback_url",
)
self.assertEquals(self.qb_client.sandbox, True)
self.assertEquals(self.qb_client.consumer_key, "update_consumer_key")
self.assertEquals(self.qb_client.consumer_secret, "update_consumer_secret")
self.assertEquals(self.qb_client.access_token, "update_access_token")
self.assertEquals(self.qb_client.access_token_secret, "update_access_token_secret")
self.assertEquals(self.qb_client.company_id, "update_company_id")
self.assertEquals(self.qb_client.callback_url, "update_callback_url")
self.assertEquals(self.qb_client2.sandbox, True)
self.assertEquals(self.qb_client2.consumer_key, "update_consumer_key")
self.assertEquals(self.qb_client2.consumer_secret, "update_consumer_secret")
self.assertEquals(self.qb_client2.access_token, "update_access_token")
self.assertEquals(self.qb_client2.access_token_secret, "update_access_token_secret")
self.assertEquals(self.qb_client2.company_id, "update_company_id")
self.assertEquals(self.qb_client2.callback_url, "update_callback_url")
def test_disable_global(self):
client.QuickBooks.disable_global()
self.qb_client = client.QuickBooks()
self.assertFalse(self.qb_client.sandbox)
self.assertFalse(self.qb_client.consumer_key)
self.assertFalse(self.qb_client.consumer_secret)
self.assertFalse(self.qb_client.access_token)
self.assertFalse(self.qb_client.access_token_secret)
self.assertFalse(self.qb_client.company_id)
self.assertFalse(self.qb_client.callback_url)
self.assertFalse(self.qb_client.minorversion)
def test_api_url(self):
qb_client = client.QuickBooks(sandbox=False)
api_url = qb_client.api_url
self.assertFalse("sandbox" in api_url)
def test_api_url_sandbox(self):
qb_client = client.QuickBooks(sandbox=True)
api_url = qb_client.api_url
self.assertTrue("sandbox" in api_url)
def test_isvalid_object_name_valid(self):
qb_client = client.QuickBooks()
result = qb_client.isvalid_object_name("Customer")
self.assertEquals(result, True)
def test_isvalid_object_name_invalid(self):
qb_client = client.QuickBooks()
self.assertRaises(Exception, qb_client.isvalid_object_name, "invalid")
@patch('quickbooks.client.QuickBooks.make_request')
def test_batch_operation(self, make_req):
qb_client = client.QuickBooks()
qb_client.batch_operation("request_body")
self.assertTrue(make_req.called)
@patch('quickbooks.client.QuickBooks.make_request')
def test_create_object(self, make_req):
qb_client = client.QuickBooks()
qb_client.create_object("Customer", "request_body")
self.assertTrue(make_req.called)
@patch('quickbooks.client.QuickBooks.make_request')
def test_query(self, make_req):
qb_client = client.QuickBooks()
qb_client.query("select")
self.assertTrue(make_req.called)
@patch('quickbooks.client.QuickBooks.make_request')
def test_update_object(self, make_req):
qb_client = client.QuickBooks()
qb_client.update_object("Customer", "request_body")
self.assertTrue(make_req.called)
def test_get_authorize_url(self):
qb_client = client.QuickBooks()
qb_client.set_up_service()
with patch.object(qb_client.qbService, "get_raw_request_token",
return_value=MockResponse()):
results = qb_client.get_authorize_url()
self.assertTrue('https://appcenter.intuit.com/Connect/Begin' in results)
self.assertTrue('oauth_token' in results)
self.assertEqual(qb_client.request_token, 'tokenvalue')
        self.assertEqual(qb_client.request_token_secret, 'secretvalue')
@patch('quickbooks.client.QuickBooks.make_request')
def test_get_current_user(self, make_req):
qb_client = client.QuickBooks()
qb_client.company_id = "1234"
qb_client.get_current_user()
url = "https://appcenter.intuit.com/api/v1/user/current"
make_req.assert_called_with("GET", url)
@patch('quickbooks.client.QuickBooks.qbService')
def test_get_access_tokens(self, qbService):
qb_client = client.QuickBooks()
qb_client.request_token = "token"
qb_client.request_token_secret = "secret"
session = qb_client.get_access_tokens("oauth_verifier")
qbService.get_auth_session.assert_called_with('token', 'secret', data={'oauth_verifier': 'oauth_verifier'})
self.assertFalse(session is None)
@patch('quickbooks.client.QuickBooks.make_request')
def test_disconnect_account(self, make_req):
qb_client = client.QuickBooks()
qb_client.company_id = "1234"
qb_client.disconnect_account()
url = "https://appcenter.intuit.com/api/v1/connection/disconnect"
make_req.assert_called_with("GET", url)
@patch('quickbooks.client.QuickBooks.make_request')
def test_reconnect_account(self, make_req):
qb_client = client.QuickBooks()
qb_client.company_id = "1234"
qb_client.reconnect_account()
url = "https://appcenter.intuit.com/api/v1/connection/reconnect"
make_req.assert_called_with("GET", url)
@patch('quickbooks.client.QuickBooks.make_request')
def test_get_report(self, make_req):
qb_client = client.QuickBooks()
qb_client.company_id = "1234"
qb_client.get_report("profitandloss", {1: 2})
url = "https://sandbox-quickbooks.api.intuit.com/v3/company/1234/reports/profitandloss"
make_req.assert_called_with("GET", url, params={1: 2})
def test_get_instance(self):
qb_client = client.QuickBooks()
instance = qb_client.get_instance()
self.assertEquals(qb_client, instance)
@patch('quickbooks.client.OAuth1Session')
def test_create_session(self, auth_Session):
qb_client = client.QuickBooks()
session = qb_client.create_session()
self.assertTrue(auth_Session.called)
self.assertFalse(session is None)
def test_create_session_missing_auth_info_exception(self):
qb_client = client.QuickBooks()
qb_client.consumer_secret = None
self.assertRaises(QuickbooksException, qb_client.create_session)
@patch('quickbooks.client.QuickBooks.make_request')
def test_get_single_object(self, make_req):
qb_client = client.QuickBooks()
qb_client.company_id = "1234"
qb_client.get_single_object("test", 1)
url = "https://sandbox-quickbooks.api.intuit.com/v3/company/1234/test/1/"
make_req.assert_called_with("GET", url, {})
@patch('quickbooks.client.QuickBooks.session')
def test_make_request(self, qb_session):
qb_session.request.return_value = MockResponse()
qb_client = client.QuickBooks()
qb_client.company_id = "1234"
url = "https://sandbox-quickbooks.api.intuit.com/v3/company/1234/test/1/"
qb_client.make_request("GET", url, request_body=None, content_type='application/json')
qb_session.request.assert_called_with(
"GET", url, True, "1234", data={},
headers={'Content-Type': 'application/json', 'Accept': 'application/json'}, params={})
def test_make_request_create_session(self):
receipt = SalesReceipt()
receipt.Id = 1
self.assertRaises(QuickbooksException, receipt.save)
def test_handle_exceptions(self):
qb_client = client.QuickBooks()
error_data = {
"Error": [{
"Message": "message",
"Detail": "detail",
"code": "2030",
"element": "Id"}],
"type": "ValidationFault"
}
self.assertRaises(QuickbooksException, qb_client.handle_exceptions, error_data)
def test_handle_exceptions_severe(self):
qb_client = client.QuickBooks()
error_data = {
"Error": [{
"Message": "message",
"Detail": "detail",
"code": "10001",
"element": "Id"}],
"type": "ValidationFault"
}
self.assertRaises(SevereException, qb_client.handle_exceptions, error_data)
@patch('quickbooks.client.QuickBooks.session')
def test_download_pdf(self, qb_session):
qb_client = client.QuickBooks(sandbox=True)
qb_client.company_id = "1234"
receipt = SalesReceipt()
receipt.Id = 1
receipt.download_pdf(qb_client)
url = "https://sandbox-quickbooks.api.intuit.com/v3/company/1234/salesreceipt/1/pdf"
qb_session.request.assert_called_with(
"GET", url, True, "1234",
headers={'Content-Type': 'application/pdf', 'Accept': 'application/pdf, application/json'})
qb_session.request.return_value = MockPdfResponse()
response = receipt.download_pdf(qb_client)
self.assertEqual(response, 'sample pdf content')
def test_download_nonexistent_pdf(self):
receipt = SalesReceipt()
receipt.Id = 666
self.assertRaises(QuickbooksException, receipt.download_pdf)
class MockResponse(object):
@property
def text(self):
return "oauth_token_secret=secretvalue&oauth_callback_confirmed=true&oauth_token=tokenvalue"
@property
def status_code(self):
try:
import httplib # python 2
except ImportError:
import http.client as httplib # python 3
return httplib.OK
def json(self):
return "{}"
class MockPdfResponse(object):
@property
def status_code(self):
try:
import httplib # python 2
except ImportError:
import http.client as httplib # python 3
return httplib.OK
@property
def content(self):
return "sample pdf content"
|
|
# coding=utf-8
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to compute receptive field of a fully-convolutional network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import numpy as np
from receptive_field.python.util import graph_compute_order
from receptive_field.python.util import parse_layer_parameters
import tensorflow as tf
def _get_rf_size_node_input(stride, kernel_size, rf_size_output):
"""Computes RF size at the input of a given layer.
Args:
stride: Stride of given layer (integer).
kernel_size: Kernel size of given layer (integer).
rf_size_output: RF size at output of given layer (integer).
Returns:
rf_size_input: RF size at input of given layer (integer).
"""
return stride * rf_size_output + kernel_size - stride
def _get_effective_stride_node_input(stride, effective_stride_output):
"""Computes effective stride at the input of a given layer.
Args:
stride: Stride of given layer (integer).
effective_stride_output: Effective stride at output of given layer
(integer).
Returns:
effective_stride_input: Effective stride at input of given layer
(integer).
"""
return stride * effective_stride_output
def _get_effective_padding_node_input(stride, padding,
effective_padding_output):
"""Computes effective padding at the input of a given layer.
Args:
stride: Stride of given layer (integer).
padding: Padding of given layer (integer).
effective_padding_output: Effective padding at output of given layer
(integer).
Returns:
effective_padding_input: Effective padding at input of given layer
(integer).
"""
return stride * effective_padding_output + padding
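# A small worked example of the three recurrences above (illustrative only;
# the numbers are arbitrary, not taken from any particular network): for a
# layer with kernel_size=3, stride=2, padding=1 whose *output* already has
# rf_size=5, effective_stride=4 and effective_padding=2, the values at the
# layer's *input* become
#   rf_size           = 2 * 5 + 3 - 2 = 11
#   effective_stride  = 2 * 4         = 8
#   effective_padding = 2 * 2 + 1     = 5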
class ReceptiveField(object):
"""Receptive field of a convolutional neural network.
Args:
size: Receptive field size.
stride: Effective stride.
padding: Effective padding.
"""
def __init__(self, size, stride, padding):
self.size = np.asarray(size)
self.stride = np.asarray(stride)
self.padding = np.asarray(padding)
def compute_input_center_coordinates(self, y, axis=None):
"""Computes the center of the receptive field that generated a feature.
Args:
y: An array of feature coordinates with shape `(..., d)`, where `d` is the
number of dimensions of the coordinates.
axis: The dimensions for which to compute the input center coordinates. If
`None` (the default), compute the input center coordinates for all
dimensions.
Returns:
x: Center of the receptive field that generated the features, at the input
of the network.
Raises:
ValueError: If the number of dimensions of the feature coordinates does
not match the number of elements in `axis`.
"""
# Use all dimensions.
if axis is None:
axis = range(self.size.size)
# Ensure axis is a list because tuples have different indexing behavior.
axis = list(axis)
y = np.asarray(y)
if y.shape[-1] != len(axis):
raise ValueError("Dimensionality of the feature coordinates `y` (%d) "
"does not match dimensionality of `axis` (%d)" %
(y.shape[-1], len(axis)))
return -self.padding[axis] + y * self.stride[axis] + (self.size[axis] -
1) / 2
def compute_feature_coordinates(self, x, axis=None):
"""Computes the position of a feature given the center of a receptive field.
Args:
x: An array of input center coordinates with shape `(..., d)`, where `d`
is the number of dimensions of the coordinates.
axis: The dimensions for which to compute the feature coordinates. If
`None` (the default), compute the feature coordinates for all
dimensions.
Returns:
y: Coordinates of the features.
Raises:
ValueError: If the number of dimensions of the input center coordinates
does not match the number of elements in `axis`.
"""
# Use all dimensions.
if axis is None:
axis = range(self.size.size)
# Ensure axis is a list because tuples have different indexing behavior.
axis = list(axis)
x = np.asarray(x)
if x.shape[-1] != len(axis):
raise ValueError("Dimensionality of the input center coordinates `x` "
"(%d) does not match dimensionality of `axis` (%d)" %
(x.shape[-1], len(axis)))
return (x + self.padding[axis] +
(1 - self.size[axis]) / 2) / self.stride[axis]
def __iter__(self):
return iter(np.concatenate([self.size, self.stride, self.padding]))
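# Illustrative example of the coordinate mapping implemented by the two
# methods above (the values are arbitrary, chosen only for the arithmetic):
# for a ReceptiveField with size=[3, 3], stride=[2, 2] and padding=[1, 1],
# compute_input_center_coordinates maps feature (1, 1) to the input center
# -1 + 1 * 2 + (3 - 1) / 2 = (2, 2), and compute_feature_coordinates maps
# (2, 2) back to (1, 1); the two methods are inverses of each other.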
def compute_receptive_field_from_graph_def(graph_def,
input_node,
output_node,
stop_propagation=None,
input_resolution=None):
"""Computes receptive field (RF) parameters from a Graph or GraphDef object.
The algorithm stops the calculation of the receptive field whenever it
encounters an operation in the list `stop_propagation`. Stopping the
calculation early can be useful to calculate the receptive field of a
subgraph such as a single branch of the
[inception network](https://arxiv.org/abs/1512.00567).
Args:
graph_def: Graph or GraphDef object.
input_node: Name of the input node or Tensor object from graph.
output_node: Name of the output node or Tensor object from graph.
stop_propagation: List of operations or scope names for which to stop the
propagation of the receptive field.
input_resolution: 2D list. If the input resolution to the model is fixed and
known, this may be set. This is helpful for cases where the RF parameters
vary depending on the input resolution (this happens since SAME padding in
tensorflow depends on input resolution in general). If this is None, it is
assumed that the input resolution is unknown, so some RF parameters may be
unknown (depending on the model architecture).
Returns:
rf_size_x: Receptive field size of network in the horizontal direction, with
respect to specified input and output.
rf_size_y: Receptive field size of network in the vertical direction, with
respect to specified input and output.
effective_stride_x: Effective stride of network in the horizontal direction,
with respect to specified input and output.
effective_stride_y: Effective stride of network in the vertical direction,
with respect to specified input and output.
effective_padding_x: Effective padding of network in the horizontal
direction, with respect to specified input and output.
effective_padding_y: Effective padding of network in the vertical
direction, with respect to specified input and output.
Raises:
ValueError: If network is not aligned or if either input or output nodes
cannot be found.
"""
# Convert a graph to graph_def if necessary.
if isinstance(graph_def, tf.Graph):
graph_def = graph_def.as_graph_def()
# Convert tensors to names.
if isinstance(input_node, tf.Tensor):
input_node = input_node.op.name
if isinstance(output_node, tf.Tensor):
output_node = output_node.op.name
stop_propagation = stop_propagation or []
# Computes order of computation for a given graph.
node_info, name_to_node = graph_compute_order.get_compute_order(
graph_def=graph_def,
input_node_name=input_node,
input_node_size=input_resolution)
# Sort in reverse topological order.
ordered_node_info = sorted(node_info.items(), key=lambda x: -x[1].order)
# Dictionaries to keep track of receptive field, effective stride and
# effective padding of different nodes.
rf_sizes_x = {}
rf_sizes_y = {}
effective_strides_x = {}
effective_strides_y = {}
effective_paddings_x = {}
effective_paddings_y = {}
# Initialize dicts for output_node.
rf_sizes_x[output_node] = 1
rf_sizes_y[output_node] = 1
effective_strides_x[output_node] = 1
effective_strides_y[output_node] = 1
effective_paddings_x[output_node] = 0
effective_paddings_y[output_node] = 0
# Flag to denote if we found output node yet. If we have not, we skip nodes
# until the output node is found.
found_output_node = False
# Flag to denote if padding is undefined. This happens when SAME padding mode
# is used in conjunction with stride and kernel sizes which make it such that
# the padding to be applied would depend on the input size. In this case,
# alignment checks are skipped, and the effective padding is None.
undefined_padding = False
for _, (o, node, _, _) in ordered_node_info:
if node:
logging.vlog(3, "%10d %-100s %-20s" % (o, node.name[:90], node.op))
else:
continue
# When we find input node, we can stop.
if node.name == input_node:
break
# Loop until we find the output node. All nodes before finding the output
# one are irrelevant, so they can be skipped.
if not found_output_node:
if node.name == output_node:
found_output_node = True
if found_output_node:
if node.name not in rf_sizes_x:
assert node.name not in rf_sizes_y, ("Node %s is in rf_sizes_y, but "
"not in rf_sizes_x" % node.name)
# In this case, node is not relevant since it's not part of the
# computation we're interested in.
logging.vlog(3, "Irrelevant node %s, skipping it...", node.name)
continue
# Get params for this layer.
(kernel_size_x, kernel_size_y, stride_x, stride_y, padding_x, padding_y,
_, _) = parse_layer_parameters.get_layer_params(
node, name_to_node, node_info[node.name].input_size)
logging.vlog(
3, "kernel_size_x = %s, kernel_size_y = %s, "
"stride_x = %s, stride_y = %s, "
"padding_x = %s, padding_y = %s, input size = %s" %
(kernel_size_x, kernel_size_y, stride_x, stride_y, padding_x,
padding_y, node_info[node.name].input_size))
if padding_x is None or padding_y is None:
undefined_padding = True
# Get parameters at input of this layer which may or may not be propagated
# to the input layers.
rf_size_input_x = _get_rf_size_node_input(stride_x, kernel_size_x,
rf_sizes_x[node.name])
rf_size_input_y = _get_rf_size_node_input(stride_y, kernel_size_y,
rf_sizes_y[node.name])
effective_stride_input_x = _get_effective_stride_node_input(
stride_x, effective_strides_x[node.name])
effective_stride_input_y = _get_effective_stride_node_input(
stride_y, effective_strides_y[node.name])
if not undefined_padding:
effective_padding_input_x = _get_effective_padding_node_input(
stride_x, padding_x, effective_paddings_x[node.name])
effective_padding_input_y = _get_effective_padding_node_input(
stride_y, padding_y, effective_paddings_y[node.name])
else:
effective_padding_input_x = None
effective_padding_input_y = None
logging.vlog(
4, "rf_size_input_x = %s, rf_size_input_y = %s, "
"effective_stride_input_x = %s, effective_stride_input_y = %s, "
"effective_padding_input_x = %s, effective_padding_input_y = %s" %
(rf_size_input_x, rf_size_input_y, effective_stride_input_x,
effective_stride_input_y, effective_padding_input_x,
effective_padding_input_y))
# Loop over this node's inputs and potentially propagate information down.
for inp_name in node.input:
# Stop the propagation of the receptive field.
if any(inp_name.startswith(stop) for stop in stop_propagation):
logging.vlog(3, "Skipping explicitly ignored node %s.", inp_name)
continue
logging.vlog(4, "inp_name = %s", inp_name)
if inp_name.startswith("^"):
# The character "^" denotes a control dependency, so this input node
# can be safely ignored.
continue
inp_node = name_to_node[inp_name]
logging.vlog(4, "inp_node = \n%s", inp_node)
if inp_name in rf_sizes_x:
assert inp_name in rf_sizes_y, ("Node %s is in rf_sizes_x, but "
"not in rf_sizes_y" % inp_name)
logging.vlog(
4, "rf_sizes_x[inp_name] = %s,"
" rf_sizes_y[inp_name] = %s, "
"effective_strides_x[inp_name] = %s,"
" effective_strides_y[inp_name] = %s, "
"effective_paddings_x[inp_name] = %s,"
" effective_paddings_y[inp_name] = %s" %
(rf_sizes_x[inp_name], rf_sizes_y[inp_name],
effective_strides_x[inp_name], effective_strides_y[inp_name],
effective_paddings_x[inp_name], effective_paddings_y[inp_name]))
# This node was already discovered through a previous path, so we need
# to make sure that graph is aligned. This alignment check is skipped
# if the padding is not defined, since in this case alignment cannot
# be checked.
if not undefined_padding:
if effective_strides_x[inp_name] != effective_stride_input_x:
raise ValueError(
"Graph is not aligned since effective stride from different "
"paths is different in horizontal direction")
if effective_strides_y[inp_name] != effective_stride_input_y:
raise ValueError(
"Graph is not aligned since effective stride from different "
"paths is different in vertical direction")
if (rf_sizes_x[inp_name] -
1) / 2 - effective_paddings_x[inp_name] != (
rf_size_input_x - 1) / 2 - effective_padding_input_x:
raise ValueError(
"Graph is not aligned since center shift from different "
"paths is different in horizontal direction")
if (rf_sizes_y[inp_name] -
1) / 2 - effective_paddings_y[inp_name] != (
rf_size_input_y - 1) / 2 - effective_padding_input_y:
raise ValueError(
"Graph is not aligned since center shift from different "
"paths is different in vertical direction")
# Keep track of path with largest RF, for both directions.
if rf_sizes_x[inp_name] < rf_size_input_x:
rf_sizes_x[inp_name] = rf_size_input_x
effective_strides_x[inp_name] = effective_stride_input_x
effective_paddings_x[inp_name] = effective_padding_input_x
if rf_sizes_y[inp_name] < rf_size_input_y:
rf_sizes_y[inp_name] = rf_size_input_y
effective_strides_y[inp_name] = effective_stride_input_y
effective_paddings_y[inp_name] = effective_padding_input_y
else:
assert inp_name not in rf_sizes_y, ("Node %s is in rf_sizes_y, but "
"not in rf_sizes_x" % inp_name)
# In this case, it is the first time we encounter this node. So we
# propagate the RF parameters.
rf_sizes_x[inp_name] = rf_size_input_x
rf_sizes_y[inp_name] = rf_size_input_y
effective_strides_x[inp_name] = effective_stride_input_x
effective_strides_y[inp_name] = effective_stride_input_y
effective_paddings_x[inp_name] = effective_padding_input_x
effective_paddings_y[inp_name] = effective_padding_input_y
if not found_output_node:
raise ValueError("Output node was not found")
if input_node not in rf_sizes_x:
raise ValueError("Input node was not found")
return ReceptiveField(
(rf_sizes_x[input_node], rf_sizes_y[input_node]),
(effective_strides_x[input_node], effective_strides_y[input_node]),
(effective_paddings_x[input_node], effective_paddings_y[input_node]))
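# A hedged usage sketch of the function above. The graph construction and the
# node names 'input_image' and 'feature_map' are hypothetical, not part of
# this module:
#
#   g = tf.Graph()
#   with g.as_default():
#     ...  # build a fully-convolutional model here
#   rf = compute_receptive_field_from_graph_def(
#       g.as_graph_def(), 'input_image', 'feature_map')
#   # ReceptiveField unpacks, via __iter__, into six scalars:
#   rf_x, rf_y, stride_x, stride_y, pad_x, pad_y = rf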
|
|
#----------- ArgoJob Transitions ---------------
import multiprocessing,sys,logging
logger = logging.getLogger(__name__)
from django.db import utils,connections,DEFAULT_DB_ALIAS,models
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from django.core.validators import validate_comma_separated_integer_list
from django.core import serializers as django_serializers
from argo import QueueMessage,ArgoJobStatus
from common import log_uncaught_exceptions,MessageInterface
from common import Serializer,transfer,Mail,db_tools
from balsam.models import BalsamJob
from balsam.models import STATES_BY_NAME as BALSAM_STATES_BY_NAME
# assign this function to the system exception hook
sys.excepthook = log_uncaught_exceptions.log_uncaught_exceptions
def submit_subjob(job):
logger.debug('in submit_subjob pk=' + str(job.pk) + ' job_id='+str(job.job_id))
message = 'Subjob submitted'
try:
# get the current subjob
subjob = job.get_current_subjob()
# use subjob to fill BalsamJobMessage that will be sent to balsam
#balsamJobMsg = subjob.get_balsam_job_message()
# determine site name
logger.info('Submitting Subjob ' + str(subjob.job_id) + ' from ArgoJob '
+ str(subjob.job_id) + ' (pk=' + str(job.pk) + ') to ' + subjob.site )
# create and configure message interface
msgInt = MessageInterface.MessageInterface(
host = settings.RABBITMQ_SERVER_NAME,
port = settings.RABBITMQ_SERVER_PORT,
exchange_name = settings.RABBITMQ_BALSAM_EXCHANGE_NAME,
ssl_cert = settings.RABBITMQ_SSL_CERT,
ssl_key = settings.RABBITMQ_SSL_KEY,
ssl_ca_certs = settings.RABBITMQ_SSL_CA_CERTS,
)
# opening blocking connection which will close at the end of this function
msgInt.open_blocking_connection()
# create message queue for site in case not already done
msgInt.create_queue(subjob.site,subjob.site)
# serialize subjob for message
body = subjob.serialize()
logger.debug('sending job message: \n' + body)
# submit job
msgInt.send_msg(body,subjob.site)
# close connection
msgInt.close()
job.state = SUBJOB_SUBMITTED.name
except SubJobIndexOutOfRange:
message = 'All Subjobs Completed'
job.state = SUBJOBS_COMPLETED.name
except Exception,e:
message = ('Exception received while submitting subjob to '
+ subjob.site + ' for job pk=' + str(job.pk) + ' argo_id='
+ str(job.job_id) + ': ' + str(e))
logger.exception(message)
job.state = SUBJOB_SUBMIT_FAILED.name
job.save(update_fields=['state'],using=db_tools.get_db_connection_id(job.pk))
send_status_message(job,message)
def increment_subjob(job):
''' increments subjob index '''
logger.debug('in increment subjob pk='+str(job.pk))
message = 'subjob incremented'
job.current_subjob_pk_index += 1
logger.debug(' setting current_subjob_pk_index = ' + str(job.current_subjob_pk_index))
job.state = SUBJOB_INCREMENTED.name
job.save(update_fields=['state','current_subjob_pk_index'],
using=db_tools.get_db_connection_id(job.pk))
def stage_in(job):
''' stages data in from the user if an input_url is specified '''
logger.debug('in stage_in pk=' + str(job.pk))
message = 'Job staged in'
if job.input_url != '':
try:
transfer.stage_in(job.input_url + '/',job.working_directory + '/')
job.state = STAGED_IN.name
except Exception,e:
message = 'Exception received during stage_in: ' + str(e)
logger.exception(message)
job.state = STAGE_IN_FAILED.name
else:
# no input url specified so stage in is complete
job.state = STAGED_IN.name
job.save(update_fields=['state'],using=db_tools.get_db_connection_id(job.pk))
def stage_out(job):
''' stages data out to the user if an output_url is specified '''
logger.debug('in stage_out pk=' + str(job.pk))
message = 'Job staged out'
if job.output_url != '':
try:
transfer.stage_out(str(job.working_directory) + '/', str(job.output_url) + '/')
job.state = STAGED_OUT.name
except Exception,e:
message = 'Exception received during stage_out: ' + str(e)
logger.exception(message)
job.state = STAGE_OUT_FAILED.name
else:
        # no output url specified so stage out is complete
job.state = STAGED_OUT.name
job.save(update_fields=['state'],using=db_tools.get_db_connection_id(job.pk))
def make_history(job):
logger.debug('job ' + str(job.pk) + ' in make_history ')
job.state = HISTORY.name
job.save(update_fields=['state'],using=db_tools.get_db_connection_id(job.pk))
def send_status_message(job,message=None):
''' this function sends status messages back to the users via email and message queue '''
logger.debug('in send_status_message pk=' + str(job.pk) + ' job_id='+str(job.job_id))
try:
receiver = ''
if len(job.email) > 0 and '@' in job.email:
receiver = job.email
else:
logger.warning(' no email address specified, not sending mail, email=' + str(job.email))
return
# construct body of email
body = ' Your job has reached state ' + job.state + '\n'
if message is not None:
body += ' with the message: ' + str(message)
body += '------------------------------------------------------------------- \n'
body += 'Job Data: \n'
body += django_serializers.serialize('json',[job])
body += '------------------------------------------------------------------- \n'
body += 'Subjob Data: \n'
body += django_serializers.serialize('json',ArgoSubJob.objects.filter(pk__in=Serializer.deserialize(job.subjob_pk_list)))
# send notification email to user
Mail.send_mail(
sender = settings.ARGO_JOB_STATUS_EMAIL_SENDER,
receiver = receiver,
subject = 'ARGO Job Status Report',
body = body,
)
except Exception,e:
logger.exception('exception received while trying to send status email. Exception: ' + str(e))
# if job has an argo job status routing key, send a message there
if job.job_status_routing_key != '' and send_status_message:
logger.info('sending job status message with routing key: ' + job.job_status_routing_key)
try:
msg = ArgoJobStatus.ArgoJobStatus()
msg.state = job.state
msg.message = message
msg.job_id = job.job_id
mi = MessageInterface.MessageInterface()
mi.host = settings.RABBITMQ_SERVER_NAME
mi.port = settings.RABBITMQ_SERVER_PORT
mi.exchange_name = settings.RABBITMQ_USER_EXCHANGE_NAME
mi.ssl_cert = settings.RABBITMQ_SSL_CERT
mi.ssl_key = settings.RABBITMQ_SSL_KEY
mi.ssl_ca_certs = settings.RABBITMQ_SSL_CA_CERTS
logger.debug( ' open blocking connection to send status message ' )
mi.open_blocking_connection()
mi.send_msg(msg.get_serialized_message(),job.job_status_routing_key)
mi.close()
except:
logger.exception('Exception while sending status message to user job queue')
# ------------ Job States ----------------------------
from common.JobState import JobState
# Job States
CREATE_FAILED = JobState('CREATE_FAILED')
CREATED = JobState('CREATED',CREATE_FAILED,stage_in)
STAGE_IN_FAILED = JobState('STAGE_IN_FAILED')
STAGED_IN = JobState('STAGED_IN',STAGE_IN_FAILED,submit_subjob)
SUBJOB_SUBMITTED = JobState('SUBJOB_SUBMITTED')
SUBJOB_SUBMIT_FAILED = JobState('SUBJOB_SUBMIT_FAILED')
SUBJOB_IN_PREPROCESS = JobState('SUBJOB_IN_PREPROCESS')
SUBJOB_PREPROCESS_FAILED = JobState('SUBJOB_PREPROCESS_FAILED')
SUBJOB_QUEUED = JobState('SUBJOB_QUEUED')
SUBJOB_RUNNING = JobState('SUBJOB_RUNNING')
SUBJOB_RUN_FINISHED = JobState('SUBJOB_RUN_FINISHED')
SUBJOB_RUN_FAILED = JobState('SUBJOB_RUN_FAILED')
SUBJOB_IN_POSTPROCESS = JobState('SUBJOB_IN_POSTPROCESS')
SUBJOB_POSTPROCESS_FAILED = JobState('SUBJOB_POSTPROCESS_FAILED')
SUBJOB_COMPLETE_FAILED = JobState('SUBJOB_COMPLETE_FAILED')
SUBJOB_COMPLETED = JobState('SUBJOB_COMPLETED',SUBJOB_COMPLETE_FAILED,increment_subjob)
SUBJOB_REJECTED = JobState('SUBJOB_REJECTED')
SUBJOB_INCREMENT_FAILED = JobState('SUBJOB_INCREMENT_FAILED')
SUBJOB_INCREMENTED = JobState('SUBJOB_INCREMENTED',SUBJOB_INCREMENT_FAILED,submit_subjob)
SUBJOBS_COMPLETED = JobState('SUBJOBS_COMPLETED',stage_out)
STAGE_OUT_FAILED = JobState('STAGE_OUT_FAILED')
STAGED_OUT = JobState('STAGED_OUT',STAGE_OUT_FAILED,make_history)
HISTORY = JobState('HISTORY')
FAILED = JobState('FAILED')
REJECTED = JobState('REJECTED')
STATES = [
CREATED,
CREATE_FAILED,
STAGED_IN,
STAGE_IN_FAILED,
SUBJOB_SUBMITTED,
SUBJOB_SUBMIT_FAILED,
SUBJOB_IN_PREPROCESS,
SUBJOB_PREPROCESS_FAILED,
SUBJOB_QUEUED,
SUBJOB_RUNNING,
SUBJOB_RUN_FINISHED,
SUBJOB_RUN_FAILED,
SUBJOB_IN_POSTPROCESS,
SUBJOB_POSTPROCESS_FAILED,
SUBJOB_COMPLETED,
SUBJOB_COMPLETE_FAILED,
SUBJOB_REJECTED,
SUBJOB_INCREMENTED,
SUBJOB_INCREMENT_FAILED,
SUBJOBS_COMPLETED,
STAGED_OUT,
STAGE_OUT_FAILED,
HISTORY,
FAILED,
REJECTED,
]
TRANSITIONABLE_STATES = []
for state in STATES:
if state.transition_function is not None:
TRANSITIONABLE_STATES.append(state.name)
STATES_BY_NAME = { x.name:x for x in STATES }
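# Hedged sketch of how these definitions are meant to be consumed. The driver
# loop itself lives outside this module and is only an assumption here: each
# transitionable state pairs a failure state with a transition function, so a
# service could advance a job roughly as follows:
#
#   state = STATES_BY_NAME[job.state]
#   if state.name in TRANSITIONABLE_STATES:
#       state.transition_function(job)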
BALSAM_JOB_TO_SUBJOB_STATE_MAP = {
BALSAM_STATES_BY_NAME['CREATED'].name:SUBJOB_IN_PREPROCESS,
BALSAM_STATES_BY_NAME['CREATE_FAILED'].name:SUBJOB_PREPROCESS_FAILED,
BALSAM_STATES_BY_NAME['STAGED_IN'].name:SUBJOB_IN_PREPROCESS,
BALSAM_STATES_BY_NAME['STAGE_IN_FAILED'].name:SUBJOB_PREPROCESS_FAILED,
BALSAM_STATES_BY_NAME['PREPROCESSED'].name:SUBJOB_IN_PREPROCESS,
BALSAM_STATES_BY_NAME['PREPROCESS_FAILED'].name:SUBJOB_PREPROCESS_FAILED,
BALSAM_STATES_BY_NAME['SUBMITTED'].name:SUBJOB_IN_PREPROCESS,
BALSAM_STATES_BY_NAME['SUBMIT_FAILED'].name:SUBJOB_PREPROCESS_FAILED,
BALSAM_STATES_BY_NAME['SUBMIT_DISABLED'].name:SUBJOB_COMPLETED,
BALSAM_STATES_BY_NAME['QUEUED'].name:SUBJOB_QUEUED,
BALSAM_STATES_BY_NAME['RUNNING'].name:SUBJOB_RUNNING,
BALSAM_STATES_BY_NAME['EXECUTION_FINISHED'].name:SUBJOB_RUN_FINISHED,
BALSAM_STATES_BY_NAME['EXECUTION_FAILED'].name:SUBJOB_RUN_FAILED,
BALSAM_STATES_BY_NAME['POSTPROCESSED'].name:SUBJOB_IN_POSTPROCESS,
BALSAM_STATES_BY_NAME['POSTPROCESS_FAILED'].name:SUBJOB_POSTPROCESS_FAILED,
BALSAM_STATES_BY_NAME['STAGED_OUT'].name:SUBJOB_IN_POSTPROCESS,
BALSAM_STATES_BY_NAME['STAGE_OUT_FAILED'].name:SUBJOB_POSTPROCESS_FAILED,
BALSAM_STATES_BY_NAME['JOB_FINISHED'].name:SUBJOB_COMPLETED,
BALSAM_STATES_BY_NAME['JOB_FAILED'].name:SUBJOB_COMPLETE_FAILED,
BALSAM_STATES_BY_NAME['JOB_REJECTED'].name:SUBJOB_REJECTED,
}
# ------------- ArgoJob DB Object ----------------------
import time,os,shutil
#from django.db import models
class SubJobIndexOutOfRange(Exception): pass
class ArgoJob(models.Model):
# ARGO DB table columns
job_id = models.BigIntegerField(default=0)
user_id = models.BigIntegerField(default=0)
name = models.TextField(default='')
description = models.TextField(default='')
group_identifier = models.TextField(default='')
working_directory = models.TextField(default='')
time_created = models.DateTimeField(auto_now_add=True)
time_modified = models.DateTimeField(auto_now=True)
time_finished = models.DateTimeField(null=True)
state = models.TextField(default=CREATED.name)
username = models.TextField(default='')
email = models.TextField(default='')
input_url = models.TextField(default='')
output_url = models.TextField(default='')
subjob_pk_list = models.TextField(default='',validators=[validate_comma_separated_integer_list])
current_subjob_pk_index = models.IntegerField(default=0)
job_status_routing_key = models.TextField(default='')
def get_current_subjob(self):
subjob_list = self.get_subjob_pk_list()
if self.current_subjob_pk_index < len(subjob_list):
logger.debug('getting subjob index ' + str(self.current_subjob_pk_index) + ' of ' + str(len(subjob_list)))
return ArgoSubJob.objects.get(pk=subjob_list[self.current_subjob_pk_index])
else:
logger.debug('current_subjob_pk_index=' + str(self.current_subjob_pk_index) + ' number of subjobs = ' + str(len(subjob_list)) + ' subjobs = ' + str(subjob_list))
raise SubJobIndexOutOfRange
def add_subjob(self,subjob):
subjob_list = self.get_subjob_pk_list()
subjob_list.append(subjob.pk)
self.subjob_pk_list = Serializer.serialize(subjob_list)
def get_subjob_pk_list(self):
return Serializer.deserialize(self.subjob_pk_list)
def get_line_string(self):
format = " %10i | %20i | %20s | %35s | %15s | %20s "
output = format % (self.pk,self.job_id,self.state,str(self.time_modified),self.username,self.subjob_pk_list)
return output
@staticmethod
def get_header():
format = " %10s | %20s | %20s | %35s | %15s | %20s "
output = format % ('pk','job_id','state','time_modified','username','subjob_pk_list')
return output
@staticmethod
def generate_job_id():
# time.time() is a double with units seconds
        # so grabbing the number of microseconds
job_id = int(time.time()*1e6)
# make sure no jobs with the same job_id
same_jobs = ArgoJob.objects.filter(job_id=job_id)
while len(same_jobs) > 0:
job_id = int(time.time()*1e6)
same_jobs = ArgoJob.objects.filter(job_id=job_id)
return job_id
def delete(self,delete_subjobs=True):
# delete local argo job path
if os.path.exists(self.working_directory):
try:
shutil.rmtree(self.working_directory)
logger.info('removed job path: ' + str(self.working_directory))
except Exception,e:
logger.error('Error trying to remove argo job path: ' + str(self.working_directory) + ' Exception: ' + str(e))
# delete subjobs
if delete_subjobs:
subjobs = ArgoSubJob.objects.filter(pk__in=self.get_subjob_pk_list())
for subjob in subjobs:
subjob.delete()
# call base class delete function
try:
super(ArgoJob,self).delete()
except Exception,e:
logger.error('pk='+str(self.pk) + ' Received exception during "delete": ' + str(e))
# must do this to force django to create a DB table for ARGO independent of the one created for Balsam
class ArgoSubJob(BalsamJob): pass
'''
class ArgoSubJob(models.Model):
# ArgoSubJob DB table columns
site = models.TextField(default='')
state = models.TextField(default='PRESUBMIT')
name = models.TextField(default='')
description = models.TextField(default='')
subjob_id = models.BigIntegerField(default=0)
job_id = models.BigIntegerField(default=0)
queue = models.TextField(default=settings.BALSAM_DEFAULT_QUEUE)
project = models.TextField(default=settings.BALSAM_DEFAULT_PROJECT)
wall_time_minutes = models.IntegerField(default=0)
num_nodes = models.IntegerField(default=0)
processes_per_node = models.IntegerField(default=0)
scheduler_config = models.TextField(default='None')
scheduler_id = models.IntegerField(default=0)
application = models.TextField(default='')
config_file = models.TextField(default='')
input_url = models.TextField(default='')
output_url = models.TextField(default='')
def get_balsam_job_message(self):
msg = BalsamJobMessage.BalsamJobMessage()
msg.origin_id = self.subjob_id
msg.site = self.site
msg.name = self.name
msg.description = self.description
msg.queue = self.queue
msg.project = self.project
msg.wall_time_minutes = self.wall_time_minutes
msg.num_nodes = self.num_nodes
msg.processes_per_node = self.processes_per_node
msg.scheduler_config = self.scheduler_config
msg.application = self.application
msg.config_file = self.config_file
msg.input_url = self.input_url
msg.output_url = self.output_url
return msg
def get_line_string(self):
format = ' %10i | %20i | %20i | %10s | %20s | %10i | %10i | %10s | %10s | %10s | %15s '
output = format % (self.pk,self.subjob_id,self.job_id,self.state,self.site,
self.num_nodes,self.processes_per_node,self.scheduler_id,self.queue,
self.project,self.application)
return output
@staticmethod
def get_header():
format = ' %10s | %20s | %20s | %10s | %20s | %10s | %10s | %10s | %10s | %10s | %15s '
output = format % ('pk','subjob_id','job_id','state','site',
'num_nodes','procs','sched_id','queue','project','application')
return output
'''
|
|
#!/usr/bin/python
'''
This script generates a WebSocket frame in the form of a C array declaration
that can be pasted directly into a C source file.
Example output:
byte data0[] = {
0x81, 0x84, 0x4b, 0xfc, 0xec, 0x7b, 0x2a, 0x9e, 0x8f, 0x71
};
Usage: makewsmessage.py [options]
Options:
-h, --help show this help message and exit
-m MAXSIZE, --maxsize=MAXSIZE
maximum size of payload per frame
--prefix=PREFIX prefix of variable that stores a frame
--unmasked generate unmasked frames
--type=TYPE frame type (text|binary|ping|pong|close)
If a file is specified as a positional argument, then its contents are
used as data. Otherwise, standard input is used.
Example using standard input:
$ echo "abc" | ./makewsmessage.py --type=text -m 5
reading stdin
Generating text frames
Maximum size of payload per frame: 5
Prefix: data
Generating masked frames
Frame type: text
byte data0[] = {
0x81, 0x84, 0x4b, 0xfc, 0xec, 0x7b, 0x2a, 0x9e, 0x8f, 0x71
};
'''
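# Decoding the example frame shown in the docstring above (for orientation;
# the byte values come straight from that example):
#   0x81                -> FIN=1, opcode=0x1 (text frame)
#   0x84                -> MASK=1, payload length=4
#   0x4b 0xfc 0xec 0x7b -> masking key
#   0x2a 0x9e 0x8f 0x71 -> masked payload; XOR with the key gives
#                          0x61 0x62 0x63 0x0a, i.e. "abc\n" from `echo "abc"`.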
from optparse import OptionParser
import sys
import os
import time
import random
def random_mask():
'''
random_mask generates a random 4-byte array that will be used
as a mask for WebSocket frame
'''
mask = []
for i in range(4):
mask.append( chr( random.randint(0, 0xff) ) )
return mask
def serialize(num, len):
'Serialize num to an array of len bytes (big-endian)'
t = []
while num > 0:
r = num % 0x100
t = [r] + t
num = num / 0x100
t = ([0x00] * len) + t
return t[-len:]
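# For example (illustrative only): serialize(300, 2) returns [0x01, 0x2c],
# since 300 == 0x012c big-endian in two bytes, and serialize(5, 2) returns
# [0x00, 0x05].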
def generate_frame(data, **options):
'''
generate_frame generates a WebSocket frame as
an array of bytes
'''
# print 'generate_frame: data=%s' % data
masked = False
fragment = False
frame_type = None
mask = None
if 'mask' in options:
# print 'mask: %s' % options['mask']
mask = options['mask']
if 'masked' in options:
# print 'masked: %s' % options['masked']
masked = True
if 'fragment' in options:
fragment = options['fragment']
# print 'fragment:', fragment
if masked and ('mask' not in options):
raise Exception('masked=True and mask not specified')
if 'type' in options:
frame_type = options['type']
# print 'frame type: %s' % frame_type
if not frame_type:
opcode = 0
else:
opcodes = {'text': 1, 'binary': 2, 'close': 8, 'ping': 9, 'pong': 10}
if frame_type not in opcodes:
raise Exception('unknown frame type %s' % frame_type)
opcode = opcodes[frame_type]
# print 'opcode: %d' % opcode
# byte 0: FIN, opcode
if fragment:
b0 = 0
else:
b0 = 0x80
b0 = b0 + opcode
# print 'byte 0: %x' % b0
b = []
b.append(b0)
# byte 1: MASK, payload length (7 bits)
b1 = 0
if masked:
b1 = 0x80
if len(data) < 126:
l0 = len(data)
l = []
    elif len(data) < (1 << 16):  # 2-byte extended length field holds at most 0xffff
l0 = 126
l = serialize(len(data), 2)
else:
l0 = 127
l = serialize(len(data), 8)
b1 = b1 + l0
b.append(b1)
# print 'byte 1: %x' % b1
b = b + l
if masked:
b = b + mask
index = 0
for d in data:
d1 = chr(ord(d) ^ ord(mask[index]))
b.append(d1)
index = index + 1
if index == 4:
index = 0
else:
b = b + data
return b
def create_c_array(name, data):
'''
create_c_array returns a string that is suitable for pasting into a C
source file.
E.g.:
byte data0 = {
0x1, 0x85, 0xa5, 0x55, 0xc0, 0x55, 0xc4, 0x37, 0xa3, 0x31, 0xc0
};
'''
s = 'byte %s[] = {\n\t' % name
j = 0
for i in range(len(data)):
c = data[i]
if type(c) == str:
c = ord(c)
s1 = hex(c)
s = s + s1
if i + 1 < len(data):
s = s + ','
j = j + 1
if j < 16:
s = s + ' '
else:
s = s + '\n\t'
j = 0
if j > 0:
s = s + '\n'
s = s + '};\n'
return s
def main():
parser = OptionParser()
parser.add_option('-m', '--maxsize', dest='maxsize', type='int',
default=False, help='maximum size of payload per frame')
parser.add_option('--prefix', dest='prefix',
default='data', help='prefix of variable that stores a frame')
parser.add_option('--unmasked', default=False, action='store_true',
help='generate unmasked frames')
parser.add_option('--type', type='choice', choices=['text', 'binary',
'ping', 'pong', 'close'], default='text',
help='frame type (text|binary|ping|pong|close)')
(options, args) = parser.parse_args()
read_stdin = False
if len(args) > 0:
filename = args[0]
print 'filename: %s' % filename
fin = open(filename, 'r')
else:
print 'reading stdin'
fin = sys.stdin
read_stdin = True
if options.maxsize:
print 'Maximum size of payload per frame: %d' % options.maxsize
else:
print 'Sending message in a single frame'
print 'Prefix: %s' % options.prefix
if options.unmasked:
print 'Generating unmasked frames'
else:
print 'Generating masked frames'
print 'Frame type: %s' % options.type
random.seed(time.time())
# Read from fin (file input)
data = fin.read()
if options.maxsize:
first = True
i = 0
while len(data) > 0:
if len(data) < options.maxsize:
chunk = data
data = []
else:
chunk = data[:options.maxsize]
data = data[options.maxsize:]
if len(data) > 0:
fragment = True
else:
fragment = False
# print ' read %d bytes, fragment=%s' % (len(chunk), fragment)
# print ' (data=%s)' % data
if first:
b = generate_frame(chunk, masked=not options.unmasked,
fragment=fragment,
mask=random_mask(), type=options.type)
else:
b = generate_frame(chunk, masked=not options.unmasked,
fragment=fragment,
mask=random_mask())
s = create_c_array(options.prefix + str(i), b)
print s
first = False
i = i + 1
else:
b = generate_frame(data, masked=not options.unmasked,
mask=['x', 'y', 'z', 't'], type=options.type)
s = create_c_array(options.prefix, b)
print s
if __name__ == '__main__':
main()
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Live variable analysis.
See https://en.wikipedia.org/wiki/Live_variable_analysis for a definition of
the following idioms: live variable, live in, live out, which are used
throughout this file.
This analysis attaches the following:
* symbols that are live at the exit of control flow statements
* symbols that are live at the entry of control flow statements
Requires activity analysis.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import annos
class Analyzer(cfg.GraphVisitor):
"""CFG visitor that performs liveness analysis at statement level."""
def __init__(self, graph):
super(Analyzer, self).__init__(graph)
# This allows communicating that nodes generate extra symbols,
# e.g. those that a function definition closes over.
self.extra_gen = {}
def init_state(self, _):
return set()
def visit_node(self, node):
prev_live_in = self.in_[node]
if anno.hasanno(node.ast_node, anno.Static.SCOPE):
node_scope = anno.getanno(node.ast_node, anno.Static.SCOPE)
gen = node_scope.read | self.extra_gen.get(node.ast_node, frozenset())
# TODO(mdan): verify whether composites' parents need to be added.
# E.g. whether x needs to be added if x.y is live. Theoretically the
# activity analysis should have both so that wouldn't be needed.
kill = node_scope.modified | node_scope.deleted
live_out = set()
for n in node.next:
live_out |= self.in_[n]
live_in = gen | (live_out - kill)
else:
# Nodes that don't have a scope annotation are assumed not to touch any
# symbols.
# This Name node below is a literal name, e.g. False
assert isinstance(node.ast_node,
(gast.Name, gast.Continue, gast.Break)), type(
node.ast_node)
live_in = prev_live_in
live_out = live_in
self.in_[node] = live_in
self.out[node] = live_out
# TODO(mdan): Move this to the superclass?
return prev_live_in != live_in
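# Worked example of the transfer function above (illustrative): for a CFG node whose
# statement is `z = y + 1` and whose successors contribute live_out = {z, w}, we get
# gen = {y}, kill = {z}, and therefore live_in = gen | (live_out - kill) = {y, w}.
# visit_node reports a change (returns True) until prev_live_in == live_in, i.e.
# until the reverse traversal over the CFG reaches a fixed point.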
class WholeTreeAnalyzer(transformer.Base):
"""Runs liveness analysis on each of the functions defined in the AST.
If a function defines other local functions, those will have separate CFGs.
However, dataflow analysis needs to tie up these CFGs to properly emulate the
effect of closures. In the case of liveness, the parent function's live
variables must account for the variables that are live at the entry of each
subfunction. For example:
def foo():
# baz is live here
def bar():
print(baz)
This analyzer runs liveness analysis on each individual function, accounting
for the effect above.
"""
def __init__(self, source_info, graphs):
super(WholeTreeAnalyzer, self).__init__(source_info)
self.graphs = graphs
self.current_analyzer = None
self.analyzers = {}
def visit_FunctionDef(self, node):
parent_analyzer = self.current_analyzer
subgraph = self.graphs[node]
# Postorder tree processing makes this a bit complicated:
# 1. construct an analyzer object and put it on stack
# 2. recursively walk the subtree; this will initialize the analyzer's
# in_ state properly (done in a block below)
# 3. run the final analysis
analyzer = Analyzer(subgraph)
self.current_analyzer = analyzer
node = self.generic_visit(node)
analyzer.visit_reverse()
if parent_analyzer is not None:
# Wire the state between the two subgraphs' analyzers.
child_in_state = analyzer.in_[subgraph.entry]
# Exception: symbols modified in the child function are local to it
body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
for qn in body_scope.modified:
# Note: a function modifying the symbol doesn't make that symbol
# live at the function's entry. In fact when that happens it is
# probably a case of undefined assignment, like this:
#
# bar = 0
# def foo():
# print(bar) # bar is undefined here!
# bar = 1
#
# Hence we use discard and not remove below.
child_in_state.discard(qn)
parent_analyzer.extra_gen[node] = frozenset(child_in_state)
self.analyzers[node] = analyzer
self.current_analyzer = parent_analyzer
return node
def visit_Nonlocal(self, node):
raise NotImplementedError()
def visit_Global(self, node):
raise NotImplementedError()
class Annotator(transformer.Base):
"""AST visitor that annotates each control flow block with live symbols."""
# Note: additional nodes may be added as needed.
def __init__(self, source_info, cross_function_analyzer):
super(Annotator, self).__init__(source_info)
self.cross_function_analyzer = cross_function_analyzer
self.current_analyzer = None
def visit(self, node):
node = super(Annotator, self).visit(node)
if (self.current_analyzer is not None and
isinstance(node, gast.stmt) and
node in self.current_analyzer.graph.index):
cfg_node = self.current_analyzer.graph.index[node]
anno.setanno(node, anno.Static.LIVE_VARS_IN,
frozenset(self.current_analyzer.in_[cfg_node]))
return node
def visit_FunctionDef(self, node):
parent_analyzer = self.current_analyzer
self.current_analyzer = self.cross_function_analyzer.analyzers[node]
node = self.generic_visit(node)
self.current_analyzer = parent_analyzer
return node
def _block_statement_live_out(self, node):
successors = self.current_analyzer.graph.stmt_next[node]
stmt_live_out = set()
for s in successors:
stmt_live_out.update(self.current_analyzer.in_[s])
anno.setanno(node, anno.Static.LIVE_VARS_OUT, frozenset(stmt_live_out))
return node
def _block_statement_live_in(self, node, entry_node):
cfg_node = self.current_analyzer.graph.index[entry_node]
stmt_live_in = frozenset(self.current_analyzer.in_[cfg_node])
anno.setanno(node, anno.Static.LIVE_VARS_IN, stmt_live_in)
return node
def visit_If(self, node):
node = self.generic_visit(node)
node = self._block_statement_live_out(node)
return self._block_statement_live_in(node, node.test)
def visit_For(self, node):
node = self.generic_visit(node)
node = self._block_statement_live_out(node)
return self._block_statement_live_in(node, node.iter)
def visit_While(self, node):
node = self.generic_visit(node)
node = self._block_statement_live_out(node)
return self._block_statement_live_in(node, node.test)
def visit_With(self, node):
node = self.generic_visit(node)
return self._block_statement_live_in(node, node.items[0])
def visit_Expr(self, node):
node = self.generic_visit(node)
cfg_node = self.current_analyzer.graph.index[node]
anno.setanno(node, anno.Static.LIVE_VARS_OUT,
frozenset(self.current_analyzer.out[cfg_node]))
return node
def visit_ExceptHandler(self, node):
# TODO(b/123995141) Add Exception Handlers to the CFG
return node
def resolve(node, source_info, graphs):
"""Resolves the live symbols at the exit of control flow statements.
Args:
node: ast.AST
source_info: transformer.SourceInfo
graphs: Dict[ast.FunctionDef, cfg.Graph]
Returns:
ast.AST
"""
cross_function_analyzer = WholeTreeAnalyzer(source_info, graphs)
node = cross_function_analyzer.visit(node)
visitor = Annotator(source_info, cross_function_analyzer)
node = visitor.visit(node)
return node
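# Rough usage sketch (hedged: `cfg.build` and `activity.resolve` are assumptions
# based on the sibling pyct modules and may differ between TensorFlow versions;
# only `resolve` above is defined in this file):
#
#   graphs = cfg.build(fn_node)                       # one CFG per function def
#   fn_node = activity.resolve(fn_node, source_info)  # prerequisite scope annotations
#   fn_node = resolve(fn_node, source_info, graphs)   # attach liveness annotations
#
# Afterwards, anno.getanno(stmt, anno.Static.LIVE_VARS_OUT) returns the frozenset of
# symbols live at the exit of a control flow statement `stmt`.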
|
|
"""The WaveBlocks Project
This file contains the class for representing the simplex
basis shape which is a special type of sparse basis set.
@author: R. Bourquin
@copyright: Copyright (C) 2015 R. Bourquin
@license: Modified BSD License
"""
from numpy import eye, vstack, integer
from WaveBlocksND.BasisShape import BasisShape
__all__ = ["SimplexShape"]
class SimplexShape(BasisShape):
r"""This class implements the simplex basis shape which is a special type
of sparse basis set. A basis shape is essentially all information and operations
related to the set :math:`\mathfrak{K}` of multi-indices :math:`k`. The simplex
shape in :math:`D` dimensions and with maximal 1-norm :math:`K` is defined as the set
.. math::
\mathfrak{K}(D, K) := \{ (k_0, \ldots, k_{D-1}) |
\sum_{d=0}^{D-1} k_d = \| k \|_1 \leq K \}
"""
def __init__(self, D, K):
r"""
:param D: The dimension :math:`D`
:param K: The maximal 1-norm :math:`K`
"""
# The dimension of K
self._dimension = D
# The maximal 1-norm parameter
self._maxnorm = K
# The linear mapping k -> index for the basis
iil = self._get_index_iterator_lex()
self._lima = {k: index for index, k in enumerate(iil)}
# And the inverse mapping
self._lima_inv = {v: k for k, v in self._lima.items()}
# The basis size
self._basissize = len(self._lima)
def __str__(self):
r""":return: A string describing the basis shape :math:`\mathfrak{K}`.
"""
s = ("Simplex basis shape of dimension "+str(self._dimension)+" and maximal 1-norm "+str(self._maxnorm)+".")
return s
def __hash__(self):
r"""Compute a unique hash for the basis shape. In the case of simplex
basis shapes :math:`\mathfrak{K}` the basis is fully specified by its
dimension :math:`D` and the maximal 1-norm parameter :math:`K`.
"""
return hash(("SimplexShape", self._dimension, self._maxnorm))
def __getitem__(self, k):
r"""Make map look ups.
"""
if type(k) is tuple or type(k) is list:
k = tuple(k)
assert len(k) == self._dimension
if k in self._lima:
return self._lima[k]
elif type(k) is int:
if k in self._lima_inv:
return self._lima_inv[k]
else:
raise IndexError("Wrong index type")
def __contains__(self, k):
r"""
Checks if a given multi-index :math:`k` is part of the basis set :math:`\mathfrak{K}`.
:param k: The multi-index :math:`k` we want to test.
:type k: tuple
"""
assert len(tuple(k)) == self._dimension
return tuple(k) in self._lima
def __iter__(self):
r"""Implements iteration over the multi-indices :math:`k`
of the basis set :math:`\mathfrak{K}`.
Note: The order of iteration is NOT fixed. If you need a special
iteration scheme, use :py:meth:`get_node_iterator`.
"""
# TODO: Better remove this as it may cause unexpected behaviour?
return iter(self._lima)
def contains(self, k):
r"""
Checks if a given multi-index :math:`k` is part of the basis set :math:`\mathfrak{K}`.
:param k: The multi-index :math:`k` we want to test.
:type k: tuple
"""
return tuple(k) in self._lima
def get_description(self):
r"""Return a description of this basis shape object.
A description is a ``dict`` containing all key-value pairs
necessary to reconstruct the current basis shape. A description
never contains any data.
"""
d = {}
d["type"] = "SimplexShape"
d["dimension"] = self._dimension
d["K"] = self._maxnorm
return d
def extend(self):
r"""Extend the basis shape such that (at least) all neighbours of all
boundary nodes are included in the extended basis shape.
"""
D = self._dimension
K = self._maxnorm
return SimplexShape(D, K + 1)
def _get_index_iterator_lex(self):
r"""
"""
# The maximal 1-norm
Kmax = self._maxnorm
def index_iterator_lex(Kmax):
# Initialize a counter
z = [0 for i in range(self._dimension + 1)]
while z[self._dimension] == 0:
# Yield the current index vector
yield tuple(reversed(z[:-1]))
# Increment fastest varying bit
z[0] += 1
# Reset overflows
for d in range(self._dimension):
K = sum(z[:-1])
if K > Kmax:
z[d] = 0
z[d + 1] += 1
return index_iterator_lex(Kmax)
def _get_index_iterator_chain(self, direction=0):
r"""
"""
def index_iterator_chain(Kmax, d):
D = self._dimension
# The counter
z = [0 for i in range(D + 1)]
# Iterate over all valid stencil points
while z[D] == 0:
yield tuple(reversed(z[:-1]))
# Increase index in the dimension we build the chain
z[D - d - 1] += 1
# Check if we are done with the current base point
# If yes, move base point and start a new chain
# Reset overflows
for i in range(D - d - 1, D):
K = sum(z[(D - d - 1):-1])
if K > Kmax:
z[i] = 0
z[i + 1] += 1
return index_iterator_chain(self._maxnorm, direction)
def _get_index_iterator_mag(self):
r"""
"""
# Nodes sorted by l_1 magnitude
nodes = sorted(self._lima.keys(), key=sum)
def index_iterator_mag(nodes):
for node in nodes:
yield node
return index_iterator_mag(nodes)
def get_node_iterator(self, mode="lex", direction=None):
r"""
Returns an iterator to iterate over all basis elements :math:`k \in \mathfrak{K}`.
:param mode: The mode by which we iterate over the indices. Default is ``lex``
for lexicographical order. Supported is also ``chain``, for
the chain-like mode, details see the manual.
:type mode: string
:param direction: If iterating in ``chain`` mode, this specifies the direction
in which the chains go.
:type direction: integer
"""
if mode == "lex":
return self._get_index_iterator_lex()
elif mode == "chain":
if direction is not None and direction < self._dimension:
return self._get_index_iterator_chain(direction=direction)
else:
raise ValueError("Can not build iterator for this direction.")
elif mode == "mag":
return self._get_index_iterator_mag()
# TODO: Consider boundary node only iterator
else:
raise ValueError("Unknown iterator mode: {}.".format(mode))
def get_limits(self):
r"""Returns the upper limit :math:`K` which is the same for all directions :math:`d`.
:return: A tuple of the maximum of the multi-index in each direction.
"""
return tuple(self._dimension * [self._maxnorm])
def get_neighbours(self, k, selection=None, direction=None):
r"""
Returns a list of all multi-indices that are neighbours of a given
multi-index :math:`k`. A direct neighbour is defined as
:math:`(k_0, \ldots, k_d \pm 1, \ldots, k_{D-1}) \forall d \in [0 \ldots D-1]`.
:param k: The multi-index of which we want to get the neighbours.
:type k: tuple
:param selection:
:type selection: string with fixed values ``forward``, ``backward`` or ``all``.
The value ``all`` is equivalent to the value ``None`` (default).
:param direction: The direction :math:`0 \leq d < D` in which we want to find
the neighbours :math:`k \pm e_d`.
:type direction: int
:return: A list containing the pairs :math:`(d, k^\prime)`.
"""
assert len(tuple(k)) == self._dimension
# First build a list of potential neighbours
I = eye(self._dimension, dtype=integer)
ki = vstack(k)
# Forward and backward direct neighbours
nbfw = ki + I
nbbw = ki - I
# Keep only the valid ones
nbh = []
if direction is not None:
directions = [direction]
else:
directions = range(self._dimension)
for d in directions:
nfw = tuple(nbfw[:, d])
nbw = tuple(nbbw[:, d])
# TODO: Try to simplify these nested if blocks
if selection in ("backward", "all", None):
if not k[d] == 0:
nbh.append((d, nbw))
if selection in ("forward", "all", None):
if not sum(k) == self._maxnorm:
nbh.append((d, nfw))
return nbh
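# Minimal usage sketch (illustrative): a two-dimensional simplex shape with maximal
# 1-norm 2 contains exactly the multi-indices k with k_0 + k_1 <= 2.
#
#   K = SimplexShape(2, 2)
#   sorted(K)        # [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (2, 0)]
#   K[(1, 1)]        # the linear index assigned to the multi-index (1, 1)
#   (2, 1) in K      # False, since |(2, 1)|_1 = 3 > 2
#   K.extend()       # a new SimplexShape(2, 3)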
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017-2019 The FIAAS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import copy
from datetime import datetime
import pyrfc3339
class Field(object):
"""Generic field on a k8s model"""
def __init__(self, field_type, default_value=None, alt_type=None, name="__unset__"):
self.type = field_type
self.alt_type = alt_type
self.name = name
self._default_value = default_value
self.default_value_create_instance = True
def dump(self, instance):
value = getattr(instance, self.attr_name)
return self._as_dict(value)
def load(self, instance, value):
new_value = self._from_dict(value)
instance._values[self.name] = new_value
def set(self, instance, kwargs):
value = kwargs.get(self.name, self.default_value)
self.__set__(instance, value)
def is_valid(self, instance):
return True
def is_set(self, instance):
return instance._values.get(self.name) != self.default_value
def __get__(self, instance, obj_type=None):
value = instance._values.get(self.name, self.default_value)
return value
def __set__(self, instance, new_value):
current_value = instance._values.get(self.name)
if new_value == current_value:
return
if new_value is not None:
try:
current_value.merge(new_value)
return
except AttributeError:
pass
instance._values[self.name] = new_value
def __delete__(self, instance):
del instance._values[self.name]
@property
def default_value(self):
from .base import Model
if issubclass(self.type, Model) and self.default_value_create_instance and self._default_value is None:
return self.type(new=False)
return copy.copy(self._default_value)
def _as_dict(self, value):
try:
return value.as_dict()
except AttributeError:
""" If we encounter a dict with all None-elements, we return None.
This is because the Kubernetes-API does not support empty string values, or "null" in json.
"""
if isinstance(value, dict):
d = {k: v for k, v in value.items() if v is not None}
return d if d else None
elif datetime in (self.type, self.alt_type) and isinstance(value, datetime):
return pyrfc3339.generate(value, accept_naive=True)
else:
return value
def _from_dict(self, value):
if value is None:
return self.default_value
try:
return self.type.from_dict(value)
except AttributeError:
if isinstance(value, self.type) or (self.alt_type and isinstance(value, self.alt_type)):
return value
if self.type is datetime:
return pyrfc3339.parse(value)
return self.type(value)
def __repr__(self):
return "{}(name={}, type={}, default_value={}, alt_type={})".format(
self.__class__.__name__,
self.name,
self.type,
self._default_value,
self.alt_type
)
class ReadOnlyField(Field):
"""ReadOnlyField can only be set by the API-server"""
def __set__(self, instance, value):
pass
class OnceField(Field):
"""OnceField can only be set on new instances, and is immutable after creation on the server"""
def __set__(self, instance, value):
if instance._new:
super(OnceField, self).__set__(instance, value)
class ListField(Field):
"""ListField is a list (array) of a single type on a model"""
def __init__(self, field_type, default_value=None, name='__unset__'):
if default_value is None:
default_value = []
super(ListField, self).__init__(field_type, default_value, name=name)
def dump(self, instance):
return [self._as_dict(v) for v in getattr(instance, self.attr_name)]
def load(self, instance, value):
if value is None:
value = self.default_value
instance._values[self.name] = [self._from_dict(v) for v in value]
class RequiredField(Field):
"""Required field must have a value from the start"""
def is_valid(self, instance):
value = self.__get__(instance)
return value is not None and super(RequiredField, self).is_valid(instance)
class JSONField(Field):
"""
Field with allowed types `bool`, `int`, `float`, `str`, `dict`, `list`
Items of dicts and lists have the same allowed types
"""
def __init__(self, default_value=None, name="__unset__"):
self.type = None
self.alt_type = None
self.allowed_types = [bool, int, float, str, dict, list]
self.name = name
self._default_value = default_value
def load(self, instance, value):
if value is None:
value = self.default_value
self.__set__(instance, value)
def is_valid(self, instance):
value = self.__get__(instance)
if value is None:
return True
try:
return self._check_allowed_types(value)
except TypeError:
return False
def __set__(self, instance, new_value):
if (new_value is None) or self._check_allowed_types(new_value, chain=[type(instance).__name__, self.name]):
instance._values[self.name] = new_value
def _check_allowed_types(self, value, chain=None):
if chain is None:
chain = []
if any(isinstance(value, t) for t in self.allowed_types):
if isinstance(value, dict):
for k, v in value.items():
# Build a new chain for each recursion; list.append() returns None and would
# otherwise discard the attribute path used in the error message below.
self._check_allowed_types(k, chain + [str(k)])
self._check_allowed_types(v, chain + [str(k)])
if isinstance(value, list):
for v in value:
self._check_allowed_types(v, chain + ["[\"{value}\"]".format(value=v)])
return True
else:
def typename(i):
return i.__name__
raise TypeError("{name} has invalid type {type}. Allowed types are {allowed_types}.".format(
name=".".join(chain),
type=type(value).__name__,
allowed_types=", ".join(map(typename, self.allowed_types))
))
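# Worked example of the validation above (illustrative; `Thing` and `data` are
# hypothetical model/field names): with chain starting as ["Thing", "data"],
# assigning {"ok": {"bad": object()}} raises
#   TypeError: Thing.data.ok.bad has invalid type object.
#              Allowed types are bool, int, float, str, dict, list.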
@property
def default_value(self):
return copy.copy(self._default_value)
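# Minimal usage sketch (illustrative only; `Counter` is a hypothetical model and the
# wiring of field names and defaults is done by the Model base class in .base, which
# is not shown here):
#
#   class Counter(Model):
#       count = Field(int, 0)
#       labels = ListField(str)
#       creationTimestamp = ReadOnlyField(datetime)
#
#   c = Counter(count=3)
#   c.count                               # -> 3, resolved via Field.__get__
#   c.creationTimestamp = datetime.now()  # ignored: ReadOnlyField.__set__ is a no-op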
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import random
import unittest
import warnings
from pathlib import Path
import pytest # type: ignore
import numpy as np
from monty.os.path import which
from pymatgen.core.composition import Composition
from pymatgen.core.lattice import Lattice
from pymatgen.core.operations import SymmOp
from pymatgen.core.periodic_table import Element, Species
from pymatgen.core.structure import (
IMolecule,
IStructure,
Molecule,
Structure,
StructureError,
)
from pymatgen.electronic_structure.core import Magmom
from pymatgen.util.testing import PymatgenTest
enum_cmd = which("enum.x") or which("multienum.x")
mcsqs_cmd = which("mcsqs")
class IStructureTest(PymatgenTest):
def setUp(self):
coords = [[0, 0, 0], [0.75, 0.5, 0.75]]
self.lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
self.struct = IStructure(self.lattice, ["Si"] * 2, coords)
self.assertEqual(len(self.struct), 2, "Wrong number of sites in structure!")
self.assertTrue(self.struct.is_ordered)
self.assertTrue(self.struct.ntypesp == 1)
coords = list()
coords.append([0, 0, 0])
coords.append([0.0, 0, 0.0000001])
self.assertRaises(
StructureError,
IStructure,
self.lattice,
["Si"] * 2,
coords,
validate_proximity=True,
)
self.propertied_structure = IStructure(self.lattice, ["Si"] * 2, coords, site_properties={"magmom": [5, -5]})
@unittest.skipIf(not (mcsqs_cmd and enum_cmd), "enumlib or mcsqs executable not present")
def test_get_orderings(self):
ordered = Structure.from_spacegroup("Im-3m", Lattice.cubic(3), ["Fe"], [[0, 0, 0]])
self.assertEqual(ordered.get_orderings()[0], ordered)
disordered = Structure.from_spacegroup("Im-3m", Lattice.cubic(3), [Composition("Fe0.5Mn0.5")], [[0, 0, 0]])
orderings = disordered.get_orderings()
self.assertEqual(len(orderings), 1)
super_cell = disordered * 2
orderings = super_cell.get_orderings()
self.assertEqual(len(orderings), 59)
sqs = disordered.get_orderings(mode="sqs", scaling=[2, 2, 2])
self.assertEqual(sqs[0].formula, "Mn8 Fe8")
sqs = super_cell.get_orderings(mode="sqs")
self.assertEqual(sqs[0].formula, "Mn8 Fe8")
def test_as_dataframe(self):
df = self.propertied_structure.as_dataframe()
self.assertEqual(df.attrs["Reduced Formula"], "Si")
self.assertEqual(df.shape, (2, 8))
def test_matches(self):
ss = self.struct * 2
self.assertTrue(ss.matches(self.struct))
def test_bad_structure(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
coords.append([0.75, 0.5, 0.75])
self.assertRaises(
StructureError,
IStructure,
self.lattice,
["Si"] * 3,
coords,
validate_proximity=True,
)
# these shouldn't raise an error
IStructure(self.lattice, ["Si"] * 2, coords[:2], True)
IStructure(self.lattice, ["Si"], coords[:1], True)
def test_volume_and_density(self):
self.assertAlmostEqual(self.struct.volume, 40.04, 2, "Volume wrong!")
self.assertAlmostEqual(self.struct.density, 2.33, 2, "Incorrect density")
def test_specie_init(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
s = IStructure(self.lattice, [{Species("O", -2): 1.0}, {Species("Mg", 2): 0.8}], coords)
self.assertEqual(s.composition.formula, "Mg0.8 O1")
def test_get_sorted_structure(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
s = IStructure(self.lattice, ["O", "Li"], coords, site_properties={"charge": [-2, 1]})
sorted_s = s.get_sorted_structure()
self.assertEqual(sorted_s[0].species, Composition("Li"))
self.assertEqual(sorted_s[1].species, Composition("O"))
self.assertEqual(sorted_s[0].charge, 1)
self.assertEqual(sorted_s[1].charge, -2)
s = IStructure(
self.lattice,
["Se", "C", "Se", "C"],
[[0] * 3, [0.5] * 3, [0.25] * 3, [0.75] * 3],
)
self.assertEqual(
[site.specie.symbol for site in s.get_sorted_structure()],
["C", "C", "Se", "Se"],
)
def test_get_space_group_data(self):
self.assertEqual(self.struct.get_space_group_info(), ("Fd-3m", 227))
def test_fractional_occupations(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
s = IStructure(self.lattice, [{"O": 1.0}, {"Mg": 0.8}], coords)
self.assertEqual(s.composition.formula, "Mg0.8 O1")
self.assertFalse(s.is_ordered)
def test_get_distance(self):
self.assertAlmostEqual(self.struct.get_distance(0, 1), 2.35, 2, "Distance calculated wrongly!")
pt = [0.9, 0.9, 0.8]
self.assertAlmostEqual(
self.struct[0].distance_from_point(pt),
1.50332963784,
2,
"Distance calculated wrongly!",
)
def test_as_dict(self):
si = Species("Si", 4)
mn = Element("Mn")
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
struct = IStructure(self.lattice, [{si: 0.5, mn: 0.5}, {si: 0.5}], coords)
self.assertIn("lattice", struct.as_dict())
self.assertIn("sites", struct.as_dict())
d = self.propertied_structure.as_dict()
self.assertEqual(d["sites"][0]["properties"]["magmom"], 5)
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
s = IStructure(
self.lattice,
[
{Species("O", -2, properties={"spin": 3}): 1.0},
{Species("Mg", 2, properties={"spin": 2}): 0.8},
],
coords,
site_properties={"magmom": [5, -5]},
)
d = s.as_dict()
self.assertEqual(d["sites"][0]["properties"]["magmom"], 5)
self.assertEqual(d["sites"][0]["species"][0]["properties"]["spin"], 3)
d = s.as_dict(0)
self.assertNotIn("volume", d["lattice"])
self.assertNotIn("xyz", d["sites"][0])
def test_from_dict(self):
d = self.propertied_structure.as_dict()
s = IStructure.from_dict(d)
self.assertEqual(s[0].magmom, 5)
d = self.propertied_structure.as_dict(0)
s2 = IStructure.from_dict(d)
self.assertEqual(s, s2)
d = {
"lattice": {
"a": 3.8401979337,
"volume": 40.044794644251596,
"c": 3.8401979337177736,
"b": 3.840198994344244,
"matrix": [
[3.8401979337, 0.0, 0.0],
[1.9200989668, 3.3257101909, 0.0],
[0.0, -2.2171384943, 3.1355090603],
],
"alpha": 119.9999908639842,
"beta": 90.0,
"gamma": 60.000009137322195,
},
"sites": [
{
"properties": {"magmom": 5},
"abc": [0.0, 0.0, 0.0],
"occu": 1.0,
"species": [
{
"occu": 1.0,
"oxidation_state": -2,
"properties": {"spin": 3},
"element": "O",
}
],
"label": "O2-",
"xyz": [0.0, 0.0, 0.0],
},
{
"properties": {"magmom": -5},
"abc": [0.75, 0.5, 0.75],
"occu": 0.8,
"species": [
{
"occu": 0.8,
"oxidation_state": 2,
"properties": {"spin": 2},
"element": "Mg",
}
],
"label": "Mg2+:0.800",
"xyz": [3.8401979336749994, 1.2247250003039056e-06, 2.351631795225],
},
],
}
s = IStructure.from_dict(d)
self.assertEqual(s[0].magmom, 5)
self.assertEqual(s[0].specie.spin, 3)
self.assertEqual(type(s), IStructure)
def test_site_properties(self):
site_props = self.propertied_structure.site_properties
self.assertEqual(site_props["magmom"], [5, -5])
self.assertEqual(self.propertied_structure[0].magmom, 5)
self.assertEqual(self.propertied_structure[1].magmom, -5)
def test_copy(self):
new_struct = self.propertied_structure.copy(site_properties={"charge": [2, 3]})
self.assertEqual(new_struct[0].magmom, 5)
self.assertEqual(new_struct[1].magmom, -5)
self.assertEqual(new_struct[0].charge, 2)
self.assertEqual(new_struct[1].charge, 3)
coords = list()
coords.append([0, 0, 0])
coords.append([0.0, 0, 0.0000001])
structure = IStructure(self.lattice, ["O", "Si"], coords, site_properties={"magmom": [5, -5]})
new_struct = structure.copy(site_properties={"charge": [2, 3]}, sanitize=True)
self.assertEqual(new_struct[0].magmom, -5)
self.assertEqual(new_struct[1].magmom, 5)
self.assertEqual(new_struct[0].charge, 3)
self.assertEqual(new_struct[1].charge, 2)
self.assertAlmostEqual(new_struct.volume, structure.volume)
def test_interpolate(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
struct = IStructure(self.lattice, ["Si"] * 2, coords)
coords2 = list()
coords2.append([0, 0, 0])
coords2.append([0.5, 0.5, 0.5])
struct2 = IStructure(self.struct.lattice, ["Si"] * 2, coords2)
int_s = struct.interpolate(struct2, 10)
for s in int_s:
self.assertIsNotNone(s, "Interpolation Failed!")
self.assertEqual(int_s[0].lattice, s.lattice)
self.assertArrayEqual(int_s[1][1].frac_coords, [0.725, 0.5, 0.725])
# test interpolation with an explicit nimages array
int_s = struct.interpolate(struct2, nimages=np.linspace(0.0, 1.0, 3))
for s in int_s:
self.assertIsNotNone(s, "Interpolation Failed!")
self.assertEqual(int_s[0].lattice, s.lattice)
self.assertArrayEqual(int_s[1][1].frac_coords, [0.625, 0.5, 0.625])
badlattice = [[1, 0.00, 0.00], [0, 1, 0.00], [0.00, 0, 1]]
struct2 = IStructure(badlattice, ["Si"] * 2, coords2)
self.assertRaises(ValueError, struct.interpolate, struct2)
coords2 = list()
coords2.append([0, 0, 0])
coords2.append([0.5, 0.5, 0.5])
struct2 = IStructure(self.struct.lattice, ["Si", "Fe"], coords2)
self.assertRaises(ValueError, struct.interpolate, struct2)
# Test autosort feature.
s1 = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Fe"], [[0, 0, 0]])
s1.pop(0)
s2 = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Fe"], [[0, 0, 0]])
s2.pop(2)
random.shuffle(s2)
for s in s1.interpolate(s2, autosort_tol=0.5):
self.assertArrayAlmostEqual(s1[0].frac_coords, s[0].frac_coords)
self.assertArrayAlmostEqual(s1[2].frac_coords, s[2].frac_coords)
# Make sure autosort has no effect on simpler interpolations,
# and with shuffled sites.
s1 = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Fe"], [[0, 0, 0]])
s2 = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Fe"], [[0, 0, 0]])
s2[0] = "Fe", [0.01, 0.01, 0.01]
random.shuffle(s2)
for s in s1.interpolate(s2, autosort_tol=0.5):
self.assertArrayAlmostEqual(s1[1].frac_coords, s[1].frac_coords)
self.assertArrayAlmostEqual(s1[2].frac_coords, s[2].frac_coords)
self.assertArrayAlmostEqual(s1[3].frac_coords, s[3].frac_coords)
# Test non-hexagonal setting.
lattice = Lattice.rhombohedral(4.0718, 89.459)
species = [{"S": 1.0}, {"Ni": 1.0}]
coordinate = [(0.252100, 0.252100, 0.252100), (0.500000, 0.244900, -0.244900)]
s = Structure.from_spacegroup("R32:R", lattice, species, coordinate)
self.assertEqual(s.formula, "Ni3 S2")
def test_interpolate_lattice(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
struct = IStructure(self.lattice, ["Si"] * 2, coords)
coords2 = list()
coords2.append([0, 0, 0])
coords2.append([0.5, 0.5, 0.5])
l2 = Lattice.from_parameters(3, 4, 4, 100, 100, 70)
struct2 = IStructure(l2, ["Si"] * 2, coords2)
int_s = struct.interpolate(struct2, 2, interpolate_lattices=True)
self.assertArrayAlmostEqual(struct.lattice.abc, int_s[0].lattice.abc)
self.assertArrayAlmostEqual(struct.lattice.angles, int_s[0].lattice.angles)
self.assertArrayAlmostEqual(struct2.lattice.abc, int_s[2].lattice.abc)
self.assertArrayAlmostEqual(struct2.lattice.angles, int_s[2].lattice.angles)
int_angles = [110.3976469, 94.5359731, 64.5165856]
self.assertArrayAlmostEqual(int_angles, int_s[1].lattice.angles)
# Assert that volume is monotonic
self.assertTrue(struct2.lattice.volume >= int_s[1].lattice.volume)
self.assertTrue(int_s[1].lattice.volume >= struct.lattice.volume)
def test_interpolate_lattice_rotation(self):
l1 = Lattice([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
l2 = Lattice([[-1.01, 0, 0], [0, -1.01, 0], [0, 0, 1]])
coords = [[0, 0, 0], [0.75, 0.5, 0.75]]
struct1 = IStructure(l1, ["Si"] * 2, coords)
struct2 = IStructure(l2, ["Si"] * 2, coords)
int_s = struct1.interpolate(struct2, 2, interpolate_lattices=True)
# Assert that volume is monotonic
self.assertTrue(struct2.lattice.volume >= int_s[1].lattice.volume)
self.assertTrue(int_s[1].lattice.volume >= struct1.lattice.volume)
def test_get_primitive_structure(self):
coords = [[0, 0, 0], [0.5, 0.5, 0], [0, 0.5, 0.5], [0.5, 0, 0.5]]
fcc_ag = IStructure(Lattice.cubic(4.09), ["Ag"] * 4, coords)
self.assertEqual(len(fcc_ag.get_primitive_structure()), 1)
coords = [[0, 0, 0], [0.5, 0.5, 0.5]]
bcc_li = IStructure(Lattice.cubic(4.09), ["Li"] * 2, coords)
bcc_prim = bcc_li.get_primitive_structure()
self.assertEqual(len(bcc_prim), 1)
self.assertAlmostEqual(bcc_prim.lattice.alpha, 109.47122, 3)
bcc_li = IStructure(Lattice.cubic(4.09), ["Li"] * 2, coords, site_properties={"magmom": [1, -1]})
bcc_prim = bcc_li.get_primitive_structure()
self.assertEqual(len(bcc_prim), 1)
self.assertAlmostEqual(bcc_prim.lattice.alpha, 109.47122, 3)
bcc_prim = bcc_li.get_primitive_structure(use_site_props=True)
self.assertEqual(len(bcc_prim), 2)
self.assertAlmostEqual(bcc_prim.lattice.alpha, 90, 3)
coords = [[0] * 3, [0.5] * 3, [0.25] * 3, [0.26] * 3]
s = IStructure(Lattice.cubic(4.09), ["Ag"] * 4, coords)
self.assertEqual(len(s.get_primitive_structure()), 4)
def test_primitive_cell_site_merging(self):
l = Lattice.cubic(10)
coords = [[0, 0, 0], [0, 0, 0.5], [0, 0, 0.26], [0, 0, 0.74]]
sp = ["Ag", "Ag", "Be", "Be"]
s = Structure(l, sp, coords)
dm = s.get_primitive_structure().distance_matrix
self.assertArrayAlmostEqual(dm, [[0, 2.5], [2.5, 0]])
def test_primitive_on_large_supercell(self):
coords = [[0, 0, 0], [0.5, 0.5, 0], [0, 0.5, 0.5], [0.5, 0, 0.5]]
fcc_ag = Structure(Lattice.cubic(4.09), ["Ag"] * 4, coords)
fcc_ag.make_supercell([2, 2, 2])
fcc_ag_prim = fcc_ag.get_primitive_structure()
self.assertEqual(len(fcc_ag_prim), 1)
self.assertAlmostEqual(fcc_ag_prim.volume, 17.10448225)
def test_primitive_positions(self):
coords = [[0, 0, 0], [0.3, 0.35, 0.45]]
s = Structure(Lattice.from_parameters(1, 2, 3, 50, 66, 88), ["Ag"] * 2, coords)
a = [[-1, 2, -3], [3, 2, -4], [1, 0, -1]]
b = [[4, 0, 0], [1, 1, 0], [3, 0, 1]]
c = [[2, 0, 0], [1, 3, 0], [1, 1, 1]]
for sc_matrix in [c]:
sc = s.copy()
sc.make_supercell(sc_matrix)
prim = sc.get_primitive_structure(0.01)
self.assertEqual(len(prim), 2)
self.assertAlmostEqual(prim.distance_matrix[0, 1], 1.0203432356739286)
def test_primitive_structure_volume_check(self):
l = Lattice.tetragonal(10, 30)
coords = [
[0.5, 0.8, 0],
[0.5, 0.2, 0],
[0.5, 0.8, 0.333],
[0.5, 0.5, 0.333],
[0.5, 0.5, 0.666],
[0.5, 0.2, 0.666],
]
s = IStructure(l, ["Ag"] * 6, coords)
sprim = s.get_primitive_structure(tolerance=0.1)
self.assertEqual(len(sprim), 6)
def test_get_miller_index(self):
"""Test for get miller index convenience method"""
struct = Structure(
[2.319, -4.01662582, 0.0, 2.319, 4.01662582, 0.0, 0.0, 0.0, 7.252],
["Sn", "Sn", "Sn"],
[
[2.319, 1.33887527, 6.3455],
[1.1595, 0.66943764, 4.5325],
[1.1595, 0.66943764, 0.9065],
],
coords_are_cartesian=True,
)
hkl = struct.get_miller_index_from_site_indexes([0, 1, 2])
self.assertEqual(hkl, (2, -1, 0))
def test_get_all_neighbors_and_get_neighbors(self):
s = self.struct
nn = s.get_neighbors_in_shell(s[0].frac_coords, 2, 4, include_index=True, include_image=True)
self.assertEqual(len(nn), 47)
r = random.uniform(3, 6)
all_nn = s.get_all_neighbors(r, True, True)
for i in range(len(s)):
self.assertEqual(4, len(all_nn[i][0]))
self.assertEqual(len(all_nn[i]), len(s.get_neighbors(s[i], r)))
for site, nns in zip(s, all_nn):
for nn in nns:
self.assertTrue(nn[0].is_periodic_image(s[nn[2]]))
d = sum((site.coords - nn[0].coords) ** 2) ** 0.5
self.assertAlmostEqual(d, nn[1])
s = Structure(Lattice.cubic(1), ["Li"], [[0, 0, 0]])
s.make_supercell([2, 2, 2])
self.assertEqual(sum(map(len, s.get_all_neighbors(3))), 976)
all_nn = s.get_all_neighbors(0.05)
self.assertEqual([len(nn) for nn in all_nn], [0] * len(s))
def test_get_neighbor_list(self):
s = self.struct
c_indices1, c_indices2, c_offsets, c_distances = s.get_neighbor_list(3)
p_indices1, p_indices2, p_offsets, p_distances = s._get_neighbor_list_py(3)
self.assertArrayAlmostEqual(sorted(c_distances), sorted(p_distances))
# @unittest.skipIf(not os.environ.get("CI"), "Only run this in CI tests.")
# def test_get_all_neighbors_crosscheck_old(self):
# warnings.simplefilter("ignore")
# for i in range(100):
# alpha, beta = np.random.rand(2) * 90
# a, b, c = 3 + np.random.rand(3) * 5
# species = ["H"] * 5
# frac_coords = np.random.rand(5, 3)
# try:
# latt = Lattice.from_parameters(a, b, c, alpha, beta, 90)
# s = Structure.from_spacegroup("P1", latt,
# species, frac_coords)
# for nn_new, nn_old in zip(s.get_all_neighbors(4),
# s.get_all_neighbors_old(4)):
# sites1 = [i[0] for i in nn_new]
# sites2 = [i[0] for i in nn_old]
# self.assertEqual(set(sites1), set(sites2))
# break
# except Exception as ex:
# pass
# else:
# raise ValueError("No valid structure tested.")
#
# from pymatgen.electronic_structure.core import Spin
# d = {'@module': 'pymatgen.core.structure', '@class': 'Structure', 'charge': None, 'lattice': {
# 'matrix': [[0.0, 0.0, 5.5333], [5.7461, 0.0, 3.518471486290303e-16],
# [-4.692662837312786e-16, 7.6637, 4.692662837312786e-16]], 'a': 5.5333, 'b': 5.7461,
# 'c': 7.6637,
# 'alpha': 90.0, 'beta': 90.0, 'gamma': 90.0, 'volume': 243.66653780778103}, 'sites': [
# {'species': [{'element': 'Mn', 'oxidation_state': 0, 'properties': {'spin': Spin.down}, 'occu': 1}],
# 'abc': [0.0, 0.5, 0.5], 'xyz': [2.8730499999999997, 3.83185, 4.1055671618015446e-16],
# 'label': 'Mn0+,spin=-1',
# 'properties': {}},
# {'species': [{'element': 'Mn', 'oxidation_state': None, 'occu': 1.0}],
# 'abc': [1.232595164407831e-32, 0.5, 0.5],
# 'xyz': [2.8730499999999997, 3.83185, 4.105567161801545e-16], 'label': 'Mn', 'properties': {}}]}
# struct = Structure.from_dict(d)
# self.assertEqual(set([i[0] for i in struct.get_neighbors(struct[0], 0.05)]),
# set([i[0] for i in struct.get_neighbors_old(struct[0], 0.05)]))
#
# warnings.simplefilter("default")
def test_get_all_neighbors_outside_cell(self):
s = Structure(
Lattice.cubic(2),
["Li", "Li", "Li", "Si"],
[[3.1] * 3, [0.11] * 3, [-1.91] * 3, [0.5] * 3],
)
all_nn = s.get_all_neighbors(0.2, True)
for site, nns in zip(s, all_nn):
for nn in nns:
self.assertTrue(nn[0].is_periodic_image(s[nn[2]]))
d = sum((site.coords - nn[0].coords) ** 2) ** 0.5
self.assertAlmostEqual(d, nn[1])
self.assertEqual(list(map(len, all_nn)), [2, 2, 2, 0])
def test_get_all_neighbors_small_cutoff(self):
s = Structure(
Lattice.cubic(2),
["Li", "Li", "Li", "Si"],
[[3.1] * 3, [0.11] * 3, [-1.91] * 3, [0.5] * 3],
)
all_nn = s.get_all_neighbors(1e-5, True)
self.assertEqual(len(all_nn), len(s))
self.assertEqual([], all_nn[0])
all_nn = s.get_all_neighbors(0, True)
self.assertEqual(len(all_nn), len(s))
self.assertEqual([], all_nn[0])
def test_coincide_sites(self):
s = Structure(
Lattice.cubic(5),
["Li", "Li", "Li"],
[[0.1, 0.1, 0.1], [0.1, 0.1, 0.1], [3, 3, 3]],
coords_are_cartesian=True,
)
all_nn = s.get_all_neighbors(1e-5, True)
self.assertEqual([len(i) for i in all_nn], [0, 0, 0])
def test_get_all_neighbors_equal(self):
with pytest.warns(FutureWarning, match="get_all_neighbors_old is deprecated"):
s = Structure(
Lattice.cubic(2),
["Li", "Li", "Li", "Si"],
[[3.1] * 3, [0.11] * 3, [-1.91] * 3, [0.5] * 3],
)
nn_traditional = s.get_all_neighbors_old(4, include_index=True, include_image=True, include_site=True)
nn_cell_lists = s.get_all_neighbors(4, include_index=True, include_image=True)
for i in range(4):
self.assertEqual(len(nn_traditional[i]), len(nn_cell_lists[i]))
self.assertTrue(
np.linalg.norm(
np.array(sorted([j[1] for j in nn_traditional[i]]))
- np.array(sorted([j[1] for j in nn_cell_lists[i]]))
)
< 1e-3
)
def test_get_dist_matrix(self):
ans = [[0.0, 2.3516318], [2.3516318, 0.0]]
self.assertArrayAlmostEqual(self.struct.distance_matrix, ans)
def test_to_from_file_string(self):
for fmt in ["cif", "json", "poscar", "cssr"]:
s = self.struct.to(fmt=fmt)
self.assertIsNotNone(s)
ss = IStructure.from_str(s, fmt=fmt)
self.assertArrayAlmostEqual(ss.lattice.parameters, self.struct.lattice.parameters, decimal=5)
self.assertArrayAlmostEqual(ss.frac_coords, self.struct.frac_coords)
self.assertIsInstance(ss, IStructure)
self.assertTrue("Fd-3m" in self.struct.to(fmt="CIF", symprec=0.1))
self.struct.to(filename="POSCAR.testing")
self.assertTrue(os.path.exists("POSCAR.testing"))
os.remove("POSCAR.testing")
self.struct.to(filename="Si_testing.yaml")
self.assertTrue(os.path.exists("Si_testing.yaml"))
s = Structure.from_file("Si_testing.yaml")
self.assertEqual(s, self.struct)
self.assertRaises(ValueError, self.struct.to, filename="whatever")
self.assertRaises(ValueError, self.struct.to, fmt="badformat")
# Test Path support.
s = Structure.from_file(Path("Si_testing.yaml"))
self.assertEqual(s, self.struct)
os.remove("Si_testing.yaml")
self.struct.to(filename="POSCAR.testing.gz")
s = Structure.from_file("POSCAR.testing.gz")
self.assertEqual(s, self.struct)
os.remove("POSCAR.testing.gz")
class StructureTest(PymatgenTest):
def setUp(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
self.structure = Structure(lattice, ["Si", "Si"], coords)
def test_mutable_sequence_methods(self):
s = self.structure
s[0] = "Fe"
self.assertEqual(s.formula, "Fe1 Si1")
s[0] = "Fe", [0.5, 0.5, 0.5]
self.assertEqual(s.formula, "Fe1 Si1")
self.assertArrayAlmostEqual(s[0].frac_coords, [0.5, 0.5, 0.5])
s.reverse()
self.assertEqual(s[0].specie, Element("Si"))
self.assertArrayAlmostEqual(s[0].frac_coords, [0.75, 0.5, 0.75])
s[0] = {"Mn": 0.5}
self.assertEqual(s.formula, "Mn0.5 Fe1")
del s[1]
self.assertEqual(s.formula, "Mn0.5")
s[0] = "Fe", [0.9, 0.9, 0.9], {"magmom": 5}
self.assertEqual(s.formula, "Fe1")
self.assertEqual(s[0].magmom, 5)
# Test atomic replacement.
s["Fe"] = "Mn"
self.assertEqual(s.formula, "Mn1")
# Test slice replacement.
s = PymatgenTest.get_structure("Li2O")
s[0:2] = "S"
self.assertEqual(s.formula, "Li1 S2")
def test_non_hash(self):
self.assertRaises(TypeError, dict, [(self.structure, 1)])
def test_sort(self):
s = self.structure
s[0] = "F"
s.sort()
self.assertEqual(s[0].species_string, "Si")
self.assertEqual(s[1].species_string, "F")
s.sort(key=lambda site: site.species_string)
self.assertEqual(s[0].species_string, "F")
self.assertEqual(s[1].species_string, "Si")
s.sort(key=lambda site: site.species_string, reverse=True)
self.assertEqual(s[0].species_string, "Si")
self.assertEqual(s[1].species_string, "F")
def test_append_insert_remove_replace_substitute(self):
s = self.structure
s.insert(1, "O", [0.5, 0.5, 0.5])
self.assertEqual(s.formula, "Si2 O1")
self.assertTrue(s.ntypesp == 2)
self.assertTrue(s.symbol_set == ("O", "Si"))
self.assertTrue(s.indices_from_symbol("Si") == (0, 2))
self.assertTrue(s.indices_from_symbol("O") == (1,))
del s[2]
self.assertEqual(s.formula, "Si1 O1")
self.assertTrue(s.indices_from_symbol("Si") == (0,))
self.assertTrue(s.indices_from_symbol("O") == (1,))
s.append("N", [0.25, 0.25, 0.25])
self.assertEqual(s.formula, "Si1 N1 O1")
self.assertTrue(s.ntypesp == 3)
self.assertTrue(s.symbol_set == ("N", "O", "Si"))
self.assertTrue(s.indices_from_symbol("Si") == (0,))
self.assertTrue(s.indices_from_symbol("O") == (1,))
self.assertTrue(s.indices_from_symbol("N") == (2,))
s[0] = "Ge"
self.assertEqual(s.formula, "Ge1 N1 O1")
self.assertTrue(s.symbol_set == ("Ge", "N", "O"))
s.replace_species({"Ge": "Si"})
self.assertEqual(s.formula, "Si1 N1 O1")
self.assertTrue(s.ntypesp == 3)
s.replace_species({"Si": {"Ge": 0.5, "Si": 0.5}})
self.assertEqual(s.formula, "Si0.5 Ge0.5 N1 O1")
# this should change the .5Si .5Ge sites to .75Si .25Ge
s.replace_species({"Ge": {"Ge": 0.5, "Si": 0.5}})
self.assertEqual(s.formula, "Si0.75 Ge0.25 N1 O1")
self.assertEqual(s.ntypesp, 4)
s.replace_species({"Ge": "Si"})
s.substitute(1, "hydroxyl")
self.assertEqual(s.formula, "Si1 H1 N1 O1")
self.assertTrue(s.symbol_set == ("H", "N", "O", "Si"))
# Distance between O and H
self.assertAlmostEqual(s.get_distance(2, 3), 0.96)
# Distance between Si and H
self.assertAlmostEqual(s.get_distance(0, 3), 2.09840889)
s.remove_species(["H"])
self.assertEqual(s.formula, "Si1 N1 O1")
s.remove_sites([1, 2])
self.assertEqual(s.formula, "Si1")
def test_add_remove_site_property(self):
s = self.structure
s.add_site_property("charge", [4.1, -5])
self.assertEqual(s[0].charge, 4.1)
self.assertEqual(s[1].charge, -5)
s.add_site_property("magmom", [3, 2])
self.assertEqual(s[0].charge, 4.1)
self.assertEqual(s[0].magmom, 3)
s.remove_site_property("magmom")
self.assertRaises(AttributeError, getattr, s[0], "magmom")
def test_propertied_structure(self):
# Make sure that site properties are set to None for missing values.
s = self.structure
s.add_site_property("charge", [4.1, -5])
s.append("Li", [0.3, 0.3, 0.3])
self.assertEqual(len(s.site_properties["charge"]), 3)
def test_perturb(self):
d = 0.1
pre_perturbation_sites = self.structure.copy()
self.structure.perturb(distance=d)
post_perturbation_sites = self.structure.sites
for i, x in enumerate(pre_perturbation_sites):
self.assertAlmostEqual(
x.distance(post_perturbation_sites[i]),
d,
3,
"Bad perturbation distance",
)
structure2 = pre_perturbation_sites.copy()
structure2.perturb(distance=d, min_distance=0)
post_perturbation_sites2 = structure2.sites
for i, x in enumerate(pre_perturbation_sites):
self.assertLessEqual(x.distance(post_perturbation_sites2[i]), d)
self.assertGreaterEqual(x.distance(post_perturbation_sites2[i]), 0)
def test_add_oxidation_states(self):
oxidation_states = {"Si": -4}
self.structure.add_oxidation_state_by_element(oxidation_states)
for site in self.structure:
for k in site.species.keys():
self.assertEqual(
k.oxi_state,
oxidation_states[k.symbol],
"Wrong oxidation state assigned!",
)
oxidation_states = {"Fe": 2}
self.assertRaises(ValueError, self.structure.add_oxidation_state_by_element, oxidation_states)
self.structure.add_oxidation_state_by_site([2, -4])
self.assertEqual(self.structure[0].specie.oxi_state, 2)
self.assertRaises(ValueError, self.structure.add_oxidation_state_by_site, [1])
def test_remove_oxidation_states(self):
co_elem = Element("Co")
o_elem = Element("O")
co_specie = Species("Co", 2)
o_specie = Species("O", -2)
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice.cubic(10)
s_elem = Structure(lattice, [co_elem, o_elem], coords)
s_specie = Structure(lattice, [co_specie, o_specie], coords)
s_specie.remove_oxidation_states()
self.assertEqual(s_elem, s_specie, "Oxidation state remover failed")
def test_add_oxidation_states_by_guess(self):
s = PymatgenTest.get_structure("Li2O")
s.add_oxidation_state_by_guess()
for i in s:
self.assertTrue(i.specie in [Species("Li", 1), Species("O", -2)])
def test_add_remove_spin_states(self):
latt = Lattice.cubic(4.17)
species = ["Ni", "O"]
coords = [[0, 0, 0], [0.5, 0.5, 0.5]]
nio = Structure.from_spacegroup(225, latt, species, coords)
# should do nothing, but not fail
nio.remove_spin()
spins = {"Ni": 5}
nio.add_spin_by_element(spins)
self.assertEqual(nio[0].specie.spin, 5, "Failed to add spin states")
nio.remove_spin()
self.assertRaises(AttributeError, getattr, nio[0].specie, "spin")
spins = [5, -5, -5, 5, 0, 0, 0, 0] # AFM on (001)
nio.add_spin_by_site(spins)
self.assertEqual(nio[1].specie.spin, -5, "Failed to add spin states")
def test_apply_operation(self):
op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 90)
s = self.structure.copy()
s.apply_operation(op)
self.assertArrayAlmostEqual(
s.lattice.matrix,
[
[0.000000, 3.840198, 0.000000],
[-3.325710, 1.920099, 0.000000],
[2.217138, -0.000000, 3.135509],
],
5,
)
op = SymmOp([[1, 1, 0, 0.5], [1, 0, 0, 0.5], [0, 0, 1, 0.5], [0, 0, 0, 1]])
s = self.structure.copy()
s.apply_operation(op, fractional=True)
self.assertArrayAlmostEqual(
s.lattice.matrix,
[
[5.760297, 3.325710, 0.000000],
[3.840198, 0.000000, 0.000000],
[0.000000, -2.217138, 3.135509],
],
5,
)
def test_apply_strain(self):
s = self.structure
initial_coord = s[1].coords
s.apply_strain(0.01)
self.assertAlmostEqual(s.lattice.abc, (3.8785999130369997, 3.878600984287687, 3.8785999130549516))
self.assertArrayAlmostEqual(s[1].coords, initial_coord * 1.01)
a1, b1, c1 = s.lattice.abc
s.apply_strain([0.1, 0.2, 0.3])
a2, b2, c2 = s.lattice.abc
self.assertAlmostEqual(a2 / a1, 1.1)
self.assertAlmostEqual(b2 / b1, 1.2)
self.assertAlmostEqual(c2 / c1, 1.3)
def test_scale_lattice(self):
initial_coord = self.structure[1].coords
self.structure.scale_lattice(self.structure.volume * 1.01 ** 3)
self.assertArrayAlmostEqual(
self.structure.lattice.abc,
(3.8785999130369997, 3.878600984287687, 3.8785999130549516),
)
self.assertArrayAlmostEqual(self.structure[1].coords, initial_coord * 1.01)
def test_translate_sites(self):
self.structure.translate_sites([0, 1], [0.5, 0.5, 0.5], frac_coords=True)
self.assertArrayAlmostEqual(self.structure.frac_coords[0], [0.5, 0.5, 0.5])
self.structure.translate_sites([0], [0.5, 0.5, 0.5], frac_coords=False)
self.assertArrayAlmostEqual(self.structure.cart_coords[0], [3.38014845, 1.05428585, 2.06775453])
self.structure.translate_sites([0], [0.5, 0.5, 0.5], frac_coords=True, to_unit_cell=False)
self.assertArrayAlmostEqual(self.structure.frac_coords[0], [1.00187517, 1.25665291, 1.15946374])
def test_rotate_sites(self):
self.structure.rotate_sites(
indices=[1],
theta=2.0 * np.pi / 3.0,
anchor=self.structure.sites[0].coords,
to_unit_cell=False,
)
self.assertArrayAlmostEqual(self.structure.frac_coords[1], [-1.25, 1.5, 0.75], decimal=6)
self.structure.rotate_sites(
indices=[1],
theta=2.0 * np.pi / 3.0,
anchor=self.structure.sites[0].coords,
to_unit_cell=True,
)
self.assertArrayAlmostEqual(self.structure.frac_coords[1], [0.75, 0.5, 0.75], decimal=6)
def test_mul(self):
self.structure *= [2, 1, 1]
self.assertEqual(self.structure.formula, "Si4")
s = [2, 1, 1] * self.structure
self.assertEqual(s.formula, "Si8")
self.assertIsInstance(s, Structure)
s = self.structure * [[1, 0, 0], [2, 1, 0], [0, 0, 2]]
self.assertEqual(s.formula, "Si8")
self.assertArrayAlmostEqual(s.lattice.abc, [7.6803959, 17.5979979, 7.6803959])
def test_make_supercell(self):
self.structure.make_supercell([2, 1, 1])
self.assertEqual(self.structure.formula, "Si4")
self.structure.make_supercell([[1, 0, 0], [2, 1, 0], [0, 0, 1]])
self.assertEqual(self.structure.formula, "Si4")
self.structure.make_supercell(2)
self.assertEqual(self.structure.formula, "Si32")
self.assertArrayAlmostEqual(self.structure.lattice.abc, [15.360792, 35.195996, 7.680396], 5)
def test_disordered_supercell_primitive_cell(self):
l = Lattice.cubic(2)
f = [[0.5, 0.5, 0.5]]
sp = [{"Si": 0.54738}]
s = Structure(l, sp, f)
# this supercell often breaks things
s.make_supercell([[0, -1, 1], [-1, 1, 0], [1, 1, 1]])
self.assertEqual(len(s.get_primitive_structure()), 1)
def test_another_supercell(self):
# this is included b/c for some reason the old algo was failing on it
s = self.structure.copy()
s.make_supercell([[0, 2, 2], [2, 0, 2], [2, 2, 0]])
self.assertEqual(s.formula, "Si32")
s = self.structure.copy()
s.make_supercell([[0, 2, 0], [1, 0, 0], [0, 0, 1]])
self.assertEqual(s.formula, "Si4")
def test_to_from_dict(self):
d = self.structure.as_dict()
s2 = Structure.from_dict(d)
self.assertEqual(type(s2), Structure)
def test_to_from_abivars(self):
"""Test as_dict, from_dict with fmt == abivars."""
d = self.structure.as_dict(fmt="abivars")
s2 = Structure.from_dict(d, fmt="abivars")
self.assertEqual(s2, self.structure)
self.assertEqual(type(s2), Structure)
def test_to_from_file_string(self):
for fmt in ["cif", "json", "poscar", "cssr", "yaml", "xsf"]:
s = self.structure.to(fmt=fmt)
self.assertIsNotNone(s)
ss = Structure.from_str(s, fmt=fmt)
self.assertArrayAlmostEqual(ss.lattice.parameters, self.structure.lattice.parameters, decimal=5)
self.assertArrayAlmostEqual(ss.frac_coords, self.structure.frac_coords)
self.assertIsInstance(ss, Structure)
self.structure.to(filename="POSCAR.testing")
self.assertTrue(os.path.exists("POSCAR.testing"))
os.remove("POSCAR.testing")
self.structure.to(filename="structure_testing.json")
self.assertTrue(os.path.exists("structure_testing.json"))
s = Structure.from_file("structure_testing.json")
self.assertEqual(s, self.structure)
os.remove("structure_testing.json")
def test_from_spacegroup(self):
s1 = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Li", "O"], [[0.25, 0.25, 0.25], [0, 0, 0]])
self.assertEqual(s1.formula, "Li8 O4")
s2 = Structure.from_spacegroup(225, Lattice.cubic(3), ["Li", "O"], [[0.25, 0.25, 0.25], [0, 0, 0]])
self.assertEqual(s1, s2)
s2 = Structure.from_spacegroup(
225,
Lattice.cubic(3),
["Li", "O"],
[[0.25, 0.25, 0.25], [0, 0, 0]],
site_properties={"charge": [1, -2]},
)
self.assertEqual(sum(s2.site_properties["charge"]), 0)
s = Structure.from_spacegroup("Pm-3m", Lattice.cubic(3), ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
self.assertEqual(s.formula, "Cs1 Cl1")
self.assertRaises(
ValueError,
Structure.from_spacegroup,
"Pm-3m",
Lattice.tetragonal(1, 3),
["Cs", "Cl"],
[[0, 0, 0], [0.5, 0.5, 0.5]],
)
self.assertRaises(
ValueError,
Structure.from_spacegroup,
"Pm-3m",
Lattice.cubic(3),
["Cs"],
[[0, 0, 0], [0.5, 0.5, 0.5]],
)
from fractions import Fraction
s = Structure.from_spacegroup(139, np.eye(3), ["H"], [[Fraction(1, 2), Fraction(1, 4), Fraction(0)]])
self.assertEqual(len(s), 8)
def test_from_magnetic_spacegroup(self):
# AFM MnF
s1 = Structure.from_magnetic_spacegroup(
"P4_2'/mnm'",
Lattice.tetragonal(4.87, 3.30),
["Mn", "F"],
[[0, 0, 0], [0.30, 0.30, 0.00]],
{"magmom": [4, 0]},
)
self.assertEqual(s1.formula, "Mn2 F4")
self.assertEqual(sum(map(float, s1.site_properties["magmom"])), 0)
self.assertEqual(max(map(float, s1.site_properties["magmom"])), 4)
self.assertEqual(min(map(float, s1.site_properties["magmom"])), -4)
# AFM LaMnO3, ordered on (001) planes
s2 = Structure.from_magnetic_spacegroup(
"Pn'ma'",
Lattice.orthorhombic(5.75, 7.66, 5.53),
["La", "Mn", "O", "O"],
[
[0.05, 0.25, 0.99],
[0.00, 0.00, 0.50],
[0.48, 0.25, 0.08],
[0.31, 0.04, 0.72],
],
{"magmom": [0, Magmom([4, 0, 0]), 0, 0]},
)
self.assertEqual(s2.formula, "La4 Mn4 O12")
self.assertEqual(sum(map(float, s2.site_properties["magmom"])), 0)
self.assertEqual(max(map(float, s2.site_properties["magmom"])), 4)
self.assertEqual(min(map(float, s2.site_properties["magmom"])), -4)
def test_merge_sites(self):
species = [
{"Ag": 0.5},
{"Cl": 0.25},
{"Cl": 0.1},
{"Ag": 0.5},
{"F": 0.15},
{"F": 0.1},
]
coords = [
[0, 0, 0],
[0.5, 0.5, 0.5],
[0.5, 0.5, 0.5],
[0, 0, 0],
[0.5, 0.5, 1.501],
[0.5, 0.5, 1.501],
]
s = Structure(Lattice.cubic(1), species, coords)
s.merge_sites(mode="s")
self.assertEqual(s[0].specie.symbol, "Ag")
self.assertEqual(s[1].species, Composition({"Cl": 0.35, "F": 0.25}))
self.assertArrayAlmostEqual(s[1].frac_coords, [0.5, 0.5, 0.5005])
# Test for TaS2 with spacegroup 166 in 160 setting.
l = Lattice.hexagonal(3.374351, 20.308941)
species = ["Ta", "S", "S"]
coords = [
[0.000000, 0.000000, 0.944333],
[0.333333, 0.666667, 0.353424],
[0.666667, 0.333333, 0.535243],
]
tas2 = Structure.from_spacegroup(160, l, species, coords)
assert len(tas2) == 13
tas2.merge_sites(mode="d")
assert len(tas2) == 9
l = Lattice.hexagonal(3.587776, 19.622793)
species = ["Na", "V", "S", "S"]
coords = [
[0.333333, 0.666667, 0.165000],
[0.000000, 0.000000, 0.998333],
[0.333333, 0.666667, 0.399394],
[0.666667, 0.333333, 0.597273],
]
navs2 = Structure.from_spacegroup(160, l, species, coords)
assert len(navs2) == 18
navs2.merge_sites(mode="d")
assert len(navs2) == 12
# Test that we can average the site properties that are floats
l = Lattice.hexagonal(3.587776, 19.622793)
species = ["Na", "V", "S", "S"]
coords = [
[0.333333, 0.666667, 0.165000],
[0.000000, 0.000000, 0.998333],
[0.333333, 0.666667, 0.399394],
[0.666667, 0.333333, 0.597273],
]
site_props = {"prop1": [3.0, 5.0, 7.0, 11.0]}
navs2 = Structure.from_spacegroup(160, l, species, coords, site_properties=site_props)
navs2.insert(0, "Na", coords[0], properties={"prop1": 100.0})
navs2.merge_sites(mode="a")
self.assertEqual(len(navs2), 12)
self.assertEqual(51.5 in [itr.properties["prop1"] for itr in navs2.sites], True)
def test_properties(self):
self.assertEqual(self.structure.num_sites, len(self.structure))
self.structure.make_supercell(2)
self.structure[1] = "C"
sites = list(self.structure.group_by_types())
self.assertEqual(sites[-1].specie.symbol, "C")
self.structure.add_oxidation_state_by_element({"Si": 4, "C": 2})
self.assertEqual(self.structure.charge, 62)
def test_set_item(self):
s = self.structure.copy()
s[0] = "C"
self.assertEqual(s.formula, "Si1 C1")
s[(0, 1)] = "Ge"
self.assertEqual(s.formula, "Ge2")
s[0:2] = "Sn"
self.assertEqual(s.formula, "Sn2")
s = self.structure.copy()
s["Si"] = "C"
self.assertEqual(s.formula, "C2")
s["C"] = "C0.25Si0.5"
self.assertEqual(s.formula, "Si1 C0.5")
s["C"] = "C0.25Si0.5"
self.assertEqual(s.formula, "Si1.25 C0.125")
def test_init_error(self):
self.assertRaises(
StructureError,
Structure,
Lattice.cubic(3),
["Si"],
[[0, 0, 0], [0.5, 0.5, 0.5]],
)
def test_from_sites(self):
self.structure.add_site_property("hello", [1, 2])
s = Structure.from_sites(self.structure, to_unit_cell=True)
self.assertEqual(s.site_properties["hello"][1], 2)
def test_magic(self):
s = Structure.from_sites(self.structure)
self.assertEqual(s, self.structure)
self.assertNotEqual(s, None)
s.apply_strain(0.5)
self.assertNotEqual(s, self.structure)
self.assertNotEqual(self.structure * 2, self.structure)
def test_charge(self):
s = Structure.from_sites(self.structure)
self.assertEqual(
s.charge,
0,
"Initial Structure not defaulting to behavior in SiteCollection",
)
s.add_oxidation_state_by_site([1, 1])
self.assertEqual(
s.charge,
2,
"Initial Structure not defaulting to behavior in SiteCollection",
)
s = Structure.from_sites(s, charge=1)
self.assertEqual(s.charge, 1, "Overall charge not being stored in separate property")
s = s.copy()
self.assertEqual(s.charge, 1, "Overall charge not being copied properly with no sanitization")
s = s.copy(sanitize=True)
self.assertEqual(s.charge, 1, "Overall charge not being copied properly with sanitization")
super_cell = s * 3
self.assertEqual(
super_cell.charge,
27,
"Overall charge is not being properly multiplied in IStructure __mul__",
)
self.assertIn("Overall Charge: +1", str(s), "String representation not adding charge")
sorted_s = super_cell.get_sorted_structure()
self.assertEqual(
sorted_s.charge,
27,
"Overall charge is not properly copied during structure sorting",
)
super_cell.set_charge(25)
self.assertEqual(super_cell.charge, 25, "Set charge not properly modifying _charge")
def test_vesta_lattice_matrix(self):
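# vesta=True (the last argument to from_parameters) uses the VESTA lattice convention, so the Cartesian
# coordinates from the VESTA-exported xyz stay consistent with the cell; without it, merge_sites leaves
# spurious O sites (Si56 O134 instead of Si56 O112).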
silica_zeolite = Molecule.from_file(self.TEST_FILES_DIR / "CON_vesta.xyz")
s_vesta = Structure(
lattice=Lattice.from_parameters(22.6840, 13.3730, 12.5530, 90, 69.479, 90, True),
species=silica_zeolite.species,
coords=silica_zeolite.cart_coords,
coords_are_cartesian=True,
to_unit_cell=True,
)
s_vesta = s_vesta.get_primitive_structure()
s_vesta.merge_sites(0.01, "delete")
self.assertEqual(s_vesta.formula, "Si56 O112")
broken_s = Structure(
lattice=Lattice.from_parameters(22.6840, 13.3730, 12.5530, 90, 69.479, 90),
species=silica_zeolite.species,
coords=silica_zeolite.cart_coords,
coords_are_cartesian=True,
to_unit_cell=True,
)
broken_s.merge_sites(0.01, "delete")
self.assertEqual(broken_s.formula, "Si56 O134")
def test_extract_cluster(self):
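# Build four well-separated CH4 molecules in a 10 A cubic box and check that extract_cluster
# recovers a single CH4 around each carbon.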
coords = [
[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000],
]
ch4 = ["C", "H", "H", "H", "H"]
species = []
allcoords = []
for vec in ([0, 0, 0], [4, 0, 0], [0, 4, 0], [4, 4, 0]):
species.extend(ch4)
for c in coords:
allcoords.append(np.array(c) + vec)
structure = Structure(Lattice.cubic(10), species, allcoords, coords_are_cartesian=True)
for site in structure:
if site.specie.symbol == "C":
cluster = Molecule.from_sites(structure.extract_cluster([site]))
self.assertEqual(cluster.formula, "H4 C1")
class IMoleculeTest(PymatgenTest):
def setUp(self):
coords = [
[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000],
]
self.coords = coords
self.mol = Molecule(["C", "H", "H", "H", "H"], coords)
def test_set_item(self):
s = self.mol.copy()
s[0] = "Si"
self.assertEqual(s.formula, "Si1 H4")
s[(0, 1)] = "Ge"
self.assertEqual(s.formula, "Ge2 H3")
s[0:2] = "Sn"
self.assertEqual(s.formula, "Sn2 H3")
s = self.mol.copy()
s["H"] = "F"
self.assertEqual(s.formula, "C1 F4")
s["C"] = "C0.25Si0.5"
self.assertEqual(s.formula, "Si0.5 C0.25 F4")
s["C"] = "C0.25Si0.5"
self.assertEqual(s.formula, "Si0.625 C0.0625 F4")
def test_bad_molecule(self):
coords = [
[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000],
[-0.513360, 0.889165, -0.36301],
]
self.assertRaises(
StructureError,
Molecule,
["C", "H", "H", "H", "H", "H"],
coords,
validate_proximity=True,
)
def test_get_angle_dihedral(self):
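# 109.47 degrees is the ideal tetrahedral H-C-H angle, arccos(-1/3).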
self.assertAlmostEqual(self.mol.get_angle(1, 0, 2), 109.47122144618737)
self.assertAlmostEqual(self.mol.get_angle(3, 1, 2), 60.00001388659683)
self.assertAlmostEqual(self.mol.get_dihedral(0, 1, 2, 3), -35.26438851071765)
coords = [[0, 0, 0], [0, 0, 1], [0, 1, 1], [1, 1, 1]]
self.mol2 = Molecule(["C", "O", "N", "S"], coords)
self.assertAlmostEqual(self.mol2.get_dihedral(0, 1, 2, 3), -90)
def test_get_covalent_bonds(self):
self.assertEqual(len(self.mol.get_covalent_bonds()), 4)
def test_properties(self):
self.assertEqual(len(self.mol), 5)
self.assertTrue(self.mol.is_ordered)
self.assertEqual(self.mol.formula, "H4 C1")
def test_repr_str(self):
ans = """Full Formula (H4 C1)
Reduced Formula: H4C
Charge = 0.0, Spin Mult = 1
Sites (5)
0 C 0.000000 0.000000 0.000000
1 H 0.000000 0.000000 1.089000
2 H 1.026719 0.000000 -0.363000
3 H -0.513360 -0.889165 -0.363000
4 H -0.513360 0.889165 -0.363000"""
self.assertEqual(self.mol.__str__(), ans)
ans = """Molecule Summary
Site: C (0.0000, 0.0000, 0.0000)
Site: H (0.0000, 0.0000, 1.0890)
Site: H (1.0267, 0.0000, -0.3630)
Site: H (-0.5134, -0.8892, -0.3630)
Site: H (-0.5134, 0.8892, -0.3630)"""
self.assertEqual(repr(self.mol), ans)
def test_site_properties(self):
propertied_mol = Molecule(
["C", "H", "H", "H", "H"],
self.coords,
site_properties={"magmom": [0.5, -0.5, 1, 2, 3]},
)
self.assertEqual(propertied_mol[0].magmom, 0.5)
self.assertEqual(propertied_mol[1].magmom, -0.5)
def test_get_boxed_structure(self):
s = self.mol.get_boxed_structure(9, 9, 9)
# C atom should be in center of box.
self.assertArrayAlmostEqual(s[4].frac_coords, [0.50000001, 0.5, 0.5])
self.assertArrayAlmostEqual(s[1].frac_coords, [0.6140799, 0.5, 0.45966667])
self.assertRaises(ValueError, self.mol.get_boxed_structure, 1, 1, 1)
s2 = self.mol.get_boxed_structure(5, 5, 5, (2, 3, 4))
self.assertEqual(len(s2), 24 * 5)
self.assertEqual(s2.lattice.abc, (10, 15, 20))
# Test offset option
s3 = self.mol.get_boxed_structure(9, 9, 9, offset=[0.5, 0.5, 0.5])
self.assertArrayAlmostEqual(s3[4].coords, [5, 5, 5])
# Test no_cross option
self.assertRaises(
ValueError,
self.mol.get_boxed_structure,
5,
5,
5,
offset=[10, 10, 10],
no_cross=True,
)
# Test reorder option
no_reorder = self.mol.get_boxed_structure(10, 10, 10, reorder=False)
self.assertEqual(str(s3[0].specie), "H")
self.assertEqual(str(no_reorder[0].specie), "C")
def test_get_distance(self):
self.assertAlmostEqual(self.mol.get_distance(0, 1), 1.089)
def test_get_neighbors(self):
nn = self.mol.get_neighbors(self.mol[0], 1)
self.assertEqual(len(nn), 0)
nn = self.mol.get_neighbors(self.mol[0], 2)
self.assertEqual(len(nn), 4)
def test_get_neighbors_in_shell(self):
nn = self.mol.get_neighbors_in_shell([0, 0, 0], 0, 1)
self.assertEqual(len(nn), 1)
nn = self.mol.get_neighbors_in_shell([0, 0, 0], 1, 0.9)
self.assertEqual(len(nn), 4)
nn = self.mol.get_neighbors_in_shell([0, 0, 0], 1, 0.9)
self.assertEqual(len(nn), 4)
nn = self.mol.get_neighbors_in_shell([0, 0, 0], 2, 0.1)
self.assertEqual(len(nn), 0)
def test_get_dist_matrix(self):
ans = [
[0.0, 1.089, 1.08899995636, 1.08900040717, 1.08900040717],
[1.089, 0.0, 1.77832952654, 1.7783298026, 1.7783298026],
[1.08899995636, 1.77832952654, 0.0, 1.77833003783, 1.77833003783],
[1.08900040717, 1.7783298026, 1.77833003783, 0.0, 1.77833],
[1.08900040717, 1.7783298026, 1.77833003783, 1.77833, 0.0],
]
self.assertArrayAlmostEqual(self.mol.distance_matrix, ans)
def test_break_bond(self):
(mol1, mol2) = self.mol.break_bond(0, 1)
self.assertEqual(mol1.formula, "H3 C1")
self.assertEqual(mol2.formula, "H1")
def test_prop(self):
self.assertEqual(self.mol.charge, 0)
self.assertEqual(self.mol.spin_multiplicity, 1)
self.assertEqual(self.mol.nelectrons, 10)
self.assertArrayAlmostEqual(self.mol.center_of_mass, [0, 0, 0])
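# CH4 with charge=+1 has 9 electrons, so a singlet (spin_multiplicity=1) is impossible and should raise ValueError.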
self.assertRaises(
ValueError,
Molecule,
["C", "H", "H", "H", "H"],
self.coords,
charge=1,
spin_multiplicity=1,
)
mol = Molecule(["C", "H", "H", "H", "H"], self.coords, charge=1)
self.assertEqual(mol.spin_multiplicity, 2)
self.assertEqual(mol.nelectrons, 9)
# Triplet O2
mol = IMolecule(["O"] * 2, [[0, 0, 0], [0, 0, 1.2]], spin_multiplicity=3)
self.assertEqual(mol.spin_multiplicity, 3)
def test_equal(self):
mol = IMolecule(["C", "H", "H", "H", "H"], self.coords, charge=1)
self.assertNotEqual(mol, self.mol)
def test_get_centered_molecule(self):
mol = IMolecule(["O"] * 2, [[0, 0, 0], [0, 0, 1.2]], spin_multiplicity=3)
centered = mol.get_centered_molecule()
self.assertArrayAlmostEqual(centered.center_of_mass, [0, 0, 0])
def test_to_from_dict(self):
d = self.mol.as_dict()
mol2 = IMolecule.from_dict(d)
self.assertEqual(type(mol2), IMolecule)
propertied_mol = Molecule(
["C", "H", "H", "H", "H"],
self.coords,
charge=1,
site_properties={"magmom": [0.5, -0.5, 1, 2, 3]},
)
d = propertied_mol.as_dict()
self.assertEqual(d["sites"][0]["properties"]["magmom"], 0.5)
mol = Molecule.from_dict(d)
self.assertEqual(propertied_mol, mol)
self.assertEqual(mol[0].magmom, 0.5)
self.assertEqual(mol.formula, "H4 C1")
self.assertEqual(mol.charge, 1)
def test_to_from_file_string(self):
for fmt in ["xyz", "json", "g03", "yaml"]:
s = self.mol.to(fmt=fmt)
self.assertIsNotNone(s)
m = IMolecule.from_str(s, fmt=fmt)
self.assertEqual(m, self.mol)
self.assertIsInstance(m, IMolecule)
self.mol.to(filename="CH4_testing.xyz")
self.assertTrue(os.path.exists("CH4_testing.xyz"))
os.remove("CH4_testing.xyz")
self.mol.to(filename="CH4_testing.yaml")
self.assertTrue(os.path.exists("CH4_testing.yaml"))
mol = Molecule.from_file("CH4_testing.yaml")
self.assertEqual(self.mol, mol)
os.remove("CH4_testing.yaml")
class MoleculeTest(PymatgenTest):
def setUp(self):
coords = [
[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000],
]
self.mol = Molecule(["C", "H", "H", "H", "H"], coords)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_mutable_sequence_methods(self):
s = self.mol
s[1] = ("F", [0.5, 0.5, 0.5])
self.assertEqual(s.formula, "H3 C1 F1")
self.assertArrayAlmostEqual(s[1].coords, [0.5, 0.5, 0.5])
s.reverse()
self.assertEqual(s[0].specie, Element("H"))
self.assertArrayAlmostEqual(s[0].coords, [-0.513360, 0.889165, -0.363000])
del s[1]
self.assertEqual(s.formula, "H2 C1 F1")
s[3] = "N", [0, 0, 0], {"charge": 4}
self.assertEqual(s.formula, "H2 N1 F1")
self.assertEqual(s[3].charge, 4)
def test_insert_remove_append(self):
mol = self.mol
mol.insert(1, "O", [0.5, 0.5, 0.5])
self.assertEqual(mol.formula, "H4 C1 O1")
del mol[2]
self.assertEqual(mol.formula, "H3 C1 O1")
mol.set_charge_and_spin(0)
self.assertEqual(mol.spin_multiplicity, 2)
mol.append("N", [1, 1, 1])
self.assertEqual(mol.formula, "H3 C1 N1 O1")
self.assertRaises(TypeError, dict, [(mol, 1)])
mol.remove_sites([0, 1])
self.assertEqual(mol.formula, "H3 N1")
def test_translate_sites(self):
self.mol.translate_sites([0, 1], [0.5, 0.5, 0.5])
self.assertArrayEqual(self.mol.cart_coords[0], [0.5, 0.5, 0.5])
def test_rotate_sites(self):
self.mol.rotate_sites(theta=np.radians(30))
self.assertArrayAlmostEqual(self.mol.cart_coords[2], [0.889164737, 0.513359500, -0.363000000])
def test_replace(self):
self.mol[0] = "Ge"
self.assertEqual(self.mol.formula, "Ge1 H4")
self.mol.replace_species({Element("Ge"): {Element("Ge"): 0.5, Element("Si"): 0.5}})
self.assertEqual(self.mol.formula, "Si0.5 Ge0.5 H4")
# this should change the .5Si .5Ge sites to .75Si .25Ge
self.mol.replace_species({Element("Ge"): {Element("Ge"): 0.5, Element("Si"): 0.5}})
self.assertEqual(self.mol.formula, "Si0.75 Ge0.25 H4")
d = 0.1
pre_perturbation_sites = self.mol.sites[:]
self.mol.perturb(distance=d)
post_perturbation_sites = self.mol.sites
for i, x in enumerate(pre_perturbation_sites):
self.assertAlmostEqual(
x.distance(post_perturbation_sites[i]),
d,
3,
"Bad perturbation distance",
)
def test_add_site_property(self):
self.mol.add_site_property("charge", [4.1, -2, -2, -2, -2])
self.assertEqual(self.mol[0].charge, 4.1)
self.assertEqual(self.mol[1].charge, -2)
self.mol.add_site_property("magmom", [3, 2, 2, 2, 2])
self.assertEqual(self.mol[0].charge, 4.1)
self.assertEqual(self.mol[0].magmom, 3)
self.mol.remove_site_property("magmom")
self.assertRaises(AttributeError, getattr, self.mol[0], "magmom")
def test_to_from_dict(self):
self.mol.append("X", [2, 0, 0])
d = self.mol.as_dict()
mol2 = Molecule.from_dict(d)
self.assertEqual(type(mol2), Molecule)
self.assertMSONable(self.mol)
def test_apply_operation(self):
op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 90)
self.mol.apply_operation(op)
self.assertArrayAlmostEqual(self.mol[2].coords, [0.000000, 1.026719, -0.363000])
def test_substitute(self):
coords = [
[0.000000, 0.000000, 1.08],
[0.000000, 0.000000, 0.000000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000],
]
sub = Molecule(["X", "C", "H", "H", "H"], coords)
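# The leading dummy "X" site marks the attachment point of the functional group used by substitute().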
self.mol.substitute(1, sub)
self.assertAlmostEqual(self.mol.get_distance(0, 4), 1.54)
f = Molecule(["X", "F"], [[0, 0, 0], [0, 0, 1.11]])
self.mol.substitute(2, f)
self.assertAlmostEqual(self.mol.get_distance(0, 7), 1.35)
oh = Molecule(
["X", "O", "H"],
[[0, 0.780362, -0.456316], [0, 0, 0.114079], [0, -0.780362, -0.456316]],
)
self.mol.substitute(1, oh)
self.assertAlmostEqual(self.mol.get_distance(0, 7), 1.43)
self.mol.substitute(3, "methyl")
self.assertEqual(self.mol.formula, "H7 C3 O1 F1")
coords = [
[0.00000, 1.40272, 0.00000],
[0.00000, 2.49029, 0.00000],
[-1.21479, 0.70136, 0.00000],
[-2.15666, 1.24515, 0.00000],
[-1.21479, -0.70136, 0.00000],
[-2.15666, -1.24515, 0.00000],
[0.00000, -1.40272, 0.00000],
[0.00000, -2.49029, 0.00000],
[1.21479, -0.70136, 0.00000],
[2.15666, -1.24515, 0.00000],
[1.21479, 0.70136, 0.00000],
[2.15666, 1.24515, 0.00000],
]
benzene = Molecule(["C", "H", "C", "H", "C", "H", "C", "H", "C", "H", "C", "H"], coords)
benzene.substitute(1, sub)
self.assertEqual(benzene.formula, "H8 C7")
# Carbon attached should be in plane.
self.assertAlmostEqual(benzene[11].coords[2], 0)
benzene[14] = "Br"
benzene.substitute(13, sub)
self.assertEqual(benzene.formula, "H9 C8 Br1")
def test_to_from_file_string(self):
for fmt in ["xyz", "json", "g03"]:
s = self.mol.to(fmt=fmt)
self.assertIsNotNone(s)
m = Molecule.from_str(s, fmt=fmt)
self.assertEqual(m, self.mol)
self.assertIsInstance(m, Molecule)
self.mol.to(filename="CH4_testing.xyz")
self.assertTrue(os.path.exists("CH4_testing.xyz"))
os.remove("CH4_testing.xyz")
def test_extract_cluster(self):
species = self.mol.species * 2
coords = list(self.mol.cart_coords) + list(self.mol.cart_coords + [10, 0, 0])
mol = Molecule(species, coords)
cluster = Molecule.from_sites(mol.extract_cluster([mol[0]]))
self.assertEqual(mol.formula, "H8 C2")
self.assertEqual(cluster.formula, "H4 C1")
if __name__ == "__main__":
import unittest
unittest.main()