#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import uuid
import httplib2
from apiclient import discovery
from apiclient.errors import HttpError
from oauth2client.client import GoogleCredentials
# 30 days in milliseconds
_EXPIRATION_MS = 30 * 24 * 60 * 60 * 1000
NUM_RETRIES = 3
def create_big_query():
"""Authenticates with cloud platform and gets a BiqQuery service object
"""
creds = GoogleCredentials.get_application_default()
return discovery.build(
'bigquery', 'v2', credentials=creds, cache_discovery=False)
def create_dataset(big_query, project_id, dataset_id):
is_success = True
body = {
'datasetReference': {
'projectId': project_id,
'datasetId': dataset_id
}
}
try:
        dataset_req = big_query.datasets().insert(
            projectId=project_id, body=body)
dataset_req.execute(num_retries=NUM_RETRIES)
except HttpError as http_error:
if http_error.resp.status == 409:
print 'Warning: The dataset %s already exists' % dataset_id
else:
# Note: For more debugging info, print "http_error.content"
print 'Error in creating dataset: %s. Err: %s' % (dataset_id,
http_error)
is_success = False
return is_success
def create_table(big_query, project_id, dataset_id, table_id, table_schema,
description):
fields = [{
'name': field_name,
'type': field_type,
'description': field_description
} for (field_name, field_type, field_description) in table_schema]
return create_table2(big_query, project_id, dataset_id, table_id, fields,
description)
def create_partitioned_table(big_query,
project_id,
dataset_id,
table_id,
table_schema,
description,
partition_type='DAY',
expiration_ms=_EXPIRATION_MS):
"""Creates a partitioned table. By default, a date-paritioned table is created with
each partition lasting 30 days after it was last modified.
"""
fields = [{
'name': field_name,
'type': field_type,
'description': field_description
} for (field_name, field_type, field_description) in table_schema]
return create_table2(big_query, project_id, dataset_id, table_id, fields,
description, partition_type, expiration_ms)
def create_table2(big_query,
project_id,
dataset_id,
table_id,
fields_schema,
description,
partition_type=None,
expiration_ms=None):
is_success = True
body = {
'description': description,
'schema': {
'fields': fields_schema
},
'tableReference': {
'datasetId': dataset_id,
'projectId': project_id,
'tableId': table_id
}
}
if partition_type and expiration_ms:
body["timePartitioning"] = {
"type": partition_type,
"expirationMs": expiration_ms
}
try:
table_req = big_query.tables().insert(
projectId=project_id, datasetId=dataset_id, body=body)
res = table_req.execute(num_retries=NUM_RETRIES)
print 'Successfully created %s "%s"' % (res['kind'], res['id'])
except HttpError as http_error:
if http_error.resp.status == 409:
print 'Warning: Table %s already exists' % table_id
else:
print 'Error in creating table: %s. Err: %s' % (table_id,
http_error)
is_success = False
return is_success
def patch_table(big_query, project_id, dataset_id, table_id, fields_schema):
is_success = True
body = {
'schema': {
'fields': fields_schema
},
'tableReference': {
'datasetId': dataset_id,
'projectId': project_id,
'tableId': table_id
}
}
try:
table_req = big_query.tables().patch(
projectId=project_id,
datasetId=dataset_id,
tableId=table_id,
body=body)
res = table_req.execute(num_retries=NUM_RETRIES)
print 'Successfully patched %s "%s"' % (res['kind'], res['id'])
except HttpError as http_error:
        print 'Error in patching table: %s. Err: %s' % (table_id, http_error)
is_success = False
return is_success
def insert_rows(big_query, project_id, dataset_id, table_id, rows_list):
is_success = True
body = {'rows': rows_list}
try:
insert_req = big_query.tabledata().insertAll(
projectId=project_id,
datasetId=dataset_id,
tableId=table_id,
body=body)
res = insert_req.execute(num_retries=NUM_RETRIES)
if res.get('insertErrors', None):
print 'Error inserting rows! Response: %s' % res
is_success = False
except HttpError as http_error:
        print 'Error inserting rows into table %s. Err: %s' % (table_id,
                                                                http_error)
is_success = False
return is_success
def sync_query_job(big_query, project_id, query, timeout=5000):
query_data = {'query': query, 'timeoutMs': timeout}
query_job = None
try:
query_job = big_query.jobs().query(
projectId=project_id,
body=query_data).execute(num_retries=NUM_RETRIES)
except HttpError as http_error:
print 'Query execute job failed with error: %s' % http_error
print http_error.content
return query_job
# List of (column name, column type, description) tuples
def make_row(unique_row_id, row_values_dict):
"""row_values_dict is a dictionary of column name and column value.
"""
return {'insertId': unique_row_id, 'json': row_values_dict}
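# Illustrative usage sketch (not called anywhere in this module); the project,
# dataset and table names below are hypothetical placeholders.
def _example_usage(project_id='my-project'):
    big_query = create_big_query()
    create_dataset(big_query, project_id, 'example_dataset')
    schema = [('metric', 'STRING', 'Name of the metric'),
              ('value', 'FLOAT', 'Measured value')]
    create_partitioned_table(big_query, project_id, 'example_dataset',
                             'example_table', schema, 'An example table')
    rows = [make_row(str(uuid.uuid4()), {'metric': 'latency', 'value': 1.5})]
    return insert_rows(big_query, project_id, 'example_dataset',
                       'example_table', rows)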
# Lint as: python3
"""Tests for improve_nas.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from absl import flags
from absl.testing import parameterized
import adanet
from adanet.research.improve_nas.trainer import improve_nas
import numpy as np
import tensorflow.compat.v1 as tf
_IMAGE_DIM = 32
class _FakeSummary(object):
"""A fake `Summary`."""
def scalar(self, name, tensor):
del name # Unused
del tensor # Unused
def _optimizer(learning_rate):
return tf.train.GradientDescentOptimizer(learning_rate), learning_rate
def _builder(snapshot=False,
knowledge_distillation=improve_nas.KnowledgeDistillation.NONE,
checkpoint_dir=None,
use_aux_head=False,
learn_mixture_weights=False,
model_version="cifar"):
hparams = tf.contrib.training.HParams(
clip_gradients=5.,
stem_multiplier=3.0,
drop_path_keep_prob=0.6,
num_cells=3,
use_aux_head=use_aux_head,
aux_head_weight=0.4,
label_smoothing=0.1,
num_conv_filters=4,
dense_dropout_keep_prob=1.0,
filter_scaling_rate=2.0,
num_reduction_layers=2,
data_format="NHWC",
use_bounded_activation=False,
skip_reduction_layer_input=0,
initial_learning_rate=.01,
complexity_decay_rate=0.9,
weight_decay=.0001,
knowledge_distillation=knowledge_distillation,
snapshot=snapshot,
learn_mixture_weights=learn_mixture_weights,
mixture_weight_type=adanet.MixtureWeightType.SCALAR,
model_version=model_version,
total_training_steps=100)
return improve_nas.Builder(
[tf.feature_column.numeric_column(key="x", shape=[32, 32, 3])],
seed=11,
optimizer_fn=_optimizer,
checkpoint_dir=checkpoint_dir,
hparams=hparams)
def _subnetwork_generator(checkpoint_dir):
hparams = tf.contrib.training.HParams(
clip_gradients=5.,
stem_multiplier=3.0,
drop_path_keep_prob=0.6,
num_cells=3,
use_aux_head=False,
aux_head_weight=0.4,
label_smoothing=0.1,
num_conv_filters=4,
dense_dropout_keep_prob=1.0,
filter_scaling_rate=2.0,
complexity_decay_rate=0.9,
num_reduction_layers=2,
data_format="NHWC",
skip_reduction_layer_input=0,
initial_learning_rate=.01,
use_bounded_activation=False,
weight_decay=.0001,
knowledge_distillation=improve_nas.KnowledgeDistillation.NONE,
snapshot=False,
learn_mixture_weights=False,
mixture_weight_type=adanet.MixtureWeightType.SCALAR,
model_version="cifar",
total_training_steps=100)
return improve_nas.Generator(
[tf.feature_column.numeric_column(key="x", shape=[32, 32, 3])],
seed=11,
optimizer_fn=_optimizer,
iteration_steps=3,
checkpoint_dir=checkpoint_dir,
hparams=hparams)
class ImproveNasBuilderTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super(ImproveNasBuilderTest, self).setUp()
# Setup and cleanup test directory.
self.test_subdirectory = os.path.join(tf.flags.FLAGS.test_tmpdir, self.id())
shutil.rmtree(self.test_subdirectory, ignore_errors=True)
os.makedirs(self.test_subdirectory)
def tearDown(self):
super(ImproveNasBuilderTest, self).tearDown()
shutil.rmtree(self.test_subdirectory, ignore_errors=True)
@parameterized.named_parameters({
"testcase_name": "two_subnetworks_adaptive_knowledge_distillation_aux",
"builder_params": [
{
"knowledge_distillation":
improve_nas.KnowledgeDistillation.ADAPTIVE,
"use_aux_head": True,
},
{
"knowledge_distillation":
improve_nas.KnowledgeDistillation.ADAPTIVE,
"use_aux_head": True,
},
],
"want_name": "NasNet_A_1.0_96_adaptive_cifar",
}, {
"testcase_name": "two_subnetworks_born_again_knowledge_distillation_w",
"builder_params": [
{
"knowledge_distillation":
improve_nas.KnowledgeDistillation.BORN_AGAIN,
"use_aux_head":
True,
"learn_mixture_weights": True,
},
{
"knowledge_distillation":
improve_nas.KnowledgeDistillation.BORN_AGAIN,
"use_aux_head":
True,
"learn_mixture_weights": True,
},
],
"want_name": "NasNet_A_1.0_96_born_again_cifar",
})
def test_build_subnetwork(self, builder_params, want_name):
with tf.Graph().as_default() as g, self.test_session(graph=g) as sess:
data = np.concatenate([
np.ones((1, _IMAGE_DIM, _IMAGE_DIM, 1)), 2. * np.ones(
(1, _IMAGE_DIM, _IMAGE_DIM, 1))
])
features = {"x": tf.constant(data)}
labels = tf.constant([0, 1])
training = True
mode = tf.estimator.ModeKeys.TRAIN
head = tf.contrib.estimator.binary_classification_head(
loss_reduction=tf.losses.Reduction.SUM)
ensemble = None
name = None
subnetwork = None
builders = []
for builder_param in builder_params:
builders.append(
_builder(checkpoint_dir=self.test_subdirectory, **builder_param))
for idx, builder in enumerate(builders):
name = builder.name
# Pass the subnetworks of previous builders to the next builder.
with tf.variable_scope("subnetwork_{}".format(idx)):
subnetwork = builder.build_subnetwork(
features=features,
logits_dimension=head.logits_dimension,
training=training,
iteration_step=tf.train.get_or_create_global_step(),
summary=_FakeSummary(),
previous_ensemble=ensemble)
logits = subnetwork.logits
weighted_subnetworks = []
if ensemble:
logits += ensemble.logits
weighted_subnetworks = ensemble.weighted_subnetworks
ensemble = adanet.Ensemble(
weighted_subnetworks=weighted_subnetworks + [
adanet.WeightedSubnetwork(
name=None,
logits=logits,
weight=None,
subnetwork=subnetwork)
],
logits=logits,
bias=0.)
estimator_spec = head.create_estimator_spec(
features=features,
labels=labels,
mode=mode,
train_op_fn=lambda loss: tf.no_op(),
logits=ensemble.logits)
sess.run(tf.global_variables_initializer())
train_op = builders[-1].build_subnetwork_train_op(
subnetwork,
estimator_spec.loss,
var_list=None,
labels=labels,
iteration_step=tf.train.get_or_create_global_step(),
summary=_FakeSummary(),
previous_ensemble=ensemble)
for _ in range(10):
sess.run(train_op)
self.assertEqual(want_name, name)
self.assertGreater(sess.run(estimator_spec.loss), 0.0)
class QuetzalGeneratorTest(tf.test.TestCase):
def test_candidate_generation(self):
self.test_subdirectory = os.path.join(flags.FLAGS.test_tmpdir, self.id())
shutil.rmtree(self.test_subdirectory, ignore_errors=True)
os.mkdir(self.test_subdirectory)
subnetwork_generator = _subnetwork_generator(self.test_subdirectory)
subnetwork_builders = subnetwork_generator.generate_candidates(
previous_ensemble=None,
# The following arguments are unused by
        # improve_nas.Generator.
iteration_number=0,
previous_ensemble_reports=[],
all_reports=[])
self.assertEqual(1, len(subnetwork_builders))
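# Illustrative sketch only (not used by the tests above): how a generator like
# the one returned by `_subnetwork_generator` would typically be plugged into
# an `adanet.Estimator`. The head, input_fn and step counts are hypothetical
# placeholders; the exact keyword arguments should be checked against the
# installed adanet version.
def _example_adanet_training(checkpoint_dir, train_input_fn):
  head = tf.contrib.estimator.binary_classification_head()
  estimator = adanet.Estimator(
      head=head,
      subnetwork_generator=_subnetwork_generator(checkpoint_dir),
      max_iteration_steps=100,
      model_dir=checkpoint_dir)
  return estimator.train(input_fn=train_input_fn, max_steps=300)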
if __name__ == "__main__":
tf.test.main()
import unittest
from pylayers.gis.layout import *
L1 = Layout('defstr.lay')
class TestLayout(unittest.TestCase):
def test_add_fnod(self):
L = Layout('defstr.lay')
L.add_fnod(p=(10,10))
self.assertEqual(L.Np,13)
def test_add_furniture(self):
L = Layout('defstr.lay')
L.add_furniture(name='R1_C',
matname='PARTITION', origin=(5., 5.),
zmin=0.,
height=1.,
width=1.,
length=1.,
angle=0.)
def test_add_nfpe(self):
L = Layout('defstr.lay')
L.add_nfpe(-8,7,6)
self.assertEqual(L.Np,13)
def test_angleonlink(self):
data1 = L1.angleonlink(np.array([2,2.5]),np.array([8,4]))
data2 = L1.angleonlink3(np.array([2,2.5,1.5]),np.array([8,4,1.5]))
print(data1)
print(data2)
        # The 3D intersection resolves the ambiguity on vertical segments.
        # Problem at the junction between segments: here 2 segments are found.
#
data1 = L1.angleonlink(np.array([2,2]),np.array([8,4]))
data2 = L1.angleonlink3(np.array([2,2,1.5]),np.array([8,4,1.5]))
print(data1)
print(data2)
def test_boundary(self):
L = Layout('defstr.lay')
L.boundary()
def test_build(self):
L = Layout('defstr.lay')
L.build()
def test_cleanup(self):
L = Layout('defstr.lay')
L.add_fnod(p=(10,10))
L.cleanup()
self.assertEqual(L.Np,12)
def test_load(self):
L = Layout('defstr.lay')
self.assertEqual(L.Np,12)
self.assertEqual(L.Ns,15)
def test_check(self):
bc,ds = L1.check()
self.assertTrue(bc)
def test_check2(self):
L = Layout('defstr.lay')
L.build()
tseg = L.check2()
L.build()
L.check_Gi()
def test_have_subseg(self):
self.assertTrue(L1.have_subseg(1))
self.assertTrue(L1.have_subseg(2))
self.assertTrue(L1.have_subseg(3))
self.assertFalse(L1.have_subseg(4))
self.assertFalse(L1.have_subseg(5))
self.assertFalse(L1.have_subseg(6))
self.assertFalse(L1.have_subseg(7))
self.assertFalse(L1.have_subseg(8))
self.assertFalse(L1.have_subseg(9))
self.assertFalse(L1.have_subseg(10))
self.assertFalse(L1.have_subseg(11))
def test_add_pons(self):
L = Layout('defstr.lay')
L.add_pons(1,alpha=0.6)
self.assertEqual(L.Np,13)
def test_isseg(self):
self.assertTrue(L1.isseg(-8,-7))
def test_ispoint(self):
pto = np.array([[0],[0]])
num = L1.ispoint(pto)
self.assertEqual(num,-1)
def test_seg_intersection(self):
pt1 = L1.Gs.pos[-8]
pt2 = L1.Gs.pos[-7]
liseg,lipsh = L1.seg_intersection(**{'ta':pt1,'he':pt2})
def test_clip(self):
seglist = L1.clip(2,8,2,4)
self.assertEqual(sum(seglist),10)
def test_cy2pt(self):
L = Layout('defstr.lay')
L.build()
pt = L.cy2pt(2)
def test_geomfile(self):
L1.geomfile()
def test_DLRosm(self):
L = Layout('DLR.osm')
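# Illustrative sketch only (not part of the test suite): the basic Layout
# workflow exercised by the tests above, assuming 'defstr.lay' is available
# on the pylayers project path.
def _example_layout_workflow():
    L = Layout('defstr.lay')
    L.build()
    # Angle/intersection data along a 2D and a 3D link (cf. test_angleonlink).
    data2d = L.angleonlink(np.array([2, 2.5]), np.array([8, 4]))
    data3d = L.angleonlink3(np.array([2, 2.5, 1.5]), np.array([8, 4, 1.5]))
    return data2d, data3d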
if __name__ == '__main__':
unittest.main()
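# The comment block below appears to be an inventory of Layout methods,
# presumably kept as a checklist for future test coverage: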
# add_pnod
# add_pons
# add_segment
# angleonlinkold
# build
# buildGi
# buildGr
# buildGt
# buildGv
# buildGw
# _convex_hull
# cy2pt
# cycleinline
# _delaunay
# del_points
# del_segment
# diag
# distwall
# dumpr
# dumpw
# ed2nd
# edit_seg
# exportosm
# extrseg
# facet3D
# facets3D
# filterGi
# _find_diffractions
# find_edgelist
# g2npy
# geomfile
# getangles
# get_diffslab
# get_paths
# get_points
# get_Sg_pos
# get_zone
# have_subseg
# importosm
# importres
# importshp
# info
# info_segment
# intercy
# _interlist
# isindoor
# ispoint
# isseg
# layerongrid
# layeronlink
# load
# loadfur
# load_modif
# ls
# mask
# _merge_polygons
# merge_segment
# nd2seg
# numseg
# off_overlay
# offset_index
# onseg
# outputGi
# outputGi_mp
# outputGi_new
# plot
# plot_segments
# pltlines
# pltpoly
# pltvnodes
# point_touches_seg
# polysh2geu
# pt2cy
# pt2ro
# ptGs2cy
# ptin
# randTxRx
# repair
# room2nodes
# room2segments
# rotate
# save
# scl_overlay
# seg2pts
# seg2ro
# seginframe
# seginframe2
# seginline
# segpt
# seguv
# show
# show3
# _show3
# showG
# _showGi
# showGs
# _showGt
# _showGv
# show_layer
# show_nodes
# show_seg1
# show_segment
# showSig
# signature
# subseg
# thwall
# translate
# _triangle
# _triangle_old
# updateshseg
# _updGsncy
# visilist
# visi_papb
# _visual_check
# waypointGw
# wedge
# wedge2
from __future__ import absolute_import, division, print_function
import hashlib
import linecache
from operator import itemgetter
from . import _config
from ._compat import PY2, iteritems, isclass, iterkeys, metadata_proxy
from .exceptions import (
DefaultAlreadySetError,
FrozenInstanceError,
NotAnAttrsClassError,
)
# This is used at least twice, so cache it here.
_obj_setattr = object.__setattr__
_init_convert_pat = "__attr_convert_{}"
_init_factory_pat = "__attr_factory_{}"
_tuple_property_pat = " {attr_name} = property(itemgetter({index}))"
_empty_metadata_singleton = metadata_proxy({})
class _Nothing(object):
"""
Sentinel class to indicate the lack of a value when ``None`` is ambiguous.
All instances of `_Nothing` are equal.
"""
def __copy__(self):
return self
def __deepcopy__(self, _):
return self
def __eq__(self, other):
return other.__class__ == _Nothing
def __ne__(self, other):
return not self == other
def __repr__(self):
return "NOTHING"
def __hash__(self):
return 0xdeadbeef
NOTHING = _Nothing()
"""
Sentinel to indicate the lack of a value when ``None`` is ambiguous.
"""
def attr(default=NOTHING, validator=None,
repr=True, cmp=True, hash=None, init=True,
convert=None, metadata={}):
"""
Create a new attribute on a class.
.. warning::
Does *not* do anything unless the class is also decorated with
:func:`attr.s`!
:param default: A value that is used if an ``attrs``-generated ``__init__``
is used and no value is passed while instantiating or the attribute is
excluded using ``init=False``.
If the value is an instance of :class:`Factory`, its callable will be
used to construct a new value (useful for mutable datatypes like lists
or dicts).
If a default is not set (or set manually to ``attr.NOTHING``), a value
*must* be supplied when instantiating; otherwise a :exc:`TypeError`
will be raised.
The default can also be set using decorator notation as shown below.
:type default: Any value.
:param validator: :func:`callable` that is called by ``attrs``-generated
``__init__`` methods after the instance has been initialized. They
receive the initialized instance, the :class:`Attribute`, and the
passed value.
The return value is *not* inspected so the validator has to throw an
exception itself.
If a ``list`` is passed, its items are treated as validators and must
all pass.
Validators can be globally disabled and re-enabled using
:func:`get_run_validators`.
The validator can also be set using decorator notation as shown below.
:type validator: ``callable`` or a ``list`` of ``callable``\ s.
:param bool repr: Include this attribute in the generated ``__repr__``
method.
:param bool cmp: Include this attribute in the generated comparison methods
(``__eq__`` et al).
:param hash: Include this attribute in the generated ``__hash__``
        method. If ``None`` (default), mirror *cmp*'s value. This is the
        correct behavior according to the Python spec. Setting this value to
        anything other than ``None`` is *discouraged*.
:type hash: ``bool`` or ``None``
:param bool init: Include this attribute in the generated ``__init__``
method. It is possible to set this to ``False`` and set a default
        value. In that case this attribute is unconditionally initialized
with the specified default value or factory.
:param callable convert: :func:`callable` that is called by
``attrs``-generated ``__init__`` methods to convert attribute's value
to the desired format. It is given the passed-in value, and the
returned value will be used as the new value of the attribute. The
value is converted before being passed to the validator, if any.
:param metadata: An arbitrary mapping, to be used by third-party
components. See :ref:`extending_metadata`.
.. versionchanged:: 17.1.0 *validator* can be a ``list`` now.
.. versionchanged:: 17.1.0
        *hash* is ``None`` and therefore mirrors *cmp* by default.
"""
if hash is not None and hash is not True and hash is not False:
raise TypeError(
"Invalid value for hash. Must be True, False, or None."
)
return _CountingAttr(
default=default,
validator=validator,
repr=repr,
cmp=cmp,
hash=hash,
init=init,
convert=convert,
metadata=metadata,
)
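# Illustrative sketch of the decorator notation mentioned above (class and
# attribute names are hypothetical); see also `_CountingAttr.default` and
# `_CountingAttr.validator` further down:
#
#   @attributes
#   class C(object):
#       x = attr()
#
#       @x.default
#       def _x_default(self):
#           return 42
#
#       @x.validator
#       def _x_check(self, attribute, value):
#           if value > 42:
#               raise ValueError("x must not exceed 42")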
def _make_attr_tuple_class(cls_name, attr_names):
"""
Create a tuple subclass to hold `Attribute`s for an `attrs` class.
The subclass is a bare tuple with properties for names.
class MyClassAttributes(tuple):
__slots__ = ()
x = property(itemgetter(0))
"""
attr_class_name = "{}Attributes".format(cls_name)
attr_class_template = [
"class {}(tuple):".format(attr_class_name),
" __slots__ = ()",
]
if attr_names:
for i, attr_name in enumerate(attr_names):
attr_class_template.append(_tuple_property_pat.format(
index=i,
attr_name=attr_name,
))
else:
attr_class_template.append(" pass")
globs = {"itemgetter": itemgetter}
eval(compile("\n".join(attr_class_template), "", "exec"), globs)
return globs[attr_class_name]
def _transform_attrs(cls, these):
"""
Transforms all `_CountingAttr`s on a class into `Attribute`s and saves the
list in `__attrs_attrs__`.
If *these* is passed, use that and don't look for them on the class.
"""
super_cls = []
for c in reversed(cls.__mro__[1:-1]):
sub_attrs = getattr(c, "__attrs_attrs__", None)
if sub_attrs is not None:
super_cls.extend(a for a in sub_attrs if a not in super_cls)
if these is None:
ca_list = [(name, attr)
for name, attr
in cls.__dict__.items()
if isinstance(attr, _CountingAttr)]
else:
ca_list = [(name, ca)
for name, ca
in iteritems(these)]
non_super_attrs = [
Attribute.from_counting_attr(name=attr_name, ca=ca)
for attr_name, ca
in sorted(ca_list, key=lambda e: e[1].counter)
]
attr_names = [a.name for a in super_cls + non_super_attrs]
AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names)
    cls.__attrs_attrs__ = AttrsClass(super_cls + non_super_attrs)
had_default = False
for a in cls.__attrs_attrs__:
if these is None and a not in super_cls:
setattr(cls, a.name, a)
if had_default is True and a.default is NOTHING and a.init is True:
raise ValueError(
"No mandatory attributes allowed after an attribute with a "
"default value or factory. Attribute in question: {a!r}"
.format(a=a)
)
elif had_default is False and \
a.default is not NOTHING and \
a.init is not False:
had_default = True
def _frozen_setattrs(self, name, value):
"""
Attached to frozen classes as __setattr__.
"""
raise FrozenInstanceError()
def _frozen_delattrs(self, name):
"""
Attached to frozen classes as __delattr__.
"""
raise FrozenInstanceError()
def attributes(maybe_cls=None, these=None, repr_ns=None,
repr=True, cmp=True, hash=None, init=True,
slots=False, frozen=False, str=False):
r"""
A class decorator that adds `dunder
<https://wiki.python.org/moin/DunderAlias>`_\ -methods according to the
specified attributes using :func:`attr.ib` or the *these* argument.
:param these: A dictionary of name to :func:`attr.ib` mappings. This is
useful to avoid the definition of your attributes within the class body
because you can't (e.g. if you want to add ``__repr__`` methods to
Django models) or don't want to.
If *these* is not ``None``, ``attrs`` will *not* search the class body
for attributes.
:type these: :class:`dict` of :class:`str` to :func:`attr.ib`
:param str repr_ns: When using nested classes, there's no way in Python 2
to automatically detect that. Therefore it's possible to set the
namespace explicitly for a more meaningful ``repr`` output.
:param bool repr: Create a ``__repr__`` method with a human readable
        representation of ``attrs`` attributes.
:param bool str: Create a ``__str__`` method that is identical to
``__repr__``. This is usually not necessary except for
:class:`Exception`\ s.
:param bool cmp: Create ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``,
``__gt__``, and ``__ge__`` methods that compare the class as if it were
        a tuple of its ``attrs`` attributes. But the attributes are *only*
        compared if the types of both classes are *identical*!
:param hash: If ``None`` (default), the ``__hash__`` method is generated
        according to how *cmp* and *frozen* are set.
1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you.
2. If *cmp* is True and *frozen* is False, ``__hash__`` will be set to
None, marking it unhashable (which it is).
3. If *cmp* is False, ``__hash__`` will be left untouched meaning the
``__hash__`` method of the superclass will be used (if superclass is
           ``object``, this means it will fall back to id-based hashing).
        Although not recommended, you can decide for yourself and force
        ``attrs`` to create one (e.g. if the class is immutable even though you
        didn't freeze it programmatically) by passing ``True``, or prevent
        ``attrs`` from creating one by passing ``False``. Both of these cases
        are rather special and should be used carefully.
See the `Python documentation \
<https://docs.python.org/3/reference/datamodel.html#object.__hash__>`_
and the `GitHub issue that led to the default behavior \
<https://github.com/python-attrs/attrs/issues/136>`_ for more details.
:type hash: ``bool`` or ``None``
    :param bool init: Create a ``__init__`` method that initializes the
``attrs`` attributes. Leading underscores are stripped for the
argument name. If a ``__attrs_post_init__`` method exists on the
class, it will be called after the class is fully initialized.
:param bool slots: Create a slots_-style class that's more
memory-efficient. See :ref:`slots` for further ramifications.
:param bool frozen: Make instances immutable after initialization. If
someone attempts to modify a frozen instance,
:exc:`attr.exceptions.FrozenInstanceError` is raised.
Please note:
1. This is achieved by installing a custom ``__setattr__`` method
           on your class, so you can't implement your own.
2. True immutability is impossible in Python.
        3. This *does* have a minor runtime performance :ref:`impact
<how-frozen>` when initializing new instances. In other words:
``__init__`` is slightly slower with ``frozen=True``.
4. If a class is frozen, you cannot modify ``self`` in
``__attrs_post_init__`` or a self-written ``__init__``. You can
circumvent that limitation by using
``object.__setattr__(self, "attribute_name", value)``.
.. _slots: https://docs.python.org/3.5/reference/datamodel.html#slots
.. versionadded:: 16.0.0 *slots*
.. versionadded:: 16.1.0 *frozen*
.. versionadded:: 16.3.0 *str*, and support for ``__attrs_post_init__``.
.. versionchanged::
17.1.0 *hash* supports ``None`` as value which is also the default
now.
"""
def wrap(cls):
if getattr(cls, "__class__", None) is None:
raise TypeError("attrs only works with new-style classes.")
if repr is False and str is True:
raise ValueError(
"__str__ can only be generated if a __repr__ exists."
)
if slots:
# Only need this later if we're using slots.
if these is None:
ca_list = [name
for name, attr
in cls.__dict__.items()
if isinstance(attr, _CountingAttr)]
else:
ca_list = list(iterkeys(these))
_transform_attrs(cls, these)
# Can't just re-use frozen name because Python's scoping. :(
# Can't compare function objects because Python 2 is terrible. :(
effectively_frozen = _has_frozen_superclass(cls) or frozen
if repr is True:
cls = _add_repr(cls, ns=repr_ns)
if str is True:
cls.__str__ = cls.__repr__
if cmp is True:
cls = _add_cmp(cls)
if hash is not True and hash is not False and hash is not None:
raise TypeError(
"Invalid value for hash. Must be True, False, or None."
)
elif hash is False or (hash is None and cmp is False):
pass
elif hash is True or (hash is None and cmp is True and frozen is True):
cls = _add_hash(cls)
else:
cls.__hash__ = None
if init is True:
cls = _add_init(cls, effectively_frozen)
if effectively_frozen is True:
cls.__setattr__ = _frozen_setattrs
cls.__delattr__ = _frozen_delattrs
if slots is True:
# slots and frozen require __getstate__/__setstate__ to work
cls = _add_pickle(cls)
if slots is True:
cls_dict = dict(cls.__dict__)
cls_dict["__slots__"] = tuple(ca_list)
for ca_name in ca_list:
# It might not actually be in there, e.g. if using 'these'.
cls_dict.pop(ca_name, None)
cls_dict.pop("__dict__", None)
qualname = getattr(cls, "__qualname__", None)
cls = type(cls)(cls.__name__, cls.__bases__, cls_dict)
if qualname is not None:
cls.__qualname__ = qualname
return cls
    # maybe_cls's type depends on the usage of the decorator. It's a class
    # if it's used as `@attributes` but ``None`` if used as `@attributes()`.
if maybe_cls is None:
return wrap
else:
return wrap(maybe_cls)
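# Minimal usage sketch for the decorator above (illustrative only; the class
# and attribute names are hypothetical). Wrapped in a function so importing
# this module does not define the example class.
def _example_attributes_usage():
    @attributes(slots=True, frozen=True)
    class Point(object):
        x = attr()
        y = attr(default=0)

    p = Point(1)          # -> Point(x=1, y=0)
    return p == Point(1)  # True: generated __eq__ compares attribute tuples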
if PY2:
def _has_frozen_superclass(cls):
"""
Check whether *cls* has a frozen ancestor by looking at its
__setattr__.
"""
return (
getattr(
cls.__setattr__, "__module__", None
) == _frozen_setattrs.__module__ and
cls.__setattr__.__name__ == _frozen_setattrs.__name__
)
else:
def _has_frozen_superclass(cls):
"""
Check whether *cls* has a frozen ancestor by looking at its
__setattr__.
"""
return cls.__setattr__ == _frozen_setattrs
def _attrs_to_tuple(obj, attrs):
"""
Create a tuple of all values of *obj*'s *attrs*.
"""
return tuple(getattr(obj, a.name) for a in attrs)
def _add_hash(cls, attrs=None):
"""
Add a hash method to *cls*.
"""
if attrs is None:
attrs = [a
for a in cls.__attrs_attrs__
if a.hash is True or (a.hash is None and a.cmp is True)]
def hash_(self):
"""
Automatically created by attrs.
"""
return hash(_attrs_to_tuple(self, attrs))
cls.__hash__ = hash_
return cls
def _add_cmp(cls, attrs=None):
"""
Add comparison methods to *cls*.
"""
if attrs is None:
attrs = [a for a in cls.__attrs_attrs__ if a.cmp]
def attrs_to_tuple(obj):
"""
Save us some typing.
"""
return _attrs_to_tuple(obj, attrs)
def eq(self, other):
"""
Automatically created by attrs.
"""
if other.__class__ is self.__class__:
return attrs_to_tuple(self) == attrs_to_tuple(other)
else:
return NotImplemented
def ne(self, other):
"""
Automatically created by attrs.
"""
result = eq(self, other)
if result is NotImplemented:
return NotImplemented
else:
return not result
def lt(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) < attrs_to_tuple(other)
else:
return NotImplemented
def le(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) <= attrs_to_tuple(other)
else:
return NotImplemented
def gt(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) > attrs_to_tuple(other)
else:
return NotImplemented
def ge(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) >= attrs_to_tuple(other)
else:
return NotImplemented
cls.__eq__ = eq
cls.__ne__ = ne
cls.__lt__ = lt
cls.__le__ = le
cls.__gt__ = gt
cls.__ge__ = ge
return cls
def _add_repr(cls, ns=None, attrs=None):
"""
Add a repr method to *cls*.
"""
if attrs is None:
attrs = [a for a in cls.__attrs_attrs__ if a.repr]
def repr_(self):
"""
Automatically created by attrs.
"""
real_cls = self.__class__
if ns is None:
qualname = getattr(real_cls, "__qualname__", None)
if qualname is not None:
class_name = qualname.rsplit(">.", 1)[-1]
else:
class_name = real_cls.__name__
else:
class_name = ns + "." + real_cls.__name__
return "{0}({1})".format(
class_name,
", ".join(a.name + "=" + repr(getattr(self, a.name))
for a in attrs)
)
cls.__repr__ = repr_
return cls
def _add_init(cls, frozen):
"""
Add a __init__ method to *cls*. If *frozen* is True, make it immutable.
"""
attrs = [a for a in cls.__attrs_attrs__
if a.init or a.default is not NOTHING]
# We cache the generated init methods for the same kinds of attributes.
sha1 = hashlib.sha1()
sha1.update(repr(attrs).encode("utf-8"))
unique_filename = "<attrs generated init {0}>".format(
sha1.hexdigest()
)
script, globs = _attrs_to_script(
attrs,
frozen,
getattr(cls, "__attrs_post_init__", False),
)
locs = {}
bytecode = compile(script, unique_filename, "exec")
attr_dict = dict((a.name, a) for a in attrs)
globs.update({
"NOTHING": NOTHING,
"attr_dict": attr_dict,
})
if frozen is True:
# Save the lookup overhead in __init__ if we need to circumvent
# immutability.
globs["_cached_setattr"] = _obj_setattr
eval(bytecode, globs, locs)
init = locs["__init__"]
    # In order for debuggers like PDB to be able to step through the code,
    # we add a fake linecache entry.
linecache.cache[unique_filename] = (
len(script),
None,
script.splitlines(True),
unique_filename
)
cls.__init__ = init
return cls
def _add_pickle(cls):
"""
Add pickle helpers, needed for frozen and slotted classes
"""
def _slots_getstate__(obj):
"""
Play nice with pickle.
"""
return tuple(getattr(obj, a.name) for a in fields(obj.__class__))
def _slots_setstate__(obj, state):
"""
Play nice with pickle.
"""
__bound_setattr = _obj_setattr.__get__(obj, Attribute)
for a, value in zip(fields(obj.__class__), state):
__bound_setattr(a.name, value)
cls.__getstate__ = _slots_getstate__
cls.__setstate__ = _slots_setstate__
return cls
def fields(cls):
"""
Returns the tuple of ``attrs`` attributes for a class.
The tuple also allows accessing the fields by their names (see below for
examples).
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
    :rtype: tuple (with name accessors) of :class:`attr.Attribute`
.. versionchanged:: 16.2.0 Returned tuple allows accessing the fields
by name.
"""
if not isclass(cls):
raise TypeError("Passed object must be a class.")
attrs = getattr(cls, "__attrs_attrs__", None)
if attrs is None:
raise NotAnAttrsClassError(
"{cls!r} is not an attrs-decorated class.".format(cls=cls)
)
return attrs
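# Example (illustrative): for a class ``C`` defined with ``x = attr()``,
# ``fields(C)`` returns the tuple of ``Attribute`` instances and
# ``fields(C).x`` (or ``fields(C)[0]``) is the ``Attribute`` describing ``x``.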
def validate(inst):
"""
Validate all attributes on *inst* that have a validator.
    Lets all exceptions pass through.
:param inst: Instance of a class with ``attrs`` attributes.
"""
if _config._run_validators is False:
return
for a in fields(inst.__class__):
v = a.validator
if v is not None:
v(inst, a, getattr(inst, a.name))
def _attrs_to_script(attrs, frozen, post_init):
"""
Return a script of an initializer for *attrs* and a dict of globals.
The globals are expected by the generated script.
If *frozen* is True, we cannot set the attributes directly so we use
a cached ``object.__setattr__``.
"""
lines = []
if frozen is True:
lines.append(
# Circumvent the __setattr__ descriptor to save one lookup per
# assignment.
"_setattr = _cached_setattr.__get__(self, self.__class__)"
)
def fmt_setter(attr_name, value_var):
return "_setattr('%(attr_name)s', %(value_var)s)" % {
"attr_name": attr_name,
"value_var": value_var,
}
def fmt_setter_with_converter(attr_name, value_var):
conv_name = _init_convert_pat.format(attr_name)
return "_setattr('%(attr_name)s', %(conv)s(%(value_var)s))" % {
"attr_name": attr_name,
"value_var": value_var,
"conv": conv_name,
}
else:
def fmt_setter(attr_name, value):
return "self.%(attr_name)s = %(value)s" % {
"attr_name": attr_name,
"value": value,
}
def fmt_setter_with_converter(attr_name, value_var):
conv_name = _init_convert_pat.format(attr_name)
return "self.%(attr_name)s = %(conv)s(%(value_var)s)" % {
"attr_name": attr_name,
"value_var": value_var,
"conv": conv_name,
}
args = []
attrs_to_validate = []
# This is a dictionary of names to validator and converter callables.
# Injecting this into __init__ globals lets us avoid lookups.
names_for_globals = {}
for a in attrs:
if a.validator:
attrs_to_validate.append(a)
attr_name = a.name
arg_name = a.name.lstrip("_")
has_factory = isinstance(a.default, Factory)
if has_factory and a.default.takes_self:
maybe_self = "self"
else:
maybe_self = ""
if a.init is False:
if has_factory:
init_factory_name = _init_factory_pat.format(a.name)
if a.convert is not None:
lines.append(fmt_setter_with_converter(
attr_name,
init_factory_name + "({0})".format(maybe_self)))
conv_name = _init_convert_pat.format(a.name)
names_for_globals[conv_name] = a.convert
else:
lines.append(fmt_setter(
attr_name,
init_factory_name + "({0})".format(maybe_self)
))
names_for_globals[init_factory_name] = a.default.factory
else:
if a.convert is not None:
lines.append(fmt_setter_with_converter(
attr_name,
"attr_dict['{attr_name}'].default"
.format(attr_name=attr_name)
))
conv_name = _init_convert_pat.format(a.name)
names_for_globals[conv_name] = a.convert
else:
lines.append(fmt_setter(
attr_name,
"attr_dict['{attr_name}'].default"
.format(attr_name=attr_name)
))
elif a.default is not NOTHING and not has_factory:
args.append(
"{arg_name}=attr_dict['{attr_name}'].default".format(
arg_name=arg_name,
attr_name=attr_name,
)
)
if a.convert is not None:
lines.append(fmt_setter_with_converter(attr_name, arg_name))
names_for_globals[_init_convert_pat.format(a.name)] = a.convert
else:
lines.append(fmt_setter(attr_name, arg_name))
elif has_factory:
args.append("{arg_name}=NOTHING".format(arg_name=arg_name))
lines.append("if {arg_name} is not NOTHING:"
.format(arg_name=arg_name))
init_factory_name = _init_factory_pat.format(a.name)
if a.convert is not None:
lines.append(" " + fmt_setter_with_converter(attr_name,
arg_name))
lines.append("else:")
lines.append(" " + fmt_setter_with_converter(
attr_name,
init_factory_name + "({0})".format(maybe_self)
))
names_for_globals[_init_convert_pat.format(a.name)] = a.convert
else:
lines.append(" " + fmt_setter(attr_name, arg_name))
lines.append("else:")
lines.append(" " + fmt_setter(
attr_name,
init_factory_name + "({0})".format(maybe_self)
))
names_for_globals[init_factory_name] = a.default.factory
else:
args.append(arg_name)
if a.convert is not None:
lines.append(fmt_setter_with_converter(attr_name, arg_name))
names_for_globals[_init_convert_pat.format(a.name)] = a.convert
else:
lines.append(fmt_setter(attr_name, arg_name))
if attrs_to_validate: # we can skip this if there are no validators.
names_for_globals["_config"] = _config
lines.append("if _config._run_validators is True:")
for a in attrs_to_validate:
val_name = "__attr_validator_{}".format(a.name)
attr_name = "__attr_{}".format(a.name)
lines.append(" {}(self, {}, self.{})".format(
val_name, attr_name, a.name))
names_for_globals[val_name] = a.validator
names_for_globals[attr_name] = a
if post_init:
lines.append("self.__attrs_post_init__()")
return """\
def __init__(self, {args}):
{lines}
""".format(
args=", ".join(args),
lines="\n ".join(lines) if lines else "pass",
), names_for_globals
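# For reference, the script returned for a single mandatory attribute ``x``
# (no default, no converter, no validator, frozen=False) is simply:
#
#   def __init__(self, x):
#       self.x = x
#
# Defaults, factories, converters and validators add the corresponding
# ``attr_dict`` lookups, ``__attr_factory_*`` / ``__attr_convert_*`` calls and
# validator calls built above.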
class Attribute(object):
"""
*Read-only* representation of an attribute.
:attribute name: The name of the attribute.
Plus *all* arguments of :func:`attr.ib`.
"""
__slots__ = (
"name", "default", "validator", "repr", "cmp", "hash", "init",
"convert", "metadata",
)
def __init__(self, name, default, validator, repr, cmp, hash, init,
convert=None, metadata=None):
# Cache this descriptor here to speed things up later.
bound_setattr = _obj_setattr.__get__(self, Attribute)
bound_setattr("name", name)
bound_setattr("default", default)
bound_setattr("validator", validator)
bound_setattr("repr", repr)
bound_setattr("cmp", cmp)
bound_setattr("hash", hash)
bound_setattr("init", init)
bound_setattr("convert", convert)
bound_setattr("metadata", (metadata_proxy(metadata) if metadata
else _empty_metadata_singleton))
def __setattr__(self, name, value):
raise FrozenInstanceError()
@classmethod
def from_counting_attr(cls, name, ca):
inst_dict = {
k: getattr(ca, k)
for k
in Attribute.__slots__
if k not in (
"name", "validator", "default",
) # exclude methods
}
return cls(name=name, validator=ca._validator, default=ca._default,
**inst_dict)
# Don't use _add_pickle since fields(Attribute) doesn't work
def __getstate__(self):
"""
Play nice with pickle.
"""
return tuple(getattr(self, name) if name != "metadata"
else dict(self.metadata)
for name in self.__slots__)
def __setstate__(self, state):
"""
Play nice with pickle.
"""
bound_setattr = _obj_setattr.__get__(self, Attribute)
for name, value in zip(self.__slots__, state):
if name != "metadata":
bound_setattr(name, value)
else:
bound_setattr(name, metadata_proxy(value) if value else
_empty_metadata_singleton)
_a = [Attribute(name=name, default=NOTHING, validator=None,
repr=True, cmp=True, hash=(name != "metadata"), init=True)
for name in Attribute.__slots__]
Attribute = _add_hash(
_add_cmp(_add_repr(Attribute, attrs=_a), attrs=_a),
attrs=[a for a in _a if a.hash]
)
class _CountingAttr(object):
"""
Intermediate representation of attributes that uses a counter to preserve
the order in which the attributes have been defined.
    *Internal* data structure of the attrs library. Running into it is most
    likely the result of a bug like a forgotten `@attr.s` decorator.
"""
__slots__ = ("counter", "_default", "repr", "cmp", "hash", "init",
"metadata", "_validator", "convert")
__attrs_attrs__ = tuple(
Attribute(name=name, default=NOTHING, validator=None,
repr=True, cmp=True, hash=True, init=True)
for name
in ("counter", "_default", "repr", "cmp", "hash", "init",)
) + (
Attribute(name="metadata", default=None, validator=None,
repr=True, cmp=True, hash=False, init=True),
)
cls_counter = 0
def __init__(self, default, validator, repr, cmp, hash, init, convert,
metadata):
_CountingAttr.cls_counter += 1
self.counter = _CountingAttr.cls_counter
self._default = default
# If validator is a list/tuple, wrap it using helper validator.
if validator and isinstance(validator, (list, tuple)):
self._validator = and_(*validator)
else:
self._validator = validator
self.repr = repr
self.cmp = cmp
self.hash = hash
self.init = init
self.convert = convert
self.metadata = metadata
def validator(self, meth):
"""
Decorator that adds *meth* to the list of validators.
Returns *meth* unchanged.
.. versionadded:: 17.1.0
"""
if self._validator is None:
self._validator = meth
else:
self._validator = and_(self._validator, meth)
return meth
def default(self, meth):
"""
        Decorator that allows setting the default for an attribute.
Returns *meth* unchanged.
:raises DefaultAlreadySetError: If default has been set before.
.. versionadded:: 17.1.0
"""
if self._default is not NOTHING:
raise DefaultAlreadySetError()
self._default = Factory(meth, takes_self=True)
return meth
_CountingAttr = _add_cmp(_add_repr(_CountingAttr))
@attributes(slots=True, init=False)
class Factory(object):
"""
Stores a factory callable.
If passed as the default value to :func:`attr.ib`, the factory is used to
generate a new value.
:param callable factory: A callable that takes either none or exactly one
mandatory positional argument depending on *takes_self*.
:param bool takes_self: Pass the partially initialized instance that is
being initialized as a positional argument.
.. versionadded:: 17.1.0 *takes_self*
"""
factory = attr()
takes_self = attr()
def __init__(self, factory, takes_self=False):
"""
`Factory` is part of the default machinery so if we want a default
value here, we have to implement it ourselves.
"""
self.factory = factory
self.takes_self = takes_self
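# Illustrative: ``attr(default=Factory(list))`` gives every instance its own
# fresh list, while ``Factory(lambda self: self.x * 2, takes_self=True)``
# (hypothetical) lets the default depend on attributes initialized earlier.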
def make_class(name, attrs, bases=(object,), **attributes_arguments):
"""
A quick way to create a new class called *name* with *attrs*.
:param name: The name for the new class.
:type name: str
:param attrs: A list of names or a dictionary of mappings of names to
attributes.
:type attrs: :class:`list` or :class:`dict`
:param tuple bases: Classes that the new class will subclass.
:param attributes_arguments: Passed unmodified to :func:`attr.s`.
:return: A new class with *attrs*.
:rtype: type
.. versionadded:: 17.1.0 *bases*
"""
if isinstance(attrs, dict):
cls_dict = attrs
elif isinstance(attrs, (list, tuple)):
cls_dict = dict((a, attr()) for a in attrs)
else:
raise TypeError("attrs argument must be a dict or a list.")
return attributes(**attributes_arguments)(type(name, bases, cls_dict))
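# Illustrative: ``C = make_class("C", ["x", "y"], frozen=True)`` is roughly
# equivalent to decorating ``class C(object)`` with ``@attributes(frozen=True)``
# and declaring ``x = attr()`` and ``y = attr()`` in its body.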
# These are required within this module, so we define them here and merely
# import them into .validators.
@attributes(slots=True, hash=True)
class _AndValidator(object):
"""
Compose many validators to a single one.
"""
_validators = attr()
def __call__(self, inst, attr, value):
for v in self._validators:
v(inst, attr, value)
def and_(*validators):
"""
A validator that composes multiple validators into one.
When called on a value, it runs all wrapped validators.
:param validators: Arbitrary number of validators.
:type validators: callables
.. versionadded:: 17.1.0
"""
vals = []
for validator in validators:
vals.extend(
validator._validators if isinstance(validator, _AndValidator)
else [validator]
)
return _AndValidator(tuple(vals))
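# Illustrative composition sketch (the validator callables are hypothetical
# and not imported here):
#
#   x = attr(validator=and_(instance_of(int), is_positive))
#
# is equivalent to passing ``validator=[instance_of(int), is_positive]`` to
# ``attr()``, since list/tuple validators are wrapped with ``and_`` by
# ``_CountingAttr.__init__``.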
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sequential_head.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow_estimator.python.estimator.canned import metric_keys
from tensorflow_estimator.python.estimator.canned import prediction_keys
from tensorflow_estimator.python.estimator.head import binary_class_head as binary_head_lib
from tensorflow_estimator.python.estimator.head import head_utils as test_lib
from tensorflow_estimator.python.estimator.head import multi_class_head as multi_head_lib
from tensorflow_estimator.python.estimator.head import multi_head
from tensorflow_estimator.python.estimator.head import sequential_head as seq_head_lib
from tensorflow_estimator.python.estimator.mode_keys import ModeKeys
def _convert_to_tensor(features):
"""Converts an arrays or dict of arrays to tensors or dict of tensors."""
if isinstance(features, dict):
if set(features.keys()) == set(['indices', 'values', 'dense_shape']):
return tf.sparse.SparseTensor(**features)
for col in features:
features[col] = _convert_to_tensor(features[col])
return features
return ops.convert_to_tensor(features)
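# Example (illustrative): a dict with exactly the keys 'indices', 'values' and
# 'dense_shape' is interpreted as a single sparse tensor, e.g.
#
#   _convert_to_tensor({'indices': ((0, 0), (0, 1)),
#                       'values': (1, 2),
#                       'dense_shape': (2, 2)})
#
# yields a `tf.sparse.SparseTensor`; any other dict is converted value by value.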
@test_util.run_all_in_graph_and_eager_modes
class TestFlatten(tf.test.TestCase, parameterized.TestCase):
"""Tests flatten functions."""
@parameterized.named_parameters(
{
'testcase_name': 'one_dim_sparse_tensor',
'tensor': {
'indices': ((0, 0), (0, 1), (1, 0)),
'values': (1, 2, 3),
'dense_shape': (2, 2)
},
'expected': [[1], [2], [3]]
}, {
'testcase_name': 'multi_dim_sparse_tensor',
'tensor': {
'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (1, 0, 0),
(1, 0, 1)),
'values': (1, 2, 3, 4, 5, 6),
'dense_shape': (2, 2, 2)
},
'expected': [[1, 2], [3, 4], [5, 6]]
}, {
'testcase_name': 'one_dim_dense_tensor',
'tensor': [[1, 2], [3, 4]],
'expected': [[1], [2], [3]]
}, {
'testcase_name': 'multi_dim_dense_tensor',
'tensor': [[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
'expected': [[1, 2], [3, 4], [5, 6]]
}, {
'testcase_name': 'unsorted_sparse_indices',
'tensor': {
'indices': ((0, 0), (1, 0), (0, 1)),
'values': (1, 3, 2),
'dense_shape': (2, 2)
},
'expected': [[1], [2], [3]]
})
def test_flatten_tensor(self, tensor, expected):
"""Tests the output of the `_flatten_tensor` function.
Args:
tensor: Dense or sparse array.
expected: Array with expected output of `_flatten_tensor`.
"""
sequence_mask = np.array([[1, 1], [1, 0]])
tensor = _convert_to_tensor(tensor)
flat_tensor = seq_head_lib._flatten_tensor(
tensor, sequence_mask, expected_length=sequence_mask.sum())
if tf.executing_eagerly():
self.assertAllEqual(flat_tensor, expected)
return
with self.cached_session() as sess:
self.assertAllEqual(sess.run(flat_tensor), expected)
def _test_flatten_method(self, features, feature_columns):
"""Runs seq head's `_flatten` method and returns output for testing."""
head = seq_head_lib.SequentialHeadWrapper(
static_head=None,
sequence_length_mask='sequence_mask',
feature_columns=feature_columns)
labels = {
'indices': ((0, 0), (0, 1), (1, 0)),
'values': (1, 2, 3),
'dense_shape': (2, 2)
}
logits = np.array([[[10], [11]], [[12], [13]]])
features = _convert_to_tensor(features)
labels = tf.sparse.SparseTensor(**labels)
logits = ops.convert_to_tensor(logits)
output = head._flatten(labels, logits, features)
if tf.executing_eagerly():
return output
with self.cached_session() as sess:
return sess.run(output)
def test_flatten_method(self):
"""Tests output of `_flatten` method."""
features = {'sequence_mask': np.array([[1, 1], [1, 0]])}
expected_output = ([[1], [2], [3]], [[10], [11], [12]], {})
output = self._test_flatten_method(features, feature_columns=[])
self.assertAllClose(expected_output, output)
def test_flatten_with_one_feature_columns(self):
"""Tests output of `_flatten` method with one feature column provided."""
features = {
'sequence_mask': np.array([[1, 1], [1, 0]]),
'weights': np.array([[0.5, 0.5], [1., 0]])
}
expected_output = ([[1], [2], [3]], [[10], [11], [12]], {
'weights': np.array([[0.5], [0.5], [1.]])
})
output = self._test_flatten_method(features, feature_columns='weights')
self.assertAllClose(expected_output, output)
def test_flatten_with_multiple_feature_columns(self):
"""Tests `_flatten` method with multiple feature columns provided."""
features = {
'sequence_mask': np.array([[1, 1], [1, 0]]),
'a': np.array([[0.5, 0.5], [1., 0]]),
'b': np.array([[1.5, 1.5], [2., 0]])
}
expected_output = ([[1], [2], [3]], [[10], [11], [12]], {
'a': np.array([[0.5], [0.5], [1.]]),
'b': np.array([[1.5], [1.5], [2.]])
})
output = self._test_flatten_method(features, feature_columns=['a', 'b'])
self.assertAllClose(expected_output, output)
def test_flatten_no_mask(self):
"""Tests error in `_flatten` method when sequence mask is not provided."""
features = {}
with self.assertRaisesRegexp(
ValueError, (r'The provided sequence_length_mask key `sequence_mask` '
r'should be included in.* Found keys: \[\].')):
_ = self._test_flatten_method(features, feature_columns=[])
def test_flatten_missing_feature(self):
"""Tests error in `_flatten` method when feature is not provided."""
features = {'sequence_mask': np.array([[1, 1], [1, 0]])}
with self.assertRaisesRegexp(
ValueError, '`weights` column expected in features dictionary.'):
_ = self._test_flatten_method(features, feature_columns=['weights'])
def test_flatten_tensor_wrong_feature_dim(self):
"""Tests `_flatten` method when feature has wrong dimension."""
features = {
'sequence_mask': np.array([[1, 1], [1, 0]]),
'weights': np.array([0.5, 0.5, 1., 0])
}
with self.assertRaisesRegexp(
ValueError, 'Input tensor expected to have at least 2 dimensions.'):
_ = self._test_flatten_method(features, feature_columns=['weights'])
def test_flatten_tensor_wrong_feature_mask(self):
"""Tests `_flatten` with feature mask different from provided mask."""
features = {'sequence_mask': np.array([[1, 1], [1, 1]])}
error = (
ValueError
if tf.executing_eagerly() else tf.errors.InvalidArgumentError)
with self.assertRaisesRegexp(
error, 'Tensor shape is incompatible with provided mask.'):
_ = self._test_flatten_method(features, feature_columns=[])
def test_flatten_tensor_wrong_mask_dim(self):
"""Tests `_flatten` with mask that has wrong dimensions."""
features = {'sequence_mask': np.array([1, 1])}
with self.assertRaisesRegexp(
ValueError, 'Mask is expected to have two dimensions, got .* instead.'):
_ = self._test_flatten_method(features, feature_columns=[])
class _MockHead(object):
"""A static head to be wrapped in a sequential head, for testing."""
def metrics(self, regularization_losses=None):
return regularization_losses
def loss(self, **kwargs):
return kwargs
def create_estimator_spec(self, **kwargs):
Spec = collections.namedtuple('Spec', ['predictions', 'kwargs']) # pylint: disable=invalid-name
return Spec(predictions={}, kwargs=kwargs)
@test_util.run_all_in_graph_and_eager_modes
class TestSequentialHead(tf.test.TestCase):
"""Tests sequential head methods."""
def _assert_equal(self, d, dref, session=None):
"""Recursively checks that all items of a dictionary are close.
Dictionary can contain numerical values, `Tensor` objects or dictionaries of
the former.
If an item is a `Tensor`, its value is evaluated then compared to the
reference.
Args:
d: Dictionary to check.
dref: Dictionary to use as a reference for checks.
session: A `tf.Session` object.
"""
for key, ref_item in dref.items():
if isinstance(ref_item, dict):
self._assert_equal(d[key], dref=ref_item, session=session)
elif isinstance(d[key], tf.Tensor):
self.assertAllClose(
session.run(d[key]) if session else d[key], ref_item)
else:
self.assertEqual(d[key], ref_item)
def test_predictions(self):
"""Tests predictions output.
Use `predictions` method in eager execution, else `create_estimator_spec` in
PREDICT mode.
logits = [[0.3, -0.4], [0.2, 0.2]]
logistics = 1 / (1 + exp(-logits))
= [[0.57, 0.40], [0.55, 0.55]]
"""
head = seq_head_lib.SequentialHeadWrapper(binary_head_lib.BinaryClassHead(),
'sequence_mask')
logits = [[[0.3], [-0.4]], [[0.2], [0.2]]]
expected_logistics = [[[0.574443], [0.401312]], [[0.549834], [0.549834]]]
features = {
'sequence_mask': ops.convert_to_tensor(np.array([[1, 1], [1, 0]]))
}
keys = prediction_keys.PredictionKeys
if tf.executing_eagerly():
predictions = head.predictions(
logits=logits, keys=[keys.LOGITS, keys.LOGISTIC])
self.assertItemsEqual(predictions.keys(), [keys.LOGITS, keys.LOGISTIC])
self.assertAllClose(logits, predictions[keys.LOGITS])
self.assertAllClose(expected_logistics, predictions[keys.LOGISTIC])
return
spec = head.create_estimator_spec(
features=features,
mode=ModeKeys.PREDICT,
logits=logits,
trainable_variables=[tf.Variable([1.0, 2.0], dtype=tf.dtypes.float32)])
self.assertIn('sequence_mask', spec.predictions)
with self.cached_session() as sess:
self.assertAllEqual(
sess.run(spec.predictions['sequence_mask']),
features['sequence_mask'])
self.assertAllClose(logits, sess.run(spec.predictions[keys.LOGITS]))
self.assertAllClose(expected_logistics,
sess.run(spec.predictions[keys.LOGISTIC]))
def test_metrics(self):
"""Tests the `metrics` method.
Tests that:
- Returned metrics match the returned metrics of the static head.
- `regularization_losses` argument is properly passed to the static head's
method.
"""
head = seq_head_lib.SequentialHeadWrapper(binary_head_lib.BinaryClassHead(),
'mask')
metrics = head.metrics(regularization_losses=2.5)
keys = metric_keys.MetricKeys
self.assertIn(keys.ACCURACY, metrics)
self.assertIn(keys.LOSS_REGULARIZATION, metrics)
def test_loss_args(self):
"""Tests that variables are flattened and passed to static head's method."""
logits = [[1, 2], [3, 4]]
labels = [[0, 1], [0, 2]]
features = {'weights': [[0.3, 0.2], [0.5, 100]], 'mask': [[1, 1], [1, 0]]}
head = seq_head_lib.SequentialHeadWrapper(_MockHead(), 'mask', 'weights')
expected_output = {
'logits': [[1], [2], [3]],
'labels': [[0], [1], [0]],
'features': {
'weights': [[0.3], [0.2], [0.5]]
},
'mode': 'my-mode',
'regularization_losses': 123
}
output = head.loss(
logits=_convert_to_tensor(logits),
labels=_convert_to_tensor(labels),
features=_convert_to_tensor(features),
mode='my-mode',
regularization_losses=123)
with self.cached_session() as sess:
self._assert_equal(output, dref=expected_output, session=sess)
def test_create_estimator_spec_args(self):
"""Tests that variables are flattened and passed to static head's method."""
logits = [[1, 2], [3, 4]]
labels = [[0, 1], [0, 2]]
features = {'weights': [[0.3, 0.2], [0.5, 100]], 'mask': [[1, 1], [1, 0]]}
head = seq_head_lib.SequentialHeadWrapper(_MockHead(), 'mask', 'weights')
w = tf.Variable(1)
update_op = w.assign_add(1)
trainable_variables = [tf.Variable([1.0, 2.0], dtype=tf.dtypes.float32)]
expected_output = {
'logits': [[1], [2], [3]],
'labels': [[0], [1], [0]],
'features': {
'weights': [[0.3], [0.2], [0.5]]
},
'mode': ModeKeys.TRAIN,
'regularization_losses': 123,
'optimizer': 'my-opt',
'train_op_fn': 'my-train-op',
'trainable_variables': trainable_variables,
'update_ops': [update_op]
}
spec = head.create_estimator_spec(
logits=_convert_to_tensor(logits),
labels=_convert_to_tensor(labels),
features=_convert_to_tensor(features),
mode=ModeKeys.TRAIN,
optimizer='my-opt',
train_op_fn='my-train-op',
regularization_losses=123,
update_ops=[update_op],
trainable_variables=trainable_variables)
with self.cached_session() as sess:
self.assertItemsEqual(spec.kwargs.keys(), expected_output.keys())
self._assert_equal(spec.kwargs, dref=expected_output, session=sess)
def test_head_properties(self):
"""Tests that the head's properties are correcly implemented."""
static_head = binary_head_lib.BinaryClassHead(
loss_reduction=tf.losses.Reduction.SUM, name='a_static_head')
head = seq_head_lib.SequentialHeadWrapper(static_head,
'a_sequence_mask_col')
self.assertEqual(head.name, 'a_static_head_sequential')
self.assertEqual(head.logits_dimension, 1)
self.assertEqual(head.loss_reduction, tf.losses.Reduction.SUM)
self.assertEqual(head.input_sequence_mask_key, 'a_sequence_mask_col')
self.assertEqual(head.static_head.name, 'a_static_head')
def test_loss_reduction(self):
"""Tests loss reduction.
Use `loss` method in eager execution, else `create_estimator_spec` in TRAIN
mode.
logits = [[[2., 3., 4.], [5., -0.5, 0.]],
[[-1.0, 2.0, 0.5], [_]]],
labels = [[0, 1],
[2, _]]
weights = [[0.5, 0.2],
[0.3, _]]
    loss = (0.5*2.40 + 0.2*5.51 + 0.3*1.74) / 3 = 0.94
"""
static_head = multi_head_lib.MultiClassHead(
n_classes=3, weight_column='weights')
head = seq_head_lib.SequentialHeadWrapper(static_head, 'sequence_mask',
'weights')
expected_loss = 0.942783
features = {
'weights':
tf.sparse.SparseTensor(
indices=((0, 0), (0, 1), (1, 0)),
values=(0.5, 0.2, 0.3),
dense_shape=(2, 2)),
'sequence_mask':
ops.convert_to_tensor([[1, 1], [1, 0]])
}
logits = ops.convert_to_tensor([[[2., 3., 4.], [5., -0.5, 0.]],
[[-1.0, 2.0, 0.5], [1.0, 0.5, 2.0]]])
labels = tf.sparse.SparseTensor(
indices=((0, 0), (0, 1), (1, 0)), values=(0, 1, 2), dense_shape=(2, 2))
class _Optimizer(tf.keras.optimizers.Optimizer):
def get_updates(self, loss, params):
del params, loss
return [tf.constant('op')]
def get_config(self):
config = super(_Optimizer, self).get_config()
return config
if tf.executing_eagerly():
loss = head.loss(logits=logits, labels=labels, features=features)
else:
spec = head.create_estimator_spec(
features,
ModeKeys.TRAIN,
logits,
labels=labels,
optimizer=_Optimizer('my_optimizer'),
trainable_variables=[
tf.Variable([1.0, 2.0], dtype=tf.dtypes.float32)
])
with self.cached_session() as sess:
loss = sess.run(spec.loss)
self.assertAllClose(loss, expected_loss, atol=1e-4)
def test_metrics_computation(self):
"""Runs metrics computation tests.
Use `update_metrics` method in eager execution, else `create_estimator_spec`
in EVAL mode.
logits = [[-101, 102, -103], [104, _, _]]
predicted_labels = [[0, 1, 0], [1, _, _]]
labels = [[1, 1, 1], [1, _, _]]
weights = [[2, 5, 1], [2, _, _]]
loss = (101*2 + 103*1) / 10 = 30.5
accuracy = (0 + 5 + 0 + 2) / (2 + 5 + 1 + 2) = 0.7
prediction_mean = (0 + 5 + 0 + 2) / (2 + 5 + 1 + 2) = 0.7
precision = (5 + 2) / (5 + 2) = 1.0
recall = (5 + 2) / (2 + 5 + 1 + 2) = 0.7
"""
static_head = binary_head_lib.BinaryClassHead(weight_column='weights')
head = seq_head_lib.SequentialHeadWrapper(static_head, 'sequence_mask',
'weights')
features = {
'sequence_mask': np.array([[1, 1, 1], [1, 0, 0]]),
'weights': np.array([[2, 5, 1], [2, 100, 100]])
}
regularization_losses = [100.]
logits = _convert_to_tensor([[-101, 102, -103], [104, 100, 100]])
labels = tf.sparse.SparseTensor(
values=[1, 1, 1, 1],
indices=((0, 0), (0, 1), (0, 2), (1, 0)),
dense_shape=(2, 3))
features = _convert_to_tensor(features)
expected_loss = 30.5
keys = metric_keys.MetricKeys
expected_metrics = {
keys.LOSS_MEAN: expected_loss,
keys.ACCURACY: 0.7,
keys.PREDICTION_MEAN: 0.7,
keys.LABEL_MEAN: 1.0,
keys.LOSS_REGULARIZATION: 100,
keys.PRECISION: 1.0,
keys.RECALL: 0.7,
keys.ACCURACY_BASELINE: 1.0,
keys.AUC: 0.,
keys.AUC_PR: 1.0
}
if tf.executing_eagerly():
eval_metrics = head.metrics(regularization_losses=regularization_losses)
updated_metrics = head.update_metrics(eval_metrics, features, logits,
labels, regularization_losses)
self.assertItemsEqual(expected_metrics.keys(), updated_metrics.keys())
self.assertAllClose(
expected_metrics,
{k: updated_metrics[k].result() for k in updated_metrics})
return
spec = head.create_estimator_spec(
features=features,
mode=ModeKeys.EVAL,
logits=logits,
labels=labels,
regularization_losses=regularization_losses,
trainable_variables=[tf.Variable([1.0, 2.0], dtype=tf.dtypes.float32)])
with self.cached_session() as sess:
test_lib._initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
_ = sess.run(update_ops)
self.assertAllClose(expected_metrics,
{k: value_ops[k].eval() for k in value_ops})
def test_wrong_mask_type(self):
"""Tests error raised when the mask doesn't have proper type."""
with self.assertRaisesRegexp(TypeError,
'`sequence_mask` column must be a string.'):
_ = seq_head_lib.SequentialHeadWrapper(None, sequence_length_mask=1)
def test_wrong_feature_column_type(self):
"""Tests error raised when the feature column doesn't have proper type."""
with self.assertRaisesRegexp(
TypeError, '`feature_columns` must be either a string or an iterable'):
_ = seq_head_lib.SequentialHeadWrapper(None, 'mask', feature_columns=1)
def test_wrong_feature_column_type_in_iterable(self):
"""Tests error raised when the feature column doesn't have proper type."""
with self.assertRaisesRegexp(TypeError,
'Column must a string. Given type: .*.'):
_ = seq_head_lib.SequentialHeadWrapper(None, 'mask', feature_columns=[1])
def test_multi_head_provided(self):
"""Tests error raised when a multi-head is provided."""
with self.assertRaisesRegexp(
ValueError,
'`MultiHead` is not supported with `SequentialHeadWrapper`.'):
_ = seq_head_lib.SequentialHeadWrapper(
multi_head.MultiHead(
[binary_head_lib.BinaryClassHead(name='test-head')]))
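# A standalone numpy check (added illustration, not part of the original test
# suite) of the arithmetic quoted in the `test_loss_reduction` docstring above:
# per-step softmax cross-entropy, weighted by `weights`, with the masked step
# dropped and the sum averaged over the 3 remaining steps. It only assumes the
# `np` (numpy) import already present in this module.
def _recompute_loss_reduction_expected_loss():
  """Returns ~0.942783, the `expected_loss` used in `test_loss_reduction`."""
  steps = [([2., 3., 4.], 0, 0.5),      # (logits, label, weight) for step (0, 0)
           ([5., -0.5, 0.], 1, 0.2),    # step (0, 1)
           ([-1.0, 2.0, 0.5], 2, 0.3)]  # step (1, 0); step (1, 1) is masked out
  total = 0.
  for logits, label, weight in steps:
    logits = np.array(logits, dtype=np.float64)
    cross_entropy = np.log(np.exp(logits).sum()) - logits[label]
    total += weight * cross_entropy
  return total / 3  # SUM_OVER_BATCH_SIZE over the 3 unmasked steps.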
if __name__ == '__main__':
tf.test.main()
|
|
from types import FunctionType
from mock import patch, Mock, ANY, call, sentinel
from nose.tools import assert_raises, assert_equal, assert_true, assert_false
from zabby.tests import (assert_is_instance, ensure_removed,
ensure_contains_only_formatted_lines,
assert_less_equal, assert_less, assert_in)
from zabby.core import utils
from zabby.core.exceptions import WrongArgumentError, OperatingSystemError
from zabby.core.six import integer_types, string_types, u, b
from zabby.core.utils import (SIZE_CONVERSION_MODES, validate_mode,
convert_size, lines_from_file, lists_from_file,
dict_from_file, to_bytes, sh, tcp_communication,
exception_guard)
def test_validate_mode_raises_exception_if_mode_is_not_available():
assert_raises(WrongArgumentError, utils.validate_mode, 'mode', [])
def test_validate_mode_does_not_raise_exception_if_mode_is_available():
validate_mode('mode', ['mode'])
def test_convert_size_returns_integers_or_floats():
free, total = 50, 100
for conversion_mode in SIZE_CONVERSION_MODES:
converted_size = convert_size(free, total, conversion_mode)
assert_is_instance(converted_size, (float, integer_types))
def test_convert_size_returns_zero_if_total_size_is_zero():
assert_equal(0, convert_size(1, 0, SIZE_CONVERSION_MODES[0]))
FILE_PATH = '/tmp/zabby_test_file'
class TestLinesFromFile():
def test_raises_exception_if_file_is_not_found(self):
ensure_removed(FILE_PATH)
assert_raises(IOError, lines_from_file, FILE_PATH)
def test_raises_exception_if_file_is_empty(self):
ensure_removed(FILE_PATH)
open(FILE_PATH, mode='w').close()
assert_raises(OperatingSystemError, lines_from_file, FILE_PATH)
def test_returns_list_of_strings(self):
ensure_contains_only_formatted_lines(FILE_PATH, 'line')
found_lines = lines_from_file(FILE_PATH)
assert_is_instance(found_lines, list)
for found_line in found_lines:
assert_is_instance(found_line, string_types)
def test_returns_up_to_maximum_number_of_lines(self):
ensure_contains_only_formatted_lines(FILE_PATH, 'line', 3)
maximum_number_of_lines = 2
found_lines = lines_from_file(FILE_PATH, maximum_number_of_lines)
        assert_less_equal(len(found_lines), maximum_number_of_lines)
class TestListsFromFile():
def test_returns_list_of_lists(self):
ensure_contains_only_formatted_lines(FILE_PATH, '1 2')
found_lists = lists_from_file(FILE_PATH)
assert_is_instance(found_lists, list)
for found_list in found_lists:
assert_is_instance(found_list, list)
class TestDictFromFile():
def test_returns_dict(self):
ensure_contains_only_formatted_lines(FILE_PATH, 'key value')
d = dict_from_file(FILE_PATH)
assert_is_instance(d, dict)
assert_less(0, len(d))
def test_lines_without_value_are_not_included(self):
ensure_contains_only_formatted_lines(FILE_PATH, 'key')
d = dict_from_file(FILE_PATH)
assert_equal(0, len(d))
class TestToBytes():
def test_raises_exception_if_wrong_factor_is_passed(self):
assert_raises(WrongArgumentError, to_bytes, 1, 'wrong')
def test_raises_exception_if_value_is_not_convertible_to_int(self):
assert_raises(ValueError, to_bytes, 'wrong', 'kB')
def test_returns_integer(self):
value = to_bytes(1, 'kB')
assert_is_instance(value, integer_types)
COMMAND = 'command'
COMMAND_WITH_ARGUMENTS = 'command {0}'
STDOUT = 'stdout\n'
STDERR = 'stderr\n'
ARGUMENT = 'argument'
class TestSh():
def setup(self):
self._patcher_popen = patch('zabby.core.utils.Popen')
self.process = Mock()
self.process.communicate.return_value = (STDOUT, '')
self.mock_popen = self._patcher_popen.start()
self.mock_popen.return_value = self.process
self._patcher_time = patch('zabby.core.utils.time')
self.mock_time = self._patcher_time.start()
def teardown(self):
self._patcher_popen.stop()
self._patcher_time.stop()
def test_returns_a_function(self):
f = sh(COMMAND)
assert_is_instance(f, FunctionType)
def test_function_runs_command_when_called(self):
sh(COMMAND)()
command = self.mock_popen.call_args[0][0]
assert_equal(command, COMMAND)
self.process.communicate.assert_called_once_with()
def test_command_output_is_returned(self):
result = sh(COMMAND)()
assert_true(not result.endswith('\n'))
assert_in(result, STDOUT)
@patch('zabby.core.utils.logging')
def test_command_errors_are_logged(self, mock_logging):
mock_logger = Mock()
mock_logging.getLogger.return_value = mock_logger
self.process.communicate.return_value = (STDOUT, STDERR)
sh(COMMAND)()
mock_logger.warn.assert_called_once_with(ANY)
def test_function_accepts_arguments(self):
sh(COMMAND)(ARGUMENT)
def test_function_inserts_arguments_into_command(self):
sh(COMMAND_WITH_ARGUMENTS)(ARGUMENT)
command = self.mock_popen.call_args[0][0]
assert_in(ARGUMENT, command)
def test_calling_command_that_accepts_arguments_without_them(self):
f = sh(COMMAND_WITH_ARGUMENTS)
assert_raises(WrongArgumentError, f)
def test_calling_command_without_timeout_does_not_poll(self):
sh(COMMAND, timeout=None)()
assert_false(self.process.poll.called)
def test_raises_exception_if_poll_never_succeed(self):
self.process.poll.return_value = None
command = sh(COMMAND, timeout=10.0)
assert_raises(OperatingSystemError, command)
def test_does_not_raise_exception_if_poll_eventually_succeed(self):
self.process.poll.side_effect = [None, 0]
wait_step = 0.1
sh(COMMAND, timeout=10.0, wait_step=wait_step)()
self.mock_time.sleep.assert_called_with(wait_step)
def test_raises_exception_if_command_does_not_produce_output(self):
self.process.communicate.return_value = ('', '')
f = sh(COMMAND)
assert_raises(OperatingSystemError, f)
def test_raises_exception_if_command_produces_errors(self):
self.process.communicate.return_value = (STDOUT, STDERR)
f = sh(COMMAND, raise_on_nonempty_err=True)
assert_raises(OperatingSystemError, f)
def test_communicate_with_completed_process(self):
sh(COMMAND)()
self.process.communicate.assert_called_once_with()
def test_communicate_with_timed_out_process(self):
self.process.poll.return_value = None
command = sh(COMMAND, timeout=10.0)
assert_raises(OperatingSystemError, command)
self.process.communicate.assert_called_once_with()
PORT = 8080
REQUEST = b('')
class TestTcpCommunication():
def setup(self):
self._patcher_socket = patch('zabby.core.utils.socket')
self.conn = Mock()
self.mock_socket = self._patcher_socket.start()
self.mock_socket.create_connection.return_value = self.conn
def teardown(self):
self._patcher_socket.stop()
def test_raises_exception_for_non_binary_requests(self):
assert_raises(WrongArgumentError, tcp_communication, PORT,
requests=[u('request')])
def test_does_not_handle_exceptions(self):
exception = IOError
self.mock_socket.create_connection.side_effect = exception
assert_raises(exception, tcp_communication, PORT)
def test_receives_before_sending_if_so_requested(self):
tcp_communication(PORT, receive_first=True)
calls = [call.recv(ANY), call.close()]
assert_equal(calls, self.conn.method_calls)
def test_sends_requests_receives_replies(self):
tcp_communication(PORT, requests=[REQUEST])
calls = [call.sendall(REQUEST), call.recv(ANY), call.close()]
assert_equal(calls, self.conn.method_calls)
class TestExceptionGuard():
def test_returns_a_wrapper(self):
wrapper = exception_guard(lambda: None)
assert_is_instance(wrapper, FunctionType)
def test_calls_function_when_called(self):
f = Mock()
f.return_value = sentinel
assert_equal(sentinel, exception_guard(f)())
f.assert_called_once_with()
def test_returns_sentinel_when_exception_is_raised(self):
exception_class = IOError
def failing():
raise exception_class()
assert_equal(sentinel,
exception_guard(failing, exception_class, sentinel)())
def test_does_not_catch_wider_exceptions(self):
parent_exception = Exception
child_exception = IOError
assert_true(issubclass(child_exception, parent_exception))
def failing():
raise parent_exception()
guarded_function = exception_guard(failing, child_exception)
assert_raises(parent_exception, guarded_function)
def test_passes_arguments(self):
assert_equal(sentinel, exception_guard(lambda x: x)(sentinel))
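# A minimal usage sketch (added illustration, not part of the original test
# module) of the helpers exercised above: sh() turns a command template into a
# callable that returns the command's stripped stdout, and exception_guard()
# maps a chosen exception to a fallback value. The command string and fallback
# below are hypothetical.
def _example_guarded_command():
    read_uptime = sh('cat /proc/uptime', timeout=5.0)
    return exception_guard(read_uptime, OperatingSystemError, None)()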
|
|
import sys
import time
from django.conf import settings
from django.db.backends.creation import BaseDatabaseCreation
from django.db.utils import DatabaseError
from django.utils.six.moves import input
TEST_DATABASE_PREFIX = 'test_'
PASSWORD = 'Im_a_lumberjack'
class DatabaseCreation(BaseDatabaseCreation):
# This dictionary maps Field objects to their associated Oracle column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
#
# Any format strings starting with "qn_" are quoted before being used in the
# output (the "qn_" prefix is stripped before the lookup is performed.
data_types = {
'AutoField': 'NUMBER(11)',
'BinaryField': 'BLOB',
'BooleanField': 'NUMBER(1)',
'CharField': 'NVARCHAR2(%(max_length)s)',
'CommaSeparatedIntegerField': 'VARCHAR2(%(max_length)s)',
'DateField': 'DATE',
'DateTimeField': 'TIMESTAMP',
'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'NUMBER(19)',
'FileField': 'NVARCHAR2(%(max_length)s)',
'FilePathField': 'NVARCHAR2(%(max_length)s)',
'FloatField': 'DOUBLE PRECISION',
'IntegerField': 'NUMBER(11)',
'BigIntegerField': 'NUMBER(19)',
'IPAddressField': 'VARCHAR2(15)',
'GenericIPAddressField': 'VARCHAR2(39)',
'NullBooleanField': 'NUMBER(1)',
'OneToOneField': 'NUMBER(11)',
'PositiveIntegerField': 'NUMBER(11)',
'PositiveSmallIntegerField': 'NUMBER(11)',
'SlugField': 'NVARCHAR2(%(max_length)s)',
'SmallIntegerField': 'NUMBER(11)',
'TextField': 'NCLOB',
'TimeField': 'TIMESTAMP',
'URLField': 'VARCHAR2(%(max_length)s)',
'UUIDField': 'VARCHAR2(32)',
}
data_type_check_constraints = {
'BooleanField': '%(qn_column)s IN (0,1)',
'NullBooleanField': '(%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL)',
'PositiveIntegerField': '%(qn_column)s >= 0',
'PositiveSmallIntegerField': '%(qn_column)s >= 0',
}
def __init__(self, connection):
super(DatabaseCreation, self).__init__(connection)
def _create_test_db(self, verbosity=1, autoclobber=False, keepdb=False):
parameters = self._get_test_db_params()
cursor = self.connection.cursor()
if self._test_database_create():
try:
self._execute_test_db_creation(cursor, parameters, verbosity)
except Exception as e:
# if we want to keep the db, then no need to do any of the below,
# just return and skip it all.
if keepdb:
return
sys.stderr.write("Got an error creating the test database: %s\n" % e)
if not autoclobber:
confirm = input(
"It appears the test database, %s, already exists. "
"Type 'yes' to delete it, or 'no' to cancel: " % parameters['user'])
if autoclobber or confirm == 'yes':
if verbosity >= 1:
print("Destroying old test database '%s'..." % self.connection.alias)
try:
self._execute_test_db_destruction(cursor, parameters, verbosity)
except DatabaseError as e:
if 'ORA-29857' in str(e):
self._handle_objects_preventing_db_destruction(cursor, parameters,
verbosity, autoclobber)
else:
# Ran into a database error that isn't about leftover objects in the tablespace
sys.stderr.write("Got an error destroying the old test database: %s\n" % e)
sys.exit(2)
except Exception as e:
sys.stderr.write("Got an error destroying the old test database: %s\n" % e)
sys.exit(2)
try:
self._execute_test_db_creation(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error recreating the test database: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
if self._test_user_create():
if verbosity >= 1:
print("Creating test user...")
try:
self._create_test_user(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error creating the test user: %s\n" % e)
if not autoclobber:
confirm = input(
"It appears the test user, %s, already exists. Type "
"'yes' to delete it, or 'no' to cancel: " % parameters['user'])
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test user...")
self._destroy_test_user(cursor, parameters, verbosity)
if verbosity >= 1:
print("Creating test user...")
self._create_test_user(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error recreating the test user: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
self.connection.close() # done with main user -- test user and tablespaces created
real_settings = settings.DATABASES[self.connection.alias]
real_settings['SAVED_USER'] = self.connection.settings_dict['SAVED_USER'] = \
self.connection.settings_dict['USER']
real_settings['SAVED_PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD'] = \
self.connection.settings_dict['PASSWORD']
real_test_settings = real_settings['TEST']
test_settings = self.connection.settings_dict['TEST']
real_test_settings['USER'] = real_settings['USER'] = test_settings['USER'] = \
self.connection.settings_dict['USER'] = parameters['user']
real_settings['PASSWORD'] = self.connection.settings_dict['PASSWORD'] = parameters['password']
return self.connection.settings_dict['NAME']
def _handle_objects_preventing_db_destruction(self, cursor, parameters, verbosity, autoclobber):
# There are objects in the test tablespace which prevent dropping it
# The easy fix is to drop the test user -- but are we allowed to do so?
print("There are objects in the old test database which prevent its destruction.")
print("If they belong to the test user, deleting the user will allow the test "
"database to be recreated.")
print("Otherwise, you will need to find and remove each of these objects, "
"or use a different tablespace.\n")
if self._test_user_create():
if not autoclobber:
confirm = input("Type 'yes' to delete user %s: " % parameters['user'])
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test user...")
self._destroy_test_user(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error destroying the test user: %s\n" % e)
sys.exit(2)
try:
if verbosity >= 1:
print("Destroying old test database '%s'..." % self.connection.alias)
self._execute_test_db_destruction(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error destroying the test database: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled -- test database cannot be recreated.")
sys.exit(1)
else:
print("Django is configured to use pre-existing test user '%s',"
" and will not attempt to delete it.\n" % parameters['user'])
print("Tests cancelled -- test database cannot be recreated.")
sys.exit(1)
def _destroy_test_db(self, test_database_name, verbosity=1):
"""
        Destroy a test database, dropping the test user and the tablespaces
        that were created for it.
"""
self.connection.settings_dict['USER'] = self.connection.settings_dict['SAVED_USER']
self.connection.settings_dict['PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD']
parameters = self._get_test_db_params()
cursor = self.connection.cursor()
time.sleep(1) # To avoid "database is being accessed by other users" errors.
if self._test_user_create():
if verbosity >= 1:
print('Destroying test user...')
self._destroy_test_user(cursor, parameters, verbosity)
if self._test_database_create():
if verbosity >= 1:
print('Destroying test database tables...')
self._execute_test_db_destruction(cursor, parameters, verbosity)
self.connection.close()
def _execute_test_db_creation(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_create_test_db(): dbname = %s" % parameters['user'])
statements = [
"""CREATE TABLESPACE %(tblspace)s
DATAFILE '%(datafile)s' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE %(maxsize)s
""",
"""CREATE TEMPORARY TABLESPACE %(tblspace_temp)s
TEMPFILE '%(datafile_tmp)s' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE %(maxsize_tmp)s
""",
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _create_test_user(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_create_test_user(): username = %s" % parameters['user'])
statements = [
"""CREATE USER %(user)s
IDENTIFIED BY %(password)s
DEFAULT TABLESPACE %(tblspace)s
TEMPORARY TABLESPACE %(tblspace_temp)s
QUOTA UNLIMITED ON %(tblspace)s
""",
"""GRANT CREATE SESSION,
CREATE TABLE,
CREATE SEQUENCE,
CREATE PROCEDURE,
CREATE TRIGGER
TO %(user)s""",
]
self._execute_statements(cursor, statements, parameters, verbosity)
# Most test-suites can be run without the create-view privilege. But some need it.
extra = "GRANT CREATE VIEW TO %(user)s"
try:
self._execute_statements(cursor, [extra], parameters, verbosity, allow_quiet_fail=True)
except DatabaseError as err:
description = str(err)
if 'ORA-01031' in description:
if verbosity >= 2:
print("Failed to grant CREATE VIEW permission to test user. This may be ok.")
else:
raise
def _execute_test_db_destruction(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_execute_test_db_destruction(): dbname=%s" % parameters['user'])
statements = [
'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _destroy_test_user(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_destroy_test_user(): user=%s" % parameters['user'])
print("Be patient. This can take some time...")
statements = [
'DROP USER %(user)s CASCADE',
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _execute_statements(self, cursor, statements, parameters, verbosity, allow_quiet_fail=False):
for template in statements:
stmt = template % parameters
if verbosity >= 2:
print(stmt)
try:
cursor.execute(stmt)
except Exception as err:
if (not allow_quiet_fail) or verbosity >= 2:
sys.stderr.write("Failed (%s)\n" % (err))
raise
def _get_test_db_params(self):
return {
'dbname': self._test_database_name(),
'user': self._test_database_user(),
'password': self._test_database_passwd(),
'tblspace': self._test_database_tblspace(),
'tblspace_temp': self._test_database_tblspace_tmp(),
'datafile': self._test_database_tblspace_datafile(),
'datafile_tmp': self._test_database_tblspace_tmp_datafile(),
'maxsize': self._test_database_tblspace_size(),
'maxsize_tmp': self._test_database_tblspace_tmp_size(),
}
def _test_settings_get(self, key, default=None, prefixed=None):
"""
Return a value from the test settings dict,
or a given default,
or a prefixed entry from the main settings dict
"""
settings_dict = self.connection.settings_dict
val = settings_dict['TEST'].get(key, default)
if val is None:
val = TEST_DATABASE_PREFIX + settings_dict[prefixed]
return val
def _test_database_name(self):
return self._test_settings_get('NAME', prefixed='NAME')
def _test_database_create(self):
return self._test_settings_get('CREATE_DB', default=True)
def _test_user_create(self):
return self._test_settings_get('CREATE_USER', default=True)
def _test_database_user(self):
return self._test_settings_get('USER', prefixed='USER')
def _test_database_passwd(self):
return self._test_settings_get('PASSWORD', default=PASSWORD)
def _test_database_tblspace(self):
return self._test_settings_get('TBLSPACE', prefixed='USER')
def _test_database_tblspace_tmp(self):
settings_dict = self.connection.settings_dict
return settings_dict['TEST'].get('TBLSPACE_TMP',
TEST_DATABASE_PREFIX + settings_dict['USER'] + '_temp')
def _test_database_tblspace_datafile(self):
tblspace = '%s.dbf' % self._test_database_tblspace()
return self._test_settings_get('DATAFILE', default=tblspace)
def _test_database_tblspace_tmp_datafile(self):
tblspace = '%s.dbf' % self._test_database_tblspace_tmp()
return self._test_settings_get('DATAFILE_TMP', default=tblspace)
def _test_database_tblspace_size(self):
return self._test_settings_get('DATAFILE_MAXSIZE', default='500M')
def _test_database_tblspace_tmp_size(self):
return self._test_settings_get('DATAFILE_TMP_MAXSIZE', default='500M')
def _get_test_db_name(self):
"""
We need to return the 'production' DB name to get the test DB creation
machinery to work. This isn't a great deal in this case because DB
        names as handled by Django don't have real counterparts in Oracle.
"""
return self.connection.settings_dict['NAME']
def test_db_signature(self):
settings_dict = self.connection.settings_dict
return (
settings_dict['HOST'],
settings_dict['PORT'],
settings_dict['ENGINE'],
settings_dict['NAME'],
self._test_database_user(),
)
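# Illustrative sketch (not part of Django itself): the shape of the TEST
# settings dict that _get_test_db_params() reads for this backend, annotated
# with the fallbacks implemented by _test_settings_get(). All values below are
# hypothetical examples.
_EXAMPLE_ORACLE_TEST_SETTINGS = {
    'NAME': 'test_mydb',                     # default: TEST_DATABASE_PREFIX + NAME
    'USER': 'test_myuser',                   # default: TEST_DATABASE_PREFIX + USER
    'PASSWORD': 'Im_a_lumberjack',           # default: the module-level PASSWORD
    'TBLSPACE': 'test_myuser',               # default: TEST_DATABASE_PREFIX + USER
    'TBLSPACE_TMP': 'test_myuser_temp',      # default: TEST_DATABASE_PREFIX + USER + '_temp'
    'DATAFILE': 'test_myuser.dbf',           # default: '<TBLSPACE>.dbf'
    'DATAFILE_TMP': 'test_myuser_temp.dbf',  # default: '<TBLSPACE_TMP>.dbf'
    'DATAFILE_MAXSIZE': '500M',              # default: '500M'
    'DATAFILE_TMP_MAXSIZE': '500M',          # default: '500M'
    'CREATE_DB': True,                       # False: reuse an existing tablespace
    'CREATE_USER': True,                     # False: reuse a pre-existing test user
}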
|
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import inspect
import logging
import re
import unittest
from py_utils import cloud_storage
from telemetry.internal.browser import browser_finder
from telemetry.internal.browser import browser_finder_exceptions
from telemetry.internal.results import artifact_logger
from telemetry.testing import browser_test_context
from typ import json_results
from typ import test_case
DEFAULT_LOG_FORMAT = (
'(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d '
'%(message)s')
class SeriallyExecutedBrowserTestCase(test_case.TestCase):
# Below is a reference to the typ.Runner instance. It will be used in
# member functions like GetExpectationsForTest() to get test information
# from the typ.Runner instance running the test.
_typ_runner = None
def __init__(self, methodName):
super(SeriallyExecutedBrowserTestCase, self).__init__(methodName)
self._private_methodname = methodName
def shortName(self):
"""Returns the method name this test runs, without the package prefix."""
return self._private_methodname
def set_artifacts(self, artifacts):
super(SeriallyExecutedBrowserTestCase, self).set_artifacts(artifacts)
artifact_logger.RegisterArtifactImplementation(artifacts)
def setUp(self):
if hasattr(self, 'browser') and self.browser:
self.browser.CleanupUnsymbolizedMinidumps()
def tearDown(self):
if hasattr(self, 'browser') and self.browser:
self.browser.CleanupUnsymbolizedMinidumps(fatal=True)
@classmethod
def Name(cls):
return cls.__name__
@classmethod
def AddCommandlineArgs(cls, parser):
pass
@classmethod
def SetUpProcess(cls):
""" Set up testing logic before running the test case.
This is guaranteed to be called only once for all the tests before the test
suite runs.
"""
finder_options = browser_test_context.GetCopy().finder_options
cls._finder_options = finder_options
# Set up logging based on the verbosity passed from the parent to
# the child process.
if finder_options.verbosity >= 2:
logging.getLogger().setLevel(logging.DEBUG)
elif finder_options.verbosity:
logging.getLogger().setLevel(logging.INFO)
else:
logging.getLogger().setLevel(logging.WARNING)
logging.basicConfig(format=DEFAULT_LOG_FORMAT)
cls.platform = None
cls.browser = None
cls._browser_to_create = None
cls._browser_options = None
@classmethod
def SetBrowserOptions(cls, browser_options):
"""Sets the browser option for the browser to create.
Args:
browser_options: Browser options object for the browser we want to test.
"""
cls._browser_options = browser_options
cls._browser_to_create = browser_finder.FindBrowser(browser_options)
if not cls._browser_to_create:
raise browser_finder_exceptions.BrowserFinderException(
'Cannot find browser of type %s. \n\nAvailable browsers:\n%s\n' % (
browser_options.browser_options.browser_type,
'\n'.join(browser_finder.GetAllAvailableBrowserTypes(
browser_options))))
if not cls.platform:
cls.platform = cls._browser_to_create.platform
cls.platform.SetFullPerformanceModeEnabled(
browser_options.full_performance_mode)
cls.platform.network_controller.Open(
browser_options.browser_options.wpr_mode)
else:
assert cls.platform == cls._browser_to_create.platform, (
'All browser launches within same test suite must use browsers on '
'the same platform')
@classmethod
def StartWPRServer(cls, archive_path=None, archive_bucket=None):
"""Start a webpage replay server.
Args:
archive_path: Path to the WPR file. If there is a corresponding sha1 file,
this archive will be automatically downloaded from Google Storage.
archive_bucket: The bucket to look for the WPR archive.
"""
assert cls._browser_options, (
'Browser options must be set with |SetBrowserOptions| prior to '
'starting WPR')
assert not cls.browser, 'WPR must be started prior to browser being started'
cloud_storage.GetIfChanged(archive_path, archive_bucket)
cls.platform.network_controller.StartReplay(archive_path)
@classmethod
def StopWPRServer(cls):
cls.platform.network_controller.StopReplay()
@classmethod
def StartBrowser(cls):
assert cls._browser_options, (
'Browser options must be set with |SetBrowserOptions| prior to '
        'starting the browser')
assert not cls.browser, 'Browser is started. Must close it first'
try:
# TODO(crbug.com/803104): Note cls._browser_options actually is a
# FinderOptions object, and we need to access the real browser_option's
# contained inside.
cls._browser_to_create.SetUpEnvironment(
cls._browser_options.browser_options)
cls.browser = cls._browser_to_create.Create()
specifiers = set(cls.GetPlatformTags(cls.browser) +
cls._browser_to_create.GetTypExpectationsTags())
if cls._typ_runner.has_expectations and specifiers:
logging.info(
'The following expectations condition tags were generated %s' %
str(list(specifiers)))
cls._typ_runner.expectations.add_tags(specifiers)
except Exception:
cls._browser_to_create.CleanUpEnvironment()
raise
@classmethod
def StopBrowser(cls):
assert cls.browser, 'Browser is not started'
try:
cls.browser.Close()
cls.browser = None
finally:
cls._browser_to_create.CleanUpEnvironment()
@classmethod
def TearDownProcess(cls):
""" Tear down the testing logic after running the test cases.
This is guaranteed to be called only once for all the tests after the test
suite finishes running.
"""
if cls.platform:
cls.platform.StopAllLocalServers()
cls.platform.network_controller.Close()
cls.platform.SetFullPerformanceModeEnabled(False)
if cls.browser:
cls.StopBrowser()
@classmethod
def SetStaticServerDirs(cls, dirs_path):
assert cls.platform
assert isinstance(dirs_path, list)
cls.platform.SetHTTPServerDirectories(dirs_path)
@classmethod
def UrlOfStaticFilePath(cls, file_path):
return cls.platform.http_server.UrlOf(file_path)
@classmethod
def ExpectationsFiles(cls):
"""Subclasses can override this class method to return a list of absolute
paths to the test expectations files.
Returns:
A list of test expectations file paths. The paths must be absolute.
"""
return []
def GetExpectationsForTest(self):
"""Subclasses can override this method to return a tuple containing a set
of expected results and a flag indicating if the test has the RetryOnFailure
    expectation. Test methods may want to know the test expectations in order to
    modify their behavior for certain expectations. For instance, GPU tests want
to avoid symbolizing any crash dumps in the case of expected test failures
or when tests are being retried because they are expected to be flaky.
Returns:
A tuple containing set of expected results for a test and a boolean value
indicating if the test contains the RetryOnFailure expectation. When there
are no expectations files passed to typ, then a tuple of
(set(['PASS']), False) should be returned from this function.
"""
exp = self.__class__._typ_runner.expectations_for(self)
return exp.results, exp.should_retry_on_failure
@classmethod
def GetPlatformTags(cls, browser):
"""This method uses the Browser instances's platform member variable to get
the operating system, operating system version and browser type tags.
Example tags for operating system are 'linux' and 'mac'. Example tags
for the operating system version are 'mojave' and 'vista'. Example tags
for browser type are 'debug' and 'release'. If a None value or empty string
is retrieved from the browser's platform member variable, then it will be
filtered out.
Args:
      browser: Browser instance returned from the
        possible_browser.BrowserSession() method.
Returns:
A list of tags derived from the Browser instance's platform member variable.
"""
return browser.GetTypExpectationsTags()
@staticmethod
def GetJSONResultsDelimiter():
"""This method returns the path delimiter that will be used to seperate
a test name into parts. By default, the delimiter is '.'
"""
return json_results.DEFAULT_TEST_SEPARATOR
def LoadAllTestsInModule(module):
""" Load all tests & generated browser tests in a given module.
  This is supposed to be invoked in the load_tests() method of your test modules
  that use the browser_test_runner framework to discover & generate the tests to
  be picked up by the test runner. Here is an example of what your test module
  should look like:
################## my_awesome_browser_tests.py ################
import sys
from telemetry.testing import serially_executed_browser_test_case
...
class TestSimpleBrowser(
serially_executed_browser_test_case.SeriallyExecutedBrowserTestCase):
...
...
def load_tests(loader, tests, pattern):
return serially_executed_browser_test_case.LoadAllTestsInModule(
sys.modules[__name__])
#################################################################
Args:
module: the module which contains test cases classes.
Returns:
an instance of unittest.TestSuite, which contains all the tests & generated
test cases to be run.
"""
suite = unittest.TestSuite()
test_context = browser_test_context.GetCopy()
if not test_context:
return suite
for _, obj in inspect.getmembers(module):
if (inspect.isclass(obj) and
issubclass(obj, SeriallyExecutedBrowserTestCase)):
# We bail out early if this class doesn't match the targeted
# test_class in test_context to avoid calling GenerateTestCases
# for tests that we don't intend to run. This is to avoid possible errors
# in GenerateTestCases as the test class may define custom options in
      # the finder_options object, and hence would raise an error if it can't
      # find its custom options in the finder_options object.
if test_context.test_class != obj:
continue
for test in GenerateTestCases(
test_class=obj, finder_options=test_context.finder_options):
if test.id() in test_context.test_case_ids_to_run:
suite.addTest(test)
return suite
def _GenerateTestMethod(based_method, args):
return lambda self: based_method(self, *args)
_TEST_GENERATOR_PREFIX = 'GenerateTestCases_'
_INVALID_TEST_NAME_RE = re.compile(r'[^a-zA-Z0-9_\.\\\/-]')
def _ValidateTestMethodname(test_name):
assert not bool(_INVALID_TEST_NAME_RE.search(test_name))
def GenerateTestCases(test_class, finder_options):
test_cases = []
for name, method in inspect.getmembers(
test_class, predicate=inspect.ismethod):
if name.startswith('test'):
# Do not allow method names starting with "test" in these
# subclasses, to avoid collisions with Python's unit test runner.
raise Exception('Name collision with Python\'s unittest runner: %s' %
name)
elif name.startswith('Test'):
# Pass these through for the time being. We may want to rethink
# how they are handled in the future.
test_cases.append(test_class(name))
elif name.startswith(_TEST_GENERATOR_PREFIX):
based_method_name = name[len(_TEST_GENERATOR_PREFIX):]
assert hasattr(test_class, based_method_name), (
'%s is specified but based method %s does not exist' %
(name, based_method_name))
based_method = getattr(test_class, based_method_name)
for generated_test_name, args in method(finder_options):
_ValidateTestMethodname(generated_test_name)
setattr(test_class, generated_test_name, _GenerateTestMethod(
based_method, args))
test_cases.append(test_class(generated_test_name))
return test_cases
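# A minimal illustrative sketch (not part of Telemetry) of the
# 'GenerateTestCases_' convention handled above: the generator classmethod
# yields (test_name, args) pairs, and GenerateTestCases() attaches a generated
# test method that forwards those args to the based method ('RunPage' here).
# The class, method, and page names below are hypothetical.
class _ExampleGeneratedTests(SeriallyExecutedBrowserTestCase):

  def RunPage(self, url):
    # Based method: receives the args yielded by GenerateTestCases_RunPage.
    del url  # Illustration only; a real test would drive self.browser here.

  @classmethod
  def GenerateTestCases_RunPage(cls, finder_options):
    del finder_options  # Unused in this sketch.
    yield 'RunPage_about_blank', ('about:blank',)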
|
|
# -*- encoding:utf-8 -*-
import numpy as np
class Optimizer(object):
def __init__(self, lr=1e-3, decay=0., grad_clip=-1, lr_min=0., lr_max=np.inf):
self.lr = lr
self.decay = decay
self.clip = grad_clip
self.lr_min = lr_min
self.lr_max = lr_max
self.iterations = 0
def update(self):
self.iterations += 1
        self.lr *= 1. / (1. + self.decay * self.iterations)
self.lr = np.clip(self.lr, self.lr_min, self.lr_max)
class SGD(Optimizer):
def __init__(self, *args, **kwargs):
super(SGD, self).__init__(*args, **kwargs)
def minimize(self, params, grads):
for p, g in zip(params, grads):
p -= self.lr * _grad_clip(g, self.clip)
super(SGD, self).update()
def maximum(self, params, grads):
for p, g in zip(params, grads):
p += self.lr * _grad_clip(g, self.clip)
super(SGD, self).update()
class Momentum(Optimizer):
"""
Performs stochastic gradient descent with momentum.
momentum: Scalar between 0 and 1 giving the momentum value.
Setting momentum = 0 reduces to sgd.
velocity: A numpy array of the same shape as w and dw used to store a moving
average of the gradients.
"""
def __init__(self, momentum=0.9, nesterov=False, *args, **kwargs):
super(Momentum, self).__init__(*args, **kwargs)
self.momentum = momentum
self.nesterov = nesterov
self.velocity = dict()
def minimize(self, params, grads):
for p, g in zip(params, grads):
v = self.velocity.get(id(p), np.zeros_like(p))
v = self.momentum * v - self.lr * g
            # Update in place so the caller's array changes and id(p) stays stable.
            if self.nesterov:
                p += self.momentum * v - self.lr * g
            else:
                p += v
            self.velocity[id(p)] = v
super(Momentum, self).update()
def maximum(self, params, grads):
for p, g in zip(params, grads):
v = self.velocity.get(id(p), np.zeros_like(p))
v = self.momentum * v - self.lr * g
p -= v
            self.velocity[id(p)] = v
super(Momentum, self).update()
class RMSProp(Optimizer):
"""
Uses the RMSProp update rule, which uses a moving average of squared gradient
values to set adaptive per-parameter learning rates.
learning_rate: Scalar learning rate.
decay_rate: Scalar between 0 and 1 giving the decay rate for the squared
gradient cache.
epsilon: Small scalar used for smoothing to avoid dividing by zero.
cache: Moving average of second moments of gradients.
"""
def __init__(self, decay_rate=0.99, epsilon=1e-8, *args, **kwargs):
super(RMSProp, self).__init__(*args, **kwargs)
self.decay_rate = decay_rate
self.epsilon = epsilon
self.cache = dict()
def minimize(self, params, grads):
for p, g in zip(params, grads):
cache = self.cache.get(id(p), np.zeros_like(p))
self.cache[id(p)] = self.decay_rate * cache + (1 - self.decay_rate) * (g ** 2)
p -= self.lr * g / (np.sqrt(self.cache[id(p)]) + self.epsilon)
super(RMSProp, self).update()
def maximum(self, params, grads):
for p, g in zip(params, grads):
cache = self.cache.get(id(p), np.zeros_like(p))
self.cache[id(p)] = self.decay_rate * cache + (1 - self.decay_rate) * (g ** 2)
p += self.lr * g / (np.sqrt(self.cache[id(p)]) + self.epsilon)
super(RMSProp, self).update()
class Adam(Optimizer):
"""
Uses the Adam update rule, which incorporates moving averages of both the
gradient and its square and a bias correction term.
beta1: Decay rate for moving average of first moment of gradient.
beta2: Decay rate for moving average of second moment of gradient.
epsilon: Small scalar used for smoothing to avoid dividing by zero.
m: Moving average of gradient.
v: Moving average of squared gradient.
"""
def __init__(self, beta1=0.9, beta2=0.999, epsilon=1e-8, *args, **kwargs):
super(Adam, self).__init__(*args, **kwargs)
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.m = dict()
self.v = dict()
def minimize(self, params, grads):
for p, g in zip(params, grads):
m = self.m.get(id(p), np.zeros_like(p))
v = self.v.get(id(p), np.zeros_like(p))
self.m[id(p)] = self.beta1 * m + (1 - self.beta1) * g
self.v[id(p)] = self.beta2 * v + (1 - self.beta2) * g ** 2
mb = self.m[id(p)] / (1 - self.beta1 ** (self.iterations + 1))
vb = self.v[id(p)] / (1 - self.beta2 ** (self.iterations + 1))
p -= (self.lr * mb / (np.sqrt(vb) + self.epsilon))
super(Adam, self).update()
def maximum(self, params, grads):
for p, g in zip(params, grads):
m = self.m.get(id(p), np.zeros_like(p))
v = self.v.get(id(p), np.zeros_like(p))
self.m[id(p)] = self.beta1 * m + (1 - self.beta1) * g
            self.v[id(p)] = self.beta2 * v + (1 - self.beta2) * g ** 2
mb = self.m[id(p)] / (1 - self.beta1 ** (self.iterations + 1))
vb = self.v[id(p)] / (1 - self.beta2 ** (self.iterations + 1))
p += (self.lr * mb / (np.sqrt(vb) + self.epsilon))
super(Adam, self).update()
class Adagrad(Optimizer):
def __init__(self, epsilon=1e-7, *args, **kwargs):
super(Adagrad, self).__init__(*args, **kwargs)
self.epsilon = epsilon
self.__r = dict()
def minimize(self, params, grads):
for p, g in zip(params, grads):
self.__r.setdefault(id(p), np.zeros_like(p))
self.__r[id(p)] += g ** 2
p -= self.lr / (self.epsilon + np.sqrt(self.__r[id(p)])) * g
super(Adagrad, self).update()
def maximum(self, params, grads):
for p, g in zip(params, grads):
self.__r.setdefault(id(p), np.zeros_like(p))
self.__r[id(p)] += g ** 2
p += self.lr / (self.epsilon + np.sqrt(self.__r[id(p)])) * g
super(Adagrad, self).update()
class Adadelta(Optimizer):
"""Adadelta optimizer.
It is recommended to leave the parameters of this optimizer
at their default values.
# Arguments
lr: float >= 0. Learning rate.
It is recommended to leave it at the default value.
rho: float >= 0.
epsilon: float >= 0. Fuzz factor.
decay: float >= 0. Learning rate decay over each update.
# References
- [Adadelta - an adaptive learning rate method](http://arxiv.org/abs/1212.5701)
"""
def __init__(self, lr=1.0, rho=0.95, epsilon=1e-8, decay=0.,
*args, **kwargs):
super(Adadelta, self).__init__(lr=lr, decay=decay, *args, **kwargs)
self.rho = rho
self.epsilon = epsilon
self.__accmulators = dict()
self.__delta_accumulators = dict()
def minimize(self, params, grads):
shapes = [p.shape for p in params]
# accumulate gradients
accumulators = [np.zeros(shape) for shape in shapes]
# accumulate updates
delta_accumulators = [np.zeros(shape) for shape in shapes]
self.weights = accumulators + delta_accumulators
self.updates = []
for p, g in zip(params, grads):
a = self.__accmulators.setdefault(id(p), np.zeros_like(p))
d_a = self.__delta_accumulators.setdefault(id(p), np.zeros_like(p))
# update accumulator
new_a = self.rho * a + (1. - self.rho) * np.square(g)
# use the new accumulator and the *old* delta_accumulator
update = g * np.sqrt(d_a + self.epsilon) / np.sqrt(new_a + self.epsilon)
p -= self.lr * update
# update delta_accumulator
new_d_a = self.rho * d_a + (1 - self.rho) * np.square(update)
self.__accmulators[id(p)] = new_a
self.__delta_accumulators[id(p)] = new_d_a
super(Adadelta, self).update()
def _grad_clip(grad, clip):
if clip > 0:
return np.clip(grad, -clip, clip)
else:
return grad
# name aliases
sgd = SGD
momentum = Momentum
rmsprop = RMSProp
adam = Adam
adagrad = Adagrad
adadelta = Adadelta
def get(optimizer):
if isinstance(optimizer, str):
optimizer = optimizer.lower()
if optimizer in ('sgd', ):
return SGD()
elif optimizer in ('momentum', ):
return Momentum()
elif optimizer in ('rmsprop', 'rms'):
return RMSProp()
        elif optimizer in ('adam',):
return Adam()
elif optimizer in ('adagrad', ):
return Adagrad()
elif optimizer in ('adadelta',):
return Adadelta()
else:
raise ValueError('Unknown optimizer name `{}`'.format(optimizer))
elif isinstance(optimizer, Optimizer):
return optimizer
else:
raise ValueError('Unknown optimizer type `{}`'.format(optimizer.__class__.__name__))
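# A minimal usage sketch (added illustration, not part of the module): minimize
# the 1-D quadratic (w - 3)^2 with in-place parameter updates, resolving the
# optimizer by name through get(). The names and values below are illustrative.
if __name__ == '__main__':
    w = np.array([5.0])           # parameter; minimize() updates it in place
    opt = get('sgd')
    opt.lr = 0.1                  # raise the default 1e-3 rate for this toy problem
    for _ in range(100):
        grad = 2.0 * (w - 3.0)    # gradient of (w - 3)^2
        opt.minimize([w], [grad])
    print('w ~= %s' % w)          # converges towards [ 3.]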
|
|
#! /usr/bin/env python
"""
"PYSTONE" Benchmark Program
Version: Python/1.1 (corresponds to C/1.1 plus 2 Pystone fixes)
Author: Reinhold P. Weicker, CACM Vol 27, No 10, 10/84 pg. 1013.
Translated from ADA to C by Rick Richardson.
Every method to preserve ADA-likeness has been used,
at the expense of C-ness.
Translated from C to Python by Guido van Rossum.
Version History:
Version 1.1 corrects two bugs in version 1.0:
First, it leaked memory: in Proc1(), NextRecord ends
up having a pointer to itself. I have corrected this
by zapping NextRecord.PtrComp at the end of Proc1().
Second, Proc3() used the operator != to compare a
record to None. This is rather inefficient and not
true to the intention of the original benchmark (where
a pointer comparison to None is intended; the !=
operator attempts to find a method __cmp__ to do value
comparison of the record). Version 1.1 runs 5-10
percent faster than version 1.0, so benchmark figures
of different versions can't be compared directly.
"""
LOOPS = 50000
# use a global instance instead of globals
class G:pass
g = G()
import sys
from time import clock
__version__ = "1.1"
[Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6)
class Record:
def __init__(self, PtrComp = None, Discr = 0, EnumComp = 0,
IntComp = 0, StringComp = ""):
self.PtrComp = PtrComp
self.Discr = Discr
self.EnumComp = EnumComp
self.IntComp = IntComp
self.StringComp = StringComp
def copy(self):
return Record(self.PtrComp, self.Discr, self.EnumComp,
self.IntComp, self.StringComp)
TRUE = 1
FALSE = 0
def main(loops=LOOPS):
benchtime, stones = pystones(abs(loops))
if loops >= 0:
print "Pystone(%s) time for %d passes = %g" % \
(__version__, loops, benchtime)
print "This machine benchmarks at %g pystones/second" % stones
def pystones(loops=LOOPS):
return Proc0(loops)
g.IntGlob = 0
g.BoolGlob = FALSE
g.Char1Glob = '\0'
g.Char2Glob = '\0'
g.Array1Glob = [0]*51
g.Array2Glob = map(lambda x: x[:], [g.Array1Glob]*51)
g.PtrGlb = None
g.PtrGlbNext = None
def Proc0(loops=LOOPS):
#global IntGlob
#global BoolGlob
#global Char1Glob
#global Char2Glob
#global Array1Glob
#global Array2Glob
#global PtrGlb
#global PtrGlbNext
starttime = clock()
#for i in range(loops):
# this is bad with very large values of loops
# XXX xrange support?
i = 0
while i < loops:
i += 1
# the above is most likely to vanish in C :-(
nulltime = clock() - starttime
g.PtrGlbNext = Record()
g.PtrGlb = Record()
g.PtrGlb.PtrComp = g.PtrGlbNext
g.PtrGlb.Discr = Ident1
g.PtrGlb.EnumComp = Ident3
g.PtrGlb.IntComp = 40
g.PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING"
String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING"
g.Array2Glob[8][7] = 10
EnumLoc = None # addition for flow space
starttime = clock()
#for i in range(loops):
# this is bad with very large values of loops
# XXX xrange support?
i = 0
while i < loops:
Proc5()
Proc4()
IntLoc1 = 2
IntLoc2 = 3
String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING"
EnumLoc = Ident2
g.BoolGlob = not Func2(String1Loc, String2Loc)
while IntLoc1 < IntLoc2:
IntLoc3 = 5 * IntLoc1 - IntLoc2
IntLoc3 = Proc7(IntLoc1, IntLoc2)
IntLoc1 = IntLoc1 + 1
Proc8(g.Array1Glob, g.Array2Glob, IntLoc1, IntLoc3)
g.PtrGlb = Proc1(g.PtrGlb)
CharIndex = 'A'
while CharIndex <= g.Char2Glob:
if EnumLoc == Func1(CharIndex, 'C'):
EnumLoc = Proc6(Ident1)
CharIndex = chr(ord(CharIndex)+1)
IntLoc3 = IntLoc2 * IntLoc1
IntLoc2 = IntLoc3 / IntLoc1
IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1
IntLoc1 = Proc2(IntLoc1)
i += 1
benchtime = clock() - starttime - nulltime
if benchtime < 1E-8:
benchtime = 1E-8 # time too short, meaningless results anyway
return benchtime, (loops / benchtime)
def Proc1(PtrParIn):
PtrParIn.PtrComp = NextRecord = g.PtrGlb.copy()
PtrParIn.IntComp = 5
NextRecord.IntComp = PtrParIn.IntComp
NextRecord.PtrComp = PtrParIn.PtrComp
NextRecord.PtrComp = Proc3(NextRecord.PtrComp)
if NextRecord.Discr == Ident1:
NextRecord.IntComp = 6
NextRecord.EnumComp = Proc6(PtrParIn.EnumComp)
NextRecord.PtrComp = g.PtrGlb.PtrComp
NextRecord.IntComp = Proc7(NextRecord.IntComp, 10)
else:
PtrParIn = NextRecord.copy()
NextRecord.PtrComp = None
return PtrParIn
def Proc2(IntParIO):
IntLoc = IntParIO + 10
EnumLoc = None # addition for flow space
while 1:
if g.Char1Glob == 'A':
IntLoc = IntLoc - 1
IntParIO = IntLoc - g.IntGlob
EnumLoc = Ident1
if EnumLoc == Ident1:
break
return IntParIO
def Proc3(PtrParOut):
#global IntGlob
if g.PtrGlb is not None:
PtrParOut = g.PtrGlb.PtrComp
else:
g.IntGlob = 100
g.PtrGlb.IntComp = Proc7(10, g.IntGlob)
return PtrParOut
def Proc4():
#global Char2Glob
BoolLoc = g.Char1Glob == 'A'
BoolLoc = BoolLoc or g.BoolGlob
g.Char2Glob = 'B'
def Proc5():
#global Char1Glob
#global BoolGlob
g.Char1Glob = 'A'
g.BoolGlob = FALSE
def Proc6(EnumParIn):
EnumParOut = EnumParIn
if not Func3(EnumParIn):
EnumParOut = Ident4
if EnumParIn == Ident1:
EnumParOut = Ident1
elif EnumParIn == Ident2:
if g.IntGlob > 100:
EnumParOut = Ident1
else:
EnumParOut = Ident4
elif EnumParIn == Ident3:
EnumParOut = Ident2
elif EnumParIn == Ident4:
pass
elif EnumParIn == Ident5:
EnumParOut = Ident3
return EnumParOut
def Proc7(IntParI1, IntParI2):
IntLoc = IntParI1 + 2
IntParOut = IntParI2 + IntLoc
return IntParOut
def Proc8(Array1Par, Array2Par, IntParI1, IntParI2):
#global IntGlob
IntLoc = IntParI1 + 5
Array1Par[IntLoc] = IntParI2
Array1Par[IntLoc+1] = Array1Par[IntLoc]
Array1Par[IntLoc+30] = IntLoc
for IntIndex in range(IntLoc, IntLoc+2):
Array2Par[IntLoc][IntIndex] = IntLoc
Array2Par[IntLoc][IntLoc-1] = Array2Par[IntLoc][IntLoc-1] + 1
Array2Par[IntLoc+20][IntLoc] = Array1Par[IntLoc]
g.IntGlob = 5
def Func1(CharPar1, CharPar2):
CharLoc1 = CharPar1
CharLoc2 = CharLoc1
if CharLoc2 != CharPar2:
return Ident1
else:
return Ident2
def Func2(StrParI1, StrParI2):
IntLoc = 1
while IntLoc <= 1:
if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1:
CharLoc = 'A'
IntLoc = IntLoc + 1
if CharLoc >= 'W' and CharLoc <= 'Z':
IntLoc = 7
if CharLoc == 'X':
return TRUE
else:
if StrParI1 > StrParI2:
IntLoc = IntLoc + 7
return TRUE
else:
return FALSE
def Func3(EnumParIn):
EnumLoc = EnumParIn
if EnumLoc == Ident3: return TRUE
return FALSE
def error(msg):
print >> sys.stderr, msg,
print >> sys.stderr, "usage: %s [number_of_loops]" % sys.argv[0]
sys.exit(100)
def entrypoint(loops=None):
import string # just a little test
print string.replace("import works", "s", "x")
if loops is None:
loops = LOOPS # initialize early, for slow space
nargs = len(sys.argv) - 1
if nargs > 1:
error("%d arguments are too many;" % nargs)
elif nargs == 1:
try: loops = int(sys.argv[1])
except ValueError:
error("Invalid argument %r;" % sys.argv[1])
else:
if hasattr(sys, 'pypy_objspaceclass'):
loops = LOOPS / 2000 # XXX rough estimate, adjust
main(loops)
if __name__ == '__main__':
entrypoint()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains a CloudTasksHook
which allows you to connect to Google Cloud Tasks service,
performing actions to queues or tasks.
"""
from typing import Dict, List, Optional, Sequence, Tuple, Union
from google.api_core.retry import Retry
from google.cloud.tasks_v2 import CloudTasksClient
from google.cloud.tasks_v2.types import Queue, Task
from google.protobuf.field_mask_pb2 import FieldMask
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import PROVIDE_PROJECT_ID, GoogleBaseHook
class CloudTasksHook(GoogleBaseHook):
"""
Hook for Google Cloud Tasks APIs. Cloud Tasks allows developers to manage
the execution of background work in their applications.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
delegate_to=delegate_to,
impersonation_chain=impersonation_chain,
)
self._client: Optional[CloudTasksClient] = None
def get_conn(self) -> CloudTasksClient:
"""
Provides a client for interacting with the Google Cloud Tasks API.
:return: Google Cloud Tasks API Client
:rtype: google.cloud.tasks_v2.CloudTasksClient
"""
if self._client is None:
self._client = CloudTasksClient(credentials=self._get_credentials(), client_info=self.client_info)
return self._client
@GoogleBaseHook.fallback_to_default_project_id
def create_queue(
self,
location: str,
task_queue: Union[dict, Queue],
project_id: str = PROVIDE_PROJECT_ID,
queue_name: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Queue:
"""
Creates a queue in Cloud Tasks.
:param location: The location name in which the queue will be created.
:param task_queue: The task queue to create.
Queue's name cannot be the same as an existing queue.
If a dict is provided, it must be of the same form as the protobuf message Queue.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param queue_name: (Optional) The queue's name.
If provided, it will be used to construct the full queue path.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:rtype: google.cloud.tasks_v2.types.Queue
"""
client = self.get_conn()
if queue_name:
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
if isinstance(task_queue, Queue):
task_queue.name = full_queue_name
elif isinstance(task_queue, dict):
task_queue['name'] = full_queue_name
else:
raise AirflowException('Unable to set queue_name.')
full_location_path = f"projects/{project_id}/locations/{location}"
return client.create_queue(
request={'parent': full_location_path, 'queue': task_queue},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def update_queue(
self,
task_queue: Queue,
project_id: str = PROVIDE_PROJECT_ID,
location: Optional[str] = None,
queue_name: Optional[str] = None,
update_mask: Optional[FieldMask] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Queue:
"""
Updates a queue in Cloud Tasks.
:param task_queue: The task queue to update.
This method creates the queue if it does not exist and updates the queue if
it does exist. The queue's name must be specified.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: (Optional) The location name in which the queue will be updated.
If provided, it will be used to construct the full queue path.
:param queue_name: (Optional) The queue's name.
If provided, it will be used to construct the full queue path.
        :param update_mask: A mask used to specify which fields of the queue are being updated.
If empty, then all fields will be updated.
If a dict is provided, it must be of the same form as the protobuf message.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:rtype: google.cloud.tasks_v2.types.Queue
"""
client = self.get_conn()
if queue_name and location:
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
if isinstance(task_queue, Queue):
task_queue.name = full_queue_name
elif isinstance(task_queue, dict):
task_queue['name'] = full_queue_name
else:
raise AirflowException('Unable to set queue_name.')
return client.update_queue(
request={'queue': task_queue, 'update_mask': update_mask},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_queue(
self,
location: str,
queue_name: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Queue:
"""
Gets a queue from Cloud Tasks.
:param location: The location name in which the queue was created.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:rtype: google.cloud.tasks_v2.types.Queue
"""
client = self.get_conn()
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
return client.get_queue(
request={'name': full_queue_name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def list_queues(
self,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
results_filter: Optional[str] = None,
page_size: Optional[int] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> List[Queue]:
"""
Lists queues from Cloud Tasks.
:param location: The location name in which the queues were created.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param results_filter: (Optional) Filter used to specify a subset of queues.
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:rtype: list[google.cloud.tasks_v2.types.Queue]
"""
client = self.get_conn()
full_location_path = f"projects/{project_id}/locations/{location}"
queues = client.list_queues(
request={'parent': full_location_path, 'filter': results_filter, 'page_size': page_size},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return list(queues)
@GoogleBaseHook.fallback_to_default_project_id
def delete_queue(
self,
location: str,
queue_name: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
"""
Deletes a queue from Cloud Tasks, even if it has tasks in it.
:param location: The location name in which the queue will be deleted.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
client.delete_queue(
request={'name': full_queue_name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def purge_queue(
self,
location: str,
queue_name: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> List[Queue]:
"""
Purges a queue by deleting all of its tasks from Cloud Tasks.
:param location: The location name in which the queue will be purged.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:rtype: list[google.cloud.tasks_v2.types.Queue]
"""
client = self.get_conn()
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
return client.purge_queue(
request={'name': full_queue_name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def pause_queue(
self,
location: str,
queue_name: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> List[Queue]:
"""
Pauses a queue in Cloud Tasks.
:param location: The location name in which the queue will be paused.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:rtype: list[google.cloud.tasks_v2.types.Queue]
"""
client = self.get_conn()
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
return client.pause_queue(
request={'name': full_queue_name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def resume_queue(
self,
location: str,
queue_name: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> List[Queue]:
"""
Resumes a queue in Cloud Tasks.
:param location: The location name in which the queue will be resumed.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:rtype: list[google.cloud.tasks_v2.types.Queue]
"""
client = self.get_conn()
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
return client.resume_queue(
request={'name': full_queue_name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def create_task(
self,
location: str,
queue_name: str,
task: Union[Dict, Task],
project_id: str = PROVIDE_PROJECT_ID,
task_name: Optional[str] = None,
response_view: Optional[Task.View] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Task:
"""
Creates a task in Cloud Tasks.
:param location: The location name in which the task will be created.
:param queue_name: The queue's name.
:param task: The task to add.
If a dict is provided, it must be of the same form as the protobuf message Task.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param task_name: (Optional) The task's name.
If provided, it will be used to construct the full task path.
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:rtype: google.cloud.tasks_v2.types.Task
"""
client = self.get_conn()
if task_name:
full_task_name = (
f"projects/{project_id}/locations/{location}/queues/{queue_name}/tasks/{task_name}"
)
if isinstance(task, Task):
task.name = full_task_name
elif isinstance(task, dict):
task['name'] = full_task_name
else:
raise AirflowException('Unable to set task_name.')
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
return client.create_task(
request={'parent': full_queue_name, 'task': task, 'response_view': response_view},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_task(
self,
location: str,
queue_name: str,
task_name: str,
project_id: str = PROVIDE_PROJECT_ID,
response_view: Optional[Task.View] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Task:
"""
Gets a task from Cloud Tasks.
:param location: The location name in which the task was created.
:param queue_name: The queue's name.
:param task_name: The task's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:rtype: google.cloud.tasks_v2.types.Task
"""
client = self.get_conn()
full_task_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}/tasks/{task_name}"
return client.get_task(
request={'name': full_task_name, 'response_view': response_view},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def list_tasks(
self,
location: str,
queue_name: str,
        project_id: str = PROVIDE_PROJECT_ID,
response_view: Optional[Task.View] = None,
page_size: Optional[int] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> List[Task]:
"""
Lists the tasks in Cloud Tasks.
:param location: The location name in which the tasks were created.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:rtype: list[google.cloud.tasks_v2.types.Task]
"""
client = self.get_conn()
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
tasks = client.list_tasks(
request={'parent': full_queue_name, 'response_view': response_view, 'page_size': page_size},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return list(tasks)
@GoogleBaseHook.fallback_to_default_project_id
def delete_task(
self,
location: str,
queue_name: str,
task_name: str,
        project_id: str = PROVIDE_PROJECT_ID,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
"""
Deletes a task from Cloud Tasks.
:param location: The location name in which the task will be deleted.
:param queue_name: The queue's name.
:param task_name: The task's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
full_task_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}/tasks/{task_name}"
client.delete_task(
request={'name': full_task_name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def run_task(
self,
location: str,
queue_name: str,
task_name: str,
        project_id: str = PROVIDE_PROJECT_ID,
response_view: Optional[Task.View] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Task:
"""
        Forces a task to run in Cloud Tasks.
:param location: The location name in which the task was created.
:param queue_name: The queue's name.
:param task_name: The task's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:rtype: google.cloud.tasks_v2.types.Task
"""
client = self.get_conn()
full_task_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}/tasks/{task_name}"
return client.run_task(
request={'name': full_task_name, 'response_view': response_view},
retry=retry,
timeout=timeout,
metadata=metadata,
)
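# --------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the hook above). It is written
# purely against the method signatures defined in this module; the location,
# queue name, task payload and task name are placeholder assumptions, and
# ``hook`` is assumed to be an instance of the enclosing Cloud Tasks hook.
def _example_enqueue_http_task(hook, project_id: str):
    """Create a queue and push one HTTP task onto it."""
    location = "europe-west1"      # placeholder region
    queue_name = "example-queue"   # placeholder queue name
    hook.create_queue(
        location=location,
        task_queue={},             # dict form of the Queue message; empty means default settings
        project_id=project_id,
        queue_name=queue_name,
    )
    # Dict form of the Task message; a real task would normally carry an
    # http_request or app_engine_http_request payload such as the one below.
    task = {"http_request": {"url": "https://example.invalid/handler"}}
    return hook.create_task(
        location=location,
        queue_name=queue_name,
        task=task,
        project_id=project_id,
        task_name="example-task",
    )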
|
|
from unittest import TestCase
import queue
from ddsc.core.parallel import WaitingTaskList, Task, TaskRunner, TaskExecutor
from mock import patch, Mock
def no_op():
pass
class NoOpTask(object):
def __init__(self):
self.func = no_op
class TestWaitingTaskList(TestCase):
def task_ids(self, tasks):
return [task.id for task in tasks]
def test_get_next_tasks_two_tiers(self):
task_list = WaitingTaskList()
task_list.add(Task(1, None, NoOpTask()))
task_list.add(Task(2, 1, NoOpTask()))
task_list.add(Task(3, 1, NoOpTask()))
none_task_ids = self.task_ids(task_list.get_next_tasks(None))
self.assertEqual([1], none_task_ids)
one_task_ids = self.task_ids(task_list.get_next_tasks(1))
self.assertEqual([2, 3], one_task_ids)
two_task_ids = self.task_ids(task_list.get_next_tasks(2))
self.assertEqual([], two_task_ids)
def test_get_next_tasks_one_tiers(self):
task_list = WaitingTaskList()
task_list.add(Task(1, None, NoOpTask()))
task_list.add(Task(2, None, NoOpTask()))
task_list.add(Task(3, None, NoOpTask()))
none_task_ids = self.task_ids(task_list.get_next_tasks(None))
self.assertEqual([1, 2, 3], none_task_ids)
class AddCommandContext(object):
def __init__(self, values, message_data, message_queue, task_id):
self.values = values
self.message_data = message_data
self.message_queue = message_queue
self.task_id = task_id
def send_message(self, data):
self.message_queue.put((self.task_id, data))
class AddCommand(object):
"""
Simple task that adds two numbers together returning the result.
Run in a separate process to illustrate/test the parallel.TaskRunner.
"""
def __init__(self, value1, value2):
self.values = value1, value2
self.parent_task_result = None
self.result = None
self.func = add_func
self.send_message = None
self.on_message_data = []
def before_run(self, parent_task_result):
self.parent_task_result = parent_task_result
def create_context(self, message_queue, task_id):
return AddCommandContext(self.values, self.send_message, message_queue, task_id)
def after_run(self, results):
self.result = results
def on_message(self, data):
self.on_message_data.append(data)
def add_func(context):
"""
Function run by AddCommand
:param context
:return: sum of values
"""
values = context.values
if context.message_data:
context.send_message(context.message_data)
v1, v2 = values
return v1 + v2
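# Illustrative sketch (not a test): minimal wiring of the command objects above
# with TaskRunner, mirroring what the tests below exercise. Passing 1 as the
# parent id relies on TaskRunner.add assigning task ids sequentially from 1,
# which the ordering tests below also assume.
def _example_run_chained_adds():
    runner = TaskRunner(num_workers=2)
    parent = AddCommand(1, 2)      # runs first; result is 3
    child = AddCommand(3, 4)       # runs after the parent and sees its result via before_run
    runner.add(None, parent)       # becomes task id 1 (no parent)
    runner.add(1, child)           # waits for task id 1 to finish
    runner.run()
    return parent.result, child.result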
class TestTaskRunner(TestCase):
"""
Task runner should be able to add numbers in a separate process and re-use the result in waiting tasks.
"""
def test_single_add(self):
add_command = AddCommand(10, 30)
runner = TaskRunner(num_workers=10)
runner.add(None, add_command)
runner.run()
self.assertEqual(add_command.parent_task_result, None)
self.assertEqual(add_command.result, 40)
self.assertEqual(add_command.send_message, None)
def test_two_adds_in_order(self):
add_command = AddCommand(10, 30)
add_command2 = AddCommand(4, 1)
runner = TaskRunner(num_workers=10)
runner.add(None, add_command)
runner.add(1, add_command2)
runner.run()
self.assertEqual(add_command.parent_task_result, None)
self.assertEqual(add_command.result, 40)
self.assertEqual(add_command2.parent_task_result, 40)
self.assertEqual(add_command2.result, 5)
def test_two_adds_in_parallel(self):
add_command = AddCommand(10, 30)
add_command2 = AddCommand(4, 1)
runner = TaskRunner(num_workers=10)
runner.add(None, add_command,)
runner.add(None, add_command2)
runner.run()
self.assertEqual(add_command.parent_task_result, None)
self.assertEqual(add_command.result, 40)
self.assertEqual(add_command2.parent_task_result, None)
self.assertEqual(add_command2.result, 5)
def test_command_with_message(self):
add_command = AddCommand(10, 30)
add_command.send_message = 'ok'
add_command2 = AddCommand(4, 1)
add_command2.send_message = 'waiting'
runner = TaskRunner(num_workers=10)
runner.add(None, add_command,)
runner.add(None, add_command2)
runner.run()
self.assertEqual(add_command.parent_task_result, None)
self.assertEqual(add_command.result, 40)
self.assertEqual(add_command2.parent_task_result, None)
self.assertEqual(add_command2.result, 5)
self.assertEqual(add_command.on_message_data, ['ok'])
self.assertEqual(add_command2.on_message_data, ['waiting'])
@patch('ddsc.core.parallel.TaskExecutor')
def test_run_closes_executor(self, mock_task_executor):
add_command = AddCommand(10, 30)
runner = TaskRunner(num_workers=10)
runner.add(None, add_command)
runner.run()
mock_task_executor.assert_called_with(10)
mock_task_executor.return_value.close.assert_called_with()
class TestTaskExecutor(TestCase):
@patch('ddsc.core.parallel.multiprocessing')
def test_wait_for_tasks_with_multiple_messages(self, mock_multiprocessing):
        # Setup so we will go to the wait_for_tasks loop once and
# the process_all_messages_in_queue inside the loop receives all the messages
message_queue = Mock()
message_queue.get_nowait.side_effect = [
(1, "TEST"),
(1, "TEST2"),
queue.Empty,
queue.Empty
]
mock_multiprocessing.Manager.return_value.Queue.return_value = message_queue
mock_pool = Mock()
mock_pending_result = Mock()
mock_pending_result.get.return_value = (1, 40)
mock_pool.apply_async.side_effect = [
mock_pending_result
]
mock_multiprocessing.Pool.return_value = mock_pool
add_command = AddCommand(10, 30)
# Force messages to come in before the task exits
        # so we test 'wait_for_tasks' and not 'get_finished_results'
add_command.send_message = 'testing'
executor = TaskExecutor(2)
executor.add_task(Task(1, None, add_command), None)
executor.wait_for_tasks()
self.assertEqual(40, add_command.result)
self.assertEqual(add_command.on_message_data, ['TEST', 'TEST2'])
@patch('ddsc.core.parallel.multiprocessing')
def test_get_finished_results_with_multiple_messages(self, mock_multiprocessing):
        # Setup so we will go to the wait_for_tasks loop once and
# get_finished_results will receive all the messages
message_queue = Mock()
message_queue.get_nowait.side_effect = [
queue.Empty,
(1, "TEST"),
(1, "TEST2"),
queue.Empty
]
mock_multiprocessing.Manager.return_value.Queue.return_value = message_queue
mock_pool = Mock()
mock_pending_result = Mock()
mock_pending_result.get.return_value = (1, 40)
mock_pool.apply_async.side_effect = [
mock_pending_result
]
mock_multiprocessing.Pool.return_value = mock_pool
add_command = AddCommand(10, 30)
        # Simulate messages that are only drained after the task result is ready
        # so we test 'get_finished_results' and not 'wait_for_tasks'
add_command.send_message = 'testing'
executor = TaskExecutor(2)
executor.add_task(Task(1, None, add_command), None)
executor.wait_for_tasks()
self.assertEqual(40, add_command.result)
self.assertEqual(add_command.on_message_data, ['TEST', 'TEST2'])
@patch('ddsc.core.parallel.multiprocessing')
def test_close(self, mock_multiprocessing):
executor = TaskExecutor(2)
mock_multiprocessing.Pool.assert_called_with()
mock_multiprocessing.Pool.return_value.close.assert_not_called()
executor.close()
mock_multiprocessing.Pool.return_value.close.assert_called_with()
|
|
#########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import testtools
from testtools.matchers import MatchesAny, Equals, GreaterThan
from nose.tools import nottest
from cloudify.workflows import local
from cloudify.decorators import operation
IGNORED_LOCAL_WORKFLOW_MODULES = (
'worker_installer.tasks',
'plugin_installer.tasks',
'windows_agent_installer.tasks',
'windows_plugin_installer.tasks'
)
class TestExecuteOperationWorkflow(testtools.TestCase):
def setUp(self):
blueprint_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"resources/blueprints/execute_operation.yaml")
self.env = local.init_env(blueprint_path)
super(TestExecuteOperationWorkflow, self).setUp()
def test_execute_operation(self):
params = self._get_params()
self.env.execute('execute_operation', params)
self._make_filter_assertions(4)
def test_execute_operation_default_values(self):
params = {'operation': 'cloudify.interfaces.lifecycle.create'}
self.env.execute('execute_operation', params)
self._make_filter_assertions(4)
def test_execute_operation_with_operation_parameters(self):
self._test_execute_operation_with_op_params(
'cloudify.interfaces.lifecycle.create')
def test_execute_operation_with_op_params_and_kwargs_override_allowed(
self):
self._test_execute_operation_with_op_params(
'cloudify.interfaces.lifecycle.configure', True)
def test_execute_operation_with_op_params_and_kwargs_override_disallowed(
self):
self._test_exec_op_with_params_and_no_kwargs_override(False)
def test_execute_operation_with_op_params_and_default_kwargs_override(
self):
# testing kwargs override with the default value for the
# 'allow_kwargs_override' parameter (null/None)
self._test_exec_op_with_params_and_no_kwargs_override(None)
def _test_exec_op_with_params_and_no_kwargs_override(self, kw_over_val):
try:
self._test_execute_operation_with_op_params(
'cloudify.interfaces.lifecycle.configure', kw_over_val)
self.fail('expected kwargs override to be disallowed')
        except RuntimeError as e:
self.assertIn(
'To allow redefinition, pass "allow_kwargs_override"', str(e))
def _test_execute_operation_with_op_params(self, op,
allow_kw_override=None):
operation_param_key = 'operation_param_key'
operation_param_value = 'operation_param_value'
op_params = {operation_param_key: operation_param_value}
params = self._get_params(op=op, op_params=op_params,
allow_kw_override=allow_kw_override)
self.env.execute('execute_operation', params)
self._make_filter_assertions(4)
instances = self.env.storage.get_node_instances()
for instance in instances:
self.assertIn('op_kwargs', instance.runtime_properties)
op_kwargs = instance.runtime_properties['op_kwargs']
self.assertIn(operation_param_key, op_kwargs)
self.assertEquals(operation_param_value,
op_kwargs[operation_param_key])
def test_execute_operation_by_nodes(self):
node_ids = ['node2', 'node3']
params = self._get_params(node_ids=node_ids)
self.env.execute('execute_operation', params)
self._make_filter_assertions(3, node_ids=node_ids)
def test_execute_operation_by_node_instances(self):
instances = self.env.storage.get_node_instances()
node_instance_ids = [instances[0].id, instances[3].id]
params = self._get_params(node_instance_ids=node_instance_ids)
self.env.execute('execute_operation', params)
self._make_filter_assertions(2, node_instance_ids=node_instance_ids)
def test_execute_operation_by_type_names(self):
type_names = ['mock_type2']
params = self._get_params(type_names=type_names)
self.env.execute('execute_operation', params)
self._make_filter_assertions(3, type_names=type_names)
def test_execute_operation_by_nodes_and_types(self):
node_ids = ['node1', 'node2']
type_names = ['mock_type2']
params = self._get_params(node_ids=node_ids, type_names=type_names)
self.env.execute('execute_operation', params)
self._make_filter_assertions(2, node_ids=node_ids,
type_names=type_names)
def test_execute_operation_by_nodes_types_and_node_instances(self):
node_ids = ['node2', 'node3']
type_names = ['mock_type2', 'mock_type1']
instances = self.env.storage.get_node_instances()
node_instance_ids = [next(inst.id for inst in instances if
inst.node_id == 'node2')]
params = self._get_params(node_ids=node_ids,
node_instance_ids=node_instance_ids,
type_names=type_names)
self.env.execute('execute_operation', params)
self._make_filter_assertions(1, node_ids=node_ids,
node_instance_ids=node_instance_ids,
type_names=type_names)
def test_execute_operation_empty_intersection(self):
node_ids = ['node1', 'node2']
type_names = ['mock_type3']
params = self._get_params(node_ids=node_ids, type_names=type_names)
self.env.execute('execute_operation', params)
self._make_filter_assertions(0, node_ids=node_ids,
type_names=type_names)
def test_execute_operation_with_dependency_order(self):
time_diff_assertions_pairs = [
(0, 1), # node 1 instance and node 2 instance
(0, 2), # node 1 instance and node 2 instance
(1, 3), # node 2 instance and node 3 instance
(2, 3) # node 2 instance and node 3 instance
]
self._dep_order_tests_helper([],
['node1', 'node2', 'node2', 'node3'],
time_diff_assertions_pairs)
def test_execute_operation_with_indirect_dependency_order(self):
time_diff_assertions_pairs = [
(0, 1), # node 1 instance and node 3 instance
]
self._dep_order_tests_helper(['node1', 'node3'],
['node1', 'node3'],
time_diff_assertions_pairs)
def _make_filter_assertions(self, expected_num_of_visited_instances,
node_ids=None, node_instance_ids=None,
type_names=None):
num_of_visited_instances = 0
instances = self.env.storage.get_node_instances()
nodes_by_id = dict((node.id, node) for node in
self.env.storage.get_nodes())
for inst in instances:
test_op_visited = inst.runtime_properties.get('test_op_visited')
if (not node_ids or inst.node_id in node_ids) \
and \
(not node_instance_ids or inst.id in node_instance_ids) \
and \
(not type_names or (next((type for type in nodes_by_id[
inst.node_id].type_hierarchy if type in type_names),
None))):
self.assertTrue(test_op_visited)
num_of_visited_instances += 1
else:
self.assertIsNone(test_op_visited)
# this is actually an assertion to ensure the tests themselves are ok
self.assertEquals(expected_num_of_visited_instances,
num_of_visited_instances)
def _dep_order_tests_helper(self, node_ids_param,
ordered_node_ids_of_instances,
indices_pairs_for_time_diff_assertions):
params = self._get_params(
op='cloudify.interfaces.lifecycle.start',
node_ids=node_ids_param,
run_by_dep=True)
self.env.execute('execute_operation', params, task_thread_pool_size=4)
instances_and_visit_times = sorted(
((inst, inst.runtime_properties['visit_time']) for inst in
self.env.storage.get_node_instances() if 'visit_time' in
inst.runtime_properties),
key=lambda inst_and_time: inst_and_time[1])
self.assertEqual(ordered_node_ids_of_instances,
[inst_and_time[0].node_id for inst_and_time in
instances_and_visit_times])
# asserting time difference between the operation execution for the
# different nodes. this way if something breaks and the tasks aren't
# dependent on one another, there's a better chance we'll catch
# it, since even if the order of the visits happens to be correct,
# it's less likely there'll be a significant time difference between
# the visits
def assert_time_difference(earlier_inst_index, later_inst_index):
td = instances_and_visit_times[later_inst_index][1] - \
instances_and_visit_times[earlier_inst_index][1]
self.assertThat(td, MatchesAny(Equals(1), GreaterThan(1)))
for index1, index2 in indices_pairs_for_time_diff_assertions:
assert_time_difference(index1, index2)
def _get_params(self, op='cloudify.interfaces.lifecycle.create',
op_params=None, run_by_dep=False,
allow_kw_override=None, node_ids=None,
node_instance_ids=None, type_names=None):
return {
'operation': op,
'operation_kwargs': op_params or {},
'run_by_dependency_order': run_by_dep,
'allow_kwargs_override': allow_kw_override,
'node_ids': node_ids or [],
'node_instance_ids': node_instance_ids or [],
'type_names': type_names or []
}
class TestScale(testtools.TestCase):
def setUp(self):
super(TestScale, self).setUp()
blueprint_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"resources/blueprints/test-scale-blueprint.yaml")
self.env = local.init_env(blueprint_path)
def test_no_node(self):
with testtools.ExpectedException(ValueError, ".*mock doesn't exist.*"):
self.env.execute('scale', parameters={'node_id': 'mock'})
def test_zero_delta(self):
# should simply work
self.env.execute('scale', parameters={'node_id': 'node',
'delta': 0})
def test_illegal_delta(self):
with testtools.ExpectedException(ValueError, ".*-1 is illegal.*"):
self.env.execute('scale', parameters={'node_id': 'node',
'delta': -1})
class TestSubgraphWorkflowLogic(testtools.TestCase):
def setUp(self):
super(TestSubgraphWorkflowLogic, self).setUp()
blueprint_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"resources/blueprints/test-subgraph-blueprint.yaml")
self.env = local.init_env(
blueprint_path,
ignored_modules=IGNORED_LOCAL_WORKFLOW_MODULES)
def test_heal_connected_to_relationship_operations_on_on_affected(self):
# Tests CFY-2788 fix
# We run heal on node2 instance. node1 is connected to node2 and node3
# we expect that the establish/unlink operations will only be called
# for node1->node2
node2_instance_id = [i for i in self.env.storage.get_node_instances()
if i.node_id == 'node2'][0].id
self.env.execute('heal', parameters={
'node_instance_id': node2_instance_id})
node1_instance = [i for i in self.env.storage.get_node_instances()
if i.node_id == 'node1'][0]
invocations = node1_instance.runtime_properties['invocations']
self.assertEqual(4, len(invocations))
expected_unlink = invocations[:2]
expected_establish = invocations[2:]
def assertion(actual_invocations, expected_op):
has_source_op = False
has_target_op = False
for invocation in actual_invocations:
if invocation['runs_on'] == 'source':
has_source_op = True
elif invocation['runs_on'] == 'target':
has_target_op = True
else:
self.fail('Unhandled runs_on: {0}'.format(
invocation['runs_on']))
self.assertEqual(invocation['target_node'], 'node2')
self.assertEqual(invocation['operation'], expected_op)
self.assertTrue(all([has_source_op, has_target_op]))
assertion(expected_unlink,
'cloudify.interfaces.relationship_lifecycle.unlink')
assertion(expected_establish,
'cloudify.interfaces.relationship_lifecycle.establish')
@nottest
@operation
def exec_op_test_operation(ctx, **kwargs):
ctx.instance.runtime_properties['test_op_visited'] = True
if kwargs:
ctx.instance.runtime_properties['op_kwargs'] = kwargs
@nottest
@operation
def exec_op_dependency_order_test_operation(ctx, **kwargs):
ctx.instance.runtime_properties['visit_time'] = time.time()
time.sleep(1)
@operation
def source_operation(ctx, **_):
_write_operation(ctx, runs_on='source')
@operation
def target_operation(ctx, **_):
_write_operation(ctx, runs_on='target')
def _write_operation(ctx, runs_on):
invocations = ctx.source.instance.runtime_properties.get('invocations', [])
invocations.append({
'operation': ctx.operation.name,
'target_node': ctx.target.node.name,
'runs_on': runs_on})
ctx.source.instance.runtime_properties['invocations'] = invocations
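# Illustrative sketch (not a test): how the execute_operation workflow above is
# typically driven against a local environment, mirroring the setUp()/execute()
# pattern used by the test classes in this module. The blueprint path is an
# assumption supplied by the caller.
def _example_run_execute_operation(blueprint_path):
    env = local.init_env(blueprint_path,
                         ignored_modules=IGNORED_LOCAL_WORKFLOW_MODULES)
    env.execute('execute_operation', parameters={
        'operation': 'cloudify.interfaces.lifecycle.create',
        'operation_kwargs': {},
        'run_by_dependency_order': False,
        'allow_kwargs_override': None,
        'node_ids': [],
        'node_instance_ids': [],
        'type_names': [],
    })
    return env.storage.get_node_instances()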
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUNSGatewaySummary(NURESTObject):
""" Represents a NSGatewaySummary in the VSD
Notes:
            Summary information such as alarm counts, location, version, and bootstrap status for a Network Services Gateway
"""
__rest_name__ = "nsgatewayssummary"
__resource_name__ = "nsgatewayssummaries"
## Constants
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_BOOTSTRAP_STATUS_NOTIFICATION_APP_REQ_ACK = "NOTIFICATION_APP_REQ_ACK"
CONST_PERSONALITY_NSG = "NSG"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_BOOTSTRAP_STATUS_QUARANTINED = "QUARANTINED"
CONST_BOOTSTRAP_STATUS_REVOKED = "REVOKED"
CONST_PERSONALITY_NSGDUC = "NSGDUC"
CONST_BOOTSTRAP_STATUS_MIGRATING = "MIGRATING"
CONST_BOOTSTRAP_STATUS_CERTIFICATE_SIGNED = "CERTIFICATE_SIGNED"
CONST_BOOTSTRAP_STATUS_ACTIVE = "ACTIVE"
CONST_BOOTSTRAP_STATUS_INACTIVE = "INACTIVE"
CONST_BOOTSTRAP_STATUS_NOTIFICATION_APP_REQ_SENT = "NOTIFICATION_APP_REQ_SENT"
CONST_PERSONALITY_NSGBR = "NSGBR"
def __init__(self, **kwargs):
""" Initializes a NSGatewaySummary instance
Notes:
                You can specify all parameters while calling this method.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> nsgatewaysummary = NUNSGatewaySummary(id=u'xxxx-xxx-xxx-xxx', name=u'NSGatewaySummary')
>>> nsgatewaysummary = NUNSGatewaySummary(data=my_dict)
"""
super(NUNSGatewaySummary, self).__init__()
# Read/Write Attributes
self._nsg_version = None
self._major_alarms_count = None
self._last_updated_by = None
self._last_updated_date = None
self._gateway_id = None
self._gateway_name = None
self._gateway_type = None
self._latitude = None
self._address = None
self._redundant_group_id = None
self._redundant_group_name = None
self._personality = None
self._description = None
self._timezone_id = None
self._minor_alarms_count = None
self._embedded_metadata = None
self._info_alarms_count = None
self._enterprise_id = None
self._entity_scope = None
self._locality = None
self._longitude = None
self._bootstrap_status = None
self._country = None
self._creation_date = None
self._critical_alarms_count = None
self._state = None
self._owner = None
self._external_id = None
self._system_id = None
self.expose_attribute(local_name="nsg_version", remote_name="NSGVersion", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="major_alarms_count", remote_name="majorAlarmsCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="gateway_id", remote_name="gatewayID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="gateway_name", remote_name="gatewayName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="gateway_type", remote_name="gatewayType", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="latitude", remote_name="latitude", attribute_type=float, is_required=False, is_unique=False)
self.expose_attribute(local_name="address", remote_name="address", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="redundant_group_id", remote_name="redundantGroupID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="redundant_group_name", remote_name="redundantGroupName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="personality", remote_name="personality", attribute_type=str, is_required=False, is_unique=False, choices=[u'NSG', u'NSGBR', u'NSGDUC'])
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="timezone_id", remote_name="timezoneID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="minor_alarms_count", remote_name="minorAlarmsCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="info_alarms_count", remote_name="infoAlarmsCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="enterprise_id", remote_name="enterpriseID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="locality", remote_name="locality", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="longitude", remote_name="longitude", attribute_type=float, is_required=False, is_unique=False)
self.expose_attribute(local_name="bootstrap_status", remote_name="bootstrapStatus", attribute_type=str, is_required=False, is_unique=False, choices=[u'ACTIVE', u'CERTIFICATE_SIGNED', u'INACTIVE', u'MIGRATING', u'NOTIFICATION_APP_REQ_ACK', u'NOTIFICATION_APP_REQ_SENT', u'QUARANTINED', u'REVOKED'])
self.expose_attribute(local_name="country", remote_name="country", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="critical_alarms_count", remote_name="criticalAlarmsCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="state", remote_name="state", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
self.expose_attribute(local_name="system_id", remote_name="systemID", attribute_type=str, is_required=False, is_unique=False)
# Fetchers
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def nsg_version(self):
""" Get nsg_version value.
Notes:
The NSG Version (software) as reported during bootstrapping or following an upgrade.
This attribute is named `NSGVersion` in VSD API.
"""
return self._nsg_version
@nsg_version.setter
def nsg_version(self, value):
""" Set nsg_version value.
Notes:
The NSG Version (software) as reported during bootstrapping or following an upgrade.
This attribute is named `NSGVersion` in VSD API.
"""
self._nsg_version = value
@property
def major_alarms_count(self):
""" Get major_alarms_count value.
Notes:
Total number of alarms with MAJOR severity
This attribute is named `majorAlarmsCount` in VSD API.
"""
return self._major_alarms_count
@major_alarms_count.setter
def major_alarms_count(self, value):
""" Set major_alarms_count value.
Notes:
Total number of alarms with MAJOR severity
This attribute is named `majorAlarmsCount` in VSD API.
"""
self._major_alarms_count = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def last_updated_date(self):
""" Get last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
return self._last_updated_date
@last_updated_date.setter
def last_updated_date(self, value):
""" Set last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
self._last_updated_date = value
@property
def gateway_id(self):
""" Get gateway_id value.
Notes:
                The ID of the NSG from which the information was collected.
This attribute is named `gatewayID` in VSD API.
"""
return self._gateway_id
@gateway_id.setter
def gateway_id(self, value):
""" Set gateway_id value.
Notes:
                The ID of the NSG from which the information was collected.
This attribute is named `gatewayID` in VSD API.
"""
self._gateway_id = value
@property
def gateway_name(self):
""" Get gateway_name value.
Notes:
The name of the gateway
This attribute is named `gatewayName` in VSD API.
"""
return self._gateway_name
@gateway_name.setter
def gateway_name(self, value):
""" Set gateway_name value.
Notes:
The name of the gateway
This attribute is named `gatewayName` in VSD API.
"""
self._gateway_name = value
@property
def gateway_type(self):
""" Get gateway_type value.
Notes:
Details on the type of gateway for which the summary is given. For NSGs, the value would be NSGateway.
This attribute is named `gatewayType` in VSD API.
"""
return self._gateway_type
@gateway_type.setter
def gateway_type(self, value):
""" Set gateway_type value.
Notes:
Details on the type of gateway for which the summary is given. For NSGs, the value would be NSGateway.
This attribute is named `gatewayType` in VSD API.
"""
self._gateway_type = value
@property
def latitude(self):
""" Get latitude value.
Notes:
The latitude of the location of the NSG
"""
return self._latitude
@latitude.setter
def latitude(self, value):
""" Set latitude value.
Notes:
The latitude of the location of the NSG
"""
self._latitude = value
@property
def address(self):
""" Get address value.
Notes:
Formatted address including property number, street name, suite or office number of the NSG
"""
return self._address
@address.setter
def address(self, value):
""" Set address value.
Notes:
Formatted address including property number, street name, suite or office number of the NSG
"""
self._address = value
@property
def redundant_group_id(self):
""" Get redundant_group_id value.
Notes:
The ID of the Redundant Group which has this gateway
This attribute is named `redundantGroupID` in VSD API.
"""
return self._redundant_group_id
@redundant_group_id.setter
def redundant_group_id(self, value):
""" Set redundant_group_id value.
Notes:
The ID of the Redundant Group which has this gateway
This attribute is named `redundantGroupID` in VSD API.
"""
self._redundant_group_id = value
@property
def redundant_group_name(self):
""" Get redundant_group_name value.
Notes:
The Name of the Redundant Group which has this gateway
This attribute is named `redundantGroupName` in VSD API.
"""
return self._redundant_group_name
@redundant_group_name.setter
def redundant_group_name(self, value):
""" Set redundant_group_name value.
Notes:
The Name of the Redundant Group which has this gateway
This attribute is named `redundantGroupName` in VSD API.
"""
self._redundant_group_name = value
@property
def personality(self):
""" Get personality value.
Notes:
Personality of the corresponding Network Services Gateway
"""
return self._personality
@personality.setter
def personality(self, value):
""" Set personality value.
Notes:
Personality of the corresponding Network Services Gateway
"""
self._personality = value
@property
def description(self):
""" Get description value.
Notes:
A description of the NSG
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
A description of the NSG
"""
self._description = value
@property
def timezone_id(self):
""" Get timezone_id value.
Notes:
Time zone in which the Gateway is located. This can be in the form of a UTC/GMT offset, continent/city location, or country/region. The available time zones can be found in /usr/share/zoneinfo on a Linux machine or retrieved with TimeZone.getAvailableIDs() in Java. Refer to the IANA (Internet Assigned Numbers Authority) for a list of time zones. URL : http://www.iana.org/time-zones Default value is UTC (translating to Etc/Zulu)
This attribute is named `timezoneID` in VSD API.
"""
return self._timezone_id
@timezone_id.setter
def timezone_id(self, value):
""" Set timezone_id value.
Notes:
Time zone in which the Gateway is located. This can be in the form of a UTC/GMT offset, continent/city location, or country/region. The available time zones can be found in /usr/share/zoneinfo on a Linux machine or retrieved with TimeZone.getAvailableIDs() in Java. Refer to the IANA (Internet Assigned Numbers Authority) for a list of time zones. URL : http://www.iana.org/time-zones Default value is UTC (translating to Etc/Zulu)
This attribute is named `timezoneID` in VSD API.
"""
self._timezone_id = value
@property
def minor_alarms_count(self):
""" Get minor_alarms_count value.
Notes:
Total number of alarms with MINOR severity
This attribute is named `minorAlarmsCount` in VSD API.
"""
return self._minor_alarms_count
@minor_alarms_count.setter
def minor_alarms_count(self, value):
""" Set minor_alarms_count value.
Notes:
Total number of alarms with MINOR severity
This attribute is named `minorAlarmsCount` in VSD API.
"""
self._minor_alarms_count = value
@property
def embedded_metadata(self):
""" Get embedded_metadata value.
Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only up to the maximum number of Metadata objects allowed by the system configuration is returned.
This attribute is named `embeddedMetadata` in VSD API.
"""
return self._embedded_metadata
@embedded_metadata.setter
def embedded_metadata(self, value):
""" Set embedded_metadata value.
Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only up to the maximum number of Metadata objects allowed by the system configuration is returned.
This attribute is named `embeddedMetadata` in VSD API.
"""
self._embedded_metadata = value
@property
def info_alarms_count(self):
""" Get info_alarms_count value.
Notes:
Total number of alarms with INFO severity
This attribute is named `infoAlarmsCount` in VSD API.
"""
return self._info_alarms_count
@info_alarms_count.setter
def info_alarms_count(self, value):
""" Set info_alarms_count value.
Notes:
Total number of alarms with INFO severity
This attribute is named `infoAlarmsCount` in VSD API.
"""
self._info_alarms_count = value
@property
def enterprise_id(self):
""" Get enterprise_id value.
Notes:
The enterprise associated with this NSG
This attribute is named `enterpriseID` in VSD API.
"""
return self._enterprise_id
@enterprise_id.setter
def enterprise_id(self, value):
""" Set enterprise_id value.
Notes:
The enterprise associated with this NSG
This attribute is named `enterpriseID` in VSD API.
"""
self._enterprise_id = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def locality(self):
""" Get locality value.
Notes:
Locality/City/County of the NSG
"""
return self._locality
@locality.setter
def locality(self, value):
""" Set locality value.
Notes:
Locality/City/County of the NSG
"""
self._locality = value
@property
def longitude(self):
""" Get longitude value.
Notes:
The longitude of the location of the NSG
"""
return self._longitude
@longitude.setter
def longitude(self, value):
""" Set longitude value.
Notes:
The longitude of the location of the NSG
"""
self._longitude = value
@property
def bootstrap_status(self):
""" Get bootstrap_status value.
Notes:
Bootstrap status of the NSG
This attribute is named `bootstrapStatus` in VSD API.
"""
return self._bootstrap_status
@bootstrap_status.setter
def bootstrap_status(self, value):
""" Set bootstrap_status value.
Notes:
Bootstrap status of the NSG
This attribute is named `bootstrapStatus` in VSD API.
"""
self._bootstrap_status = value
@property
def country(self):
""" Get country value.
Notes:
Country in which the NSG is located
"""
return self._country
@country.setter
def country(self, value):
""" Set country value.
Notes:
Country in which the NSG is located
"""
self._country = value
@property
def creation_date(self):
""" Get creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
return self._creation_date
@creation_date.setter
def creation_date(self, value):
""" Set creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
self._creation_date = value
@property
def critical_alarms_count(self):
""" Get critical_alarms_count value.
Notes:
Total number of alarms with CRITICAL severity
This attribute is named `criticalAlarmsCount` in VSD API.
"""
return self._critical_alarms_count
@critical_alarms_count.setter
def critical_alarms_count(self, value):
""" Set critical_alarms_count value.
Notes:
Total number of alarms with CRITICAL severity
This attribute is named `criticalAlarmsCount` in VSD API.
"""
self._critical_alarms_count = value
@property
def state(self):
""" Get state value.
Notes:
State/Province/Region
"""
return self._state
@state.setter
def state(self, value):
""" Set state value.
Notes:
State/Province/Region
"""
self._state = value
@property
def owner(self):
""" Get owner value.
Notes:
Identifies the user that has created this object.
"""
return self._owner
@owner.setter
def owner(self, value):
""" Set owner value.
Notes:
Identifies the user that has created this object.
"""
self._owner = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
@property
def system_id(self):
""" Get system_id value.
Notes:
Identifier of the gateway
This attribute is named `systemID` in VSD API.
"""
return self._system_id
@system_id.setter
def system_id(self, value):
""" Set system_id value.
Notes:
Identifier of the gateway
This attribute is named `systemID` in VSD API.
"""
self._system_id = value
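# Illustrative sketch (not part of the generated model above). Following the
# constructor docstring, attributes can be passed as keyword arguments or
# loaded through the special `data` argument; the dict below is assumed to use
# the VSD remote attribute names declared via expose_attribute, and its values
# are placeholders.
def _example_summary_from_dict():
    summary = NUNSGatewaySummary(data={
        'gatewayName': 'nsg-lab-01',     # surfaces as summary.gateway_name
        'majorAlarmsCount': 2,           # surfaces as summary.major_alarms_count
        'bootstrapStatus': 'ACTIVE',     # surfaces as summary.bootstrap_status
    })
    return summary.gateway_name, summary.major_alarms_count, summary.bootstrap_status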
|
|
#!/usr/bin/env python
"""
runtests.py [OPTIONS] [-- ARGS]
Run tests, building the project first.
Examples::
$ python runtests.py
$ python runtests.py -s {SAMPLE_SUBMODULE}
$ python runtests.py -t {SAMPLE_TEST}
$ python runtests.py --ipython
$ python runtests.py --python somescript.py
$ python runtests.py --bench
Run a debugger:
$ gdb --args python runtests.py [...other args...]
Generate C code coverage listing under build/lcov/:
(requires http://ltp.sourceforge.net/coverage/lcov.php)
$ python runtests.py --gcov [...other args...]
$ python runtests.py --lcov-html
"""
#
# This is a generic test runner script for projects using Numpy's test
# framework. Change the following values to adapt to your project:
#
PROJECT_MODULE = "scipy"
PROJECT_ROOT_FILES = ['scipy', 'LICENSE.txt', 'setup.py']
SAMPLE_TEST = "scipy/special/tests/test_basic.py:test_xlogy"
SAMPLE_SUBMODULE = "optimize"
EXTRA_PATH = ['/usr/lib/ccache', '/usr/lib/f90cache',
'/usr/local/lib/ccache', '/usr/local/lib/f90cache']
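# For example, a hypothetical project named "myproj" would adapt the values
# above roughly as follows (illustrative placeholders only, not part of this
# repository):
#
#     PROJECT_MODULE = "myproj"
#     PROJECT_ROOT_FILES = ['myproj', 'LICENSE.txt', 'setup.py']
#     SAMPLE_TEST = "myproj/tests/test_core.py:test_smoke"
#     SAMPLE_SUBMODULE = "core"
#     EXTRA_PATH = []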
# ---------------------------------------------------------------------
if __doc__ is None:
__doc__ = "Run without -OO if you want usage info"
else:
__doc__ = __doc__.format(**globals())
import sys
import os
# In case we are run from the source directory, we don't want to import the
# project from there:
sys.path.pop(0)
import shutil
import subprocess
import time
import imp
from argparse import ArgumentParser, REMAINDER
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("--verbose", "-v", action="count", default=1,
help="more verbosity")
parser.add_argument("--no-build", "-n", action="store_true", default=False,
help="do not build the project (use system installed version)")
parser.add_argument("--build-only", "-b", action="store_true", default=False,
help="just build, do not run any tests")
parser.add_argument("--doctests", action="store_true", default=False,
help="Run doctests in module")
parser.add_argument("--coverage", action="store_true", default=False,
help=("report coverage of project code. HTML output goes "
"under build/coverage"))
parser.add_argument("--gcov", action="store_true", default=False,
help=("enable C code coverage via gcov (requires GCC). "
"gcov output goes to build/**/*.gc*"))
parser.add_argument("--lcov-html", action="store_true", default=False,
help=("produce HTML for C code coverage information "
"from a previous run with --gcov. "
"HTML output goes to build/lcov/"))
parser.add_argument("--mode", "-m", default="fast",
help="'fast', 'full', or something that could be "
"passed to nosetests -A [default: fast]")
parser.add_argument("--submodule", "-s", default=None,
help="Submodule whose tests to run (cluster, constants, ...)")
parser.add_argument("--pythonpath", "-p", default=None,
help="Paths to prepend to PYTHONPATH")
parser.add_argument("--tests", "-t", action='append',
help="Specify tests to run")
parser.add_argument("--python", action="store_true",
help="Start a Python shell with PYTHONPATH set")
parser.add_argument("--ipython", "-i", action="store_true",
help="Start IPython shell with PYTHONPATH set")
parser.add_argument("--shell", action="store_true",
help="Start Unix shell with PYTHONPATH set")
parser.add_argument("--debug", "-g", action="store_true",
help="Debug build")
parser.add_argument("--show-build-log", action="store_true",
help="Show build output rather than using a log file")
parser.add_argument("--bench", action="store_true",
help="Run benchmark suite instead of test suite")
parser.add_argument("args", metavar="ARGS", default=[], nargs=REMAINDER,
help="Arguments to pass to Nose, Python or shell")
args = parser.parse_args(argv)
if args.lcov_html:
# generate C code coverage output
lcov_generate()
sys.exit(0)
if args.pythonpath:
for p in reversed(args.pythonpath.split(os.pathsep)):
sys.path.insert(0, p)
if args.gcov:
gcov_reset_counters()
if not args.no_build:
site_dir = build_project(args)
sys.path.insert(0, site_dir)
os.environ['PYTHONPATH'] = site_dir
extra_argv = args.args[:]
if extra_argv and extra_argv[0] == '--':
extra_argv = extra_argv[1:]
if args.python:
if extra_argv:
# Don't use subprocess, since we don't want to include the
# current path in PYTHONPATH.
sys.argv = extra_argv
with open(extra_argv[0], 'r') as f:
script = f.read()
sys.modules['__main__'] = imp.new_module('__main__')
ns = dict(__name__='__main__',
__file__=extra_argv[0])
exec_(script, ns)
sys.exit(0)
else:
import code
code.interact()
sys.exit(0)
if args.ipython:
import IPython
IPython.embed(user_ns={})
sys.exit(0)
if args.shell:
shell = os.environ.get('SHELL', 'sh')
print("Spawning a Unix shell...")
os.execv(shell, [shell] + extra_argv)
sys.exit(1)
if args.coverage:
dst_dir = os.path.join(ROOT_DIR, 'build', 'coverage')
fn = os.path.join(dst_dir, 'coverage_html.js')
if os.path.isdir(dst_dir) and os.path.isfile(fn):
shutil.rmtree(dst_dir)
extra_argv += ['--cover-html',
'--cover-html-dir='+dst_dir]
test_dir = os.path.join(ROOT_DIR, 'build', 'test')
if args.build_only:
sys.exit(0)
elif args.submodule:
modname = PROJECT_MODULE + '.' + args.submodule
try:
__import__(modname)
if args.bench:
test = sys.modules[modname].bench
else:
test = sys.modules[modname].test
except (ImportError, KeyError, AttributeError) as e:
print("Cannot run tests for %s (%s)" % (modname, e))
sys.exit(2)
elif args.tests:
def fix_test_path(x):
# fix up test path
p = x.split(':')
p[0] = os.path.relpath(os.path.abspath(p[0]),
test_dir)
return ':'.join(p)
tests = [fix_test_path(x) for x in args.tests]
def test(*a, **kw):
extra_argv = kw.pop('extra_argv', ())
extra_argv = extra_argv + tests[1:]
kw['extra_argv'] = extra_argv
from numpy.testing import Tester
if args.bench:
return Tester(tests[0]).bench(*a, **kw)
else:
return Tester(tests[0]).test(*a, **kw)
else:
__import__(PROJECT_MODULE)
if args.bench:
test = sys.modules[PROJECT_MODULE].bench
else:
test = sys.modules[PROJECT_MODULE].test
# Run the tests under build/test
try:
shutil.rmtree(test_dir)
except OSError:
pass
try:
os.makedirs(test_dir)
except OSError:
pass
cwd = os.getcwd()
try:
os.chdir(test_dir)
if args.bench:
result = test(args.mode,
verbose=args.verbose,
extra_argv=extra_argv)
else:
result = test(args.mode,
verbose=args.verbose,
extra_argv=extra_argv,
doctests=args.doctests,
coverage=args.coverage)
finally:
os.chdir(cwd)
if isinstance(result, bool):
sys.exit(0 if result else 1)
elif result.wasSuccessful():
sys.exit(0)
else:
sys.exit(1)
def build_project(args):
"""
Build a dev version of the project.
Returns
-------
site_dir
site-packages directory where it was installed
"""
root_ok = [os.path.exists(os.path.join(ROOT_DIR, fn))
for fn in PROJECT_ROOT_FILES]
if not all(root_ok):
print("To build the project, run runtests.py in "
"git checkout or unpacked source")
sys.exit(1)
dst_dir = os.path.join(ROOT_DIR, 'build', 'testenv')
env = dict(os.environ)
cmd = [sys.executable, 'setup.py']
# Always use ccache, if installed
env['PATH'] = os.pathsep.join(EXTRA_PATH + env.get('PATH', '').split(os.pathsep))
if args.debug or args.gcov:
# assume everyone uses gcc/gfortran
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
if args.gcov:
import distutils.sysconfig
cvars = distutils.sysconfig.get_config_vars()
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
env['CC'] = cvars['CC'] + ' --coverage'
env['CXX'] = cvars['CXX'] + ' --coverage'
env['F77'] = 'gfortran --coverage '
env['F90'] = 'gfortran --coverage '
env['LDSHARED'] = cvars['LDSHARED'] + ' --coverage'
env['LDFLAGS'] = " ".join(cvars['LDSHARED'].split()[1:]) + ' --coverage'
cmd += ["build"]
cmd += ['install', '--prefix=' + dst_dir]
log_filename = os.path.join(ROOT_DIR, 'build.log')
if args.show_build_log:
ret = subprocess.call(cmd, env=env, cwd=ROOT_DIR)
else:
log_filename = os.path.join(ROOT_DIR, 'build.log')
print("Building, see build.log...")
with open(log_filename, 'w') as log:
p = subprocess.Popen(cmd, env=env, stdout=log, stderr=log,
cwd=ROOT_DIR)
# Wait for it to finish, and print something to indicate the
# process is alive, but only if the log file has grown (to
# allow continuous integration environments to kill a hanging
# process accurately if it produces no output)
last_blip = time.time()
last_log_size = os.stat(log_filename).st_size
while p.poll() is None:
time.sleep(0.5)
if time.time() - last_blip > 60:
log_size = os.stat(log_filename).st_size
if log_size > last_log_size:
print(" ... build in progress")
last_blip = time.time()
last_log_size = log_size
ret = p.wait()
if ret == 0:
print("Build OK")
else:
if not args.show_build_log:
with open(log_filename, 'r') as f:
print(f.read())
print("Build failed!")
sys.exit(1)
from distutils.sysconfig import get_python_lib
site_dir = get_python_lib(prefix=dst_dir, plat_specific=True)
return site_dir
#
# GCOV support
#
def gcov_reset_counters():
print("Removing previous GCOV .gcda files...")
build_dir = os.path.join(ROOT_DIR, 'build')
for dirpath, dirnames, filenames in os.walk(build_dir):
for fn in filenames:
if fn.endswith('.gcda') or fn.endswith('.da'):
pth = os.path.join(dirpath, fn)
os.unlink(pth)
#
# LCOV support
#
LCOV_OUTPUT_FILE = os.path.join(ROOT_DIR, 'build', 'lcov.out')
LCOV_HTML_DIR = os.path.join(ROOT_DIR, 'build', 'lcov')
def lcov_generate():
try: os.unlink(LCOV_OUTPUT_FILE)
except OSError: pass
try: shutil.rmtree(LCOV_HTML_DIR)
except OSError: pass
print("Capturing lcov info...")
subprocess.call(['lcov', '-q', '-c',
'-d', os.path.join(ROOT_DIR, 'build'),
'-b', ROOT_DIR,
'--output-file', LCOV_OUTPUT_FILE])
print("Generating lcov HTML output...")
ret = subprocess.call(['genhtml', '-q', LCOV_OUTPUT_FILE,
'--output-directory', LCOV_HTML_DIR,
'--legend', '--highlight'])
if ret != 0:
print("genhtml failed!")
else:
print("HTML output generated under build/lcov/")
#
# Python 3 support
#
if sys.version_info[0] >= 3:
import builtins
exec_ = getattr(builtins, "exec")
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
if __name__ == "__main__":
main(argv=sys.argv[1:])
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""TensorFlow API compatibility tests.
This test ensures all changes to the public API of TensorFlow are intended.
If this test fails, it means a change has been made to the public API. Backwards
incompatible changes are not allowed. You can run the test with the
"--update_goldens" flag set to "True" to update the goldens when making
changes to the public TF Python API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import re
import sys
import unittest
import tensorflow as tf
from tensorflow import experimental_api as api
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tools.api.lib import api_objects_pb2
from tensorflow.tools.api.lib import python_object_to_proto_visitor
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
if hasattr(tf, 'experimental_api'):
del tf.experimental_api
# FLAGS defined at the bottom:
FLAGS = None
# DEFINE_boolean, update_goldens, default False:
_UPDATE_GOLDENS_HELP = """
Update stored golden files if API is updated. WARNING: All API changes
have to be authorized by TensorFlow leads.
"""
# DEFINE_boolean, verbose_diffs, default True:
_VERBOSE_DIFFS_HELP = """
If set to true, print line-by-line diffs for all libraries. If set to
false, only print which libraries have differences.
"""
_API_GOLDEN_FOLDER = 'tensorflow/tools/api/golden'
_TEST_README_FILE = 'tensorflow/tools/api/tests/README.txt'
_UPDATE_WARNING_FILE = 'tensorflow/tools/api/tests/API_UPDATE_WARNING.txt'
def _KeyToFilePath(key):
"""From a given key, construct a filepath."""
def _ReplaceCapsWithDash(matchobj):
match = matchobj.group(0)
return '-%s' % (match.lower())
case_insensitive_key = re.sub('([A-Z]{1})', _ReplaceCapsWithDash, key)
return os.path.join(_API_GOLDEN_FOLDER, '%s.pbtxt' % case_insensitive_key)
def _FileNameToKey(filename):
"""From a given filename, construct a key we use for api objects."""
def _ReplaceDashWithCaps(matchobj):
match = matchobj.group(0)
return match[1].upper()
base_filename = os.path.basename(filename)
base_filename_without_ext = os.path.splitext(base_filename)[0]
api_object_key = re.sub(
'((-[a-z]){1})', _ReplaceDashWithCaps, base_filename_without_ext)
return api_object_key
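# Illustrative round trip (added, not part of the original test): the two
# helpers above are inverses for CamelCase API names, e.g.
#
#   _KeyToFilePath('tensorflow.GPUOptions')
#       -> 'tensorflow/tools/api/golden/tensorflow.-g-p-u-options.pbtxt'
#   _FileNameToKey('tensorflow.-g-p-u-options.pbtxt')
#       -> 'tensorflow.GPUOptions'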
class ApiCompatibilityTest(test.TestCase):
def __init__(self, *args, **kwargs):
super(ApiCompatibilityTest, self).__init__(*args, **kwargs)
golden_update_warning_filename = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
_UPDATE_WARNING_FILE)
self._update_golden_warning = file_io.read_file_to_string(
golden_update_warning_filename)
test_readme_filename = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
_TEST_README_FILE)
self._test_readme_message = file_io.read_file_to_string(
test_readme_filename)
def _AssertProtoDictEquals(self,
expected_dict,
actual_dict,
verbose=False,
update_goldens=False,
additional_missing_object_message=''):
"""Diff given dicts of protobufs and report differences a readable way.
Args:
expected_dict: a dict of TFAPIObject protos constructed from golden
files.
actual_dict: a dict of TFAPIObject protos constructed by reading from the
TF package linked to the test.
verbose: Whether to log the full diffs, or simply report which files were
different.
update_goldens: Whether to update goldens when there are diffs found.
additional_missing_object_message: Message to print when a symbol is
missing.
"""
diffs = []
verbose_diffs = []
expected_keys = set(expected_dict.keys())
actual_keys = set(actual_dict.keys())
only_in_expected = expected_keys - actual_keys
only_in_actual = actual_keys - expected_keys
all_keys = expected_keys | actual_keys
# This will be populated below.
updated_keys = []
for key in all_keys:
diff_message = ''
verbose_diff_message = ''
# First check if the key is not found in one or the other.
if key in only_in_expected:
diff_message = 'Object %s expected but not found (removed). %s' % (
key, additional_missing_object_message)
verbose_diff_message = diff_message
elif key in only_in_actual:
diff_message = 'New object %s found (added).' % key
verbose_diff_message = diff_message
else:
# Now we can run an actual proto diff.
try:
self.assertProtoEquals(expected_dict[key], actual_dict[key])
except AssertionError as e:
updated_keys.append(key)
diff_message = 'Change detected in python object: %s.' % key
verbose_diff_message = str(e)
# All difference cases covered above. If any difference found, add to the
# list.
if diff_message:
diffs.append(diff_message)
verbose_diffs.append(verbose_diff_message)
# If diffs are found, handle them based on flags.
if diffs:
diff_count = len(diffs)
logging.error(self._test_readme_message)
logging.error('%d differences found between API and golden.', diff_count)
messages = verbose_diffs if verbose else diffs
for i in range(diff_count):
print('Issue %d\t: %s' % (i + 1, messages[i]), file=sys.stderr)
if update_goldens:
# Write files if requested.
logging.warning(self._update_golden_warning)
# If the keys are only in expected, some objects are deleted.
# Remove files.
for key in only_in_expected:
filepath = _KeyToFilePath(key)
file_io.delete_file(filepath)
# If the keys are only in actual (current library), these are new
# modules. Write them to files. Also record all updates in files.
for key in only_in_actual | set(updated_keys):
filepath = _KeyToFilePath(key)
file_io.write_string_to_file(
filepath, text_format.MessageToString(actual_dict[key]))
else:
# Fail if we cannot fix the test by updating goldens.
self.fail('%d differences found between API and golden.' % diff_count)
else:
logging.info('No differences found between API and golden.')
@unittest.skipUnless(
sys.version_info.major == 2,
'API compatibility test goldens are generated using python2.')
def testAPIBackwardsCompatibility(self):
# Extract all API stuff.
visitor = python_object_to_proto_visitor.PythonObjectToProtoVisitor()
public_api_visitor = public_api.PublicAPIVisitor(visitor)
public_api_visitor.do_not_descend_map['tf'].append('contrib')
public_api_visitor.do_not_descend_map['tf.GPUOptions'] = ['Experimental']
traverse.traverse(tf, public_api_visitor)
proto_dict = visitor.GetProtos()
# Read all golden files.
expression = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
_KeyToFilePath('*'))
golden_file_list = file_io.get_matching_files(expression)
def _ReadFileToProto(filename):
"""Read a filename, create a protobuf from its contents."""
ret_val = api_objects_pb2.TFAPIObject()
text_format.Merge(file_io.read_file_to_string(filename), ret_val)
return ret_val
golden_proto_dict = {
_FileNameToKey(filename): _ReadFileToProto(filename)
for filename in golden_file_list
}
# Diff them. Do not fail if called with update.
# If the test is run to update goldens, only report diffs but do not fail.
self._AssertProtoDictEquals(
golden_proto_dict,
proto_dict,
verbose=FLAGS.verbose_diffs,
update_goldens=FLAGS.update_goldens)
@unittest.skipUnless(
sys.version_info.major == 2,
'API compatibility test goldens are generated using python2.')
def testNewAPIBackwardsCompatibility(self):
# Extract all API stuff.
visitor = python_object_to_proto_visitor.PythonObjectToProtoVisitor()
public_api_visitor = public_api.PublicAPIVisitor(visitor)
public_api_visitor.do_not_descend_map['tf'].append('contrib')
public_api_visitor.do_not_descend_map['tf.GPUOptions'] = ['Experimental']
# TODO(annarev): Make slide_dataset available in API.
public_api_visitor.private_map['tf'] = ['slide_dataset']
traverse.traverse(api, public_api_visitor)
proto_dict = visitor.GetProtos()
# Read all golden files.
expression = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
_KeyToFilePath('*'))
golden_file_list = file_io.get_matching_files(expression)
def _ReadFileToProto(filename):
"""Read a filename, create a protobuf from its contents."""
ret_val = api_objects_pb2.TFAPIObject()
text_format.Merge(file_io.read_file_to_string(filename), ret_val)
return ret_val
golden_proto_dict = {
_FileNameToKey(filename): _ReadFileToProto(filename)
for filename in golden_file_list
}
# Diff them. Do not fail if called with update.
# If the test is run to update goldens, only report diffs but do not fail.
self._AssertProtoDictEquals(
golden_proto_dict,
proto_dict,
verbose=FLAGS.verbose_diffs,
update_goldens=False,
additional_missing_object_message=
'Check if tf_export decorator/call is missing for this symbol.')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--update_goldens', type=bool, default=False, help=_UPDATE_GOLDENS_HELP)
parser.add_argument(
'--verbose_diffs', type=bool, default=True, help=_VERBOSE_DIFFS_HELP)
FLAGS, unparsed = parser.parse_known_args()
# Now update argv, so that unittest library does not get confused.
sys.argv = [sys.argv[0]] + unparsed
test.main()
|
|
import sys, imp, marshal
from imp import PKG_DIRECTORY, PY_COMPILED, PY_SOURCE, PY_FROZEN
from distutils.version import StrictVersion, LooseVersion
__all__ = [
'Require', 'find_module', 'get_module_constant', 'extract_constant'
]
class Require:
"""A prerequisite to building or installing a distribution"""
def __init__(self,name,requested_version,module,homepage='',
attribute=None,format=None
):
if format is None and requested_version is not None:
format = StrictVersion
if format is not None:
requested_version = format(requested_version)
if attribute is None:
attribute = '__version__'
self.__dict__.update(locals())
del self.self
def full_name(self):
"""Return full package/distribution name, w/version"""
if self.requested_version is not None:
return '%s-%s' % (self.name,self.requested_version)
return self.name
def version_ok(self,version):
"""Is 'version' sufficiently up-to-date?"""
return self.attribute is None or self.format is None or \
str(version)!="unknown" and version >= self.requested_version
def get_version(self, paths=None, default="unknown"):
"""Get version number of installed module, 'None', or 'default'
Search 'paths' for module. If not found, return 'None'. If found,
return the extracted version attribute, or 'default' if no version
attribute was specified, or the value cannot be determined without
importing the module. The version is formatted according to the
requirement's version format (if any), unless it is 'None' or the
supplied 'default'.
"""
if self.attribute is None:
try:
f,p,i = find_module(self.module,paths)
if f: f.close()
return default
except ImportError:
return None
v = get_module_constant(self.module,self.attribute,default,paths)
if v is not None and v is not default and self.format is not None:
return self.format(v)
return v
def is_present(self,paths=None):
"""Return true if dependency is present on 'paths'"""
return self.get_version(paths) is not None
def is_current(self,paths=None):
"""Return true if dependency is present and up-to-date on 'paths'"""
version = self.get_version(paths)
if version is None:
return False
return self.version_ok(version)
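# Illustrative usage sketch (added, not part of the original module); the
# package and version below are only examples:
#
#   req = Require('Jinja2', '2.6', 'jinja2')
#   req.full_name()     # -> 'Jinja2-2.6'
#   req.is_present()    # True if the jinja2 module can be found on sys.path
#   req.is_current()    # True if jinja2.__version__ parses as >= 2.6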
def _iter_code(code):
"""Yield '(op,arg)' pair for each operation in code object 'code'"""
from array import array
from dis import HAVE_ARGUMENT, EXTENDED_ARG
bytes = array('b',code.co_code)
eof = len(code.co_code)
ptr = 0
extended_arg = 0
while ptr<eof:
op = bytes[ptr]
if op>=HAVE_ARGUMENT:
arg = bytes[ptr+1] + bytes[ptr+2]*256 + extended_arg
ptr += 3
if op==EXTENDED_ARG:
extended_arg = arg * 65536
continue
else:
arg = None
ptr += 1
yield op,arg
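# Descriptive note (added for clarity, not in the original): the decoding
# above matches CPython 2.x bytecode, where every opcode >= HAVE_ARGUMENT is
# followed by a 2-byte argument (low byte first) and EXTENDED_ARG supplies
# the high 16 bits of the next instruction's argument.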
def find_module(module, paths=None):
"""Just like 'imp.find_module()', but with package support"""
parts = module.split('.')
while parts:
part = parts.pop(0)
f, path, (suffix,mode,kind) = info = imp.find_module(part, paths)
if kind==PKG_DIRECTORY:
parts = parts or ['__init__']
paths = [path]
elif parts:
raise ImportError("Can't find %r in %s" % (parts,module))
return info
def get_module_constant(module, symbol, default=-1, paths=None):
"""Find 'module' by searching 'paths', and extract 'symbol'
Return 'None' if 'module' does not exist on 'paths', or it does not define
'symbol'. If the module defines 'symbol' as a constant, return the
constant. Otherwise, return 'default'."""
try:
f, path, (suffix,mode,kind) = find_module(module,paths)
except ImportError:
# Module doesn't exist
return None
try:
if kind==PY_COMPILED:
f.read(8) # skip magic & date
code = marshal.load(f)
elif kind==PY_FROZEN:
code = imp.get_frozen_object(module)
elif kind==PY_SOURCE:
code = compile(f.read(), path, 'exec')
else:
# Not something we can parse; we'll have to import it. :(
if module not in sys.modules:
imp.load_module(module,f,path,(suffix,mode,kind))
return getattr(sys.modules[module],symbol,None)
finally:
if f:
f.close()
return extract_constant(code,symbol,default)
def extract_constant(code,symbol,default=-1):
"""Extract the constant value of 'symbol' from 'code'
If the name 'symbol' is bound to a constant value by the Python code
object 'code', return that value. If 'symbol' is bound to an expression,
return 'default'. Otherwise, return 'None'.
Return value is based on the first assignment to 'symbol'. 'symbol' must
be a global, or at least a non-"fast" local in the code block. That is,
only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
must be present in 'code.co_names'.
"""
if symbol not in code.co_names:
# name's not there, can't possibly be an assignment
return None
name_idx = list(code.co_names).index(symbol)
STORE_NAME = 90
STORE_GLOBAL = 97
LOAD_CONST = 100
const = default
for op, arg in _iter_code(code):
if op==LOAD_CONST:
const = code.co_consts[arg]
elif arg==name_idx and (op==STORE_NAME or op==STORE_GLOBAL):
return const
else:
const = default
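# Illustrative example (added, not part of the original module): extracting a
# version string from compiled source without importing it:
#
#   code = compile("__version__ = '1.0'\n", '<string>', 'exec')
#   extract_constant(code, '__version__')   # -> '1.0'
#   extract_constant(code, 'missing_name')  # -> None (name not in co_names)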
if sys.platform.startswith('java') or sys.platform == 'cli':
# XXX it'd be better to test assertions about bytecode instead...
del extract_constant, get_module_constant
__all__.remove('extract_constant')
__all__.remove('get_module_constant')
|
|
import collections
import datetime
import inspect
import os
import shutil
import time
import types
import uuid
import jinja2
from nile.common import log as logging
from nile.common import loopingcall
from nile.common import importutils
from nile.common import timeutils
from passlib import utils as passlib_utils
import six.moves.urllib.parse as urlparse
from nile.common import cfg
from nile.common import exception
from nile.common.i18n import _
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
import_class = importutils.import_class
import_object = importutils.import_object
import_module = importutils.import_module
isotime = timeutils.isotime
ENV = jinja2.Environment(loader=jinja2.ChoiceLoader([
jinja2.FileSystemLoader(''),
jinja2.PackageLoader("nile", "templates")
]))
def create_method_args_string(*args, **kwargs):
"""Returns a string representation of args and keyword args.
I.e. for args=(1, 2, 3) and kwargs={'a': 4, 'b': 5} you'd get: "1, 2, 3, 'a'=4, 'b'=5"
"""
# %s would also turn a value into a string, but in some rare cases an
# explicit repr() is less likely to raise an exception.
arg_strs = [repr(arg) for arg in args]
arg_strs += ['%s=%s' % (repr(key), repr(value))
for (key, value) in kwargs.items()]
return ', '.join(arg_strs)
def stringify_keys(dictionary):
if dictionary is None:
return None
return {str(key): value for key, value in dictionary.iteritems()}
def exclude(key_values, *exclude_keys):
if key_values is None:
return None
return {key: value for key, value in key_values.iteritems()
if key not in exclude_keys}
def generate_uuid():
return str(uuid.uuid4())
def utcnow():
return datetime.datetime.utcnow()
def raise_if_process_errored(process, exception):
try:
err = process.stderr.read()
if err:
raise exception(err)
except OSError:
pass
def clean_out(folder):
for root, dirs, files in os.walk(folder):
for f in files:
os.unlink(os.path.join(root, f))
for d in dirs:
shutil.rmtree(os.path.join(root, d))
class cached_property(object):
"""A decorator that converts a function into a lazy property.
Taken from : https://github.com/nshah/python-memoize
The function wrapped is called the first time to retrieve the result
and then the calculated result is used the next time you access
the value:
class Foo(object):
@cached_property
def bar(self):
# calculate something important here
return 42
"""
def __init__(self, func, name=None, doc=None):
self.func = func
self.__name__ = name or func.__name__
self.__doc__ = doc or func.__doc__
def __get__(self, obj, type=None):
if obj is None:
return self
value = self.func(obj)
setattr(obj, self.__name__, value)
return value
class MethodInspector(object):
def __init__(self, func):
self._func = func
@cached_property
def required_args(self):
return self.args[0:self.required_args_count]
@cached_property
def optional_args(self):
keys = self.args[self.required_args_count: len(self.args)]
return zip(keys, self.defaults)
@cached_property
def defaults(self):
return self.argspec.defaults or ()
@cached_property
def required_args_count(self):
return len(self.args) - len(self.defaults)
@cached_property
def args(self):
args = self.argspec.args
if inspect.ismethod(self._func):
args.pop(0)
return args
@cached_property
def argspec(self):
return inspect.getargspec(self._func)
def __str__(self):
optionals = ["[{0}=<{0}>]".format(k) for k, v in self.optional_args]
required = ["{0}=<{0}>".format(arg) for arg in self.required_args]
args_str = ' '.join(required + optionals)
return "%s %s" % (self._func.__name__, args_str)
def build_polling_task(retriever, condition=lambda value: value,
sleep_time=1, time_out=None):
start_time = time.time()
def poll_and_check():
obj = retriever()
if condition(obj):
raise loopingcall.LoopingCallDone(retvalue=obj)
if time_out is not None and time.time() - start_time > time_out:
raise exception.PollTimeOut
return loopingcall.FixedIntervalLoopingCall(
f=poll_and_check).start(sleep_time, True)
def poll_until(retriever, condition=lambda value: value,
sleep_time=1, time_out=None):
"""Retrieves object until it passes condition, then returns it.
If time_out_limit is passed in, PollTimeOut will be raised once that
amount of time is eclipsed.
"""
return build_polling_task(retriever, condition=condition,
sleep_time=sleep_time, time_out=time_out).wait()
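# Illustrative usage sketch (added, not part of the original module);
# `get_status` is a hypothetical retriever:
#
#   status = poll_until(get_status,
#                       condition=lambda s: s == 'ACTIVE',
#                       sleep_time=2, time_out=60)
#   # returns the first value for which the condition holds, or raises
#   # exception.PollTimeOut after roughly 60 seconds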
def get_id_from_href(href):
"""Return the id or uuid portion of a url.
Given: 'http://www.foo.com/bar/123?q=4'
Returns: '123'
Given: 'http://www.foo.com/bar/abc123?q=4'
Returns: 'abc123'
"""
return urlparse.urlsplit("%s" % href).path.split('/')[-1]
def correct_id_with_req(id, request):
routing_args = request.environ.get('wsgiorg.routing_args', [])
for routing_arg in routing_args:
try:
found = routing_arg.get('format', '')
if found and found not in CONF.expected_filetype_suffixes:
return "%s.%s" % (id, found)
except (AttributeError, KeyError):
# Not the relevant routing_args entry.
pass
return id
def generate_random_password(password_length=36):
return passlib_utils.generate_password(size=password_length)
def try_recover(func):
def _decorator(*args, **kwargs):
recover_func = kwargs.pop("recover_func", None)
try:
func(*args, **kwargs)
except Exception:
if recover_func is not None:
recover_func(func)
else:
LOG.debug("No recovery method defined for %(func)s" % {
'func': func.__name__})
raise
return _decorator
def gen_ports(portstr):
from_port, sep, to_port = portstr.partition('-')
if not (to_port and from_port):
if not sep:
to_port = from_port
if int(from_port) > int(to_port):
raise ValueError
return from_port, to_port
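# Illustrative examples (added, not part of the original module):
#
#   gen_ports('80-8080')  # -> ('80', '8080')
#   gen_ports('22')       # -> ('22', '22')   (single port expands to a range)
#   gen_ports('90-80')    # raises ValueError (from_port greater than to_port)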
def unpack_singleton(container):
"""Unpack singleton collections.
Check whether a given collection is a singleton (has exactly one element)
and unpack it if that is the case.
Return the original collection otherwise.
"""
if is_collection(container) and len(container) == 1:
return unpack_singleton(container[0])
return container
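# Illustrative examples (added, not part of the original module):
#
#   unpack_singleton([['a']])   # -> 'a'     (nested singletons are unwrapped)
#   unpack_singleton([1, 2])    # -> [1, 2]  (left untouched)
#   unpack_singleton('text')    # -> 'text'  (strings are not collections here)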
def is_collection(item):
"""Return True is a given item is an iterable collection, but not a string.
"""
return (isinstance(item, collections.Iterable) and
not isinstance(item, types.StringTypes))
def get_server_ip(dict):
try:
key = str(dict).split('{\'')[1].split('\':')[0]
for item in dict[key]:
return item['addr']
except Exception,e:
key = str(dict).split('{u\'')[1].split('\':')[0]
for item in dict[key]:
return item['addr']
LOG.error(_("get_server_ip error %s.") %(e))
|
|
#
# Copyright 2015 LinkedIn Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
import datetime
import FileUtil
import json
import os
import re
import sys
from com.ziclix.python.sql import zxJDBC
from org.slf4j import LoggerFactory
from wherehows.common.schemas import SampleDataRecord
from wherehows.common.writers import FileWriter
from wherehows.common import Constant
class TeradataExtract:
def __init__(self):
self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
def get_view_info(self, database_name, view_name):
"""
:param database_name:
:param view_name:
:return:
"""
view_cols = []
curs_vw = self.conn_td.cursor()
view_sql = '''
SELECT Trim(DatabaseName) DatabaseName, Trim(TableName) TableName, RequestText,
CreateTimestamp(CHAR(19)), LastAlterTimestamp(CHAR(19)), AccessCount
FROM DBC.Tables
WHERE DatabaseName = '%s'
AND TableName NOT LIKE ALL ('!_%%', '#%%', 'TMP%%', 'TEMP%%') ESCAPE '!'
AND TableKind = 'V' ORDER BY 2''' % database_name
if not view_name is None:
view_sql = view_sql + ''' AND TableName = '%s' ''' % view_name
curs_vw.execute(view_sql)
views = curs_vw.fetchall()
for vw in views:
try:
# table_sql = 'CREATE VOLATILE TABLE vt_get_column_info AS (SELECT * FROM %s.%s) WITH NO DATA NO PRIMARY INDEX' % (vw[0], vw[1])
help_column_sql = 'help column %s.%s.*' % (vw[0], vw[1])
curs_vw.execute(help_column_sql)
rows = curs_vw.fetchall()
except Exception, e:
# print str(e), vw[1], len(views)
continue
for r in rows:
column_name = r[0].strip()
data_type = r[1].strip()
nullable = r[2].strip()
format = r[3].strip()
max_length = r[4] & 0xffff
decimal_total_digits = r[5] & 0xffff if r[5] else None
decimal_fractional_digits = r[6] & 0xffff if r[6] else (0 if r[5] else None)
char_type = r[16] & 0xffff if r[16] else None
if data_type == 'I1':
data_type = 'BYTEINT'
elif data_type == 'I2':
data_type = 'SMALLINT'
elif data_type == 'I':
data_type = 'INT'
elif data_type == 'F':
data_type = 'FLOAT'
elif data_type == 'I8':
data_type = 'BIGINT'
elif data_type == 'DA':
data_type = 'DATE'
elif data_type == 'AT':
data_type = 'TIME'
elif data_type == 'TS':
data_type = 'TIMESTAMP'
elif data_type == 'SZ':
data_type = 'TIMESTAMP WITH TIMEZONE'
elif data_type == 'TZ':
data_type = 'TIME WITH TIMEZONE'
elif data_type == 'CO':
data_type = 'CLOB'
elif data_type == 'CF':
data_type = 'CHAR(' + str(max_length / char_type) + ')'
elif data_type == 'CV':
data_type = 'VARCHAR(' + str(max_length / char_type) + ')'
elif data_type == 'BV':
data_type = 'VARBYTE(' + str(max_length) + ')'
elif data_type == 'D':
data_type = 'DECIMAL(' + str(decimal_total_digits) + ',' + str(decimal_fractional_digits) + ')'
if data_type not in ['DATE', 'TIME', 'TIMESTAMP', 'TIME WITH TIMEZONE', 'TIMESTAMP WITH TIMEZONE']:
format = ''
view_cols.append((
vw[0], vw[1], vw[2], str(vw[3]), str(vw[4]), vw[5], column_name, format, nullable, data_type, max_length,
decimal_total_digits, decimal_fractional_digits))
curs_vw.close()
return view_cols
def get_table_info(self, database_name, table_name):
"""
Get table and column info from Teradata DBC.Tables, DBC.Columns and DBC.ColumnStatsV.
:param database_name:
:param table_name: not used in common case
:return:
"""
td_table_name_filter = ''
if not database_name is None:
td_database_name = database_name
elif len(table_name) > 0:
if table_name.find('.') > 0:
(td_database_name, td_table_name) = table_name.split('.')
else:
td_database_name = self.default_database
td_table_name = table_name
td_table_name_filter = "AND a.TableName = '%s' " % td_table_name
curs_td = self.conn_td.cursor()
col_stats_sql = """SELECT c.DatabaseName,
c.TableName2 TableName,
c.CreateTimestamp (FORMAT 'YYYY-MM-DDBHH:MI:SS') (CHAR(19)) createTimestamp,
c.LastAlterTimestamp (CHAR(19)) TableLastAlterTimestamp,
c.LastAccessTimestamp (FORMAT 'YYYY-MM-DDBHH:MI:SS') (CHAR(19)) LastAccessTimestamp,
c.TableAccessCount,
RTRIM(a.columnname) ColumnName,
CASE
WHEN a.columntype IN ('DA','AT','TS','SZ','TZ') THEN
RTrim(a.ColumnFormat)
ELSE
NULL
END ColumnFormat,
a.DefaultValue,
a.nullable,
a.LastAccessTimestamp (FORMAT 'YYYY-MM-DDBHH:MI:SS') (CHAR(19)) LastAccessTimestamp,
a.AccessCount,
b.UniqueValueCount (bigint) UniqueValueCount,
b.LastCollectTimestamp (FORMAT 'YYYY-MM-DDBHH:MI:SS') (CHAR(19)) LastCollectTimestamp,
CASE
WHEN a.columntype = 'I1' THEN 'BYTEINT'
WHEN a.columntype = 'I2' THEN 'SMALLINT'
WHEN a.columntype = 'I' THEN 'INT'
WHEN a.columntype = 'F' THEN 'FLOAT'
WHEN a.columntype = 'I8' THEN 'BIGINT'
WHEN a.columntype = 'DA' THEN 'DATE'
WHEN a.columntype = 'AT' THEN 'TIME'
WHEN a.columntype = 'TS' THEN 'TIMESTAMP'
WHEN a.columntype = 'SZ' THEN 'TIMESTAMP WITH TIMEZONE'
WHEN a.columntype = 'TZ' THEN 'TIME WITH TIMEZONE'
WHEN a.columntype = 'CO' THEN 'CLOB'
WHEN a.columntype = 'BV' THEN 'VARBYTE(' || ColumnLength || ')'
WHEN a.columntype = 'CF' THEN 'CHAR('|| TRIM(a.ColumnLength/a.CharType) || ')'
WHEN a.columntype = 'CV' THEN 'VARCHAR('|| TRIM(a.ColumnLength/a.CharType) || ')'
WHEN a.columntype = 'D' THEN 'DECIMAL(' || TRIM(a.DecimalTotalDigits) || ',' || TRIM(a.DecimalFractionalDigits) || ')'
END Data_Type,
a.ColumnLength,
a.DecimalTotalDigits,
a.DecimalFractionalDigits,
a.ColumnId Column_Id,
RTrim(c.CreatorName) CreatorName,
RTrim(c.TableName) OriginalTableName
FROM (
select RTrim(a.DatabaseName) DatabaseName,
case when regexp_similar(a.tableName, '[[:alnum:]_]+[[:digit:]]{8}([^[:digit:]]|$).*', 'c') = 1 then
case when regexp_substr(a.TableName, '([[:digit:]]{8})([^[:digit:]]|$)') between '20000101' and '20991231'
then rtrim(regexp_replace(a.tableName, '([[:digit:]]{8})', '${YYYYMMDD}', 1, 1, 'c'))
when regexp_substr(a.TableName, '([[:digit:]]{8})([^[:digit:]]|$)') between '01012000' and '12312099'
then rtrim(regexp_replace(a.tableName, '([[:digit:]]{8})', '${MMDDYYYY}', 1, 1, 'c'))
else RTRIM(a.tablename)
end
when regexp_similar(a.tableName, '[[:alnum:]_]+[[:digit:]]{4}_[[:digit:]]{2}([^[:digit:]]|$).*', 'c') = 1
and regexp_substr(a.TableName, '([[:digit:]]{4})_[[:digit:]]{2}([^[:digit:]]|$)') between '2000_01' and '9999_12'
then rtrim(regexp_replace(a.tableName, '([[:digit:]]{4}_[[:digit:]]{2})', '${YYYY_MM}', 1, 1, 'c'))
when regexp_similar(a.tableName, '[[:alnum:]_]+[[:digit:]]{6}([^[:digit:]]|$).*', 'c') = 1
and regexp_substr(a.TableName, '([[:digit:]]{6})([^[:digit:]]|$)') between '200001' and '999912'
then rtrim(regexp_replace(a.tableName, '([[:digit:]]{6})', '${YYYYMM}', 1, 1, 'c'))
else RTRIM(a.tablename)
end TableName2,
a.TableName,
a.CreateTimestamp,
a.LastAlterTimestamp,
a.LastAccessTimestamp,
a.AccessCount TableAccessCount,
a.CreatorName
from DBC.Tables a where a.TableKind IN ('T', 'O')
AND a.DatabaseName = '%s'
%s
AND a.TableName NOT LIKE ALL ('INFA%%', 'tmp!_%%', 'temp!_%%', '!_%%', '#%%', 'ET!_%%', 'LS!_%%', 'VT!_%%', 'LOGTABLE%%', 'backup%%', 'bkp%%', 'W!_%%') ESCAPE '!'
AND RTRIM(a.TableName) NOT LIKE ALL ('%%!_tmp', '%%!_temp', '%%!_ERR!_.', '%%!_bkp', '%%!_backup') ESCAPE '!'
AND REGEXP_SIMILAR(RTRIM(a.TableName), '.*_tmp_[0-9]+','i') = 0
AND REGEXP_SIMILAR(RTRIM(a.TableName), '.*_tmp[0-9]+','i') = 0
QUALIFY RANK() OVER (PARTITION BY DatabaseName, TableName2 ORDER BY a.TableName desc) = 1
) c
JOIN
DBC.Columns a
ON (c.databasename = a.databasename AND
c.tablename = a.tablename)
LEFT OUTER JOIN
DBC.ColumnStatsV b
ON (a.databasename = b.databasename AND
a.tablename = b.tablename AND
a.columnname = b.columnname)
ORDER BY 1, 2, a.ColumnId """ % (td_database_name, td_table_name_filter)
curs_td.execute(col_stats_sql)
rows = curs_td.fetchall()
curs_td.close()
return rows
def get_extra_table_info(self, database_name):
"""
Index, Partition, Size info
:param database_name:
:return: size, partition, indice
"""
table_size_sql = """select RTrim(TableName), cast(sum(CurrentPerm)/1024/1024 as BIGINT) size_in_mb
from DBC.TableSize where DatabaseName = '%s'
AND TableName NOT LIKE ALL ('INFA%%', 'tmp!_%%', 'temp!_%%', '!_%%', '#%%', 'ET!_%%', 'LS!_%%', 'VT!_%%', 'LOGTABLE%%', 'backup%%', 'bkp%%', 'W!_%%') ESCAPE '!'
AND RTRIM(TableName) NOT LIKE ALL ('%%!_tmp', '%%!_temp', '%%!_ERR!_.', '%%!_bkp', '%%!_backup') ESCAPE '!'
AND REGEXP_SIMILAR(RTRIM(TableName), '.*_tmp_[0-9]+','i') = 0
AND REGEXP_SIMILAR(RTRIM(TableName), '.*_tmp[0-9]+','i') = 0
group by 1 order by 1 """ % (database_name)
table_index_sql = """select RTrim(TableName), IndexNumber, IndexType, UniqueFlag, IndexName, RTrim(ColumnName), ColumnPosition, AccessCount
from DBC.Indices where DatabaseName = '%s' order by TableName, IndexNumber, ColumnPosition""" % (database_name)
table_partition_sql = """select RTrim(TableName), ConstraintText
from DBC.IndexConstraints where DatabaseName = '%s' and ConstraintType = 'Q'
order by TableName""" % (database_name)
extra_table_info = {}
curs_td = self.conn_td.cursor()
# curs_td.execute("SET QUERY_BAND = 'script=%s; pid=%d; hostname=%s; task=extra_table_info;' FOR SESSION" % (os.path.basename(__file__), os.getpid(), os.getenv('HOSTNAME')))
# get size, index and partition info one by one
curs_td.execute(table_size_sql)
rows = curs_td.fetchall()
for row in rows:
full_table_name = database_name + '.' + row[0]
extra_table_info[full_table_name] = {'size_in_mb': row[1], 'partitions': [], 'indices': []}
curs_td.execute(table_partition_sql)
rows = curs_td.fetchall()
for row in rows:
full_table_name = database_name + '.' + row[0]
if full_table_name not in extra_table_info:
continue
search_result = re.search('CHECK \(/\*([0-9]+)\*/ (.*)\)$', row[1], re.IGNORECASE)
partition_level = 1
if search_result:
partition_level = int(search_result.group(1))
partition_info = search_result.group(2).replace("\r", "").replace(") IS NOT NULL AND ", ")\n").replace(
") IS NOT NULL", ")")
extra_table_info[full_table_name]['partitions'] = partition_info.split("\n")
search_result = re.search('CHECK \(\((.*)\) BETWEEN [0-9]+ AND [0-9]+\)$', row[1], re.IGNORECASE)
if search_result:
partition_info = search_result.group(1).replace("\r", "")
extra_table_info[full_table_name]['partitions'] = [partition_info]
curs_td.execute(table_index_sql)
rows = curs_td.fetchall()
table_count = 0
current_table_name = ''
full_table_name = ''
for row in rows:
if current_table_name != row[0]:
if table_count > 0:
# finish previous table's last index
indices[-1]['column_list'] = column_list
if full_table_name in extra_table_info:
extra_table_info[full_table_name]['indices'] = indices
full_table_name = database_name + '.' + row[0]
if full_table_name not in extra_table_info:
continue
table_count += 1
current_table_name = row[0]
current_index_number = 0
indices = []
if current_index_number != row[1]:
if current_index_number > 0:
indices[-1]['column_list'] = column_list
# new index
current_index_number = row[1]
indices.append(
{'index_number': row[1], 'index_type': index_type[row[2]], 'is_unique': row[3], 'index_name': row[4],
'access_count': row[7], 'column_list': ''})
column_list = row[5]
else:
column_list += ", %s" % row[5]
if len(indices) > 0:
indices[-1]['column_list'] = column_list
if full_table_name in extra_table_info:
extra_table_info[full_table_name]['indices'] = indices
return extra_table_info
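# Illustrative shape of the returned dict (added, not part of the original
# script); 'DB.TABLE' and the values are placeholders:
#
#   {'DB.TABLE': {'size_in_mb': 1024,
#                 'partitions': ['RANGE_N(calendar_date BETWEEN ...)'],
#                 'indices': [{'index_number': 1, 'index_type': 'Primary Index',
#                              'is_unique': 'Y', 'index_name': None,
#                              'access_count': 0, 'column_list': 'member_id'}]}}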
def format_view_metadata(self, rows, schema):
"""
Add view info from rows into schema.
Note: a view's original name is the same as its full name.
:param rows:
:param schema:
:return:
"""
db_dict = {}
table_dict = {}
for row in rows:
if row[0] not in db_dict:
schema.append({'database': row[0], 'type': 'Teradata', 'views': []})
db_dict[row[0]] = len(schema) - 1
db_idx = db_dict[row[0]]
full_name = row[0] + '.' + row[1]
ref_table_list = []
ref_table_list = set(re.findall(r"\s+FROM\s+(\w+\.\w+)[\s,;]", row[2], re.DOTALL | re.IGNORECASE))
search_result = set(re.findall(r"\s+JOIN\s+(\w+\.\w+)[\s,;]", row[2], re.DOTALL | re.IGNORECASE))
ref_table_list = list(set(ref_table_list) | set(search_result))
if full_name not in table_dict:
schema[db_idx]['views'].append(
{'name': row[1], 'type': 'View', 'createTime': row[3], 'lastAlterTime': row[4], 'accessCount': row[5],
'referenceTables': ref_table_list, 'viewSqlText': row[2].replace("\r", "\n"), 'columns': [],
'original_name': full_name})
table_dict[full_name] = len(schema[db_idx]['views']) - 1
table_idx = table_dict[full_name]
schema[db_idx]['views'][table_idx]['columns'].append(
{'name': row[6], 'nullable': row[8], 'dataType': row[9], 'maxByteLength': row[10], 'precision': row[11],
'scale': row[12]})
column_idx = len(schema[db_idx]['views'][table_idx]['columns']) - 1
if row[7]:
schema[db_idx]['views'][table_idx]['columns'][column_idx]['columnFormat'] = row[7].strip()
self.logger.info("%s %6d views with %6d columns processed for %12s" % (
datetime.datetime.now(), table_idx + 1, len(rows), row[0]))
def format_table_metadata(self, rows, schema):
"""
Add table info from rows into schema.
:param rows: input. one row per column, covering all tables of one database
:param schema: {database : _, type : _, tables : ['name' : _, ... 'original_name' : _] }
:return:
"""
db_dict = {}
table_dict = {}
db_idx = len(schema) - 1
table_idx = -1
for row in rows:
if row[0] not in db_dict:
schema.append({'database': row[0], 'type': 'Teradata', 'tables': []})
db_idx += 1
db_dict[row[0]] = db_idx
extra_table_info = self.get_extra_table_info(row[0])
full_name = ''
if row[0]:
full_name = row[0]
if row[1]:
full_name += '.' + row[1]
elif row[1]:
full_name = row[1]
# full_name = row[0] + '.' + row[1]
original_name = row[0] + '.' + row[20]
if original_name not in extra_table_info:
self.logger.error('ERROR : {0} not in extra_table_info!'.format(original_name))
continue
if full_name not in table_dict:
schema[db_idx]['tables'].append(
{'name': row[1], 'type': 'Table', 'createTime': row[2], 'lastAlterTime': row[3], 'lastAccessTime': row[4],
'accessCount': row[5], 'owner': row[19], 'sizeInMbytes': extra_table_info[original_name]['size_in_mb'],
'partitions': extra_table_info[original_name]['partitions'],
'indices': extra_table_info[original_name]['indices'], 'columns': [], 'original_name': original_name})
table_idx += 1
table_dict[full_name] = table_idx
# print "%6d: %s: %s" % (table_idx, full_name, str(schema[db_idx]['tables'][table_idx]))
schema[db_idx]['tables'][table_idx]['columns'].append(
{'name': row[6], 'nullable': row[9], 'lastAccessTime': row[10],
'accessCount': row[11] & 0xffff if row[11] else None, 'dataType': row[14] if row[14] else 'N/A',
'maxByteLength': row[15] & 0xffff, 'precision': row[16] & 0xffff if row[16] else None,
'scale': row[17] & 0xffff if row[17] else None})
column_idx = len(schema[db_idx]['tables'][table_idx]['columns']) - 1
if not row[8] is None:
schema[db_idx]['tables'][table_idx]['columns'][column_idx]['defaultValue'] = row[8]
if not row[7] is None:
schema[db_idx]['tables'][table_idx]['columns'][column_idx]['columnFormat'] = row[7].strip()
if not row[12] is None:
schema[db_idx]['tables'][table_idx]['columns'][column_idx]['statistics'] = {
'uniqueValueCount': row[12] & 0xffff, 'lastStatsCollectTime': str(row[13])}
self.logger.info("%s %6d tables with %6d columns processed for %12s" % (
datetime.datetime.now(), table_idx + 1, len(rows), row[0]))
def get_sample_data(self, database_name, table_name):
"""
Find the reference dataset (if there is one) and select the top 10 rows from Teradata.
:return: (reference_urn, JSON string of the sample data)
"""
fullname = ''
columns = []
rows_data = []
# no permission for these databases; fetch samples from the corresponding tables in DWH_STG
if database_name in ['DWH_DIM', 'DWH_FACT', 'DWH_TRK', 'DWH_AGG', 'DWH_CPY', 'DWH_MSTR', 'DWH_SEC']:
fullname = 'DWH_STG."' + table_name + '"'
else:
fullname = database_name + '."' + table_name + '"'
sql = 'LOCK ROW FOR ACCESS SELECT top 10 * FROM ' + fullname
curs_td = self.conn_td.cursor()
rows = []
try:
curs_td.execute(sql)
rows = curs_td.fetchall()
for i, value in enumerate(rows[0]):
columns.append(curs_td.description[i][0])
for r in rows:
row_data = []
# encode each field to a new value
for i, value in enumerate(r):
new_value = unicode(value, errors='ignore')
if isinstance(value, bytearray):
new_value = ''.join(format(x, '02x') for x in value)
elif value is None:
new_value = ''
row_data.append(new_value)
rows_data.append(row_data)
except Exception, e:
self.logger.error('sql : ' + sql)
if len(rows) == 0:
self.logger.error("dataset {0} is empty".format(fullname))
else:
self.logger.error("dataset {0} is not accessible.".format(fullname))
self.logger.error('result : ' + str(rows))
self.logger.error(e)
pass
ref_urn = 'teradata:///' + fullname.replace('.', '/').replace('"', '')
data_with_column = map(lambda x:dict(zip(columns, x)), rows_data)
return ref_urn, json.dumps({'sample': data_with_column})
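# Illustrative shape of the return value (added, not part of the original
# script); 'some_table', 'col_a' and 'col_b' are placeholder names:
#
#   ref_urn -> 'teradata:///DWH_STG/some_table'
#   sample  -> '{"sample": [{"col_a": "1", "col_b": "x"}, ...]}'
#
# i.e. a reference URN plus a JSON document with up to 10 sampled rows keyed
# by column name.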
def run(self, database_name, table_name, schema_output_file, sample_output_file, sample=True):
"""
Entry point of the class: extract schema and sample data.
Note: the databases need to be ordered so that databases with more info (e.g. DWH_STG) are scanned first.
:param database_name:
:param table_name:
:param schema_output_file:
:return:
"""
cur = self.conn_td.cursor()
schema = []
f_log = open(self.log_file, "a")
schema_json = open(schema_output_file, 'wb')
os.chmod(schema_output_file, 0666)
if database_name is None and table_name is None: # default route: process everything
for database_name in self.databases:
self.logger.info("Collecting tables in database : " + database_name)
# table info
rows = []
begin = datetime.datetime.now().strftime("%H:%M:%S")
rows.extend(self.get_table_info(database_name, table_name))
if len(rows) > 0:
self.format_table_metadata(rows, schema)
end = datetime.datetime.now().strftime("%H:%M:%S")
f_log.write("Get table info %12s [%s -> %s]\n" % (database_name, str(begin), str(end)))
# view info
rows = []
begin = datetime.datetime.now().strftime("%H:%M:%S")
rows.extend(self.get_view_info(database_name, table_name))
if len(rows) > 0:
self.format_view_metadata(rows, schema)
end = datetime.datetime.now().strftime("%H:%M:%S")
f_log.write("Get view info %12s [%s -> %s]\n" % (database_name, str(begin), str(end)))
scaned_dict = {} # a cache of {name : {ref_urn : _, data : _}} to avoid repeated computation
if sample:
self.logger.info("Start collecting sample data.")
open(sample_output_file, 'wb')
os.chmod(sample_output_file, 0666)
sample_file_writer = FileWriter(sample_output_file)
# collect sample data
for onedatabase in schema:
database_name = onedatabase['database']
if 'tables' in onedatabase:
alltables = onedatabase['tables']
else:
alltables = onedatabase['views']
for onetable in alltables:
table_name = onetable['original_name'].split('.')[1]
if table_name in scaned_dict:
sample_record = SampleDataRecord('teradata', '/' + database_name + '/' + table_name,
scaned_dict[table_name]['ref_urn'], scaned_dict[table_name]['data'])
else:
(ref_urn, sample_data) = self.get_sample_data(database_name, table_name)
sample_record = SampleDataRecord('teradata', '/' + database_name + '/' + table_name, '', sample_data)
scaned_dict[table_name] = {'ref_urn': ref_urn, 'data': sample_data}
sample_file_writer.append(sample_record)
sample_file_writer.close()
# print 'byte size of schema : ' + str(sys.getsizeof(schema))
schema_json.write(json.dumps(schema, indent=None) + '\n')
cur.close()
schema_json.close()
f_log.close()
if __name__ == "__main__":
args = sys.argv[1]
# connection
username = args[Constant.TD_DB_USERNAME_KEY]
password = args[Constant.TD_DB_PASSWORD_KEY]
JDBC_DRIVER = args[Constant.TD_DB_DRIVER_KEY]
JDBC_URL = args[Constant.TD_DB_URL_KEY]
e = TeradataExtract()
e.conn_td = zxJDBC.connect(JDBC_URL, username, password, JDBC_DRIVER)
do_sample = False
if Constant.TD_LOAD_SAMPLE in args:
do_sample = FileUtil.parse_bool(args[Constant.TD_LOAD_SAMPLE], False)
if datetime.datetime.now().strftime('%a') not in args[Constant.TD_COLLECT_SAMPLE_DATA_DAYS]:
do_sample = False
temp_dir = FileUtil.etl_temp_dir(args, "TERADATA")
try:
e.conn_td.cursor().execute(
"SET QUERY_BAND = 'script=%s; pid=%d; ' FOR SESSION;" % ('TeradataExtract.py', os.getpid()))
e.conn_td.commit()
e.log_file = os.path.join(temp_dir, args[Constant.TD_LOG_KEY])
e.databases = args[Constant.TD_TARGET_DATABASES_KEY].split(',')
e.default_database = args[Constant.TD_DEFAULT_DATABASE_KEY]
index_type = {'P': 'Primary Index', 'K': 'Primary Key', 'S': 'Secondary Index', 'Q': 'Partitioned Primary Index',
'J': 'Join Index', 'U': 'Unique Index'}
schema_output_file = os.path.join(temp_dir, args[Constant.TD_SCHEMA_OUTPUT_KEY])
sample_output_file = os.path.join(temp_dir, args[Constant.TD_SAMPLE_OUTPUT_KEY])
e.run(None, None, schema_output_file, sample_output_file, sample=do_sample)
finally:
e.conn_td.close()
|
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core custom tags."""
__author__ = 'John Orr (jorr@google.com)'
import os
import re
import urllib
import urlparse
from xml.etree import cElementTree
import markdown
import appengine_config
from common import crypto
from common import jinja_utils
from common import schema_fields
from common import tags
from common import utils as common_utils
from controllers import utils
from models import courses
from models import custom_modules
from models import models
from models import roles
from models import services
from models import transforms
from modules.core_tags import messages
from modules.oeditor import oeditor
_MODULE_PATH = '/modules/core_tags'
_STATIC_URL = _MODULE_PATH + '/_static/'
_OEDITOR_STATIC_URL = '/modules/oeditor/_static/'
_DRIVE_TAG_REFRESH_SCRIPT = _STATIC_URL + 'js/drive_tag_refresh.js'
_IFRAME_RESIZE_SCRIPT = _OEDITOR_STATIC_URL + 'js/resize_iframes.js'
_PARENT_FRAME_SCRIPT = _STATIC_URL + 'js/drive_tag_parent_frame.js'
_SCRIPT_MANAGER_SCRIPT = _STATIC_URL + 'js/drive_tag_script_manager.js'
_TEMPLATES_ABSPATH = os.path.join(os.path.dirname(__file__), 'templates')
_GOOGLE_DRIVE_TAG_PATH = _MODULE_PATH + '/googledrivetag'
_GOOGLE_DRIVE_TAG_RENDERER_PATH = _MODULE_PATH + '/googledrivetagrenderer'
def _escape_url(url, force_https=True):
"""Escapes/quotes url parts to sane user input."""
scheme, netloc, path, query, unused_fragment = urlparse.urlsplit(url)
if force_https:
scheme = 'https'
path = urllib.quote(path)
query = urllib.quote_plus(query, '=?&;')
return urlparse.urlunsplit((scheme, netloc, path, query, unused_fragment))
def _replace_url_query(url, new_query):
"""Replaces the query part of a URL with a new one."""
scheme, netloc, path, _, fragment = urlparse.urlsplit(url)
return urlparse.urlunsplit((scheme, netloc, path, new_query, fragment))
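# Illustrative examples (added, not part of the original module); the URLs
# are placeholders:
#
#   _escape_url('http://example.com/a doc?id=1&x=2')
#       -> 'https://example.com/a%20doc?id=1&x=2'
#   _replace_url_query('https://example.com/doc/pub?embedded=false',
#                      'embedded=true')
#       -> 'https://example.com/doc/pub?embedded=true'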
class _Runtime(object):
"""Derives runtime configuration state from CB application context."""
def __init__(self, app_context):
self._app_context = app_context
self._environ = self._app_context.get_environ()
def can_edit(self):
return roles.Roles.is_course_admin(self._app_context)
def courses_can_use_google_apis(self):
return courses.COURSES_CAN_USE_GOOGLE_APIS.value
def configured(self):
return (
self.courses_can_use_google_apis() and
bool(self.get_api_key()) and
bool(self.get_client_id()))
def get_api_key(self):
course, google, api_key = courses.CONFIG_KEY_GOOGLE_API_KEY.split(':')
return self._environ.get(course, {}).get(google, {}).get(api_key, '')
def get_client_id(self):
course, google, client_id = courses.CONFIG_KEY_GOOGLE_CLIENT_ID.split(
':')
return self._environ.get(
course, {}
).get(
google, {}
).get(
client_id, '')
def get_slug(self):
return self._app_context.get_slug()
class CoreTag(tags.BaseTag):
"""All core custom tags derive from this class."""
@classmethod
def vendor(cls):
return 'gcb'
@classmethod
def create_icon_url(cls, name):
"""Creates a URL for an icon with a specific name."""
return os.path.join(_STATIC_URL, 'images', name)
class GoogleDoc(CoreTag):
"""Custom tag for a Google Doc."""
@classmethod
def name(cls):
return 'Google Doc'
def render(self, node, unused_handler):
height = node.attrib.get('height') or '300'
link = node.attrib.get('link')
url = _escape_url(_replace_url_query(link, 'embedded=true'))
iframe = cElementTree.XML("""
<iframe class="google-doc" title="Google Doc" type="text/html" frameborder="0">
</iframe>""")
iframe.set('src', url)
iframe.set('style', 'width: %spx; height: %spx' % (700, height))
return iframe
def get_icon_url(self):
return self.create_icon_url('docs.png')
def get_schema(self, unused_handler):
reg = schema_fields.FieldRegistry(GoogleDoc.name())
reg.add_property(
# To get this value, users do File > Publish to the web..., click
# 'Start publishing', and then copy and paste the Document link.
# Changes to the publication status of a document or to its
# contents do not appear instantly.
schema_fields.SchemaField(
'link', 'Document Link', 'string',
description=messages.DOCUMENT_LINK_DESCRIPTION))
reg.add_property(
schema_fields.SchemaField(
'height', 'Height', 'string', i18n=False,
optional=True,
extra_schema_dict_values={'value': '300'},
description=messages.DOCUMENT_HEIGHT_DESCRIPTION))
return reg
class GoogleDrive(CoreTag, tags.ContextAwareTag):
"""Custom tag for Google Drive items."""
CONTENT_CHUNK_TYPE = 'google-drive'
@classmethod
def additional_dirs(cls):
return [_TEMPLATES_ABSPATH]
@classmethod
def extra_css_files(cls):
return ['google_drive_tag.css']
@classmethod
def extra_js_files(cls):
return ['drive_tag_child_frame.js', 'google_drive_tag_lightbox.js']
@classmethod
def name(cls):
return 'Google Drive'
@classmethod
def on_register(cls):
oeditor.ObjectEditor.EXTRA_SCRIPT_TAG_URLS.append(
cls._oeditor_extra_script_tags_urls)
@classmethod
def on_unregister(cls):
oeditor.ObjectEditor.EXTRA_SCRIPT_TAG_URLS.remove(
cls._oeditor_extra_script_tags_urls)
@classmethod
def _get_tag_renderer_url(cls, slug, type_id, resource_id):
args = urllib.urlencode(
{'type_id': type_id, 'resource_id': resource_id})
slug = '' if slug == '/' else slug # Courses may be at / or /slug.
return '%s%s?%s' % (slug, _GOOGLE_DRIVE_TAG_RENDERER_PATH, args)
@classmethod
def _oeditor_extra_script_tags_urls(cls):
script_urls = []
if courses.COURSES_CAN_USE_GOOGLE_APIS.value:
# Order matters here because scripts are inserted in the order they
# are found in this list, and later ones may refer to symbols from
# earlier ones.
script_urls.append(_SCRIPT_MANAGER_SCRIPT)
script_urls.append(_PARENT_FRAME_SCRIPT)
return script_urls
def get_icon_url(self):
return self.create_icon_url('drive.png')
def get_schema(self, handler):
api_key = None
client_id = None
if handler:
runtime = _Runtime(handler.app_context)
if not runtime.configured():
return self.unavailable_schema(
services.help_urls.make_learn_more_message(
messages.GOOGLE_DRIVE_UNAVAILABLE,
'core_tags:google_drive:unavailable'))
api_key = runtime.get_api_key()
client_id = runtime.get_client_id()
reg = schema_fields.FieldRegistry(GoogleDrive.name())
reg.add_property(
schema_fields.SchemaField(
'document-id', 'Document ID', 'string',
description=messages.DOCUMENT_ID_DESCRIPTION,
extra_schema_dict_values={
'api-key': api_key,
'client-id': client_id,
'type-id': self.CONTENT_CHUNK_TYPE,
'xsrf-token': GoogleDriveRESTHandler.get_xsrf_token(),
}, i18n=False))
return reg
def render(self, node, context):
runtime = _Runtime(context.handler.app_context)
resource_id = node.attrib.get('document-id')
src = self._get_tag_renderer_url(
runtime.get_slug(), self.CONTENT_CHUNK_TYPE, resource_id)
tag = cElementTree.Element('div')
tag.set('class', 'google-drive google-drive-container')
if runtime.can_edit():
controls = cElementTree.Element('div')
controls.set('class', 'google-drive google-drive-controls')
controls.set('data-api-key', runtime.get_api_key())
controls.set('data-client-id', runtime.get_client_id())
controls.set('data-document-id', resource_id)
controls.set(
'data-xsrf-token', GoogleDriveRESTHandler.get_xsrf_token())
tag.append(controls)
iframe = cElementTree.Element('iframe')
iframe.set(
'class',
'google-drive google-drive-content-iframe gcb-needs-resizing')
iframe.set('frameborder', '0')
iframe.set('scrolling', 'no')
iframe.set('src', src)
iframe.set('title', 'Google Drive')
iframe.set('width', '100%')
tag.append(iframe)
return tag
def rollup_header_footer(self, context):
runtime = _Runtime(context.handler.app_context)
can_edit = runtime.can_edit()
srcs = [_IFRAME_RESIZE_SCRIPT]
if can_edit: # Harmless but wasteful to give to non-admins.
srcs = [_SCRIPT_MANAGER_SCRIPT] + srcs
header = cElementTree.Element('div')
for src in srcs:
script = cElementTree.Element('script')
script.set('src', src)
header.append(script)
# Put this in the footer so the other scripts are already loaded when our
# main script fires. Give the script to admins only (note that even if
# non-admins grab the script, they won't get the XSRF tokens they need to
# issue CB AJAX ops).
footer = cElementTree.Element('div')
if can_edit:
script = cElementTree.Element('script')
script.set('src', _DRIVE_TAG_REFRESH_SCRIPT)
footer.append(script)
return (header, footer)
class GoogleDriveRESTHandler(utils.BaseRESTHandler):
_XSRF_TOKEN_NAME = 'modules-core-tags-google-drive'
XSRF_TOKEN_REQUEST_KEY = 'xsrf_token'
@classmethod
def get_xsrf_token(cls):
return crypto.XsrfTokenManager.create_xsrf_token(cls._XSRF_TOKEN_NAME)
def put(self):
if not courses.COURSES_CAN_USE_GOOGLE_APIS.value:
self.error(404)
return
request = transforms.loads(self.request.get('request', ''))
if not self.assert_xsrf_token_or_fail(
request, self._XSRF_TOKEN_NAME, {}):
return
contents = request.get('contents')
document_id = request.get('document_id')
type_id = request.get('type_id')
if not (contents and document_id):
transforms.send_json_response(
self, 400, 'Save failed; no Google Drive item chosen.')
return
if not type_id:
transforms.send_json_response(
self, 400, 'Save failed; type_id not set')
return
key = None
try:
key = self._save_content_chunk(contents, type_id, document_id)
except Exception, e: # On purpose. pylint: disable=broad-except
transforms.send_json_response(
self, 500, 'Error when saving: %s' % e)
return
transforms.send_json_response(
self, 200, 'Success.', payload_dict={'key': str(key)})
def _save_content_chunk(self, contents, type_id, resource_id):
key = None
uid = models.ContentChunkDAO.make_uid(type_id, resource_id)
matches = models.ContentChunkDAO.get_by_uid(uid)
if not matches:
key = models.ContentChunkDAO.save(models.ContentChunkDTO({
'content_type': 'text/html',
'contents': contents,
'resource_id': resource_id,
'type_id': type_id,
}))
else:
# There is a data race in the DAO -- it's possible to create two
# entries at the same time with the same UID. If that happened,
# use the first one saved.
dto = matches[0]
dto.contents = contents
dto.content_type = 'text/html'
key = models.ContentChunkDAO.save(dto)
return key
class GoogleDriveTagRenderer(utils.BaseHandler):
def get(self):
if not courses.COURSES_CAN_USE_GOOGLE_APIS.value:
self.error(404)
return
resource_id = self.request.get('resource_id')
type_id = self.request.get('type_id')
if not (resource_id and type_id):
self._handle_error(400, 'Bad request')
return
matches = models.ContentChunkDAO.get_by_uid(
models.ContentChunkDAO.make_uid(type_id, resource_id))
if not matches:
self._handle_error(404, 'Content chunk not found')
return
# There is a data race in the DAO -- it's possible to create two entries
# at the same time with the same UID. If that happened, use the first
# one saved.
chunk = matches[0]
template = jinja_utils.get_template(
'drive_item.html', [_TEMPLATES_ABSPATH])
self.response.out.write(template.render({'contents': chunk.contents}))
def _handle_error(self, code, message):
template = jinja_utils.get_template(
'drive_error.html', [_TEMPLATES_ABSPATH])
self.error(code)
self.response.out.write(template.render({
'code': code,
'message': message,
}))
class GoogleSpreadsheet(CoreTag):
"""Custom tag for a Google Spreadsheet."""
@classmethod
def name(cls):
return 'Google Spreadsheet'
def render(self, node, unused_handler):
height = node.attrib.get('height') or '300'
link = node.attrib.get('link')
url = _escape_url('%s&chrome=false' % link.split('&output')[0])
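# The published link is truncated at '&output' and '&chrome=false' is
# appended, presumably so the embedded sheet renders without the Google
# Sheets UI chrome inside the iframe below.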
iframe = cElementTree.XML("""
<iframe class="google-spreadsheet" title="Google Spreadsheet" type="text/html"
frameborder="0">
</iframe>""")
iframe.set('src', url)
iframe.set('style', 'width: %spx; height: %spx' % (700, height))
return iframe
def get_icon_url(self):
return self.create_icon_url('spreadsheets.png')
def get_schema(self, unused_handler):
reg = schema_fields.FieldRegistry(GoogleSpreadsheet.name())
reg.add_property(
# To get this value, users do File > Publish to the web..., click
# 'Start publishing', and then copy and paste the link that appears
# above the caption 'Copy and paste the link above'. Changes to the
# publication status of a document or to its contents do not appear
# instantly.
schema_fields.SchemaField(
'link', 'Link', 'string',
description=messages.GOOGLE_SPREADSHEET_LINK_DESCRIPTION))
reg.add_property(
schema_fields.SchemaField(
'height', 'Height', 'string',
description=messages.GOOGLE_SPREADSHEET_HEIGHT_DESCRIPTION,
extra_schema_dict_values={'value': '300'},
i18n=False, optional=True))
return reg
class YouTube(CoreTag):
@classmethod
def name(cls):
return 'YouTube Video'
def render(self, node, handler):
video_id = node.attrib.get('videoid')
if handler.can_record_student_events():
return self._render_with_tracking(video_id)
else:
return self._render_no_tracking(video_id)
def _render_no_tracking(self, video_id):
"""Embed video without event tracking support."""
you_tube_url = (
'https://www.youtube.com/embed/%s'
'?feature=player_embedded&rel=0') % video_id
iframe = cElementTree.XML("""
<div class="gcb-video-container">
<iframe class="youtube-player" title="YouTube Video Player"
type="text/html" frameborder="0" allowfullscreen="allowfullscreen">
</iframe>
</div>""")
iframe[0].set('src', you_tube_url)
return iframe
def _render_with_tracking(self, video_id):
"""Embed video and enable event tracking."""
video_id = jinja_utils.js_string_raw(video_id)
uid = common_utils.generate_instance_id()
dom = cElementTree.XML("""
<p>
<script></script>
<script></script>
</p>""")
dom.attrib['id'] = uid
dom[0].attrib['src'] = os.path.join(
_STATIC_URL, 'js', 'youtube_video.js')
dom[1].text = 'gcbTagYoutubeEnqueueVideo("%s", "%s");' % (video_id, uid)
return dom
def get_icon_url(self):
return self.create_icon_url('youtube.png')
def get_schema(self, unused_handler):
reg = schema_fields.FieldRegistry(YouTube.name())
reg.add_property(schema_fields.SchemaField(
'videoid', 'Video ID', 'string',
description=messages.VIDEO_ID_DESCRIPTION))
return reg
class Html5Video(CoreTag):
@classmethod
def name(cls):
return 'HTML5 Video'
def render(self, node, handler):
if handler.can_record_student_events():
tracking_text = (
'<script src="' + os.path.join(
_STATIC_URL, 'js', 'html5_video.js') + '">' +
'</script>' +
'<script>' +
' gcbTagHtml5TrackVideo("%s");' % (
jinja_utils.js_string_raw(node.attrib.get('instanceid'))) +
'</script>')
else:
tracking_text = ''
video_text = (
'<div>' +
' <video></video>'
'%s' % tracking_text +
'</div>')
video = cElementTree.XML(video_text)
video[0].set('id', node.attrib.get('instanceid'))
video[0].set('src', node.attrib.get('url'))
if node.attrib.get('width'):
video[0].set('width', node.attrib.get('width'))
if node.attrib.get('height'):
video[0].set('height', node.attrib.get('height'))
video[0].set('controls', 'true')
return video
def get_icon_url(self):
return self.create_icon_url('html5-badge-h-solo.png')
def get_schema(self, unused_handler):
reg = schema_fields.FieldRegistry(Html5Video.name())
reg.add_property(
schema_fields.SchemaField(
'url', 'Video URL', 'url',
description=messages.HTML5_VIDEO_URL_DESCRIPTION,
optional=False))
reg.add_property(schema_fields.SchemaField(
'width', 'Width', 'integer',
description=messages.HTML5_VIDEO_WIDTH_DESCRIPTION,
optional=True))
reg.add_property(schema_fields.SchemaField(
'height', 'Height', 'integer',
description=messages.HTML5_VIDEO_HEIGHT_DESCRIPTION,
optional=True))
return reg
class GoogleGroup(CoreTag):
@classmethod
def name(cls):
return 'Google Group'
def render(self, node, handler):
# Note: in Firefox, this component requires a full hostname to work.
# If you are working in the development environment and are accessing
# this component at localhost, use '127.0.0.1' instead of 'localhost'.
_, netloc, _, _, _ = urlparse.urlsplit(handler.request.uri)
parent_url_suffix = ''
if (appengine_config.PRODUCTION_MODE or
not netloc.startswith('localhost')):
parent_url_suffix = (
'?parenturl=%s' % urllib.quote(handler.request.uri, safe=''))
group_name = node.attrib.get('group')
category_name = node.attrib.get('category')
embedded_forum_url = (
'https://groups.google.com/forum/embed/%s#!categories/%s/%s' % (
parent_url_suffix,
urllib.quote(group_name),
urllib.quote(category_name)
))
iframe = cElementTree.XML("""
<p>
<iframe class="forum-embed" title="Google Group Embed"
type="text/html" width="700" height="300" frameborder="0">
</iframe>
</p>""")
iframe[0].set('src', embedded_forum_url)
return iframe
def get_icon_url(self):
return self.create_icon_url('forumembed.png')
def get_schema(self, unused_handler):
reg = schema_fields.FieldRegistry(GoogleGroup.name())
reg.add_property(schema_fields.SchemaField(
'group', 'Group Name', 'string', i18n=False,
description=services.help_urls.make_learn_more_message(
messages.RTE_GOOGLE_GROUP_GROUP_NAME,
'core_tags:google_group:name')))
reg.add_property(schema_fields.SchemaField(
'category', 'Category Name', 'string', optional=True, i18n=False,
description=messages.RTE_GOOGLE_GROUP_CATEGORY_NAME))
return reg
class IFrame(CoreTag):
def render(self, node, unused_handler):
src = node.attrib.get('src')
title = node.attrib.get('title')
height = node.attrib.get('height') or '400'
width = node.attrib.get('width') or '650'
iframe = cElementTree.XML(
'<iframe style="border: 0;"></iframe>'
)
iframe.set('src', _escape_url(src, force_https=False))
iframe.set('title', title)
iframe.set('width', width)
iframe.set('height', height)
return iframe
def get_icon_url(self):
return self.create_icon_url('iframe.png')
def get_schema(self, unused_handler):
reg = schema_fields.FieldRegistry(IFrame.name())
reg.add_property(schema_fields.SchemaField(
'src', 'Embed URL', 'string',
description=messages.RTE_IFRAME_EMBED_URL,
extra_schema_dict_values={'_type': 'url', 'showMsg': True}))
reg.add_property(schema_fields.SchemaField(
'title', 'Title', 'string', description=messages.RTE_IFRAME_TITLE))
reg.add_property(schema_fields.SchemaField(
'height', 'Height', 'string', i18n=False, optional=True,
extra_schema_dict_values={'value': '400'},
description=messages.RTE_IFRAME_HEIGHT))
reg.add_property(schema_fields.SchemaField(
'width', 'Width', 'string', i18n=False, optional=True,
extra_schema_dict_values={'value': '650'},
description=messages.RTE_IFRAME_WIDTH))
return reg
class Include(CoreTag):
@classmethod
def name(cls):
return 'HTML Asset'
def render(self, node, handler):
template_path = re.sub('^/+', '', node.attrib.get('path'))
base_path = os.path.dirname(template_path)
base_file = os.path.basename(template_path)
handler.init_template_values(handler.app_context.get_environ())
handler.template_value['base_path'] = base_path
html_text = handler.render_template_to_html(
handler.template_value, base_file,
additional_dirs=[
os.path.join(appengine_config.BUNDLE_ROOT, 'views'),
appengine_config.BUNDLE_ROOT,
os.path.join(appengine_config.BUNDLE_ROOT, base_path),
])
return tags.html_string_to_element_tree(html_text)
def get_icon_url(self):
return self.create_icon_url('include.png')
def get_schema(self, handler):
expected_prefix = os.path.join(appengine_config.BUNDLE_ROOT,
'assets/html')
select_data = []
if handler:
all_files = handler.app_context.fs.list(expected_prefix,
include_inherited=True)
for name in all_files:
if name.startswith(expected_prefix):
name = name.replace(appengine_config.BUNDLE_ROOT, '')
select_data.append(
(name, name.replace('/assets/html/', '')))
reg = schema_fields.FieldRegistry(Include.name())
reg.add_property(schema_fields.SchemaField(
'path', 'File Path', 'string', optional=False,
select_data=select_data,
description=messages.HTML_ASSET_FILE_PATH_DESCRIPTION))
return reg
class Markdown(tags.ContextAwareTag, CoreTag):
@classmethod
def name(cls):
return 'Markdown'
@classmethod
def required_modules(cls):
return super(Markdown, cls).required_modules() + ['gcb-code']
@classmethod
def additional_dirs(cls):
return [os.path.join(
appengine_config.BUNDLE_ROOT, 'modules', 'core_tags', 'resources')]
def get_icon_url(self):
return self.create_icon_url('markdown.png')
def render(self, node, context):
# The markdown is "text" type in the schema and so is presented in the
# tag's body.
html = ''
if node.text:
html = markdown.markdown(node.text)
return tags.html_string_to_element_tree(
'<div class="gcb-markdown">%s</div>' % html)
def rollup_header_footer(self, context):
"""Include markdown css only when markdown tag is present."""
header = tags.html_string_to_element_tree(
'<link href="{}/css/markdown.css" rel="stylesheet">'.format(
_STATIC_URL))
footer = tags.html_string_to_element_tree('')
return (header, footer)
def get_schema(self, unused_handler):
reg = schema_fields.FieldRegistry(Markdown.name())
reg.add_property(schema_fields.SchemaField(
'markdown', 'Markdown', 'text',
description=services.help_urls.make_learn_more_message(
messages.RTE_MARKDOWN_MARKDOWN, 'core_tags:markdown:markdown'),
extra_schema_dict_values={
'mode': 'markdown', '_type': 'code',
}, optional=False))
return reg
custom_module = None
def register_module():
"""Registers this module in the registry."""
custom_tags = [
GoogleDoc, GoogleDrive, GoogleSpreadsheet, YouTube, Html5Video,
GoogleGroup, IFrame, Include, Markdown]
def make_binding_name(custom_tag):
return 'gcb-%s' % custom_tag.__name__.lower()
def on_module_disable():
for custom_tag in custom_tags:
tags.Registry.remove_tag_binding(make_binding_name(custom_tag))
# Unregister extra libraries required by GoogleDrive
GoogleDrive.on_unregister()
def on_module_enable():
for custom_tag in custom_tags:
tags.Registry.add_tag_binding(
make_binding_name(custom_tag), custom_tag)
# Register extra libraries required by GoogleDrive
GoogleDrive.on_register()
global custom_module # pylint: disable=global-statement
global_routes = []
namespaced_routes = [
(_GOOGLE_DRIVE_TAG_PATH, GoogleDriveRESTHandler),
(_GOOGLE_DRIVE_TAG_RENDERER_PATH, GoogleDriveTagRenderer),
]
custom_module = custom_modules.Module(
'Core Custom Tags Module',
'A module that provides core custom tags.',
global_routes, namespaced_routes,
notify_module_enabled=on_module_enable,
notify_module_disabled=on_module_disable)
return custom_module
|
|
from django.views.generic.list import ListView
from django.views.generic.base import TemplateView
from django.views.generic.detail import DetailView
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http import Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse, reverse_lazy
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from orders.models import LineFollowerStage, LineFollowerJuniorStage
from results.models import LineFollowerResult, LineFollowerJuniorResult, \
ConstructionResult, DroneResult, StairClimbingResult, \
ColorSelectingResult, ScenarioResult, InnovativeJuryResult, \
InnovativeJury, InnovativeTotalResult
from sumo.models import *
from simulation.models import SimulationStageMatchResult, SimulationStage
RESULTS_DICT = {
"line_follower": LineFollowerResult,
"line_follower_junior": LineFollowerJuniorResult,
"construction": ConstructionResult,
"drone": DroneResult,
"stair_climbing": StairClimbingResult,
"color_selecting": ColorSelectingResult,
"scenario": ScenarioResult,
"innovative": InnovativeJuryResult,
}
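# ResultListView.get_queryset() below resolves the 'slug' URL kwarg via
# this mapping; e.g. the 'construction' slug lists ConstructionResult
# rows with is_best=True.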
class ResultListView(ListView):
template_name = 'results/result_list.html'
def dispatch(self, *args, **kwargs):
category = self.kwargs.get('slug')
if not category in dict(settings.ALL_CATEGORIES).keys():
raise Http404
if not settings.PROJECT_RESULTS or \
not category in dict(settings.RESULT_CATEGORIES).keys():
raise PermissionDenied
if category == 'line_follower':
return HttpResponseRedirect(
reverse('line_follower_stage_result_list'))
elif category == 'line_follower_junior':
return HttpResponseRedirect(
    reverse('line_follower_junior_stage_result_list'))
elif category == 'micro_sumo':
return HttpResponseRedirect(reverse('sumo_result_home'))
elif category == 'innovative':
return HttpResponseRedirect(reverse('innovative_referee'))
return super(ResultListView, self).dispatch(*args, **kwargs)
def get_queryset(self):
result_model = RESULTS_DICT[self.kwargs.get('slug')]
return result_model.objects.filter(is_best=True)
def get_context_data(self, **kwargs):
context = super(ResultListView, self).get_context_data(**kwargs)
context['category'] = dict(
settings.ALL_CATEGORIES)[self.kwargs.get('slug')]
return context
class LineFollowerStageResultListView(ListView):
model = LineFollowerStage
template_name = 'results/line_follower_stage_list.html'
def dispatch(self, *args, **kwargs):
if not settings.PROJECT_ORDERS or \
not "line_follower" in dict(settings.RESULT_CATEGORIES).keys() or \
not LineFollowerStage.objects.filter(results_available=True).exists():
raise PermissionDenied
return super(LineFollowerStageResultListView, self).dispatch(
*args, **kwargs)
def get_queryset(self):
return LineFollowerStage.objects.filter(results_available=True)
class SimulationStageResultListView(ListView):
model = LineFollowerStage
template_name = 'results/simulation_stage_list.html'
def dispatch(self, *args, **kwargs):
if not settings.PROJECT_ORDERS or \
not "simulation" in dict(settings.RESULT_CATEGORIES).keys():
raise PermissionDenied
return super(SimulationStageResultListView, self).dispatch(
*args, **kwargs)
def get_queryset(self):
return SimulationStage.objects.all()
class LineFollowerResultListView(ListView):
model = LineFollowerResult
template_name = 'results/result_list.html'
def dispatch(self, *args, **kwargs):
order = self.kwargs.get("order")
if not LineFollowerStage.objects.filter(
order=order, results_available=True).exists():
raise PermissionDenied
return super(LineFollowerResultListView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(LineFollowerResultListView, self).get_context_data(
**kwargs)
context['category'] = dict(settings.ALL_CATEGORIES)["line_follower"]
context['stage'] = LineFollowerStage.objects.filter(
order=self.kwargs.get("order"))[0]
return context
def get_queryset(self):
return LineFollowerResult.objects.filter(
stage__order=self.kwargs.get("order"), is_best=True)
class SimulationResultListView(ListView):
model = SimulationStageMatchResult
template_name = 'results/simulation_result_list.html'
def dispatch(self, *args, **kwargs):
number = self.kwargs.get("number")
if not SimulationStage.objects.filter(number=number).exists():
raise PermissionDenied
return super(SimulationResultListView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(SimulationResultListView, self).get_context_data(
**kwargs)
context['category'] = dict(settings.ALL_CATEGORIES)["simulation"]
context['stage'] = SimulationStage.objects.filter(
number=self.kwargs.get("number"))[0]
return context
def get_queryset(self):
return SimulationStageMatchResult.objects.filter(
match__stage__number=self.kwargs.get("number"), match__raund=1)
class LineFollowerJuniorStageResultListView(ListView):
model = LineFollowerJuniorStage
template_name = 'results/line_follower_junior_stage_list.html'
def dispatch(self, *args, **kwargs):
if not settings.PROJECT_ORDERS or \
not "line_follower_junior" in dict(settings.RESULT_CATEGORIES).keys() or \
not LineFollowerJuniorStage.objects.filter(results_available=True).exists():
raise PermissionDenied
return super(LineFollowerJuniorStageResultListView, self).dispatch(
*args, **kwargs)
def get_queryset(self):
return LineFollowerJuniorStage.objects.filter(results_available=True)
class LineFollowerJuniorResultListView(ListView):
model = LineFollowerJuniorResult
template_name = 'results/junior_result_list.html'
def dispatch(self, *args, **kwargs):
order = self.kwargs.get("order")
if not LineFollowerJuniorStage.objects.filter(
order=order, results_available=True).exists():
raise PermissionDenied
return super(LineFollowerJuniorResultListView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(LineFollowerJuniorResultListView, self).get_context_data(
**kwargs)
context['category'] = dict(settings.ALL_CATEGORIES)["line_follower_junior"]
context['stage'] = LineFollowerJuniorStage.objects.filter(
order=self.kwargs.get("order"))[0]
return context
def get_queryset(self):
return LineFollowerJuniorResult.objects.filter(
stage__order=self.kwargs.get("order"), is_best=True)
class SumoResultHomeView(TemplateView):
template_name = "results/sumo_home.html"
def dispatch(self, *args, **kwargs):
if not "micro_sumo" in dict(settings.RESULT_CATEGORIES).keys():
raise PermissionDenied
return super(SumoResultHomeView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(SumoResultHomeView, self).get_context_data(**kwargs)
context["groups"] = settings.SUMO_GROUP_RESULTS
context["stages"] = settings.SUMO_STAGE_RESULTS
context["final"] = settings.SUMO_FINAL_RESULTS
return context
class SumoResultGroupListView(ListView):
model = SumoGroup
template_name = 'results/sumo_group_list.html'
def dispatch(self, *args, **kwargs):
if not settings.SUMO_GROUP_RESULTS:
raise PermissionDenied
return super(SumoResultGroupListView, self).dispatch(*args, **kwargs)
def get_queryset(self):
return SumoGroup.objects.filter(is_final=False)
class SumoResultGroupDetailView(DetailView):
model = SumoGroup
template_name = "results/sumo_group_detail.html"
def dispatch(self, *args, **kwargs):
if not settings.SUMO_GROUP_RESULTS:
raise PermissionDenied
return super(SumoResultGroupDetailView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
group = self.get_object()
context = super(SumoResultGroupDetailView, self).get_context_data(
**kwargs)
context["matches"] = SumoGroupMatch.objects.filter(group=group)
context["teams"] = SumoGroupTeam.objects.filter(group=group)
return context
class SumoResultStageListView(ListView):
model = SumoStage
template_name = "results/sumo_stage_list.html"
def dispatch(self, *args, **kwargs):
if not settings.SUMO_STAGE_RESULTS:
raise PermissionDenied
return super(SumoResultStageListView, self).dispatch(*args, **kwargs)
class SumoResultStageDetailView(ListView):
model = SumoStageMatch
template_name = "results/sumo_stage_detail.html"
def dispatch(self, *args, **kwargs):
if not settings.SUMO_STAGE_RESULTS:
raise PermissionDenied
return super(SumoResultStageDetailView, self).dispatch(*args, **kwargs)
def get_queryset(self):
return SumoStageMatch.objects.filter(stage__pk=self.kwargs.get("pk"))
class SumoResultFinalDetailView(TemplateView):
template_name = "results/sumo_group_detail.html"
def dispatch(self, *args, **kwargs):
if not settings.SUMO_FINAL_RESULTS:
raise PermissionDenied
return super(SumoResultFinalDetailView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(SumoResultFinalDetailView, self).get_context_data(**kwargs)
group = SumoGroup.objects.get(is_final=True)
context["group"] = group
context["teams"] = SumoGroupTeam.objects.filter(group=group)
return context
class InnovativeResultView(ListView):
template_name = "results/innovative_result.html"
def dispatch(self, *args, **kwargs):
return super(InnovativeResultView, self).dispatch(*args, **kwargs)
def get_queryset(self):
return InnovativeTotalResult.objects.filter(project__is_confirmed=True).order_by("-score")
def get_context_data(self, **kwargs):
category = self.kwargs.get('slug')
context = super(InnovativeResultView, self).get_context_data(**kwargs)
context['category'] = dict(settings.ALL_CATEGORIES)["innovative"]
return context
|
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import random
import unittest
from pymatgen.core.composition import Composition
from pymatgen.core.ion import Ion
from pymatgen.core.periodic_table import Element
class IonTest(unittest.TestCase):
def setUp(self):
self.comp = []
self.comp.append(Ion.from_formula("Li+"))
self.comp.append(Ion.from_formula("MnO4-"))
self.comp.append(Ion.from_formula("Mn++"))
self.comp.append(Ion.from_formula("PO3-2"))
self.comp.append(Ion.from_formula("Fe(CN)6-3"))
self.comp.append(Ion.from_formula("Fe(CN)6----"))
self.comp.append(Ion.from_formula("Fe2((PO4)3(CO3)5)2-3"))
self.comp.append(Ion.from_formula("Ca[2+]"))
self.comp.append(Ion.from_formula("NaOH(aq)"))
def test_init_(self):
c = Composition({"Fe": 4, "O": 16, "P": 4})
charge = 4
self.assertEqual("Fe4 P4 O16 +4", Ion(c, charge).formula)
f = {1: 1, 8: 1}
charge = -1
self.assertEqual("H1 O1 -1", Ion(Composition(f), charge).formula)
self.assertEqual("S2 O3 -2", Ion(Composition(S=2, O=3), -2).formula)
def test_charge_from_formula(self):
self.assertEqual(Ion.from_formula("Li+").charge, 1)
self.assertEqual(Ion.from_formula("Li[+]").charge, 1)
self.assertEqual(Ion.from_formula("Ca[2+]").charge, 2)
self.assertEqual(Ion.from_formula("Ca[+2]").charge, 2)
self.assertEqual(Ion.from_formula("Ca++").charge, 2)
self.assertEqual(Ion.from_formula("Ca[++]").charge, 2)
self.assertEqual(Ion.from_formula("Ca2+").charge, 1)
self.assertEqual(Ion.from_formula("Cl-").charge, -1)
self.assertEqual(Ion.from_formula("Cl[-]").charge, -1)
self.assertEqual(Ion.from_formula("SO4[-2]").charge, -2)
self.assertEqual(Ion.from_formula("SO4-2").charge, -2)
self.assertEqual(Ion.from_formula("SO42-").charge, -1)
self.assertEqual(Ion.from_formula("SO4--").charge, -2)
self.assertEqual(Ion.from_formula("SO4[--]").charge, -2)
self.assertEqual(Ion.from_formula("Na[+-+]").charge, 1)
def test_special_formulas(self):
special_formulas = [
("Cl-", "Cl[-1]"),
("H+", "H[+1]"),
("F-", "F[-1]"),
("H4O4", "H2O2(aq)"),
("OH-", "OH[-1]"),
("CH3COO-", "CH3COO[-1]"),
("CH3COOH", "CH3COOH(aq)"),
("CH3OH", "CH3OH(aq)"),
("H4CO", "CH3OH(aq)"),
("C2H6O", "C2H5OH(aq)"),
("C3H8O", "C3H7OH(aq)"),
("C4H10O", "C4H9OH(aq)"),
("Fe(OH)4+", "FeO2.2H2O[+1]"),
("Zr(OH)4", "ZrO2.2H2O(aq)"),
]
for tup in special_formulas:
self.assertEqual(Ion.from_formula(tup[0]).reduced_formula, tup[1])
self.assertEqual(Ion.from_formula("Fe(OH)4+").get_reduced_formula_and_factor(hydrates=False), ("Fe(OH)4", 1))
self.assertEqual(Ion.from_formula("Zr(OH)4").get_reduced_formula_and_factor(hydrates=False), ("Zr(OH)4", 1))
def test_formula(self):
correct_formulas = [
"Li1 +1",
"Mn1 O4 -1",
"Mn1 +2",
"P1 O3 -2",
"Fe1 C6 N6 -3",
"Fe1 C6 N6 -4",
"Fe2 P6 C10 O54 -3",
"Ca1 +2",
"Na1 H1 O1 (aq)",
]
all_formulas = [c.formula for c in self.comp]
self.assertEqual(all_formulas, correct_formulas)
self.assertRaises(ValueError, Ion.from_formula, "(co2)(po4)2")
def test_mixed_valence(self):
comp = Ion(Composition({"Fe2+": 2, "Fe3+": 4, "Li+": 8}))
self.assertEqual(comp.reduced_formula, "Li4Fe3(aq)")
self.assertEqual(comp.alphabetical_formula, "Fe6 Li8 (aq)")
self.assertEqual(comp.formula, "Li8 Fe6 (aq)")
def test_alphabetical_formula(self):
correct_formulas = [
"Li1 +1",
"Mn1 O4 -1",
"Mn1 +2",
"O3 P1 -2",
"C6 Fe1 N6 -3",
"C6 Fe1 N6 -4",
"C10 Fe2 O54 P6 -3",
"Ca1 +2",
"H1 Na1 O1 (aq)",
]
all_formulas = [c.alphabetical_formula for c in self.comp]
self.assertEqual(all_formulas, correct_formulas)
def test_num_atoms(self):
correct_num_atoms = [1, 5, 1, 4, 13, 13, 72, 1, 3]
all_natoms = [c.num_atoms for c in self.comp]
self.assertEqual(all_natoms, correct_num_atoms)
def test_anonymized_formula(self):
expected_formulas = [
"A+1",
"AB4-1",
"A+2",
"AB3-2",
"AB6C6-3",
"AB6C6-4",
"AB3C5D27-3",
"A+2",
"ABC(aq)",
]
for i in range(len(self.comp)):
self.assertEqual(self.comp[i].anonymized_formula, expected_formulas[i])
def test_from_dict(self):
sym_dict = {"P": 1, "O": 4, "charge": -2}
self.assertEqual(
Ion.from_dict(sym_dict).reduced_formula,
"PO4[-2]",
"Creation form sym_amount dictionary failed!",
)
def test_as_dict(self):
c = Ion.from_dict({"Mn": 1, "O": 4, "charge": -1})
d = c.as_dict()
correct_dict = {"Mn": 1.0, "O": 4.0, "charge": -1.0}
self.assertEqual(d, correct_dict)
self.assertEqual(d["charge"], correct_dict["charge"])
correct_dict = {"Mn": 1.0, "O": 4.0, "charge": -1}
d = c.to_reduced_dict
self.assertEqual(d, correct_dict)
self.assertEqual(d["charge"], correct_dict["charge"])
def test_equals(self):
random_z = random.randint(1, 92)
fixed_el = Element.from_Z(random_z)
other_z = random.randint(1, 92)
while other_z == random_z:
other_z = random.randint(1, 92)
comp1 = Ion(Composition({fixed_el: 1, Element.from_Z(other_z): 0}), 1)
other_z = random.randint(1, 92)
while other_z == random_z:
other_z = random.randint(1, 92)
comp2 = Ion(Composition({fixed_el: 1, Element.from_Z(other_z): 0}), 1)
self.assertEqual(
comp1,
comp2,
"Composition equality test failed. " + f"{comp1.formula} should be equal to {comp2.formula}",
)
self.assertEqual(comp1.__hash__(), comp2.__hash__(), "Hashcode equality test failed!")
def test_equality(self):
self.assertTrue(self.comp[0] == (self.comp[0]))
self.assertFalse(self.comp[0] == (self.comp[1]))
self.assertFalse(self.comp[0] != (self.comp[0]))
self.assertTrue(self.comp[0] != (self.comp[1]))
def test_mul(self):
self.assertEqual(
(self.comp[1] * 4).formula,
"Mn4 O16 -4",
"Incorrect composition after addition!",
)
def test_len(self):
self.assertEqual(len(self.comp[1]), 2, "Lengths are not equal!")
def test_to_latex_string(self):
correct_latex = [
"Li$^{+1}$",
"MnO$_{4}$$^{-1}$",
"Mn$^{+2}$",
"PO$_{3}$$^{-2}$",
"Fe(CN)$_{6}$$^{-3}$",
"Fe(CN)$_{6}$$^{-4}$",
"FeP$_{3}$C$_{5}$O$_{27}$$^{-3}$",
"Ca$^{+2}$",
"NaOH",
]
all_latex = [c.to_latex_string() for c in self.comp]
self.assertEqual(all_latex, correct_latex)
if __name__ == "__main__":
unittest.main()
|
|
"""Various tests that determine whether updating capabilities for
multiple ActorSystems in a Convention are working correctly. These
tests run somewhat slowly because they must allow time for
coordination of effects an hysteresis of same between the multiple
systems (which should not be an issue under normal operations).
"""
import pytest
from pytest import raises
from thespian.test import *
import time
from thespian.actors import *
from datetime import timedelta
from thespian.system.timing import timePeriodSeconds
MAX_ASK_WAIT_PERIOD = timedelta(seconds=7)
UPDATE_WAIT_PERIOD = timedelta(milliseconds=300)
EXIT_WAIT_PERIOD = timedelta(milliseconds=500)
update_wait = lambda: time.sleep(timePeriodSeconds(UPDATE_WAIT_PERIOD))
exit_wait = lambda: time.sleep(timePeriodSeconds(EXIT_WAIT_PERIOD))
colors = ['Red', 'Blue', 'Green', 'Yellow']
class SetCap(object):
def __init__(self, capName, capValue):
self.capName = capName
self.capValue = capValue
class GetCaps(object):
def __init__(self):
self.caps = None
self.reqs = None
class ColorActorBase(Actor):
"""This actor has a particular color (identified by self.color), and
requires that color to be a capability of the ActorSystem it runs in.
If given a string message, returns it with "Got: " prefixed to
the string.
If given a tuple message, the tuple should be a series of
colors (strings), ending with a text message. It will forward
the tuple to the sub-actor specified by the first color in the
tuple (removing that color from the tuple); the last sub-actor
to receive the message will send it back to the original sender
(which was appended to the tuple by the first recipient).
"""
def __init__(self):
self._subs = {}
def receiveMessage(self, msg, sender):
if type(msg) == type("hi"):
self.send(sender, "Got: " + msg)
elif isinstance(msg, SetCap):
self.updateCapability(msg.capName, msg.capValue)
self.send(sender, 'ok')
elif type(msg) == type((1,2)):
if type(msg[-1]) == type(""):
msg = tuple(list(msg) + [sender])
if len(msg) > 2:
fwdTo = msg[0]
fwdMsg = tuple(list(msg)[1:])
if fwdTo not in self._subs:
self._subs[fwdTo] = self.createActor(fwdTo)
self.send(self._subs[fwdTo], fwdMsg)
else:
self.send(msg[1], msg[0])
elif isinstance(msg, ChildActorExited):
for each in self._subs:
if self._subs[each] == msg.childAddress:
del self._subs[each]
break
elif isinstance(msg, GetCaps):
msg.caps = getattr(self, 'init_caps', '<no caps avail>')
msg.reqs = getattr(self, 'init_reqs', '<no reqs avail>')
self.send(sender, msg)
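# Worked example of the tuple protocol described in the docstring above:
# if a RedActor receives (GreenActor, BlueActor, 'hi'), it appends the
# original sender, forwards (BlueActor, 'hi', sender) to a GreenActor
# child, which forwards ('hi', sender) to a BlueActor child; that final
# actor sends 'hi' straight back to the original sender.  The helper
# below is an illustrative sketch only and is not used by the tests.
def _example_color_chain(asys, top_actor_addr):
    # 'asys' is assumed to be an ActorSystem-like object supporting ask().
    return asys.ask(top_actor_addr, (GreenActor, BlueActor, 'hi'),
                    MAX_ASK_WAIT_PERIOD)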
class RedActor(ColorActorBase):
@staticmethod
def actorSystemCapabilityCheck(capabilities, actorRequirements):
return capabilities.get('Red', False)
def __init__(self, capabilities):
self.init_caps = capabilities
super(RedActor, self).__init__()
class GreenActor(ColorActorBase):
@staticmethod
def actorSystemCapabilityCheck(capabilities, actorRequirements):
return capabilities.get('Green', False)
def __init__(self, requirements):
self.init_reqs = requirements
super(GreenActor, self).__init__()
class BlueActor(ColorActorBase):
@staticmethod
def actorSystemCapabilityCheck(capabilities, actorRequirements):
return capabilities.get('Blue', False)
def __init__(self, requirements, capabilities):
self.init_reqs = requirements
self.init_caps = capabilities
super(BlueActor, self).__init__()
class OrangeActor(ColorActorBase):
# This actor has no actorSystemCapabilityCheck, but it can still
# see the capabilities and requirements.
def __init__(self, capabilities, requirements):
self.init_reqs = requirements
self.init_caps = capabilities
super(OrangeActor, self).__init__()
class PurpleActor(ColorActorBase):
# simple, no requirements, just anywhere
pass
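# Summary of the contract exercised below: a class's static
# actorSystemCapabilityCheck(capabilities, actorRequirements) must return
# True for a system to host that actor (Red/Green/Blue above); classes
# without the check (Orange, Purple) can be placed on any system.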
@pytest.fixture
def asys_trio(request, asys):
asys2 = similar_asys(asys, in_convention=True, start_wait=False)
asys3 = similar_asys(asys, in_convention=True)
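# The finalizer below shuts down both extra systems; the '==' simply
# forces both shutdown() calls within a single lambda expression.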
request.addfinalizer(lambda asys2=asys2, asys3=asys3:
asys2.shutdown() == asys3.shutdown())
return (asys, asys2, asys3)
class TestFuncSingleSystemCapabilityUpdates(object):
def test00_systemUpdatable(self, asys):
asys.updateCapability('Colors', ['Red', 'Blue', 'Green'])
asys.updateCapability('Here', True)
asys.updateCapability('Here')
def test01_actorUpdatable(self, asys):
orange = asys.createActor(OrangeActor)
assert 'ok' == asys.ask(orange, SetCap('Blue', True), 1)
class TestFuncCapabilityUpdates(object):
def test00_systemsRunnable(self, asys_trio):
asys1, asys2, asys3 = asys_trio
actor_system_unsupported(asys1, "simpleSystemBase", "multiprocQueueBase")
def test01_defaultSystemsDoNotSupportColorActors(self, asys_trio):
asys1, asys2, asys3 = asys_trio
actor_system_unsupported(asys1,
"simpleSystemBase",
"multiprocQueueBase")
raises(NoCompatibleSystemForActor, asys1.createActor, RedActor)
raises(NoCompatibleSystemForActor, asys1.createActor, BlueActor)
raises(NoCompatibleSystemForActor, asys1.createActor, GreenActor)
def test02_addColorCapabilitiesAllowsColorActors(self, asys_trio):
asys1, asys2, asys3 = asys_trio
actor_system_unsupported(asys1,
"simpleSystemBase",
"multiprocQueueBase")
# Setup Systems
asys1.updateCapability('Red', True)
asys2.updateCapability('Green', True)
asys3.updateCapability('Blue', True)
update_wait() # Allow for propagation (with hysteresis)
update_wait()
update_wait()
# Create one actor in each system
red = asys1.createActor(RedActor, targetActorRequirements={'hue': 'red'})
green = asys1.createActor(GreenActor)
blue = asys1.createActor(BlueActor, targetActorRequirements={'hue': 'blue'})
# Verify got valid ActorAddresses
assert red is not None
assert green is not None
assert blue is not None
assert isinstance(red, ActorAddress)
assert isinstance(green, ActorAddress)
assert isinstance(blue, ActorAddress)
# Verify actors are responsive
assert "Got: hello" == asys1.ask(red, 'hello', 1)
assert "Got: howdy" == asys1.ask(green, 'howdy', 1)
assert "Got: greetings" == asys1.ask(blue, 'greetings', 1)
# Check the actors knowledge of capabilities and requirements
gc = asys1.ask(red, GetCaps(), 1)
assert gc.caps['Red']
assert not gc.caps.get('Green', False)
assert not gc.caps.get('Blue', False)
assert 'Thespian Version' in gc.caps
assert 'Thespian Generation' in gc.caps
assert gc.reqs == '<no reqs avail>'
gc = asys1.ask(green, GetCaps(), 1)
assert gc.caps == '<no caps avail>'
assert gc.reqs is None
gc = asys1.ask(blue, GetCaps(), 1)
assert gc.caps['Blue']
assert not gc.caps.get('Green', False)
assert not gc.caps.get('Red', False)
assert 'Thespian Version' in gc.caps
assert 'Thespian Generation' in gc.caps
assert gc.reqs['hue'] == 'blue'
# Tell actors to exit
asys1.tell(red, ActorExitRequest())
asys1.tell(green, ActorExitRequest())
asys1.tell(blue, ActorExitRequest())
exit_wait()
def test02_1_addColorCapabilitiesAllowsColorActorsAndSubActors(self, asys_trio):
asys1, asys2, asys3 = asys_trio
actor_system_unsupported(asys1,
"simpleSystemBase",
"multiprocQueueBase")
# Setup Systems
asys1.updateCapability('Red', True)
asys2.updateCapability('Green', True)
asys3.updateCapability('Blue', True)
update_wait() # Allow for propagation (with hysteresis)
update_wait()
update_wait()
# Create one actor in each system
red = asys1.createActor(RedActor)
green = asys1.createActor(GreenActor)
blue = asys1.createActor(BlueActor)
orange = asys1.createActor(OrangeActor)
purple = asys1.createActor(PurpleActor)
# Verify got valid ActorAddresses
assert red is not None
assert green is not None
assert blue is not None
assert orange is not None
assert purple is not None
assert isinstance(red, ActorAddress)
assert isinstance(green, ActorAddress)
assert isinstance(blue, ActorAddress)
assert isinstance(orange, ActorAddress)
assert isinstance(purple, ActorAddress)
# Verify actors are responsive
assert "Got: hello" == asys1.ask(red, 'hello', 1)
assert "Got: howdy" == asys1.ask(green, 'howdy', 1)
assert "Got: greetings" == asys1.ask(blue, 'greetings', 1)
assert "Got: aloha" == asys1.ask(orange, 'aloha', 1)
assert "Got: aloha" == asys1.ask(purple, 'aloha', 1)
# Check the actors knowledge of capabilities and requirements
gc = asys1.ask(orange, GetCaps(), 1)
assert gc.caps['Red']
assert not gc.caps.get('Green', False)
assert not gc.caps.get('Blue', False)
assert 'Thespian Version' in gc.caps
assert 'Thespian Generation' in gc.caps
assert gc.reqs is None
gc = asys1.ask(purple, GetCaps(), 1)
assert gc.caps == '<no caps avail>'
assert gc.reqs == '<no reqs avail>'
# Create a chain of multiple colors from each top level
assert "path1" == asys1.ask(red, (BlueActor, GreenActor, RedActor,
GreenActor, BlueActor, RedActor,
"path1"),
MAX_ASK_WAIT_PERIOD)
assert "path2" == asys1.ask(green, (BlueActor, GreenActor, RedActor,
GreenActor, BlueActor, RedActor,
"path2"),
MAX_ASK_WAIT_PERIOD)
assert "path3" == asys1.ask(blue, (BlueActor, GreenActor, RedActor,
GreenActor, OrangeActor, BlueActor,
RedActor,
"path3"),
MAX_ASK_WAIT_PERIOD)
# Tell actors to exit
asys1.tell(red, ActorExitRequest())
asys1.tell(green, ActorExitRequest())
asys1.tell(blue, ActorExitRequest())
# Created a long path, so allow time for actor exits to
# propagate
exit_wait()
exit_wait()
exit_wait()
def test03_addMultipleColorCapabilitiesToOneActorSystemAllowsColorActors(self, asys_trio):
asys1, asys2, asys3 = asys_trio
actor_system_unsupported(asys1, "simpleSystemBase", "multiprocQueueBase")
# Setup systems
asys1.updateCapability('Red', True)
asys2.updateCapability('Green', True)
asys2.updateCapability('Blue', True)
update_wait() # Allow for propagation (with hysteresis)
update_wait()
update_wait()
# Create Actors (two in system Two)
red = asys1.createActor(RedActor)
green = asys1.createActor(GreenActor)
blue = asys1.createActor(BlueActor)
# Verify got valid ActorAddresses
assert red is not None
assert green is not None
assert blue is not None
assert isinstance(red, ActorAddress)
assert isinstance(green, ActorAddress)
assert isinstance(blue, ActorAddress)
# Verify actors are responsive
assert "Got: hello" == asys1.ask(red, 'hello', 1)
assert "Got: howdy" == asys1.ask(green, 'howdy', 1)
assert "Got: greetings" == asys1.ask(blue, 'greetings', 1)
# Tell actors to exit
asys1.tell(red, ActorExitRequest())
asys1.tell(green, ActorExitRequest())
asys1.tell(blue, ActorExitRequest())
exit_wait()
def test04_addMultipleColorCapabilitiesToLeaderActorSystemAllowsColorActors(self, asys_trio):
asys1, asys2, asys3 = asys_trio
actor_system_unsupported(asys1, "simpleSystemBase", "multiprocQueueBase")
# Setup systems
asys1.updateCapability('Red', True)
asys1.updateCapability('Green', True)
asys1.updateCapability('Blue', True)
update_wait() # Allow for propagation (with hysteresis)
update_wait()
update_wait()
# Create Actors (all in system One)
red = asys1.createActor(RedActor)
green = asys1.createActor('thespian.test.test_updateSystemCapabilities.GreenActor')
blue = asys1.createActor(BlueActor)
# Verify got valid ActorAddresses
assert red is not None
assert green is not None
assert blue is not None
assert isinstance(red, ActorAddress)
assert isinstance(green, ActorAddress)
assert isinstance(blue, ActorAddress)
# Verify actors are responsive
assert "Got: hello" == asys1.ask(red, 'hello', 1)
assert "Got: howdy" == asys1.ask(green, 'howdy', 1)
assert "Got: greetings" == asys1.ask(blue, 'greetings', 1)
# Tell actors to exit
asys1.tell(red, ActorExitRequest())
asys1.tell(green, ActorExitRequest())
asys1.tell(blue, ActorExitRequest())
exit_wait()
def test04_1_actorAddCapabilitiesEnablesOtherActors(self, asys_trio):
asys1, asys2, asys3 = asys_trio
actor_system_unsupported(asys1, "simpleSystemBase", "multiprocQueueBase")
# Setup system (only one needed, because an Actor can only
# modify its own system)
asys1.updateCapability('Red', True)
# Create Actors (all in system One)
red = asys1.createActor(RedActor)
raises(NoCompatibleSystemForActor, asys1.createActor, BlueActor)
raises(NoCompatibleSystemForActor, asys1.createActor, GreenActor)
orange = asys1.createActor(OrangeActor)
# Verify actors are responsive
assert "Got: Hello" == asys1.ask(red, 'Hello', 1)
assert "Got: Aloha" == asys1.ask(orange, 'Aloha', 1)
# Now have Red add a couple of capabilities
assert 'ok' == asys1.ask(red, SetCap('Green', True), 1)
assert 'ok' == asys1.ask(red, SetCap('Blue', True), 1)
time.sleep(0.1) # allow actor to process these messages
# And create some Actors needing those capabilities
green = asys1.createActor(GreenActor)
blue = asys1.createActor(BlueActor)
assert green is not None
assert blue is not None
assert isinstance(red, ActorAddress)
assert isinstance(green, ActorAddress)
assert isinstance(blue, ActorAddress)
assert "Got: hello" == asys1.ask(red, 'hello', 1)
assert "Got: howdy" == asys1.ask(green, 'howdy', 1)
assert "Got: greetings" == asys1.ask(blue, 'greetings', 1)
assert "Got: Aloha" == asys1.ask(orange, 'Aloha', 1)
# Tell actors to exit
asys1.tell(red, ActorExitRequest())
asys1.tell(green, ActorExitRequest())
asys1.tell(blue, ActorExitRequest())
exit_wait()
def test05_removingColorCapabilitiesKillsExistingColorActors(self, asys_trio):
asys1, asys2, asys3 = asys_trio
actor_system_unsupported(asys1, "simpleSystemBase", "multiprocQueueBase")
# Setup systems
asys1.updateCapability('Red', True)
asys2.updateCapability('Green', True)
asys3.updateCapability('Blue', True)
update_wait() # Allow for propagation (with hysteresis)
update_wait()
update_wait()
# Create Actors
red = asys1.createActor(RedActor)
green = asys1.createActor(GreenActor)
blue = asys1.createActor(BlueActor)
orange = asys1.createActor(OrangeActor)
# Verify got valid ActorAddresses
assert red is not None
assert green is not None
assert blue is not None
assert orange is not None
assert isinstance(red, ActorAddress)
assert isinstance(green, ActorAddress)
assert isinstance(blue, ActorAddress)
assert isinstance(orange, ActorAddress)
# Verify actors are responsive
assert "Got: hello" == asys1.ask(red, 'hello', 1)
assert "Got: howdy" == asys1.ask(green, 'howdy', 1)
assert "Got: greetings" == asys1.ask(blue, 'greetings', 1)
assert "Got: aloha" == asys1.ask(orange, 'aloha', 1)
# Remove color capabilities from ActorSystems
asys1.updateCapability('Red', None)
asys2.updateCapability('Green', None)
asys3.updateCapability('Blue', None)
update_wait() # processing time allowance
update_wait()
update_wait()
# Verify all Actors are no longer present.
assert asys1.ask(red, '1', 1) is None
assert asys1.ask(green, '2', 1) is None
assert asys1.ask(blue, '3', 1) is None
assert "Got: aloha" == asys1.ask(orange, 'aloha', 1)
exit_wait()
def test05_1_removingColorCapabilitiesViaActorKillsExistingColorActors(self, asys_trio):
asys1, asys2, asys3 = asys_trio
actor_system_unsupported(asys1, "simpleSystemBase", "multiprocQueueBase")
# Setup systems
asys1.updateCapability('Red', True)
asys2.updateCapability('Green', True)
asys3.updateCapability('Blue', True)
update_wait() # Allow for propagation (with hysteresis)
update_wait()
update_wait()
# Create Actors
red = asys1.createActor(RedActor)
green = asys1.createActor(GreenActor)
blue = asys1.createActor(BlueActor)
orange = asys1.createActor(OrangeActor)
# Verify got valid ActorAddresses
assert red is not None
assert green is not None
assert blue is not None
assert orange is not None
assert isinstance(red, ActorAddress)
assert isinstance(green, ActorAddress)
assert isinstance(blue, ActorAddress)
assert isinstance(orange, ActorAddress)
# Verify actors are responsive
assert "Got: hello" == asys1.ask(red, 'hello', 1)
assert "Got: howdy" == asys1.ask(green, 'howdy', 1)
assert "Got: greetings" == asys1.ask(blue, 'greetings', 1)
assert "Got: aloha" == asys1.ask(orange, 'aloha', 1)
# Remove color capabilities from ActorSystems
assert 'ok' == asys1.ask(red, SetCap('Red', False), 1)
assert 'ok' == asys1.ask(blue, SetCap('Blue', False), 1)
update_wait() # allow actor to process these messages
update_wait()
# Verify affected Actors are no longer present.
assert asys1.ask(red, '1', 1) is None
assert "Got: Howdy" == asys1.ask(green, 'Howdy', 1)
assert asys1.ask(blue, '3', 1) is None
assert "Got: aloha" == asys1.ask(orange, 'aloha', 1)
# Tell actors to exit
asys1.tell(green, ActorExitRequest())
asys1.tell(orange, ActorExitRequest())
exit_wait()
def test06_removingColorCapabilitiesPreventsNewColorActors(self, asys_trio):
asys1, asys2, asys3 = asys_trio
actor_system_unsupported(asys1, "simpleSystemBase", "multiprocQueueBase")
max_wait = 0.3
# Setup systems
asys1.updateCapability('Red', True)
asys2.updateCapability('Green', True)
asys3.updateCapability('Blue', True)
update_wait() # Allow for propagation (with hysteresis)
update_wait()
update_wait()
# Create Actors
red = asys1.createActor(RedActor)
green = asys1.createActor(GreenActor)
blue = asys1.createActor(BlueActor)
# Verify got valid ActorAddresses
assert red is not None
assert green is not None
assert blue is not None
assert isinstance(red, ActorAddress)
assert isinstance(green, ActorAddress)
assert isinstance(blue, ActorAddress)
# Verify actors are responsive
assert "Got: hello" == asys1.ask(red, 'hello', MAX_ASK_WAIT_PERIOD)
assert "Got: howdy" == asys1.ask(green, 'howdy', MAX_ASK_WAIT_PERIOD)
assert "Got: greetings" == asys1.ask(blue, 'greetings', MAX_ASK_WAIT_PERIOD)
# Remove one Capability and verify that all Actors created via that ActorSystem are removed
asys3.updateCapability('Blue', None)
update_wait()
assert asys1.ask(blue, 'yono', max_wait) is None
assert "Got: hellono" == asys1.ask(red, 'hellono', MAX_ASK_WAIT_PERIOD)
assert "Got: hino" == asys1.ask(green, 'hino', MAX_ASK_WAIT_PERIOD)
asys1.updateCapability('Red', None)
update_wait() # wait for capability update to propagate
assert asys1.ask(red, 'hello', max_wait) is None
assert 'Got: hi' == asys1.ask(green, 'hi', max_wait)
assert asys1.ask(blue, 'yo', max_wait) is None
# Verify no Actors requiring the removed capabilities can be
# created, but other kinds can still be created.
raises(NoCompatibleSystemForActor, asys1.createActor, RedActor)
red = None
green = asys1.createActor(GreenActor)
raises(NoCompatibleSystemForActor, asys1.createActor, BlueActor)
# Add back the Blue capability and verify the Actor can now be created
asys3.updateCapability('Blue', True)
update_wait()
blue = asys1.createActor(BlueActor)
assert red is None
assert green is not None
assert blue is not None
assert isinstance(green, ActorAddress)
assert isinstance(blue, ActorAddress)
assert "Got: howdy howdy" == asys1.ask(green, 'howdy howdy', MAX_ASK_WAIT_PERIOD)
assert "Got: greetings all" == asys1.ask(blue, 'greetings all',
MAX_ASK_WAIT_PERIOD)
assert asys1.ask(blue, (RedActor, 'hey, red'), max_wait) is None
assert "hey, blue" == asys1.ask(green, (BlueActor, 'hey, blue'),
MAX_ASK_WAIT_PERIOD*10)
assert "hey, green" == asys1.ask(blue, (GreenActor, 'hey, green'),
MAX_ASK_WAIT_PERIOD*10)
# Remove remaining capabilities
asys2.updateCapability('Green', None)
assert 'ok' == asys1.ask(blue, SetCap('Blue', None), 1)
update_wait()
# No new actors can be created for any color
raises(NoCompatibleSystemForActor, asys1.createActor, RedActor)
raises(NoCompatibleSystemForActor, asys1.createActor, BlueActor)
raises(NoCompatibleSystemForActor, asys1.createActor, GreenActor)
def test07_removingNonExistentCapabilitiesHasNoEffect(self, asys_trio):
asys1, asys2, asys3 = asys_trio
actor_system_unsupported(asys1, "simpleSystemBase", "multiprocQueueBase")
# Setup systems
asys1.updateCapability('Red', True)
asys2.updateCapability('Green', True)
asys3.updateCapability('Blue', True)
update_wait() # Allow for propagation (with hysteresis)
update_wait()
update_wait()
# Create Actors
red = asys1.createActor(RedActor)
green = asys1.createActor(GreenActor)
blue = asys1.createActor(BlueActor)
# Verify got valid ActorAddresses
assert red is not None
assert green is not None
assert blue is not None
assert isinstance(red, ActorAddress)
assert isinstance(green, ActorAddress)
assert isinstance(blue, ActorAddress)
# Verify actors are responsive
assert "Got: hello" == asys1.ask(red, 'hello', MAX_ASK_WAIT_PERIOD)
assert "Got: howdy" == asys1.ask(green, 'howdy', MAX_ASK_WAIT_PERIOD)
assert "Got: greetings" == asys1.ask(blue, 'greetings', MAX_ASK_WAIT_PERIOD)
assert 'long path' == asys1.ask(blue, (RedActor, GreenActor, RedActor,
BlueActor, GreenActor,
'long path'),
MAX_ASK_WAIT_PERIOD)
# Verify sub-actors are responsive
assert 'bluered' == asys1.ask(blue, (RedActor, 'bluered'), MAX_ASK_WAIT_PERIOD)
assert "greenblue" == asys1.ask(green, (BlueActor, 'greenblue'),
MAX_ASK_WAIT_PERIOD)
assert "bluegreen" == asys1.ask(blue, (GreenActor, 'bluegreen'),
MAX_ASK_WAIT_PERIOD)
# Remove non-color capabilities from ActorSystems
asys1.updateCapability('Frog', None)
update_wait()
assert 'ok' == asys1.ask(blue, SetCap('Bark', None), MAX_ASK_WAIT_PERIOD)
asys1.updateCapability('Cow', None)
update_wait()
# Verify actors are still responsive
assert "Got: hello" == asys1.ask(red, 'hello', MAX_ASK_WAIT_PERIOD)
assert "Got: howdy" == asys1.ask(green, 'howdy', MAX_ASK_WAIT_PERIOD)
assert "Got: greetings" == asys1.ask(blue, 'greetings', MAX_ASK_WAIT_PERIOD)
# Verify sub-actors are still responsive
assert 'hey, red' == asys1.ask(blue, (RedActor, 'hey, red'), MAX_ASK_WAIT_PERIOD)
assert "howdy howdy" == asys1.ask(green, (BlueActor, 'howdy howdy'),
MAX_ASK_WAIT_PERIOD)
assert "greetings all" == asys1.ask(red, (GreenActor, 'greetings all'),
MAX_ASK_WAIT_PERIOD)
# Verify new sub-actors can be created
assert 'long path' == asys1.ask(blue, (RedActor, GreenActor, RedActor,
BlueActor, GreenActor,
'long path'),
MAX_ASK_WAIT_PERIOD)
# Tell actors to exit
asys1.tell(red, ActorExitRequest())
asys1.tell(green, ActorExitRequest())
asys1.tell(blue, ActorExitRequest())
# Created a long path, so allow time for actor exits to
# propagate
exit_wait()
exit_wait()
exit_wait()
def test08_settingCapabilityToSameValueHasNoEffect(self, asys_trio):
asys1, asys2, asys3 = asys_trio
actor_system_unsupported(asys1, "simpleSystemBase", "multiprocQueueBase")
max_wait = 0.9
# Setup systems
asys1.updateCapability('Red', True)
asys2.updateCapability('Green', True)
asys3.updateCapability('Blue', True)
update_wait() # Allow for propagation (with hysteresis)
update_wait()
update_wait()
# Create Actors
red = asys1.createActor(RedActor)
green = asys1.createActor(GreenActor)
blue = asys1.createActor(BlueActor)
# Verify got valid ActorAddresses
assert red is not None
assert green is not None
assert blue is not None
assert isinstance(red, ActorAddress)
assert isinstance(green, ActorAddress)
assert isinstance(blue, ActorAddress)
# Verify actors are responsive
assert "Got: hello" == asys1.ask(red, 'hello', 1)
assert "Got: howdy" == asys1.ask(green, 'howdy', 1)
assert "Got: greetings" == asys1.ask(blue, 'greetings', 1)
# Verify sub-actors are responsive
assert 'hey, red' == asys1.ask(blue, (RedActor, 'hey, red'), MAX_ASK_WAIT_PERIOD)
assert "howdy howdy" == asys1.ask(green, (GreenActor, 'howdy howdy'),
MAX_ASK_WAIT_PERIOD)
assert "greetings all" == asys1.ask(red, (BlueActor, 'greetings all'),
MAX_ASK_WAIT_PERIOD)
# Re-set the same color capabilities to their existing values (no-op)
asys1.updateCapability('Red', True)
asys2.updateCapability('Green', True)
assert 'ok' == asys1.ask(blue, SetCap('Blue', True), MAX_ASK_WAIT_PERIOD)
# Verify actors are still responsive
assert "Got: hello" == asys1.ask(red, 'hello', MAX_ASK_WAIT_PERIOD)
assert "Got: howdy" == asys1.ask(green, 'howdy', MAX_ASK_WAIT_PERIOD)
assert "Got: greetings" == asys1.ask(blue, 'greetings', MAX_ASK_WAIT_PERIOD)
# Verify sub-actors are still responsive
assert 'hey, red' == asys1.ask(blue, (RedActor, 'hey, red'), MAX_ASK_WAIT_PERIOD)
assert "howdy howdy" == asys1.ask(green, (RedActor, 'howdy howdy'),
MAX_ASK_WAIT_PERIOD)
assert "greetings all" == asys1.ask(red, (BlueActor, 'greetings all'),
MAX_ASK_WAIT_PERIOD)
# Verify new sub-actors can be created
assert 'long path' == asys1.ask(blue, (RedActor, GreenActor, RedActor,
BlueActor, GreenActor,
'long path'),
MAX_ASK_WAIT_PERIOD)
# Tell actors to exit
asys1.tell(red, ActorExitRequest())
asys1.tell(green, ActorExitRequest())
asys1.tell(blue, ActorExitRequest())
# Created a long path, so allow time for actor exits to
# propagate
exit_wait()
exit_wait()
exit_wait()
def test09_removingCapabilityTwiceHasNoEffectTheSecondTime(self, asys_trio):
asys1, asys2, asys3 = asys_trio
actor_system_unsupported(asys1, "simpleSystemBase", "multiprocQueueBase")
max_wait = 0.4
# Setup systems
asys1.updateCapability('Red', True)
asys2.updateCapability('Green', True)
asys3.updateCapability('Blue', True)
update_wait() # Allow for propagation (with hysteresis)
update_wait()
update_wait()
# Create Actors
red = asys1.createActor(RedActor)
green = asys1.createActor('thespian.test.test_updateSystemCapabilities.GreenActor')
blue = asys1.createActor(BlueActor)
# Verify got valid ActorAddresses
assert red is not None
assert green is not None
assert blue is not None
assert isinstance(red, ActorAddress)
assert isinstance(green, ActorAddress)
assert isinstance(blue, ActorAddress)
# Verify actors are responsive
assert "Got: hello" == asys1.ask(red, 'hello', 1)
assert "Got: howdy" == asys1.ask(green, 'howdy', 1)
assert "Got: greetings" == asys1.ask(blue, 'greetings', 1)
# Verify sub-actors are responsive
assert 'hey, red' == asys1.ask(blue, (RedActor, 'hey, red'), MAX_ASK_WAIT_PERIOD)
assert "howdy howdy" == asys1.ask(green, (BlueActor, 'howdy howdy'),
MAX_ASK_WAIT_PERIOD)
# assert "greetings all" == asys1.ask(red, (BlueActor, 'greetings all'), max_wait)
# Remove color capabilities from two ActorSystems
asys2.updateCapability('Green')
asys3.updateCapability('Blue')
# Verify can no longer create associated Actors
# Note: removing Blue from system Three should have caused red's
# BlueActor child to exit. If it did, the next 'is None'
# assertion will pass.
assert asys1.ask(red, (BlueActor, 'hello'), 1) is None
assert asys1.ask(red, (GreenActor, 'greetings'), 1) is None
# Verify can still create Actors where attributes remain
assert 'go time' == asys1.ask(red, (RedActor, 'go time'), 1)
# Remove color capabilities from two ActorSystems AGAIN
asys2.updateCapability('Green')
asys3.updateCapability('Blue')
# Verify can no longer create associated Actors
assert asys1.ask(red, (BlueActor, 'hello'), 1) is None
assert asys1.ask(red, (GreenActor, 'greetings'), 1) is None
# Verify can still create Actors where attributes remain
assert 'go time' == asys1.ask(red, (RedActor, 'go time'), 1)
# Tell actors to exit
asys1.tell(red, ActorExitRequest())
asys1.tell(green, ActorExitRequest())
asys1.tell(blue, ActorExitRequest())
exit_wait()
# test: removing capability via None value is the same as no value
def test10_removingColorCapabilitiesOnOtherActorSystemsDoesNotAffectExistingColorActors(self, asys_trio):
asys1, asys2, asys3 = asys_trio
actor_system_unsupported(asys1,
"simpleSystemBase",
"multiprocQueueBase")
# Setup systems
asys1.updateCapability('Red', True)
asys2.updateCapability('Green', True)
asys3.updateCapability('Blue', True)
update_wait() # Allow for propagation (with hysteresis)
update_wait()
update_wait()
# Create Actors in those systems
red = asys1.createActor(RedActor)
green = asys1.createActor(GreenActor)
blue = asys1.createActor(BlueActor)
# Remove (non-existent) capabilities from other systems
asys3.updateCapability('Red', None)
asys1.updateCapability('Green', None)
asys2.updateCapability('Blue', None)
# Verify actors are still responsive
assert "Got: hello" == asys1.ask(red, 'hello', 1)
assert "Got: howdy" == asys1.ask(green, 'howdy', 1)
assert "Got: greetings" == asys1.ask(blue, 'greetings', 1)
# Tell actors to exit
asys1.tell(red, ActorExitRequest())
asys1.tell(green, ActorExitRequest())
asys1.tell(blue, ActorExitRequest())
exit_wait()
def _actorCount(self, asys1, startAddr):
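# Recursively count this actor and all of its descendants by querying
# each one for its Thespian status.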
s = asys1.ask(startAddr, Thespian_StatusReq(), 1)
assert isinstance(s, Thespian_ActorStatus)
return 1 + sum([self._actorCount(asys1, C) for C in s.childActors])
def test11_allSubActorsNotifiedOfCapabilityChanges(self, asys_trio):
asys1, asys2, asys3 = asys_trio
actor_system_unsupported(asys1,
"simpleSystemBase",
"multiprocQueueBase")
# Setup systems
asys1.updateCapability('Red', True)
asys1.updateCapability('Green', True)
asys1.updateCapability('Blue', True)
asys1.updateCapability('Orange', True)
update_wait()
# Create Actors in those systems
red = asys1.createActor(RedActor)
assert 'long path' == asys1.ask(red, (GreenActor, RedActor,
OrangeActor, BlueActor,
GreenActor,
'long path'),
MAX_ASK_WAIT_PERIOD)
assert 6 == self._actorCount(asys1, red)
# Now remove a capability needed by a deep sub-Actor and
# verify that sub-Actor (and its children) are gone.
asys1.updateCapability('Blue')
update_wait()
assert 4 == self._actorCount(asys1, red)
asys1.tell(red, ActorExitRequest())
exit_wait()
def test11_1_capabilityRemovalOnlyAffectsOneSystem(self, asys_trio):
asys1, asys2, asys3 = asys_trio
actor_system_unsupported(asys1,
"simpleSystemBase",
"multiprocQueueBase")
# Creates sub-actors in another system.  Removal of a
# capability in the current environment should not affect
# sub-actors running in another environment.
# Setup systems
asys2.updateCapability('Green', True)
asys2.updateCapability('Red', True)
asys2.updateCapability('Blue', True)
update_wait() # wait for hysteresis delay of multiple updates
update_wait()
update_wait()
update_wait()
update_wait()
# Create parent in system one with child in system two
parent = asys1.createActor(OrangeActor)
r = asys1.ask(parent, (RedActor, "red"), MAX_ASK_WAIT_PERIOD)
assert "red" == r
r = self._actorCount(asys1, parent)
assert 2 == r
# Add capability associated with child in primary system
asys1.updateCapability('Red', True)
update_wait() # allow capabilities to update
# Remove capability associated with child from primary system;
# this should not cause the child to exit because it is still
# in a valid system.
asys1.updateCapability('Red', None)
update_wait() # allow capabilities to update
assert 2 == self._actorCount(asys1, parent)
# Removal of the capability in the system hosting the child does cause the child to exit
asys2.updateCapability('Red', None)
update_wait() # allow capabilities to update
assert 1 == self._actorCount(asys1, parent)
asys1.tell(parent, ActorExitRequest())
exit_wait()
def test12_updateCapabilitiesAffectsActorDrivenCreateRequests(self, asys_trio):
asys1, asys2, asys3 = asys_trio
actor_system_unsupported(asys1,
"simpleSystemBase",
"multiprocQueueBase")
max_wait = 0.95
# Setup systems
asys1.updateCapability('Red', True)
asys3.updateCapability('Blue', True)
update_wait() # Allow for propagation (with hysteresis)
update_wait()
# Create Actors in those systems
red = asys1.createActor(RedActor)
blue = asys1.createActor(BlueActor)
raises(NoCompatibleSystemForActor, asys1.createActor, GreenActor)
# Verify a sub-actor cannot create another sub-actor that
# requires a capability that isn't present (fails to create a
# GreenActor).
assert asys1.ask(blue, (RedActor, GreenActor, RedActor, OrangeActor,
BlueActor, GreenActor,
'long path'),
max_wait) is None
# Now have Red add the missing Green capability
assert 'ok' == asys1.ask(red, SetCap('Green', True), 1)
update_wait() # allow capabilities to settle
# Verify that the added capability enables a sub-actor to create new Actors
assert 'long path' == asys1.ask(blue, (RedActor, GreenActor,
RedActor, OrangeActor,
BlueActor, GreenActor,
'long path'),
MAX_ASK_WAIT_PERIOD)
# Remove that capability again
assert 'ok' == asys1.ask(red, SetCap('Green', None), 1)
time.sleep(max_wait) # allow capabilities to settle
# Now verify that sub-actor cannot create Green actors again
assert asys1.ask(blue, (RedActor, GreenActor, RedActor,
BlueActor, GreenActor,
'long path'),
max_wait) is None
# Tell actors to exit
asys1.tell(red, ActorExitRequest())
asys1.tell(blue, ActorExitRequest())
exit_wait()
exit_wait()
def test13_removeOriginalCapabilitiesAffectsActorDrivenCreateRequests(self, asys_trio):
asys1, asys2, asys3 = asys_trio
actor_system_unsupported(asys1,
"simpleSystemBase",
"multiprocQueueBase")
max_wait = 0.5
# Setup systems
asys1.updateCapability('Red', True)
asys1.updateCapability('Blue', True)
asys1.updateCapability('Green', True) # same system
# Create Actors in those systems
red = asys1.createActor(RedActor)
blue = asys1.createActor(BlueActor)
assert 'long path' == asys1.ask(blue, (GreenActor, RedActor,
BlueActor, GreenActor,
'long path'),
MAX_ASK_WAIT_PERIOD)
# Remove an originally-existing capability
assert 'ok' == asys1.ask(red, SetCap('Green', None), 1)
update_wait() # allow capabilities to settle
# Now verify that sub-actor cannot create Green actors anymore
assert asys1.ask(blue, (GreenActor, RedActor, BlueActor, GreenActor,
'long path'),
max_wait) is None
# Tell actors to exit
asys1.tell(red, ActorExitRequest())
asys1.tell(blue, ActorExitRequest())
exit_wait()
exit_wait()
# test can create lots of various sub-actors, ensuring the capabilities are plumbed
# test creation of subactor failure, then add capability, then subactor can be created
# test creation of subactor, then removal of capability, then recreation elsewhere works (a hedged sketch of this follows below)
# n.b. no test for MultiprocQueue because it does not support conventions
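# A hedged sketch of the "recreation elsewhere" idea noted above (sub-actor
# recreated on a different system after its capability moves).  It reuses the
# fixtures and helpers assumed to exist in this module (asys_trio,
# actor_system_unsupported, update_wait, exit_wait, RedActor, GreenActor) and
# is illustrative rather than exhaustive.
def test14_subActorRecreatedElsewhereAfterCapabilityMoves(self, asys_trio):
    asys1, asys2, asys3 = asys_trio
    actor_system_unsupported(asys1, "simpleSystemBase", "multiprocQueueBase")
    # Red on system one, Green on system two
    asys1.updateCapability('Red', True)
    asys2.updateCapability('Green', True)
    update_wait()
    update_wait()
    red = asys1.createActor(RedActor)
    assert 'hi green' == asys1.ask(red, (GreenActor, 'hi green'), MAX_ASK_WAIT_PERIOD)
    # Move the Green capability from system two to system three; the old
    # Green sub-actor should exit and a new one should be created on the
    # system that now advertises the capability.
    asys2.updateCapability('Green')
    asys3.updateCapability('Green', True)
    update_wait()
    update_wait()
    assert 'hi again' == asys1.ask(red, (GreenActor, 'hi again'), MAX_ASK_WAIT_PERIOD)
    asys1.tell(red, ActorExitRequest())
    exit_wait()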
|
|
#from builtins import range
from collections import namedtuple
from datetime import datetime
import csv
import math
import time
import tensorflow.python.platform
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 128,
"""Batch size.""")
tf.app.flags.DEFINE_integer('num_batches', 64,
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('forward_only', False,
"""Only run the forward pass.""")
tf.app.flags.DEFINE_boolean('forward_backward_only', False,
"""Only run the forward-forward pass.""")
tf.app.flags.DEFINE_string('data_format', 'NCHW',
"""The data format for Convnet operations.
Can be either NHWC or NCHW.
""")
tf.app.flags.DEFINE_string('csv_file', '',
"""File to output timing information to in csv
format. If no file is passed in, the csv file will
not be created.
""")
parameters = []
conv_counter = 1
pool_counter = 1
affine_counter = 1
TimingEntry = namedtuple(
'TimingEntry', ['info_string', 'timestamp', 'num_batches', 'mean', 'sd'])
def _conv(inpOp, nIn, nOut, kH, kW, dH, dW, padType):
global conv_counter
global parameters
name = 'conv' + str(conv_counter)
conv_counter += 1
with tf.name_scope(name) as scope:
kernel = tf.Variable(tf.truncated_normal([kH, kW, nIn, nOut],
dtype=tf.float32,
stddev=1e-1), name='weights')
if FLAGS.data_format == 'NCHW':
strides = [1, 1, dH, dW]
else:
strides = [1, dH, dW, 1]
conv = tf.nn.conv2d(inpOp, kernel, strides, padding=padType,
data_format=FLAGS.data_format)
biases = tf.Variable(tf.constant(0.0, shape=[nOut], dtype=tf.float32),
trainable=True, name='biases')
bias = tf.reshape(tf.nn.bias_add(conv, biases,
data_format=FLAGS.data_format),
conv.get_shape())
conv1 = tf.nn.relu(bias, name=scope)
parameters += [kernel, biases]
return conv1
def _affine(inpOp, nIn, nOut):
global affine_counter
global parameters
name = 'affine' + str(affine_counter)
affine_counter += 1
with tf.name_scope(name) as scope:
kernel = tf.Variable(tf.truncated_normal([nIn, nOut],
dtype=tf.float32,
stddev=1e-1), name='weights')
biases = tf.Variable(tf.constant(0.0, shape=[nOut], dtype=tf.float32),
trainable=True, name='biases')
affine1 = tf.nn.relu_layer(inpOp, kernel, biases, name=name)
parameters += [kernel, biases]
return affine1
def _mpool(inpOp, kH, kW, dH, dW, padding):
global pool_counter
global parameters
name = 'pool' + str(pool_counter)
pool_counter += 1
if FLAGS.data_format == 'NCHW':
ksize = [1, 1, kH, kW]
strides = [1, 1, dH, dW]
else:
ksize = [1, kH, kW, 1]
strides = [1, dH, dW, 1]
return tf.nn.max_pool(inpOp,
ksize=ksize,
strides=strides,
padding=padding,
data_format=FLAGS.data_format,
name=name)
def _apool(inpOp, kH, kW, dH, dW, padding):
global pool_counter
global parameters
name = 'pool' + str(pool_counter)
pool_counter += 1
if FLAGS.data_format == 'NCHW':
ksize = [1, 1, kH, kW]
strides = [1, 1, dH, dW]
else:
ksize = [1, kH, kW, 1]
strides = [1, dH, dW, 1]
return tf.nn.avg_pool(inpOp,
ksize=ksize,
strides=strides,
padding=padding,
data_format=FLAGS.data_format,
name=name)
def _inception(inp, inSize, o1s, o2s1, o2s2, o3s1, o3s2, o4s1, o4s2):
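# GoogLeNet-style Inception block: a 1x1 branch, 1x1->3x3 and 1x1->5x5
# reduction branches, and a max-pool followed by a 1x1 projection, all
# concatenated along the channel dimension.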
conv1 = _conv(inp, inSize, o1s, 1, 1, 1, 1, 'SAME')
conv3_ = _conv(inp, inSize, o2s1, 1, 1, 1, 1, 'SAME')
conv3 = _conv(conv3_, o2s1, o2s2, 3, 3, 1, 1, 'SAME')
conv5_ = _conv(inp, inSize, o3s1, 1, 1, 1, 1, 'SAME')
conv5 = _conv(conv5_, o3s1, o3s2, 5, 5, 1, 1, 'SAME')
pool_ = _mpool(inp, o4s1, o4s1, 1, 1, 'SAME')
pool = _conv(pool_, inSize, o4s2, 1, 1, 1, 1, 'SAME')
if FLAGS.data_format == 'NCHW':
channel_dim = 1
else:
channel_dim = 3
incept = tf.concat([conv1, conv3, conv5, pool], channel_dim )
return incept
def loss(logits, labels):
batch_size = tf.size(labels)
labels = tf.expand_dims(labels, 1)
indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)
concated = tf.concat([indices, labels], 1 )
onehot_labels = tf.sparse_to_dense(
concated, tf.stack([batch_size, 1000]), 1.0, 0.0)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=onehot_labels, name='xentropy')
loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
return loss
def inference(images):
conv1 = _conv (images, 3, 64, 7, 7, 2, 2, 'SAME')
pool1 = _mpool(conv1, 3, 3, 2, 2, 'SAME')
conv2 = _conv (pool1, 64, 64, 1, 1, 1, 1, 'SAME')
conv3 = _conv (conv2, 64, 192, 3, 3, 1, 1, 'SAME')
pool3 = _mpool(conv3, 3, 3, 2, 2, 'SAME')
incept3a = _inception(pool3, 192, 64, 96, 128, 16, 32, 3, 32)
incept3b = _inception(incept3a, 256, 128, 128, 192, 32, 96, 3, 64)
pool4 = _mpool(incept3b, 3, 3, 2, 2, 'SAME')
incept4a = _inception(pool4, 480, 192, 96, 208, 16, 48, 3, 64)
incept4b = _inception(incept4a, 512, 160, 112, 224, 24, 64, 3, 64)
incept4c = _inception(incept4b, 512, 128, 128, 256, 24, 64, 3, 64)
incept4d = _inception(incept4c, 512, 112, 144, 288, 32, 64, 3, 64)
incept4e = _inception(incept4d, 528, 256, 160, 320, 32, 128, 3, 128)
pool5 = _mpool(incept4e, 3, 3, 2, 2, 'SAME')
incept5a = _inception(pool5, 832, 256, 160, 320, 32, 128, 3, 128)
incept5b = _inception(incept5a, 832, 384, 192, 384, 48, 128, 3, 128)
pool6 = _apool(incept5b, 7, 7, 1, 1, 'VALID')
resh1 = tf.reshape(pool6, [-1, 1024])
affn1 = _affine(resh1, 1024, 1000)
return affn1
def time_tensorflow_run(session, target, info_string):
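# The first num_steps_burn_in iterations are excluded from the statistics so
# that one-time startup costs (session warm-up, memory allocation) do not skew
# the reported mean and standard deviation.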
num_steps_burn_in = 10
total_duration = 0.0
total_duration_squared = 0.0
if not isinstance(target, list):
target = [target]
target_op = tf.group(*target)
for i in range(FLAGS.num_batches + num_steps_burn_in):
start_time = time.time()
_ = session.run(target_op)
duration = time.time() - start_time
if i >= num_steps_burn_in:  # accumulate exactly num_batches timed iterations
if not i % 10:
print ('%s: step %d, duration = %.3f' %
(datetime.now(), i - num_steps_burn_in, duration))
total_duration += duration
total_duration_squared += duration * duration
mn = total_duration / FLAGS.num_batches
vr = total_duration_squared / FLAGS.num_batches - mn * mn
sd = math.sqrt(vr)
print ('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
(datetime.now(), info_string, FLAGS.num_batches, mn, sd))
return TimingEntry(info_string, datetime.now(), FLAGS.num_batches, mn, sd)
def store_data_in_csv(timing_entries):
with open(FLAGS.csv_file, 'wb') as csvfile:
writer = csv.writer(csvfile)
for timing_entry in timing_entries:
writer.writerow(
[timing_entry.info_string, timing_entry.timestamp,
timing_entry.num_batches, timing_entry.mean, timing_entry.sd])
def run_benchmark():
global parameters
timing_entries = []
with tf.Graph().as_default():
# Generate some dummy images.
image_size = 224
if FLAGS.data_format == 'NCHW':
image_shape = [FLAGS.batch_size, 3, image_size, image_size]
else:
image_shape = [FLAGS.batch_size, image_size, image_size, 3]
images = tf.Variable(tf.random_normal(image_shape,
dtype=tf.float32,
stddev=1e-1))
labels = tf.Variable(tf.ones([FLAGS.batch_size],
dtype=tf.int32))
# Build a Graph that computes the logits predictions from the
# inference model.
last_layer = inference(images)
# Build an initialization operation.
init = tf.global_variables_initializer()
# Start running operations on the Graph.
sess = tf.Session('')
sess.run(init)
run_forward = True
run_forward_backward = True
if FLAGS.forward_only and FLAGS.forward_backward_only:
raise ValueError("Cannot specify --forward_only and "
"--forward_backward_only at the same time.")
if FLAGS.forward_only:
run_forward_backward = False
elif FLAGS.forward_backward_only:
run_forward = False
if run_forward:
# Run the forward benchmark.
timing_entries.append(time_tensorflow_run(sess, last_layer, "Forward"))
if run_forward_backward:
# Add a simple objective so we can calculate the backward pass.
objective = loss(last_layer, labels)
# Compute the gradient with respect to all the parameters.
grad = tf.gradients(objective, parameters)
# Run the backward benchmark.
timing_entries.append(time_tensorflow_run(sess, grad, "Forward-backward"))
if FLAGS.csv_file:
store_data_in_csv(timing_entries)
def main(_):
run_benchmark()
if __name__ == '__main__':
tf.app.run()
|
|
# -*- coding: utf-8 -*-
"""
Dynamic DynamoDB
Auto provisioning functionality for Amazon Web Service DynamoDB tables.
APACHE LICENSE 2.0
Copyright 2013-2014 Sebastian Dahlgren
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import sys
import time
import calendar
from datetime import timedelta, datetime
from dateutil.relativedelta import relativedelta
from boto.exception import JSONResponseError, BotoServerError
from dynamic_dynamodb.aws import dynamodb
from dynamic_dynamodb.core import gsi, table
from dynamic_dynamodb.daemon import Daemon
from dynamic_dynamodb.config_handler import get_global_option, get_table_option, get_configured_rotated_key_names, get_configured_time_series_key_names
from dynamic_dynamodb.log_handler import LOGGER as logger
CHECK_STATUS = {
'tables': {},
'gsis': {}
}
class DynamicDynamoDBDaemon(Daemon):
""" Daemon for Dynamic DynamoDB"""
def run(self):
""" Run the daemon
:type check_interval: int
:param check_interval: Delay in seconds between checks
"""
try:
while True:
execute()
except Exception as error:
logger.exception(error)
def main():
""" Main function called from dynamic-dynamodb """
try:
if get_global_option('daemon'):
daemon = DynamicDynamoDBDaemon(
'{0}/dynamic-dynamodb.{1}.pid'.format(
get_global_option('pid_file_dir'),
get_global_option('instance')))
if get_global_option('daemon') == 'start':
logger.debug('Starting daemon')
try:
daemon.start()
logger.info('Daemon started')
except IOError as error:
logger.error('Could not create pid file: {0}'.format(error))
logger.error('Daemon not started')
elif get_global_option('daemon') == 'stop':
logger.debug('Stopping daemon')
daemon.stop()
logger.info('Daemon stopped')
sys.exit(0)
elif get_global_option('daemon') == 'restart':
logger.debug('Restarting daemon')
daemon.restart()
logger.info('Daemon restarted')
elif get_global_option('daemon') in ['foreground', 'fg']:
logger.debug('Starting daemon in foreground')
daemon.run()
logger.info('Daemon started in foreground')
else:
print(
'Valid options for --daemon are start, '
'stop, restart, and foreground')
sys.exit(1)
else:
if get_global_option('run_once'):
execute()
else:
while True:
execute()
except Exception as error:
logger.exception(error)
def create_time_series_tables(rotate_suffix, rotate_window_start, rotate_scavenge, table_name, table_key, tables_and_gsis, next_table_names):
day_of_month = datetime.utcnow().day
sourcedate = datetime.utcnow()
first_day_next_month = datetime(sourcedate.year, sourcedate.month, 1) + relativedelta(months=1)
rotate_datetime = first_day_next_month - timedelta(seconds=rotate_window_start)
logger.info("rotate_datetime " + unicode(rotate_datetime))
next_check = datetime.utcnow() + timedelta(seconds=get_global_option( 'check_interval' ))
logger.info("next_check " + unicode(next_check))
cur_table_name = table_name + sourcedate.strftime( rotate_suffix )
logger.info('cur_table_name ' + cur_table_name)
next_table_name = table_name + first_day_next_month.strftime( rotate_suffix )
logger.info('next_table_name ' + next_table_name)
dynamodb.ensure_created( cur_table_name, table_name )
tables_and_gsis.add( ( cur_table_name, table_key ) )
# if the next check will happen after the rotation time, create the next time-series table now
if next_check > rotate_datetime:
dynamodb.ensure_created( next_table_name, table_name )
next_table_names.add( ( next_table_name, cur_table_name, table_key ) )
# delete old existing table name
delete_utc_datetime = sourcedate - relativedelta(months=rotate_scavenge)
logger.info('delete_utc_datetime ' + unicode(delete_utc_datetime))
existing_table_names = dynamodb.get_rotated_table_names( table_name )
for existing_table_name in existing_table_names:
existing_utc_datetime_str = existing_table_name[ len (table_name) : ]
if existing_utc_datetime_str.startswith("_"):
continue
try:
existing_utc_datetime = datetime.strptime( existing_utc_datetime_str, rotate_suffix )
logger.info(unicode(existing_utc_datetime))
if existing_utc_datetime < delete_utc_datetime:
logger.info('existing_table_name ' + existing_table_name)
dynamodb.ensure_deleted( existing_table_name )
except ValueError:
logger.warn( 'Could not parse date (with {0} format) from {1} for table {2}'.format(
rotate_suffix,
existing_utc_datetime_str,
existing_table_name ) )
def create_rotating_tables(rotate_suffix, rotate_interval, rotate_window_start, rotate_scavenge, table_name, table_key, tables_and_gsis, next_table_names):
time_delta = timedelta(seconds=rotate_interval)
time_delta_totalseconds = rotate_interval
epoch = datetime.utcfromtimestamp(0)
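# Align "now" to the start of its rotation bucket: compute the seconds elapsed
# since the epoch and subtract the remainder modulo rotate_interval.  The total
# seconds are computed by hand, presumably for compatibility with Python
# versions that lack timedelta.total_seconds().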
cur_timedelta = datetime.utcnow() - epoch
cur_timedelta_totalseconds = (cur_timedelta.microseconds + (cur_timedelta.seconds + cur_timedelta.days*24*3600) * 1e6) / 1e6
cur_utc_datetime = datetime.utcnow() - timedelta(seconds=(cur_timedelta_totalseconds%time_delta_totalseconds))
cur_table_name = table_name + cur_utc_datetime.strftime( rotate_suffix )
dynamodb.ensure_created( cur_table_name, table_name )
tables_and_gsis.add(
( cur_table_name, table_key ) )
next_utc_datetime = cur_utc_datetime + time_delta
if rotate_window_start != None:
rotate_datetime = next_utc_datetime - timedelta(seconds=rotate_window_start)
logger.info("rotate_datetime " + unicode(rotate_datetime))
next_check = datetime.utcnow() + timedelta(seconds=get_global_option( 'check_interval' ))
logger.info("next_check " + unicode(next_check))
if next_check > rotate_datetime:
next_utc_time_delta = cur_utc_datetime + time_delta
next_table_name = table_name + next_utc_time_delta.strftime( rotate_suffix )
dynamodb.ensure_created( next_table_name, table_name )
next_table_names.add( ( next_table_name, cur_table_name, table_key ) )
till_next_timedelta = next_utc_datetime - datetime.utcnow()
till_next_timedelta_totalseconds = (till_next_timedelta.microseconds + (till_next_timedelta.seconds + till_next_timedelta.days*24*3600) * 1e6) / 1e6
logger.info( 'next table delta {0} < {1}'.format( till_next_timedelta_totalseconds, get_global_option('check_interval') ) )
if till_next_timedelta_totalseconds < get_global_option( 'check_interval' ):
next_utc_time_delta = cur_utc_datetime + time_delta
next_table_name = table_name + next_utc_time_delta.strftime( rotate_suffix )
dynamodb.ensure_created( next_table_name, table_name )
next_table_names.add( ( next_table_name, cur_table_name, table_key ) )
prev_utc_datetime = cur_utc_datetime
prev_index = 1
while rotate_scavenge == -1 or prev_index < rotate_scavenge:
prev_utc_datetime = prev_utc_datetime - time_delta
prev_table_name = table_name + prev_utc_datetime.strftime( rotate_suffix )
if dynamodb.exists( prev_table_name ):
tables_and_gsis.add(
( prev_table_name, table_key )
)
elif rotate_scavenge == -1:
break
prev_index += 1
if rotate_scavenge > 0:
delete_utc_datetime = prev_utc_datetime - time_delta
delete_table_name = table_name + delete_utc_datetime.strftime( rotate_suffix )
dynamodb.ensure_deleted( delete_table_name )
existing_table_names = dynamodb.get_rotated_table_names( table_name )
for existing_table_name in existing_table_names:
existing_utc_datetime_str = existing_table_name[ len (table_name) : ]
try:
existing_utc_datetime = datetime.strptime( existing_utc_datetime_str, rotate_suffix )
if existing_utc_datetime < delete_utc_datetime:
dynamodb.ensure_deleted( existing_table_name )
except ValueError:
logger.warn( 'Could not parse date (with {0} format) from {1} for table {2}'.format(
rotate_suffix,
existing_utc_datetime_str,
existing_table_name ) )
def execute():
""" Ensure provisioning """
boto_server_error_retries = 3
# Ensure provisioning
tables_and_gsis = set( dynamodb.get_tables_and_gsis() )
rotated_key_names = get_configured_rotated_key_names()
time_series_key_names = get_configured_time_series_key_names()
next_table_names = set()
for table_name, table_key in sorted(tables_and_gsis):
if table_key in rotated_key_names:
rotate_suffix = get_table_option(table_key, 'rotate_suffix')
rotate_interval = get_table_option(table_key, 'rotate_interval')
rotate_window_start = get_table_option(table_key, 'rotate_window_start')
rotate_scavenge = get_table_option(table_key, 'rotate_scavenge')
create_rotating_tables(rotate_suffix, rotate_interval, rotate_window_start, rotate_scavenge, table_name, table_key, tables_and_gsis, next_table_names)
elif table_key in time_series_key_names:
# if rotate_interval_unit is defined, it should be the day in the month or the day in the week;
# if rotate_interval_unit is not defined, the default of the last day is used.
rotate_suffix = get_table_option(table_key, 'rotate_suffix')
rotate_interval_unit = get_table_option(table_key, 'rotate_interval_unit')
rotate_window_start = get_table_option(table_key, 'rotate_window_start')
rotate_scavenge = get_table_option(table_key, 'rotate_scavenge')
if rotate_interval_unit == 'month':
create_time_series_tables(rotate_suffix, rotate_window_start, rotate_scavenge, table_name, table_key, tables_and_gsis, next_table_names)
for table_name, table_key in sorted(tables_and_gsis):
try:
table_num_consec_read_checks = \
CHECK_STATUS['tables'][table_name]['reads']
except KeyError:
table_num_consec_read_checks = 0
try:
table_num_consec_write_checks = \
CHECK_STATUS['tables'][table_name]['writes']
except KeyError:
table_num_consec_write_checks = 0
try:
# The return var shows how many times the scale-down criteria
# has been met. This is coupled with a var in config,
# "num_intervals_scale_down", to delay the scale-down
table_num_consec_read_checks, table_num_consec_write_checks = \
table.ensure_provisioning(
table_name,
table_key,
table_num_consec_read_checks,
table_num_consec_write_checks)
CHECK_STATUS['tables'][table_name] = {
'reads': table_num_consec_read_checks,
'writes': table_num_consec_write_checks
}
gsi_names = set()
# Add GSI names that match the configured regular expressions
for gst_instance in dynamodb.table_gsis(table_name):
gsi_name = gst_instance[u'IndexName']
try:
gsi_keys = get_table_option(table_key, 'gsis').keys()
except AttributeError:
# Continue if there are no GSIs configured
continue
for gsi_key in gsi_keys:
try:
if re.match(gsi_key, gsi_name):
logger.debug(
'Table {0} GSI {1} matches '
'GSI config key {2}'.format(
table_name, gsi_name, gsi_key))
gsi_names.add((gsi_name, gsi_key))
except re.error:
logger.error('Invalid regular expression: "{0}"'.format(
gsi_key))
sys.exit(1)
for gsi_name, gsi_key in sorted(gsi_names):
try:
gsi_num_consec_read_checks = \
CHECK_STATUS['gsis'][gsi_name]['reads']
except KeyError:
gsi_num_consec_read_checks = 0
try:
gsi_num_consec_write_checks = \
CHECK_STATUS['gsis'][gsi_name]['writes']
except KeyError:
gsi_num_consec_write_checks = 0
gsi_num_consec_read_checks, gsi_num_consec_write_checks = \
gsi.ensure_provisioning(
table_name,
table_key,
gsi_name,
gsi_key,
gsi_num_consec_read_checks,
gsi_num_consec_write_checks)
CHECK_STATUS['gsis'][gsi_name] = {
'reads': gsi_num_consec_read_checks,
'writes': gsi_num_consec_write_checks
}
except JSONResponseError as error:
exception = error.body['__type'].split('#')[1]
if exception == 'ResourceNotFoundException':
logger.error('{0} - Table {1} does not exist anymore'.format(
table_name,
table_name))
continue
except BotoServerError as error:
if boto_server_error_retries > 0:
logger.error(
'Unknown boto error. Status: "{0}". '
'Reason: "{1}". Message: {2}'.format(
error.status,
error.reason,
error.message))
logger.error(
'Please bug report if this error persists')
boto_server_error_retries -= 1
continue
else:
raise
for next_table_name, cur_table_name, key_name in next_table_names:
cur_table_read_units = dynamodb.get_provisioned_table_read_units( cur_table_name )
cur_table_write_units = dynamodb.get_provisioned_table_write_units( cur_table_name )
dynamodb.update_table_provisioning( next_table_name, key_name, cur_table_read_units, cur_table_write_units)
# Sleep between the checks
if not get_global_option('run_once'):
logger.debug('Sleeping {0} seconds until next check'.format(
get_global_option('check_interval')))
time.sleep(get_global_option('check_interval'))
|
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from awx.main.models import Organization, Inventory, InventorySource, Project
@pytest.fixture
def base_inventory():
org = Organization.objects.create(name='test-org')
inv = Inventory.objects.create(name='test-inv', organization=org)
return inv
@pytest.fixture
def project(base_inventory):
return Project.objects.create(
name='test-proj',
organization=base_inventory.organization,
scm_type='git',
scm_url='https://github.com/ansible/test-playbooks.git',
)
@pytest.mark.django_db
def test_inventory_source_create(run_module, admin_user, base_inventory, project):
source_path = '/var/lib/awx/example_source_path/'
result = run_module('tower_inventory_source', dict(
name='foo',
inventory=base_inventory.name,
state='present',
source='scm',
source_path=source_path,
source_project=project.name
), admin_user)
assert result.pop('changed', None), result
inv_src = InventorySource.objects.get(name='foo')
assert inv_src.inventory == base_inventory
result.pop('invocation')
assert result == {
'id': inv_src.id,
'name': 'foo',
}
@pytest.mark.django_db
def test_create_inventory_source_implied_org(run_module, admin_user):
org = Organization.objects.create(name='test-org')
inv = Inventory.objects.create(name='test-inv', organization=org)
# Credential is not required for ec2 source, because of IAM roles
result = run_module('tower_inventory_source', dict(
name='Test Inventory Source',
inventory='test-inv',
source='ec2',
state='present'
), admin_user)
assert result.pop('changed', None), result
inv_src = InventorySource.objects.get(name='Test Inventory Source')
assert inv_src.inventory == inv
result.pop('invocation')
assert result == {
"name": "Test Inventory Source",
"id": inv_src.id,
}
@pytest.mark.django_db
def test_create_inventory_source_multiple_orgs(run_module, admin_user):
org = Organization.objects.create(name='test-org')
Inventory.objects.create(name='test-inv', organization=org)
# make another inventory by same name in another org
org2 = Organization.objects.create(name='test-org-number-two')
inv2 = Inventory.objects.create(name='test-inv', organization=org2)
result = run_module('tower_inventory_source', dict(
name='Test Inventory Source',
inventory=inv2.id,
source='ec2',
state='present'
), admin_user)
assert result.pop('changed', None), result
inv_src = InventorySource.objects.get(name='Test Inventory Source')
assert inv_src.inventory == inv2
result.pop('invocation')
assert result == {
"name": "Test Inventory Source",
"id": inv_src.id,
}
@pytest.mark.django_db
def test_create_inventory_source_with_venv(run_module, admin_user, base_inventory, mocker, project):
path = '/var/lib/awx/venv/custom-venv/foobar13489435/'
source_path = '/var/lib/awx/example_source_path/'
with mocker.patch('awx.main.models.mixins.get_custom_venv_choices', return_value=[path]):
result = run_module('tower_inventory_source', dict(
name='foo',
inventory=base_inventory.name,
state='present',
source='scm',
source_project=project.name,
custom_virtualenv=path,
source_path=source_path
), admin_user)
assert result.pop('changed'), result
inv_src = InventorySource.objects.get(name='foo')
assert inv_src.inventory == base_inventory
result.pop('invocation')
assert inv_src.custom_virtualenv == path
@pytest.mark.django_db
def test_custom_venv_no_op(run_module, admin_user, base_inventory, mocker, project):
"""If the inventory source is modified, then it should not blank fields
unrelated to the params that the user passed.
This enforces assumptions about the default AnsibleModule
argument_spec behavior.
"""
source_path = '/var/lib/awx/example_source_path/'
inv_src = InventorySource.objects.create(
name='foo',
inventory=base_inventory,
source_project=project,
source='scm',
custom_virtualenv='/venv/foobar/'
)
# mock needed due to API behavior, not incorrect client behavior
with mocker.patch('awx.main.models.mixins.get_custom_venv_choices', return_value=['/venv/foobar/']):
result = run_module('tower_inventory_source', dict(
name='foo',
description='this is the changed description',
inventory=base_inventory.name,
source='scm', # is required, but behavior is arguable
state='present',
source_project=project.name,
source_path=source_path
), admin_user)
assert result.pop('changed', None), result
inv_src.refresh_from_db()
assert inv_src.custom_virtualenv == '/venv/foobar/'
assert inv_src.description == 'this is the changed description'
@pytest.mark.django_db
def test_falsy_value(run_module, admin_user, base_inventory):
result = run_module('tower_inventory_source', dict(
name='falsy-test',
inventory=base_inventory.name,
source='ec2',
update_on_launch=True
), admin_user)
assert not result.get('failed', False), result.get('msg', result)
assert result.get('changed', None), result
inv_src = InventorySource.objects.get(name='falsy-test')
assert inv_src.update_on_launch is True
result = run_module('tower_inventory_source', dict(
name='falsy-test',
inventory=base_inventory.name,
# source='ec2',
update_on_launch=False
), admin_user)
inv_src.refresh_from_db()
assert inv_src.update_on_launch is False
# Tests related to source-specific parameters
#
# We want to let the API return issues with "this doesn't support that", etc.
#
# GUI OPTIONS:
# - - - - - - - manual: file: scm: ec2: gce azure_rm vmware sat cloudforms openstack rhv tower custom
# credential ? ? o o r r r r r r r r o
# source_project ? ? r - - - - - - - - - -
# source_path ? ? r - - - - - - - - - -
# verbosity ? ? o o o o o o o o o o o
# overwrite ? ? o o o o o o o o o o o
# overwrite_vars ? ? o o o o o o o o o o o
# update_on_launch ? ? o o o o o o o o o o o
# UoPL ? ? o - - - - - - - - - -
# source_regions ? ? - o o o - - - - - - -
# instance_filters ? ? - o - - o - - - - o -
# group_by ? ? - o - - o - - - - - -
# source_vars* ? ? - o - o o o o o - - -
# environment vars* ? ? o - - - - - - - - - o
# source_script ? ? - - - - - - - - - - r
#
# UoPL - update_on_project_launch
# * - source_vars are labeled environment_vars on project and custom sources
@pytest.mark.django_db
def test_missing_required_credential(run_module, admin_user, base_inventory):
result = run_module('tower_inventory_source', dict(
name='Test Azure Source',
inventory=base_inventory.name,
source='azure_rm',
state='present'
), admin_user)
assert result.pop('failed', None) is True, result
assert 'Credential is required for a cloud source' in result.get('msg', '')
@pytest.mark.django_db
def test_source_project_not_for_cloud(run_module, admin_user, base_inventory, project):
result = run_module('tower_inventory_source', dict(
name='Test ec2 Inventory Source',
inventory=base_inventory.name,
source='ec2',
state='present',
source_project=project.name
), admin_user)
assert result.pop('failed', None) is True, result
assert 'Cannot set source_project if not SCM type' in result.get('msg', '')
@pytest.mark.django_db
def test_source_path_not_for_cloud(run_module, admin_user, base_inventory):
result = run_module('tower_inventory_source', dict(
name='Test ec2 Inventory Source',
inventory=base_inventory.name,
source='ec2',
state='present',
source_path='where/am/I'
), admin_user)
assert result.pop('failed', None) is True, result
assert 'Cannot set source_path if not SCM type' in result.get('msg', '')
@pytest.mark.django_db
def test_scm_source_needs_project(run_module, admin_user, base_inventory):
result = run_module('tower_inventory_source', dict(
name='SCM inventory without project',
inventory=base_inventory.name,
state='present',
source='scm',
source_path='/var/lib/awx/example_source_path/'
), admin_user)
assert result.pop('failed', None), result
assert 'Project required for scm type sources' in result.get('msg', '')
|
|
#!/usr/bin/env python
import saml2
from saml2 import create_class_from_xml_string, class_name, make_vals, md
from saml2.saml import NameID, Issuer, SubjectLocality, AuthnContextClassRef
from saml2.saml import SubjectConfirmationData, SubjectConfirmation
from saml2.saml import Attribute
from py.test import raises
import saml2_data
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
from elementtree import ElementTree
ITEMS = {
NameID: ["""<?xml version="1.0" encoding="utf-8"?>
<NameID xmlns="urn:oasis:names:tc:SAML:2.0:assertion"
Format="urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress"
SPProvidedID="sp provided id">
roland@example.com
</NameID>
""", """<?xml version="1.0" encoding="utf-8"?>
<NameID xmlns="urn:oasis:names:tc:SAML:2.0:assertion"
SPNameQualifier="https://foo.example.com/sp"
Format="urn:oasis:names:tc:SAML:2.0:nameid-format:transient">_1632879f09d08ea5ede2dc667cbed7e429ebc4335c</NameID>
""", """<?xml version="1.0" encoding="utf-8"?>
<NameID xmlns="urn:oasis:names:tc:SAML:2.0:assertion"
Format="urn:oasis:names:tc:SAML:2.0:nameid-format:persistent"
NameQualifier="http://authentic.example.com/saml/metadata"
SPNameQualifier="http://auth.example.com/saml/metadata">test
</NameID>"""],
Issuer: """<?xml version="1.0" encoding="utf-8"?>
<Issuer xmlns="urn:oasis:names:tc:SAML:2.0:assertion">
http://www.example.com/test
</Issuer>
""",
SubjectLocality: """<?xml version="1.0" encoding="utf-8"?>
<SubjectLocality xmlns="urn:oasis:names:tc:SAML:2.0:assertion"
Address="127.0.0.1" DNSName="localhost"/>
""",
SubjectConfirmationData:
"""<?xml version="1.0" encoding="utf-8"?>
<SubjectConfirmationData xmlns="urn:oasis:names:tc:SAML:2.0:assertion"
InResponseTo="_1683146e27983964fbe7bf8f08961108d166a652e5"
NotOnOrAfter="2010-02-18T13:52:13.959Z"
NotBefore="2010-01-16T12:00:00Z"
Recipient="http://192.168.0.10/saml/sp" />""",
SubjectConfirmation:
"""<?xml version="1.0" encoding="utf-8"?>
<SubjectConfirmation xmlns="urn:oasis:names:tc:SAML:2.0:assertion"
Method="urn:oasis:names:tc:SAML:2.0:cm:bearer"><NameID
Format="urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress"
NameQualifier="http://authentic.example.com/saml/metadata">test@example.com
</NameID>
<SubjectConfirmationData
NotOnOrAfter="2010-02-17T17:02:38Z"
Recipient="http://auth.example.com/saml/proxySingleSignOnRedirect"
InResponseTo="_59B3A01B03334032C31E434C63F89E3E"/></SubjectConfirmation>"""
}
#def pytest_generate_tests(metafunc):
# if "target_class" in metafunc.funcargnames:
# for tcl,xml in ITEMS.items():
# metafunc.addcall(funcargs={"target_class":tcl,"xml_string":xml})
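# Order-insensitive equality helper used by the assertions below.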
def _eq(l1, l2):
return set(l1) == set(l2)
def test_create_class_from_xml_string_nameid():
kl = create_class_from_xml_string(NameID, ITEMS[NameID][0])
assert kl != None
assert kl.format == "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress"
assert kl.sp_provided_id == "sp provided id"
assert kl.text.strip() == "roland@example.com"
assert _eq(kl.keyswv(), ['sp_provided_id', 'format', 'text'])
assert class_name(kl) == "urn:oasis:names:tc:SAML:2.0:assertion:NameID"
assert _eq(kl.keys(), ['sp_provided_id', 'sp_name_qualifier',
'name_qualifier', 'format', 'text'])
kl = create_class_from_xml_string(NameID, ITEMS[NameID][1])
assert kl != None
assert kl.format == "urn:oasis:names:tc:SAML:2.0:nameid-format:transient"
assert kl.sp_name_qualifier == "https://foo.example.com/sp"
assert kl.text.strip() == "_1632879f09d08ea5ede2dc667cbed7e429ebc4335c"
assert _eq(kl.keyswv(), ['sp_name_qualifier', 'format', 'text'])
assert class_name(kl) == "urn:oasis:names:tc:SAML:2.0:assertion:NameID"
kl = create_class_from_xml_string(NameID, ITEMS[NameID][2])
assert kl != None
assert kl.format == "urn:oasis:names:tc:SAML:2.0:nameid-format:persistent"
assert kl.name_qualifier == "http://authentic.example.com/saml/metadata"
assert kl.sp_name_qualifier == "http://auth.example.com/saml/metadata"
assert kl.text.strip() == "test"
assert _eq(kl.keyswv(), ['sp_name_qualifier', 'format', 'name_qualifier',
'text'])
assert class_name(kl) == "urn:oasis:names:tc:SAML:2.0:assertion:NameID"
def test_create_class_from_xml_string_issuer():
kl = create_class_from_xml_string(Issuer, ITEMS[Issuer])
assert kl != None
assert kl.text.strip() == "http://www.example.com/test"
assert _eq(kl.keyswv(), ['text'])
assert class_name(kl) == "urn:oasis:names:tc:SAML:2.0:assertion:Issuer"
def test_create_class_from_xml_string_subject_locality():
kl = create_class_from_xml_string(SubjectLocality, ITEMS[SubjectLocality])
assert kl != None
assert _eq(kl.keyswv(), ['address', "dns_name"])
assert kl.address == "127.0.0.1"
assert kl.dns_name == "localhost"
assert class_name(
kl) == "urn:oasis:names:tc:SAML:2.0:assertion:SubjectLocality"
def test_create_class_from_xml_string_subject_confirmation_data():
kl = create_class_from_xml_string(SubjectConfirmationData,
ITEMS[SubjectConfirmationData])
assert kl != None
assert _eq(kl.keyswv(), ['in_response_to', 'not_on_or_after',
'not_before', 'recipient'])
assert kl.in_response_to == "_1683146e27983964fbe7bf8f08961108d166a652e5"
assert kl.not_on_or_after == "2010-02-18T13:52:13.959Z"
assert kl.not_before == "2010-01-16T12:00:00Z"
assert kl.recipient == "http://192.168.0.10/saml/sp"
assert class_name(kl) == \
"urn:oasis:names:tc:SAML:2.0:assertion:SubjectConfirmationData"
def test_create_class_from_xml_string_subject_confirmation():
kl = create_class_from_xml_string(SubjectConfirmation,
ITEMS[SubjectConfirmation])
assert kl != None
assert _eq(kl.keyswv(), ['method', 'name_id',
'subject_confirmation_data'])
assert kl.method == "urn:oasis:names:tc:SAML:2.0:cm:bearer"
name_id = kl.name_id
assert _eq(name_id.keyswv(), ['format', 'name_qualifier', 'text'])
assert name_id.format == "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress"
assert name_id.name_qualifier == "http://authentic.example.com/saml/metadata"
assert name_id.text.strip() == "test@example.com"
subject_confirmation_data = kl.subject_confirmation_data
assert _eq(subject_confirmation_data.keyswv(), ['not_on_or_after',
'recipient',
'in_response_to'])
assert subject_confirmation_data.recipient == \
"http://auth.example.com/saml/proxySingleSignOnRedirect"
assert subject_confirmation_data.not_on_or_after == "2010-02-17T17:02:38Z"
assert subject_confirmation_data.in_response_to == \
"_59B3A01B03334032C31E434C63F89E3E"
assert class_name(kl) == \
"urn:oasis:names:tc:SAML:2.0:assertion:SubjectConfirmation"
def test_create_class_from_xml_string_wrong_class_spec():
kl = create_class_from_xml_string(SubjectConfirmationData,
ITEMS[SubjectConfirmation])
assert kl == None
def test_ee_1():
ee = saml2.extension_element_from_string(
"""<?xml version='1.0' encoding='UTF-8'?><foo>bar</foo>""")
assert ee != None
print ee.__dict__
assert ee.attributes == {}
assert ee.tag == "foo"
assert ee.namespace == None
assert ee.children == []
assert ee.text == "bar"
def test_ee_2():
ee = saml2.extension_element_from_string(
"""<?xml version='1.0' encoding='UTF-8'?><foo id="xyz">bar</foo>""")
assert ee != None
print ee.__dict__
assert ee.attributes == {"id": "xyz"}
assert ee.tag == "foo"
assert ee.namespace == None
assert ee.children == []
assert ee.text == "bar"
def test_ee_3():
ee = saml2.extension_element_from_string(
"""<?xml version='1.0' encoding='UTF-8'?>
<foo xmlns="urn:mace:example.com:saml:ns"
id="xyz">bar</foo>""")
assert ee != None
print ee.__dict__
assert ee.attributes == {"id": "xyz"}
assert ee.tag == "foo"
assert ee.namespace == "urn:mace:example.com:saml:ns"
assert ee.children == []
assert ee.text == "bar"
def test_ee_4():
ee = saml2.extension_element_from_string(
"""<?xml version='1.0' encoding='UTF-8'?>
<foo xmlns="urn:mace:example.com:saml:ns">
<id>xyz</id><bar>tre</bar></foo>""")
assert ee != None
print ee.__dict__
assert ee.attributes == {}
assert ee.tag == "foo"
assert ee.namespace == "urn:mace:example.com:saml:ns"
assert len(ee.children) == 2
assert ee.text.strip() == ""
cid = ee.find_children("id", "urn:mace:example.com:saml:namespace")
assert cid == []
ids = ee.find_children("id", "urn:mace:example.com:saml:ns")
assert ids != []
cid = ids[0]
print cid.__dict__
assert cid.attributes == {}
assert cid.tag == "id"
assert cid.namespace == "urn:mace:example.com:saml:ns"
assert cid.children == []
assert cid.text.strip() == "xyz"
def test_ee_5():
ee = saml2.extension_element_from_string(
"""<?xml version='1.0' encoding='UTF-8'?>
<foo xmlns="urn:mace:example.com:saml:ns">bar</foo>""")
ce = saml2.extension_element_from_string(
"""<?xml version='1.0' encoding='UTF-8'?>
<educause xmlns="urn:mace:example.com:saml:cu">rev</educause>""")
ee.children.append(ce)
assert ee != None
print ee.__dict__
assert ee.attributes == {}
assert ee.tag == "foo"
assert ee.namespace == "urn:mace:example.com:saml:ns"
assert len(ee.children) == 1
assert ee.text.strip() == "bar"
c = ee.children[0]
print c.__dict__
child = ee.find_children(namespace="urn:mace:example.com:saml:cu")
assert len(child) == 1
child = ee.find_children(namespace="urn:mace:example.com:saml:ns")
assert len(child) == 0
child = ee.find_children("educause", "urn:mace:example.com:saml:cu")
assert len(child) == 1
child = ee.find_children("edugain", "urn:mace:example.com:saml:cu")
assert len(child) == 0
print ee.to_string()
def test_ee_6():
ee = saml2.extension_element_from_string(
"""<?xml version='1.0' encoding='UTF-8'?>
<foo xmlns="urn:mace:example.com:saml:ns">bar</foo>""")
ce = saml2.extension_element_from_string(
"""<?xml version='1.0' encoding='UTF-8'?>
<educause xmlns="urn:mace:example.com:saml:cu">rev</educause>""")
et = ee.transfer_to_element_tree()
ce.become_child_element_of(et)
pee = saml2._extension_element_from_element_tree(et)
assert pee != None
print pee.__dict__
assert pee.attributes == {}
assert pee.tag == "foo"
assert pee.namespace == "urn:mace:example.com:saml:ns"
assert len(pee.children) == 1
assert pee.text.strip() == "bar"
c = pee.children[0]
print c.__dict__
child = pee.find_children(namespace="urn:mace:example.com:saml:cu")
assert len(child) == 1
child = pee.find_children(namespace="urn:mace:example.com:saml:ns")
assert len(child) == 0
child = pee.find_children("educause", "urn:mace:example.com:saml:cu")
assert len(child) == 1
child = pee.find_children("edugain", "urn:mace:example.com:saml:cu")
assert len(child) == 0
print pee.to_string()
NAMEID_WITH_ATTRIBUTE_EXTENSION = """<?xml version="1.0" encoding="utf-8"?>
<NameID xmlns="urn:oasis:names:tc:SAML:2.0:assertion"
xmlns:local="urn:mace:example.com:saml:assertion"
Format="urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress"
SPProvidedID="sp provided id"
local:Foo="BAR">
roland@example.com
</NameID>
"""
def test_nameid_with_extension():
kl = create_class_from_xml_string(NameID, NAMEID_WITH_ATTRIBUTE_EXTENSION)
assert kl != None
print kl.__dict__
assert kl.format == "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress"
assert kl.sp_provided_id == "sp provided id"
assert kl.text.strip() == "roland@example.com"
assert _eq(kl.keyswv(), ['sp_provided_id', 'format',
'extension_attributes', 'text'])
assert class_name(kl) == "urn:oasis:names:tc:SAML:2.0:assertion:NameID"
assert _eq(kl.keys(), ['sp_provided_id', 'sp_name_qualifier',
'name_qualifier', 'format', 'text'])
assert kl.extension_attributes == {
'{urn:mace:example.com:saml:assertion}Foo': 'BAR'}
SUBJECT_CONFIRMATION_WITH_MEMBER_EXTENSION = """<?xml version="1.0" encoding="utf-8"?>
<SubjectConfirmation xmlns="urn:oasis:names:tc:SAML:2.0:assertion"
Method="urn:oasis:names:tc:SAML:2.0:cm:bearer">
<NameID
Format="urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress"
NameQualifier="http://authentic.example.com/saml/metadata">test@example.com
</NameID>
<SubjectConfirmationData
NotOnOrAfter="2010-02-17T17:02:38Z"
Recipient="http://auth.example.com/saml/proxySingleSignOnRedirect"
InResponseTo="_59B3A01B03334032C31E434C63F89E3E"/>
<local:Trustlevel xmlns:local="urn:mace:example.com:saml:assertion">
Excellent
</local:Trustlevel>
</SubjectConfirmation>"""
def test_subject_confirmation_with_extension():
kl = create_class_from_xml_string(SubjectConfirmation,
SUBJECT_CONFIRMATION_WITH_MEMBER_EXTENSION)
assert kl != None
print kl.__dict__
assert kl.extension_attributes == {}
assert kl.method == "urn:oasis:names:tc:SAML:2.0:cm:bearer"
name_id = kl.name_id
assert _eq(name_id.keyswv(), ['format', 'name_qualifier', 'text'])
assert name_id.format == "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress"
assert name_id.name_qualifier == "http://authentic.example.com/saml/metadata"
assert name_id.text.strip() == "test@example.com"
subject_confirmation_data = kl.subject_confirmation_data
assert _eq(subject_confirmation_data.keyswv(), ['not_on_or_after',
'recipient',
'in_response_to'])
assert subject_confirmation_data.recipient == \
"http://auth.example.com/saml/proxySingleSignOnRedirect"
assert subject_confirmation_data.not_on_or_after == "2010-02-17T17:02:38Z"
assert subject_confirmation_data.in_response_to == \
"_59B3A01B03334032C31E434C63F89E3E"
assert len(kl.extension_elements) == 1
ee = kl.extension_elements[0]
assert ee.tag == "Trustlevel"
assert ee.namespace == "urn:mace:example.com:saml:assertion"
assert ee.text.strip() == "Excellent"
def test_to_fro_string_1():
kl = create_class_from_xml_string(SubjectConfirmation,
SUBJECT_CONFIRMATION_WITH_MEMBER_EXTENSION)
txt = kl.to_string()
cpy = create_class_from_xml_string(SubjectConfirmation, txt)
print kl.__dict__
print cpy.__dict__
assert kl.text.strip() == cpy.text.strip()
assert _eq(kl.keyswv(), cpy.keyswv())
assert len(kl.extension_elements) == len(cpy.extension_elements)
klee = kl.extension_elements[0]
cpyee = cpy.extension_elements[0]
assert klee.text.strip() == cpyee.text.strip()
assert klee.tag == cpyee.tag
assert klee.namespace == cpyee.namespace
def test_make_vals_str():
kl = make_vals("Jeter", md.GivenName, part=True)
assert isinstance(kl, md.GivenName)
assert kl.text == "Jeter"
def test_make_vals_list_of_strs():
cp = md.ContactPerson()
make_vals(["Derek", "Sanderson"], md.GivenName, cp, "given_name")
assert len(cp.given_name) == 2
assert _eq([i.text for i in cp.given_name], ["Sanderson", "Derek"])
def test_attribute_element_to_extension_element():
attr = create_class_from_xml_string(Attribute, saml2_data.TEST_ATTRIBUTE)
ee = saml2.element_to_extension_element(attr)
print ee.__dict__
assert ee.tag == "Attribute"
assert ee.namespace == 'urn:oasis:names:tc:SAML:2.0:assertion'
assert _eq(ee.attributes.keys(), ['FriendlyName', 'Name', 'NameFormat'])
assert ee.attributes["FriendlyName"] == 'test attribute'
assert ee.attributes["Name"] == "testAttribute"
assert ee.attributes["NameFormat"] == \
'urn:oasis:names:tc:SAML:2.0:attrname-format:unspecified'
assert len(ee.children) == 2
for child in ee.children:
# children are also extension element instances
assert child.namespace == 'urn:oasis:names:tc:SAML:2.0:assertion'
assert child.tag == "AttributeValue"
def test_ee_7():
ee = saml2.extension_element_from_string(
"""<?xml version='1.0' encoding='UTF-8'?>
<ExternalEntityAttributeAuthority
xmlns="urn:oasis:names:tc:SAML:metadata:dynamicsaml">
<AssertingEntity>
<NameID Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">
http://federationX.org
</NameID>
</AssertingEntity>
<RetrievalEndpoint>
https://federationX.org/?ID=a87s76a5765da76576a57as
</RetrievalEndpoint>
</ExternalEntityAttributeAuthority>
""")
print ee.__dict__
assert len(ee.children) == 2
for child in ee.children:
assert child.namespace == "urn:oasis:names:tc:SAML:metadata:dynamicsaml"
assert _eq(["AssertingEntity", "RetrievalEndpoint"],
[c.tag for c in ee.children])
aes = [c for c in ee.children if c.tag == "AssertingEntity"]
assert len(aes) == 1
assert len(aes[0].children) == 1
assert _eq(aes[0].attributes.keys(), [])
nid = aes[0].children[0]
assert nid.tag == "NameID"
assert nid.namespace == "urn:oasis:names:tc:SAML:metadata:dynamicsaml"
assert len(nid.children) == 0
assert _eq(nid.attributes.keys(), ["Format"])
assert nid.text.strip() == "http://federationX.org"
def test_extension_element_loadd():
ava = {'attributes': {},
'tag': 'ExternalEntityAttributeAuthority',
'namespace': 'urn:oasis:names:tc:SAML:metadata:dynamicsaml',
'children': [{
"tag": "AssertingEntity",
"namespace": "urn:oasis:names:tc:SAML:metadata:dynamicsaml",
"children": [{
"tag": "NameID",
"namespace": "urn:oasis:names:tc:SAML:metadata:dynamicsaml",
"text": "http://federationX.org",
"attributes": {
"Format": "urn:oasis:names:tc:SAML:2.0:nameid-format:entity"
},
}]
}, {
"tag": "RetrievalEndpoint",
"namespace": "urn:oasis:names:tc:SAML:metadata"
":dynamicsaml",
"text": "https://federationX.org/?ID=a87s76a5765da76576a57as",
}],
}
ee = saml2.ExtensionElement(ava["tag"]).loadd(ava)
print ee.__dict__
assert len(ee.children) == 2
for child in ee.children:
assert child.namespace == "urn:oasis:names:tc:SAML:metadata:dynamicsaml"
assert _eq(["AssertingEntity", "RetrievalEndpoint"],
[c.tag for c in ee.children])
aes = [c for c in ee.children if c.tag == "AssertingEntity"]
assert len(aes) == 1
assert len(aes[0].children) == 1
assert _eq(aes[0].attributes.keys(), [])
nid = aes[0].children[0]
assert nid.tag == "NameID"
assert nid.namespace == "urn:oasis:names:tc:SAML:metadata:dynamicsaml"
assert len(nid.children) == 0
assert _eq(nid.attributes.keys(), ["Format"])
assert nid.text.strip() == "http://federationX.org"
def test_extensions_loadd():
ava = {"extension_elements": [
{
'attributes': {},
'tag': 'ExternalEntityAttributeAuthority',
'namespace': 'urn:oasis:names:tc:SAML:metadata:dynamicsaml',
'children': [
{"tag": "AssertingEntity",
"namespace": "urn:oasis:names:tc:SAML:metadata:dynamicsaml",
"children": [
{"tag": "NameID",
"namespace": "urn:oasis:names:tc:SAML:metadata:dynamicsaml",
"text": "http://federationX.org",
"attributes": {
"Format": "urn:oasis:names:tc:SAML:2.0:nameid-format:entity"
},
}]
},
{
"tag": "RetrievalEndpoint",
"namespace": "urn:oasis:names:tc:SAML:metadata:dynamicsaml",
"text": "https://federationX.org/?ID=a87s76a5765da76576a57as",
}],
}],
"extension_attributes": {
"foo": "bar",
}
}
extension = saml2.SamlBase()
extension.loadd(ava)
print extension.__dict__
assert len(extension.extension_elements) == 1
ee = extension.extension_elements[0]
assert len(ee.children) == 2
for child in ee.children:
assert child.namespace == "urn:oasis:names:tc:SAML:metadata:dynamicsaml"
assert _eq(["AssertingEntity", "RetrievalEndpoint"],
[c.tag for c in ee.children])
aes = [c for c in ee.children if c.tag == "AssertingEntity"]
assert len(aes) == 1
assert len(aes[0].children) == 1
assert _eq(aes[0].attributes.keys(), [])
nid = aes[0].children[0]
assert nid.tag == "NameID"
assert nid.namespace == "urn:oasis:names:tc:SAML:metadata:dynamicsaml"
assert len(nid.children) == 0
assert _eq(nid.attributes.keys(), ["Format"])
assert nid.text.strip() == "http://federationX.org"
assert extension.extension_attributes.keys() == ["foo"]
assert extension.extension_attributes["foo"] == "bar"
|
|
"""
Read data from ECMWF MACC Reanalysis.
"""
import threading
import pandas as pd
try:
import netCDF4
except ImportError:
class netCDF4:
@staticmethod
def Dataset(*a, **kw):
raise ImportError(
'Reading ECMWF data requires netCDF4 to be installed.')
try:
from ecmwfapi import ECMWFDataServer
except ImportError:
def ECMWFDataServer(*a, **kw):
raise ImportError(
'To download data from ECMWF requires the API client.\nSee https:/'
'/confluence.ecmwf.int/display/WEBAPI/Access+ECMWF+Public+Datasets'
)
#: map of ECMWF MACC parameter keynames and codes used in API
PARAMS = {
"tcwv": "137.128",
"aod550": "207.210",
'aod469': '213.210',
'aod670': '214.210',
'aod865': '215.210',
"aod1240": "216.210",
}
def _ecmwf(server, startdate, stopdate, params, targetname):
# see http://apps.ecmwf.int/datasets/data/macc-reanalysis/levtype=sfc/
server.retrieve({
"class": "mc",
"dataset": "macc",
"date": "%s/to/%s" % (startdate, stopdate),
"expver": "rean",
"grid": "0.75/0.75",
"levtype": "sfc",
"param": params,
"step": "3/6/9/12/15/18/21/24",
"stream": "oper",
"format": "netcdf",
"time": "00:00:00",
"type": "fc",
"target": targetname,
})
def get_ecmwf_macc(filename, params, startdate, stopdate, lookup_params=True,
server=None, target=_ecmwf):
"""
Download data from ECMWF MACC Reanalysis API.
Parameters
----------
filename : str
full path of file where to save data, ``.nc`` appended if not given
params : str or sequence of str
keynames of parameter[s] to download
startdate : datetime.datetime or datetime.date
UTC date
stopdate : datetime.datetime or datetime.date
UTC date
lookup_params : bool, default True
optional flag, if ``False``, then codes are already formatted
server : ecmwfapi.api.ECMWFDataServer
optionally provide a server object, default is ``None``
target : callable
optional function that calls ``server.retrieve`` to pass to thread
Returns
-------
t : thread
a thread object, use it to check status by calling `t.is_alive()`
Notes
-----
To download data from ECMWF requires the API client and a registration
key. Please read the documentation in `Access ECMWF Public Datasets
<https://confluence.ecmwf.int/display/WEBAPI/Access+ECMWF+Public+Datasets>`_.
Follow the instructions in step 4 and save the ECMWF registration key
as `$HOME/.ecmwfapirc` or set `ECMWF_API_KEY` as the path to the key.
This function returns a daemon thread that runs in the background. Exiting
Python will kill this thread, however this thread will not block the main
thread or other threads. This thread will terminate when the file is
downloaded or if the thread raises an unhandled exception. You may submit
multiple requests simultaneously to break up large downloads. You can also
check the status and retrieve downloads online at
http://apps.ecmwf.int/webmars/joblist/. This is useful if you kill the
thread. Downloads expire after 24 hours.
.. warning:: Your request may be queued online for an hour or more before
it begins to download
Precipitable water :math:`P_{wat}` is equivalent to the total column of
water vapor (TCWV), but the units given by ECMWF MACC Reanalysis are kg/m^2
at STP (1-atm, 25-C). Divide by ten to convert to centimeters of
precipitable water:
.. math::
P_{wat} \\left( \\text{cm} \\right) \
= TCWV \\left( \\frac{\\text{kg}}{\\text{m}^2} \\right) \
\\frac{100 \\frac{\\text{cm}}{\\text{m}}} \
{1000 \\frac{\\text{kg}}{\\text{m}^3}}
The keynames available for the ``params`` argument are given by
:const:`pvlib.iotools.ecmwf_macc.PARAMS` which maps the keys to codes used
in the API. The following keynames are available:
======= =========================================
keyname description
======= =========================================
tcwv total column water vapor in kg/m^2 at STP
aod550 aerosol optical depth measured at 550-nm
aod469 aerosol optical depth measured at 469-nm
aod670 aerosol optical depth measured at 670-nm
aod865 aerosol optical depth measured at 865-nm
aod1240 aerosol optical depth measured at 1240-nm
======= =========================================
If ``lookup_params`` is ``False`` then ``params`` must contain the codes
preformatted according to the ECMWF MACC Reanalysis API. This is useful if
you want to retrieve codes that are not mapped in
:const:`pvlib.iotools.ecmwf_macc.PARAMS`.
Specify a custom ``target`` function to modify how the ECMWF API function
``server.retrieve`` is called. The ``target`` function must have the
following signature in which the parameter definitions are similar to
:func:`pvlib.iotools.get_ecmwf_macc`. ::
target(server, startdate, stopdate, params, filename) -> None
Examples
--------
Retrieve the AOD measured at 550-nm and the total column of water vapor for
November 1, 2012.
>>> from datetime import date
>>> from pvlib.iotools import get_ecmwf_macc
>>> filename = 'aod_tcwv_20121101.nc' # .nc extension added if missing
>>> params = ('aod550', 'tcwv')
>>> start = end = date(2012, 11, 1)
>>> t = get_ecmwf_macc(filename, params, start, end)
>>> t.is_alive()
True
"""
if not filename.endswith('nc'):
filename += '.nc'
if lookup_params:
try:
params = '/'.join(PARAMS.get(p) for p in params)
except TypeError:
params = PARAMS.get(params)
startdate = startdate.strftime('%Y-%m-%d')
stopdate = stopdate.strftime('%Y-%m-%d')
if not server:
server = ECMWFDataServer()
t = threading.Thread(target=target, daemon=True,
args=(server, startdate, stopdate, params, filename))
t.start()
return t
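# A minimal usage sketch of the ``lookup_params=False`` path described in the
# docstring above: the caller passes API codes already joined with slashes
# instead of keynames. The filename and dates are illustrative assumptions,
# not values required by the API. The helper below is only defined, not run.
def _example_get_ecmwf_macc_raw_codes():
    from datetime import date
    # "207.210" is aod550 and "137.128" is tcwv (see PARAMS above)
    codes = '/'.join((PARAMS['aod550'], PARAMS['tcwv']))
    return get_ecmwf_macc('aod_tcwv_20121101.nc', codes,
                          date(2012, 11, 1), date(2012, 11, 1),
                          lookup_params=False)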
class ECMWF_MACC(object):
"""container for ECMWF MACC reanalysis data"""
TCWV = 'tcwv' # total column water vapor in kg/m^2 at (1-atm,25-degC)
def __init__(self, filename):
self.data = netCDF4.Dataset(filename)
# data variables and dimensions
variables = set(self.data.variables.keys())
dimensions = set(self.data.dimensions.keys())
self.keys = tuple(variables - dimensions)
# size of lat/lon dimensions
self.lat_size = self.data.dimensions['latitude'].size
self.lon_size = self.data.dimensions['longitude'].size
# spatial resolution in degrees
self.delta_lat = -180.0 / (self.lat_size - 1) # from north to south
self.delta_lon = 360.0 / self.lon_size # from west to east
# time resolution in hours
self.time_size = self.data.dimensions['time'].size
self.start_time = self.data['time'][0]
self.stop_time = self.data['time'][-1]
self.time_range = self.stop_time - self.start_time
self.delta_time = self.time_range / (self.time_size - 1)
def get_nearest_indices(self, latitude, longitude):
"""
Get nearest indices to (latitude, longitude).
        Parameters
----------
latitude : float
Latitude in degrees
longitude : float
Longitude in degrees
Returns
-------
idx_lat : int
index of nearest latitude
idx_lon : int
index of nearest longitude
"""
# index of nearest latitude
idx_lat = int(round((latitude - 90.0) / self.delta_lat))
# avoid out of bounds latitudes
if idx_lat < 0:
idx_lat = 0 # if latitude == 90, north pole
        elif idx_lat > self.lat_size - 1:
            idx_lat = self.lat_size - 1  # if latitude == -90, south pole
# adjust longitude from -180/180 to 0/360
longitude = longitude % 360.0
# index of nearest longitude
idx_lon = int(round(longitude / self.delta_lon)) % self.lon_size
return idx_lat, idx_lon
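    # Worked example (sketch) of the nearest-index math above, assuming the
    # 0.75-degree MACC grid requested in _ecmwf (lat_size=241, lon_size=480,
    # so delta_lat=-0.75 and delta_lon=0.75):
    #   latitude=38.2    -> idx_lat = round((38.2 - 90.0) / -0.75) = 69
    #   longitude=-122.1 -> 237.9 after wrapping to the 0/360 range
    #                    -> idx_lon = round(237.9 / 0.75) % 480 = 317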
def interp_data(self, latitude, longitude, utc_time, param):
"""
Interpolate ``param`` values to ``utc_time`` using indices nearest to
(``latitude, longitude``).
        Parameters
----------
latitude : float
Latitude in degrees
longitude : float
Longitude in degrees
utc_time : datetime.datetime or datetime.date
Naive or UTC date or datetime to interpolate
param : str
Name of the parameter to interpolate from the data
Returns
-------
Interpolated ``param`` value at (``utc_time, latitude, longitude``)
Examples
--------
Use this to get a single value of a parameter in the data at a specific
time and set of (latitude, longitude) coordinates.
>>> from datetime import datetime
>>> from pvlib.iotools import ecmwf_macc
>>> data = ecmwf_macc.ECMWF_MACC('aod_tcwv_20121101.nc')
>>> dt = datetime(2012, 11, 1, 11, 33, 1)
>>> data.interp_data(38.2, -122.1, dt, 'aod550')
"""
nctime = self.data['time'] # time
ilat, ilon = self.get_nearest_indices(latitude, longitude)
# time index before
before = netCDF4.date2index(utc_time, nctime, select='before')
fbefore = self.data[param][before, ilat, ilon]
fafter = self.data[param][before + 1, ilat, ilon]
dt_num = netCDF4.date2num(utc_time, nctime.units)
time_ratio = (dt_num - nctime[before]) / self.delta_time
return fbefore + (fafter - fbefore) * time_ratio
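    # The return value above is a simple linear interpolation in time between
    # the two records that bracket ``utc_time``:
    #   f(t) = f_before + (f_after - f_before) * (t - t_before) / delta_time
    # where delta_time is the (uniform) spacing of the 'time' dimension.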
def read_ecmwf_macc(filename, latitude, longitude, utc_time_range=None):
"""
Read data from ECMWF MACC reanalysis netCDF4 file.
Parameters
----------
filename : string
full path to netCDF4 data file.
latitude : float
latitude in degrees
longitude : float
longitude in degrees
utc_time_range : sequence of datetime.datetime
pair of start and stop naive or UTC date-times
Returns
-------
data : pandas.DataFrame
dataframe for specified range of UTC date-times
"""
ecmwf_macc = ECMWF_MACC(filename)
try:
ilat, ilon = ecmwf_macc.get_nearest_indices(latitude, longitude)
nctime = ecmwf_macc.data['time']
if utc_time_range:
start_idx = netCDF4.date2index(
utc_time_range[0], nctime, select='before')
stop_idx = netCDF4.date2index(
utc_time_range[-1], nctime, select='after')
time_slice = slice(start_idx, stop_idx + 1)
else:
time_slice = slice(0, ecmwf_macc.time_size)
times = netCDF4.num2date(nctime[time_slice], nctime.units)
df = {k: ecmwf_macc.data[k][time_slice, ilat, ilon]
for k in ecmwf_macc.keys}
if ECMWF_MACC.TCWV in df:
# convert total column water vapor in kg/m^2 at (1-atm, 25-degC) to
# precipitable water in cm
df['precipitable_water'] = df[ECMWF_MACC.TCWV] / 10.0
finally:
ecmwf_macc.data.close()
return pd.DataFrame(df, index=times.astype('datetime64[s]'))
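# A minimal sketch of reading a previously downloaded file. The filename,
# coordinates and date range are assumptions for illustration; the file must
# already exist (e.g. created by get_ecmwf_macc above). Defined, not called.
def _example_read_ecmwf_macc():
    from datetime import datetime
    time_range = (datetime(2012, 11, 1, 3), datetime(2012, 11, 1, 21))
    df = read_ecmwf_macc('aod_tcwv_20121101.nc', 38.2, -122.1, time_range)
    # columns include the downloaded variables, e.g. 'aod550' and 'tcwv',
    # plus 'precipitable_water' (cm) when 'tcwv' is present
    return df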
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import time
from django.core.urlresolvers import reverse
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext as _
from desktop.lib.django_util import format_preserving_redirect
from desktop.lib.parameterization import substitute_variables
from filebrowser.views import location_to_url
from beeswax import hive_site
from beeswax.conf import HIVE_SERVER_HOST, HIVE_SERVER_PORT, LIST_PARTITIONS_LIMIT, SERVER_CONN_TIMEOUT, \
AUTH_USERNAME, AUTH_PASSWORD, APPLY_NATURAL_SORT_MAX, QUERY_PARTITIONS_LIMIT
from beeswax.common import apply_natural_sort
from beeswax.design import hql_query
from beeswax.hive_site import hiveserver2_use_ssl
from beeswax.models import QueryHistory, QUERY_TYPES
LOG = logging.getLogger(__name__)
DBMS_CACHE = {}
DBMS_CACHE_LOCK = threading.Lock()
def get(user, query_server=None):
global DBMS_CACHE
global DBMS_CACHE_LOCK
if query_server is None:
query_server = get_query_server_config()
DBMS_CACHE_LOCK.acquire()
try:
DBMS_CACHE.setdefault(user.username, {})
if query_server['server_name'] not in DBMS_CACHE[user.username]:
# Avoid circular dependency
from beeswax.server.hive_server2_lib import HiveServerClientCompatible
if query_server['server_name'] == 'impala':
from impala.dbms import ImpalaDbms
from impala.server import ImpalaServerClient
DBMS_CACHE[user.username][query_server['server_name']] = ImpalaDbms(HiveServerClientCompatible(ImpalaServerClient(query_server, user)), QueryHistory.SERVER_TYPE[1][0])
else:
from beeswax.server.hive_server2_lib import HiveServerClient
DBMS_CACHE[user.username][query_server['server_name']] = HiveServer2Dbms(HiveServerClientCompatible(HiveServerClient(query_server, user)), QueryHistory.SERVER_TYPE[1][0])
return DBMS_CACHE[user.username][query_server['server_name']]
finally:
DBMS_CACHE_LOCK.release()
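# Minimal sketch of how callers typically use the per-user cache above;
# ``request`` is an assumed Django request object, not something defined in
# this module. The first call per (username, server_name) pair creates and
# caches a client, later calls reuse it.
def _example_get_db(request):
  db = get(request.user)       # cached under DBMS_CACHE[username][server_name]
  return db.get_databases()    # e.g. list databases via HiveServer2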
def get_query_server_config(name='beeswax', server=None):
if name == 'impala':
from impala.dbms import get_query_server_config as impala_query_server_config
query_server = impala_query_server_config()
else:
kerberos_principal = hive_site.get_hiveserver2_kerberos_principal(HIVE_SERVER_HOST.get())
query_server = {
'server_name': 'beeswax', # Aka HiveServer2 now
'server_host': HIVE_SERVER_HOST.get(),
'server_port': HIVE_SERVER_PORT.get(),
'principal': kerberos_principal,
'http_url': '%(protocol)s://%(host)s:%(port)s/%(end_point)s' % {
'protocol': 'https' if hiveserver2_use_ssl() else 'http',
'host': HIVE_SERVER_HOST.get(),
'port': hive_site.hiveserver2_thrift_http_port(),
'end_point': hive_site.hiveserver2_thrift_http_path()
},
'transport_mode': 'http' if hive_site.hiveserver2_transport_mode() == 'HTTP' else 'socket',
'auth_username': AUTH_USERNAME.get(),
'auth_password': AUTH_PASSWORD.get()
}
if name == 'sparksql': # Spark SQL is almost the same as Hive
from spark.conf import SQL_SERVER_HOST as SPARK_SERVER_HOST, SQL_SERVER_PORT as SPARK_SERVER_PORT
query_server.update({
'server_name': 'sparksql',
'server_host': SPARK_SERVER_HOST.get(),
'server_port': SPARK_SERVER_PORT.get()
})
debug_query_server = query_server.copy()
debug_query_server['auth_password_used'] = bool(debug_query_server.pop('auth_password'))
LOG.debug("Query Server: %s" % debug_query_server)
return query_server
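# Illustrative shape (hedged) of the dict returned above for the default
# 'beeswax' server when hive-site.xml enables HTTP transport over SSL; the
# concrete host, ports, path and credentials are assumptions, since they all
# come from configuration:
#   {'server_name': 'beeswax', 'server_host': 'hive.example.com',
#    'server_port': 10000, 'principal': None,
#    'http_url': 'https://hive.example.com:10001/cliservice',
#    'transport_mode': 'http', 'auth_username': ..., 'auth_password': ...}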
class QueryServerException(Exception):
# Ideally the query handle will be stored here too.
def __init__(self, e, message=''):
super(QueryServerException, self).__init__(e)
self.message = message
class QueryServerTimeoutException(Exception):
def __init__(self, message=''):
super(QueryServerTimeoutException, self).__init__(message)
self.message = message
class NoSuchObjectException(Exception): pass
class HiveServer2Dbms(object):
def __init__(self, client, server_type):
self.client = client
self.server_type = server_type
self.server_name = self.client.query_server['server_name']
@classmethod
def to_matching_wildcard(cls, identifier=None):
cleaned = "*"
if identifier and identifier.strip() != "*":
cleaned = "*%s*" % identifier.strip().strip("*")
return cleaned
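  # Behaviour sketch of to_matching_wildcard (values are illustrative):
  #   None        -> "*"         (match everything)
  #   "  sample " -> "*sample*"  (trimmed, then wrapped in wildcards)
  #   "*sample*"  -> "*sample*"  (existing wildcards stripped, then re-added)
  #   "*"         -> "*"         (left as the match-all pattern)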
def get_databases(self, database_names='*'):
if database_names != '*':
database_names = self.to_matching_wildcard(database_names)
databases = self.client.get_databases(schemaName=database_names)
if len(databases) <= APPLY_NATURAL_SORT_MAX.get():
databases = apply_natural_sort(databases)
return databases
def get_database(self, database):
return self.client.get_database(database)
def get_tables_meta(self, database='default', table_names='*', table_types=None):
if self.server_name == 'beeswax':
identifier = self.to_matching_wildcard(table_names)
else:
identifier = None
tables = self.client.get_tables_meta(database, identifier, table_types)
if len(tables) <= APPLY_NATURAL_SORT_MAX.get():
tables = apply_natural_sort(tables, key='name')
return tables
def get_tables(self, database='default', table_names='*', table_types=None):
if self.server_name == 'beeswax':
identifier = self.to_matching_wildcard(table_names)
else:
identifier = None
tables = self.client.get_tables(database, identifier, table_types)
if len(tables) <= APPLY_NATURAL_SORT_MAX.get():
tables = apply_natural_sort(tables)
return tables
def get_table(self, database, table_name):
return self.client.get_table(database, table_name)
def alter_table(self, database, table_name, new_table_name=None, comment=None, tblproperties=None):
hql = 'ALTER TABLE `%s`.`%s`' % (database, table_name)
if new_table_name:
table_name = new_table_name
hql += ' RENAME TO `%s`' % table_name
elif comment:
hql += " SET TBLPROPERTIES ('comment' = '%s')" % comment
elif tblproperties:
hql += " SET TBLPROPERTIES (%s)" % ' ,'.join("'%s' = '%s'" % (k, v) for k, v in tblproperties.items())
timeout = SERVER_CONN_TIMEOUT.get()
query = hql_query(hql)
handle = self.execute_and_wait(query, timeout_sec=timeout)
if handle:
self.close(handle)
else:
msg = _("Failed to execute alter table statement: %s") % hql
raise QueryServerException(msg)
return self.client.get_table(database, table_name)
def get_column(self, database, table_name, column_name):
table = self.client.get_table(database, table_name)
for col in table.cols:
if col.name == column_name:
return col
return None
def alter_column(self, database, table_name, column_name, new_column_name, column_type, comment=None,
partition_spec=None, cascade=False):
hql = 'ALTER TABLE `%s`.`%s`' % (database, table_name)
if partition_spec:
hql += ' PARTITION (%s)' % partition_spec
hql += ' CHANGE COLUMN `%s` `%s` %s' % (column_name, new_column_name, column_type.upper())
if comment:
hql += " COMMENT '%s'" % comment
if cascade:
hql += ' CASCADE'
timeout = SERVER_CONN_TIMEOUT.get()
query = hql_query(hql)
handle = self.execute_and_wait(query, timeout_sec=timeout)
if handle:
self.close(handle)
else:
msg = _("Failed to execute alter column statement: %s") % hql
raise QueryServerException(msg)
return self.get_column(database, table_name, new_column_name)
def execute_query(self, query, design):
return self.execute_and_watch(query, design=design)
def select_star_from(self, database, table):
if table.partition_keys: # Filter on max number of partitions for partitioned tables
hql = self._get_sample_partition_query(database, table, limit=10000) # Currently need a limit
else:
hql = "SELECT * FROM `%s`.`%s`" % (database, table.name)
return self.execute_statement(hql)
def get_select_star_query(self, database, table):
if table.partition_keys: # Filter on max number of partitions for partitioned tables
hql = self._get_sample_partition_query(database, table, limit=10000) # Currently need a limit
else:
hql = "SELECT * FROM `%s`.`%s`" % (database, table.name)
return hql
def execute_statement(self, hql):
if self.server_name == 'impala':
query = hql_query(hql, QUERY_TYPES[1])
else:
query = hql_query(hql, QUERY_TYPES[0])
return self.execute_and_watch(query)
def fetch(self, query_handle, start_over=False, rows=None):
no_start_over_support = [config_variable for config_variable in self.get_default_configuration(False)
if config_variable.key == 'support_start_over'
and config_variable.value == 'false']
if no_start_over_support:
start_over = False
return self.client.fetch(query_handle, start_over, rows)
def close_operation(self, query_handle):
return self.client.close_operation(query_handle)
def open_session(self, user):
return self.client.open_session(user)
def close_session(self, session):
resp = self.client.close_session(session)
if resp.status.statusCode != 0:
session.status_code = resp.status.statusCode
session.save()
raise QueryServerException(_('Failed to close session, session handle may already be closed or timed out.'))
else:
session.status_code = 4 # Set to ttypes.TStatusCode.INVALID_HANDLE_STATUS
session.save()
return session
def cancel_operation(self, query_handle):
resp = self.client.cancel_operation(query_handle)
if self.client.query_server['server_name'] == 'impala':
resp = self.client.close_operation(query_handle)
return resp
def get_sample(self, database, table, column=None, nested=None):
result = None
hql = None
limit = 100
if column or nested: # Could do column for any type, then nested with partitions
if self.server_name == 'impala':
from impala.dbms import ImpalaDbms
select_clause, from_clause = ImpalaDbms.get_nested_select(database, table.name, column, nested)
hql = 'SELECT %s FROM %s LIMIT %s' % (select_clause, from_clause, limit)
else:
# Filter on max # of partitions for partitioned tables
# Impala's SHOW PARTITIONS is different from Hive, so we only support Hive for now
if self.server_name != 'impala' and table.partition_keys:
hql = self._get_sample_partition_query(database, table, limit)
else:
hql = "SELECT * FROM `%s`.`%s` LIMIT %s" % (database, table.name, limit)
if hql:
query = hql_query(hql)
handle = self.execute_and_wait(query, timeout_sec=5.0)
if handle:
result = self.fetch(handle, rows=100)
self.close(handle)
return result
def _get_sample_partition_query(self, database, table, limit):
max_parts = QUERY_PARTITIONS_LIMIT.get()
partitions = self.get_partitions(database, table, partition_spec=None, max_parts=max_parts)
if partitions and max_parts:
# Need to reformat partition specs for where clause syntax
partition_specs = [part.partition_spec.replace(',', ' AND ') for part in partitions]
partition_filters = ' OR '.join(['(%s)' % partition_spec for partition_spec in partition_specs])
partition_clause = 'WHERE %s' % partition_filters
else:
partition_clause = ''
return "SELECT * FROM `%(database)s`.`%(table)s` %(partition_clause)s LIMIT %(limit)s" % \
{'database': database, 'table': table.name, 'partition_clause': partition_clause, 'limit': limit}
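  # Illustrative output of _get_sample_partition_query for a table partitioned
  # by (dt, country) with two matching partitions; the names and values below
  # are assumptions, and the generated HQL is a single statement:
  #   SELECT * FROM `web`.`logs` WHERE (dt='2015-01-02' AND country='US')
  #     OR (dt='2015-01-01' AND country='US') LIMIT 100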
def analyze_table(self, database, table):
if self.server_name == 'impala':
hql = 'COMPUTE STATS `%(database)s`.`%(table)s`' % {'database': database, 'table': table}
else:
table_obj = self.get_table(database, table)
partition_spec = ''
if table_obj.partition_keys:
partition_keys = ','.join([part.name for part in table_obj.partition_keys])
partition_spec = 'PARTITION(%(partition_keys)s)' % {'partition_keys': partition_keys}
hql = 'ANALYZE TABLE `%(database)s`.`%(table)s` %(partition_spec)s COMPUTE STATISTICS' % \
{'database': database, 'table': table, 'partition_spec': partition_spec}
return self.execute_statement(hql)
def analyze_table_columns(self, database, table):
if self.server_name == 'impala':
hql = 'COMPUTE STATS `%(database)s`.`%(table)s`' % {'database': database, 'table': table}
else:
table_obj = self.get_table(database, table)
if table_obj.partition_keys:
raise NotImplementedError('HIVE-4861: COMPUTE STATISTICS FOR COLUMNS not supported for partitioned-tables.')
else:
hql = 'ANALYZE TABLE `%(database)s`.`%(table)s` COMPUTE STATISTICS FOR COLUMNS' % {'database': database, 'table': table}
return self.execute_statement(hql)
def get_table_stats(self, database, table):
stats = []
if self.server_name == 'impala':
hql = 'SHOW TABLE STATS `%(database)s`.`%(table)s`' % {'database': database, 'table': table}
query = hql_query(hql)
handle = self.execute_and_wait(query, timeout_sec=5.0)
if handle:
result = self.fetch(handle, rows=100)
self.close(handle)
stats = list(result.rows())
else:
table = self.get_table(database, table)
stats = table.stats
return stats
def get_table_columns_stats(self, database, table, column):
if self.server_name == 'impala':
hql = 'SHOW COLUMN STATS `%(database)s`.`%(table)s`' % {'database': database, 'table': table}
else:
hql = 'DESCRIBE FORMATTED `%(database)s`.`%(table)s` `%(column)s`' % {'database': database, 'table': table, 'column': column}
query = hql_query(hql)
handle = self.execute_and_wait(query, timeout_sec=5.0)
if handle:
result = self.fetch(handle, rows=100)
self.close(handle)
data = list(result.rows())
if self.server_name == 'impala':
data = [col for col in data if col[0] == column][0]
return [
{'col_name': data[0]},
{'data_type': data[1]},
{'distinct_count': data[2]},
{'num_nulls': data[3]},
{'max_col_len': data[4]},
{'avg_col_len': data[5]},
]
else:
return [
{'col_name': data[2][0]},
{'data_type': data[2][1]},
{'min': data[2][2]},
{'max': data[2][3]},
{'num_nulls': data[2][4]},
{'distinct_count': data[2][5]},
{'avg_col_len': data[2][6]},
{'max_col_len': data[2][7]},
{'num_trues': data[2][8]},
{'num_falses': data[2][9]}
]
else:
return []
def get_top_terms(self, database, table, column, limit=30, prefix=None):
limit = min(limit, 100)
prefix_match = ''
if prefix:
prefix_match = "WHERE CAST(%(column)s AS STRING) LIKE '%(prefix)s%%'" % {'column': column, 'prefix': prefix}
hql = 'SELECT %(column)s, COUNT(*) AS ct FROM `%(database)s`.`%(table)s` %(prefix_match)s GROUP BY %(column)s ORDER BY ct DESC LIMIT %(limit)s' % {
'database': database, 'table': table, 'column': column, 'prefix_match': prefix_match, 'limit': limit,
}
query = hql_query(hql)
handle = self.execute_and_wait(query, timeout_sec=60.0) # Hive is very slow
if handle:
result = self.fetch(handle, rows=limit)
self.close(handle)
return list(result.rows())
else:
return []
def drop_table(self, database, table):
if table.is_view:
hql = "DROP VIEW `%s`.`%s`" % (database, table.name,)
else:
hql = "DROP TABLE `%s`.`%s`" % (database, table.name,)
return self.execute_statement(hql)
def load_data(self, database, table, form, design):
hql = "LOAD DATA INPATH"
hql += " '%s'" % form.cleaned_data['path']
if form.cleaned_data['overwrite']:
hql += " OVERWRITE"
hql += " INTO TABLE "
hql += "`%s`.`%s`" % (database, table.name,)
if form.partition_columns:
hql += " PARTITION ("
vals = []
for key, column_name in form.partition_columns.iteritems():
vals.append("%s='%s'" % (column_name, form.cleaned_data[key]))
hql += ", ".join(vals)
hql += ")"
query = hql_query(hql, database)
design.data = query.dumps()
design.save()
return self.execute_query(query, design)
def drop_tables(self, database, tables, design):
hql = []
for table in tables:
if table.is_view:
hql.append("DROP VIEW `%s`.`%s`" % (database, table.name,))
else:
hql.append("DROP TABLE `%s`.`%s`" % (database, table.name,))
query = hql_query(';'.join(hql), database)
design.data = query.dumps()
design.save()
return self.execute_query(query, design)
def drop_database(self, database):
return self.execute_statement("DROP DATABASE `%s`" % database)
def drop_databases(self, databases, design):
hql = []
for database in databases:
hql.append("DROP DATABASE `%s`" % database)
query = hql_query(';'.join(hql), database)
design.data = query.dumps()
design.save()
return self.execute_query(query, design)
def _get_and_validate_select_query(self, design, query_history):
query = design.get_query_statement(query_history.statement_number)
if not query.strip().lower().startswith('select'):
raise Exception(_('Only SELECT statements can be saved. Provided query: %(query)s') % {'query': query})
return query
def insert_query_into_directory(self, query_history, target_dir):
design = query_history.design.get_design()
database = design.query['database']
self.use(database)
query = self._get_and_validate_select_query(design, query_history)
hql = "INSERT OVERWRITE DIRECTORY '%s' %s" % (target_dir, query)
return self.execute_statement(hql)
def create_table_as_a_select(self, request, query_history, target_database, target_table, result_meta):
design = query_history.design.get_design()
database = design.query['database']
# Case 1: Hive Server 2 backend or results straight from an existing table
if result_meta.in_tablename:
self.use(database)
query = self._get_and_validate_select_query(design, query_history)
hql = 'CREATE TABLE `%s`.`%s` AS %s' % (target_database, target_table, query)
query_history = self.execute_statement(hql)
else:
# FYI: this path is dead since moving to HiveServer2
#
# Case 2: The results are in some temporary location
# Beeswax backward compatibility and optimization
# 1. Create table
cols = ''
schema = result_meta.schema
for i, field in enumerate(schema.fieldSchemas):
if i != 0:
cols += ',\n'
cols += '`%s` %s' % (field.name, field.type)
# The representation of the delimiter is messy.
      # It came from Java as a string, which might have been converted from an integer.
# So it could be "1" (^A), or "10" (\n), or "," (a comma literally).
delim = result_meta.delim
if not delim.isdigit():
delim = str(ord(delim))
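      # e.g. delim "1" stays "1", while a literal "," becomes str(ord(',')) == "44";
      # zfill(3) below pads it to three digits so it can be written as an
      # escape sequence ('\001', '\044', ...) in the DDL statement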
hql = '''
CREATE TABLE `%s` (
%s
)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '\%s'
STORED AS TextFile
''' % (target_table, cols, delim.zfill(3))
query = hql_query(hql)
self.execute_and_wait(query)
try:
# 2. Move the results into the table's storage
table_obj = self.get_table('default', target_table)
table_loc = request.fs.urlsplit(table_obj.path_location)[2]
result_dir = request.fs.urlsplit(result_meta.table_dir)[2]
request.fs.rename_star(result_dir, table_loc)
LOG.debug("Moved results from %s to %s" % (result_meta.table_dir, table_loc))
request.info(request, _('Saved query results as new table %(table)s.') % {'table': target_table})
query_history.save_state(QueryHistory.STATE.expired)
except Exception, ex:
query = hql_query('DROP TABLE `%s`' % target_table)
try:
self.execute_and_wait(query)
except Exception, double_trouble:
LOG.exception('Failed to drop table "%s" as well: %s' % (target_table, double_trouble))
raise ex
url = format_preserving_redirect(request, reverse('metastore:index'))
return query_history
def use(self, database):
query = hql_query('USE `%s`' % database)
return self.client.use(query)
def get_log(self, query_handle, start_over=True):
return self.client.get_log(query_handle, start_over)
def get_state(self, handle):
return self.client.get_state(handle)
def get_operation_status(self, handle):
return self.client.get_operation_status(handle)
def execute_and_wait(self, query, timeout_sec=30.0, sleep_interval=0.5):
"""
    Run the query and poll its status until it finishes or times out.
"""
handle = self.client.query(query)
curr = time.time()
end = curr + timeout_sec
while curr <= end:
state = self.client.get_state(handle)
if state not in (QueryHistory.STATE.running, QueryHistory.STATE.submitted):
return handle
time.sleep(sleep_interval)
curr = time.time()
# Query timed out, so attempt to cancel operation and raise exception
msg = "The query timed out after %(timeout)d seconds, canceled query." % {'timeout': timeout_sec}
LOG.warning(msg)
try:
self.cancel_operation(handle)
except Exception, e:
msg = "Failed to cancel query."
LOG.warning(msg)
self.close_operation(handle)
raise QueryServerException(e, message=msg)
raise QueryServerTimeoutException(message=msg)
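  # Typical in-module call pattern (see get_sample above): run a statement
  # synchronously, fetch a few rows, then release the server-side handle.
  #   handle = self.execute_and_wait(hql_query(hql), timeout_sec=5.0)
  #   if handle:
  #     result = self.fetch(handle, rows=100)
  #     self.close(handle)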
def execute_next_statement(self, query_history, hql_query):
if query_history.is_success() or query_history.is_expired():
# We need to go to the next statement only if the previous one passed
query_history.statement_number += 1
else:
# We need to update the query in case it was fixed
query_history.refresh_design(hql_query)
query_history.last_state = QueryHistory.STATE.submitted.index
query_history.save()
query = query_history.design.get_design()
# In case of multiquery, we need to re-replace the parameters as we save the non substituted query
if query._data_dict['query']['is_parameterized']:
real_query = substitute_variables(query._data_dict['query']['query'], query_history.get_extra('parameters'))
query._data_dict['query']['query'] = real_query
return self.execute_and_watch(query, query_history=query_history)
def execute_and_watch(self, query, design=None, query_history=None):
"""
Run query and return a QueryHistory object in order to see its progress on a Web page.
"""
hql_query = query.hql_query
if query_history is None:
query_history = QueryHistory.build(
owner=self.client.user,
query=hql_query,
server_host='%(server_host)s' % self.client.query_server,
server_port='%(server_port)d' % self.client.query_server,
server_name='%(server_name)s' % self.client.query_server,
server_type=self.server_type,
last_state=QueryHistory.STATE.submitted.index,
design=design,
notify=query.query.get('email_notify', False),
query_type=query.query['type'],
statement_number=0
)
query_history.save()
LOG.debug("Made new QueryHistory id %s user %s query: %s..." % (query_history.id, self.client.user, query_history.query[:25]))
try:
handle = self.client.query(query, query_history.statement_number)
if not handle.is_valid():
msg = _("Server returning invalid handle for query id %(id)d [%(query)s]...") % {'id': query_history.id, 'query': query[:40]}
raise QueryServerException(msg)
except QueryServerException, ex:
LOG.exception(ex)
# Kind of expected (hql compile/syntax error, etc.)
if hasattr(ex, 'handle') and ex.handle:
query_history.server_id, query_history.server_guid = ex.handle.id, ex.handle.id
query_history.log_context = ex.handle.log_context
query_history.save_state(QueryHistory.STATE.failed)
raise ex
# All good
query_history.server_id, query_history.server_guid = handle.get()
query_history.operation_type = handle.operation_type
query_history.has_results = handle.has_result_set
query_history.modified_row_count = handle.modified_row_count
query_history.log_context = handle.log_context
query_history.query_type = query.query['type']
query_history.set_to_running()
query_history.save()
LOG.debug("Updated QueryHistory id %s user %s statement_number: %s" % (query_history.id, self.client.user, query_history.statement_number))
return query_history
def get_results_metadata(self, handle):
return self.client.get_results_metadata(handle)
def close(self, handle):
return self.client.close(handle)
def get_partitions(self, db_name, table, partition_spec=None, max_parts=None, reverse_sort=True):
if max_parts is None or max_parts > LIST_PARTITIONS_LIMIT.get():
max_parts = LIST_PARTITIONS_LIMIT.get()
return self.client.get_partitions(db_name, table.name, partition_spec, max_parts=max_parts, reverse_sort=reverse_sort)
def get_partition(self, db_name, table_name, partition_spec):
table = self.get_table(db_name, table_name)
partitions = self.get_partitions(db_name, table, partition_spec=partition_spec)
if len(partitions) != 1:
raise NoSuchObjectException(_("Query did not return exactly one partition result"))
partition = partitions[0]
partition_query = " AND ".join(partition.partition_spec.split(','))
hql = "SELECT * FROM `%s`.`%s` WHERE %s" % (db_name, table_name, partition_query)
return self.execute_statement(hql)
def describe_partition(self, db_name, table_name, partition_spec):
return self.client.get_table(db_name, table_name, partition_spec=partition_spec)
def drop_partitions(self, db_name, table_name, partition_specs, design):
hql = []
for partition_spec in partition_specs:
hql.append("ALTER TABLE `%s`.`%s` DROP IF EXISTS PARTITION (%s) PURGE" % (db_name, table_name, partition_spec))
query = hql_query(';'.join(hql), db_name)
design.data = query.dumps()
design.save()
return self.execute_query(query, design)
def get_indexes(self, db_name, table_name):
hql = 'SHOW FORMATTED INDEXES ON `%(table)s` IN `%(database)s`' % {'table': table_name, 'database': db_name}
query = hql_query(hql)
handle = self.execute_and_wait(query, timeout_sec=15.0)
if handle:
result = self.fetch(handle, rows=5000)
return result
def get_configuration(self):
return self.client.get_configuration()
def get_functions(self, prefix=None):
filter = '"%s.*"' % prefix if prefix else '".*"'
hql = 'SHOW FUNCTIONS %s' % filter
query = hql_query(hql)
handle = self.execute_and_wait(query, timeout_sec=15.0)
if handle:
result = self.fetch(handle, rows=5000)
return result
def explain(self, query):
return self.client.explain(query)
def getStatus(self):
return self.client.getStatus()
def get_default_configuration(self, include_hadoop):
return self.client.get_default_configuration(include_hadoop)
class Table:
"""
Represents the metadata of a Hive Table.
"""
@property
def hdfs_link(self):
return location_to_url(self.path_location)
class DataTable:
"""
Represents the data of a Hive Table.
If the dataset has more rows, a new fetch should be done in order to return a new data table with the next rows.
"""
pass
# TODO decorator?
def expand_exception(exc, db, handle=None):
try:
if handle is not None:
log = db.get_log(handle)
elif hasattr(exc, 'get_rpc_handle') or hasattr(exc, 'log_context'):
log = db.get_log(exc)
else:
log = ''
except Exception, e:
# Always show something, even if server has died on the job.
log = _("Could not retrieve logs: %s." % e)
if not exc.args or not exc.args[0]:
error_message = _("Unknown exception.")
else:
error_message = force_unicode(exc.args[0], strings_only=True, errors='replace')
return error_message, log
|
|
import asyncio
import unittest
from unittest import mock
import sys
from psycopg2.extensions import TRANSACTION_STATUS_INTRANS
import aiopg
from aiopg.connection import Connection, TIMEOUT
from aiopg.pool import Pool
class TestPool(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.pool = None
def tearDown(self):
if self.pool is not None:
self.pool.terminate()
self.loop.run_until_complete(self.pool.wait_closed())
self.loop.close()
self.loop = None
@asyncio.coroutine
def create_pool(self, no_loop=False, **kwargs):
loop = None if no_loop else self.loop
pool = yield from aiopg.create_pool(database='aiopg',
user='aiopg',
password='passwd',
host='127.0.0.1',
loop=loop,
**kwargs)
self.pool = pool
return pool
def test_create_pool(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
self.assertIsInstance(pool, Pool)
self.assertEqual(10, pool.minsize)
self.assertEqual(10, pool.maxsize)
self.assertEqual(10, pool.size)
self.assertEqual(10, pool.freesize)
self.assertEqual(TIMEOUT, pool.timeout)
self.assertFalse(pool.echo)
self.loop.run_until_complete(go())
def test_create_pool2(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool(minsize=10, maxsize=20)
self.assertIsInstance(pool, Pool)
self.assertEqual(10, pool.minsize)
self.assertEqual(20, pool.maxsize)
self.assertEqual(10, pool.size)
self.assertEqual(10, pool.freesize)
self.assertEqual(TIMEOUT, pool.timeout)
self.loop.run_until_complete(go())
def test_acquire(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
conn = yield from pool.acquire()
self.assertIsInstance(conn, Connection)
self.assertFalse(conn.closed)
cur = yield from conn.cursor()
yield from cur.execute('SELECT 1')
val = yield from cur.fetchone()
self.assertEqual((1,), val)
pool.release(conn)
self.loop.run_until_complete(go())
def test_release(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
conn = yield from pool.acquire()
self.assertEqual(9, pool.freesize)
self.assertEqual({conn}, pool._used)
pool.release(conn)
self.assertEqual(10, pool.freesize)
self.assertFalse(pool._used)
self.loop.run_until_complete(go())
def test_release_closed(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
conn = yield from pool.acquire()
self.assertEqual(9, pool.freesize)
yield from conn.close()
pool.release(conn)
self.assertEqual(9, pool.freesize)
self.assertFalse(pool._used)
self.assertEqual(9, pool.size)
conn2 = yield from pool.acquire()
self.assertEqual(9, pool.freesize)
self.assertEqual(10, pool.size)
pool.release(conn2)
self.loop.run_until_complete(go())
def test_bad_context_manager_usage(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
with self.assertRaises(RuntimeError):
with pool:
pass
self.loop.run_until_complete(go())
def test_context_manager(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
with (yield from pool) as conn:
self.assertIsInstance(conn, Connection)
self.assertEqual(9, pool.freesize)
self.assertEqual({conn}, pool._used)
self.assertEqual(10, pool.freesize)
self.loop.run_until_complete(go())
def test_clear(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
yield from pool.clear()
self.assertEqual(0, pool.freesize)
self.loop.run_until_complete(go())
def test_initial_empty(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool(minsize=0)
self.assertEqual(10, pool.maxsize)
self.assertEqual(0, pool.minsize)
self.assertEqual(0, pool.size)
self.assertEqual(0, pool.freesize)
with (yield from pool):
self.assertEqual(1, pool.size)
self.assertEqual(0, pool.freesize)
self.assertEqual(1, pool.size)
self.assertEqual(1, pool.freesize)
conn1 = yield from pool.acquire()
self.assertEqual(1, pool.size)
self.assertEqual(0, pool.freesize)
conn2 = yield from pool.acquire()
self.assertEqual(2, pool.size)
self.assertEqual(0, pool.freesize)
pool.release(conn1)
self.assertEqual(2, pool.size)
self.assertEqual(1, pool.freesize)
pool.release(conn2)
self.assertEqual(2, pool.size)
self.assertEqual(2, pool.freesize)
self.loop.run_until_complete(go())
def test_parallel_tasks(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool(minsize=0, maxsize=2)
self.assertEqual(2, pool.maxsize)
self.assertEqual(0, pool.minsize)
self.assertEqual(0, pool.size)
self.assertEqual(0, pool.freesize)
fut1 = pool.acquire()
fut2 = pool.acquire()
conn1, conn2 = yield from asyncio.gather(fut1, fut2,
loop=self.loop)
self.assertEqual(2, pool.size)
self.assertEqual(0, pool.freesize)
self.assertEqual({conn1, conn2}, pool._used)
pool.release(conn1)
self.assertEqual(2, pool.size)
self.assertEqual(1, pool.freesize)
self.assertEqual({conn2}, pool._used)
pool.release(conn2)
self.assertEqual(2, pool.size)
self.assertEqual(2, pool.freesize)
self.assertFalse(conn1.closed)
self.assertFalse(conn2.closed)
conn3 = yield from pool.acquire()
self.assertIs(conn3, conn1)
pool.release(conn3)
self.loop.run_until_complete(go())
def test_parallel_tasks_more(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool(minsize=0, maxsize=3)
fut1 = pool.acquire()
fut2 = pool.acquire()
fut3 = pool.acquire()
conn1, conn2, conn3 = yield from asyncio.gather(fut1, fut2, fut3,
loop=self.loop)
self.assertEqual(3, pool.size)
self.assertEqual(0, pool.freesize)
self.assertEqual({conn1, conn2, conn3}, pool._used)
pool.release(conn1)
self.assertEqual(3, pool.size)
self.assertEqual(1, pool.freesize)
self.assertEqual({conn2, conn3}, pool._used)
pool.release(conn2)
self.assertEqual(3, pool.size)
self.assertEqual(2, pool.freesize)
self.assertEqual({conn3}, pool._used)
self.assertFalse(conn1.closed)
self.assertFalse(conn2.closed)
pool.release(conn3)
self.assertEqual(3, pool.size)
self.assertEqual(3, pool.freesize)
self.assertFalse(pool._used)
self.assertFalse(conn1.closed)
self.assertFalse(conn2.closed)
self.assertFalse(conn3.closed)
conn4 = yield from pool.acquire()
self.assertIs(conn4, conn1)
pool.release(conn4)
self.loop.run_until_complete(go())
def test_default_event_loop(self):
asyncio.set_event_loop(self.loop)
@asyncio.coroutine
def go():
pool = yield from self.create_pool(no_loop=True)
self.assertIs(pool._loop, self.loop)
self.loop.run_until_complete(go())
def test_cursor(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
with (yield from pool.cursor()) as cur:
yield from cur.execute('SELECT 1')
ret = yield from cur.fetchone()
self.assertEqual((1,), ret)
self.assertTrue(cur.closed)
self.loop.run_until_complete(go())
@mock.patch("aiopg.pool.logger")
def test_release_with_invalid_status(self, m_log):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
conn = yield from pool.acquire()
self.assertEqual(9, pool.freesize)
self.assertEqual({conn}, pool._used)
cur = yield from conn.cursor()
yield from cur.execute('BEGIN')
cur.close()
pool.release(conn)
self.assertEqual(9, pool.freesize)
self.assertFalse(pool._used)
self.assertTrue(conn.closed)
m_log.warning.assert_called_with(
"Invalid transaction status on released connection: %d",
TRANSACTION_STATUS_INTRANS)
self.loop.run_until_complete(go())
def test__fill_free(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool(minsize=1)
with (yield from pool):
self.assertEqual(0, pool.freesize)
self.assertEqual(1, pool.size)
conn = yield from asyncio.wait_for(pool.acquire(),
timeout=0.5,
loop=self.loop)
self.assertEqual(0, pool.freesize)
self.assertEqual(2, pool.size)
pool.release(conn)
self.assertEqual(1, pool.freesize)
self.assertEqual(2, pool.size)
self.assertEqual(2, pool.freesize)
self.assertEqual(2, pool.size)
self.loop.run_until_complete(go())
def test_connect_from_acquire(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool(minsize=0)
self.assertEqual(0, pool.freesize)
self.assertEqual(0, pool.size)
with (yield from pool):
self.assertEqual(1, pool.size)
self.assertEqual(0, pool.freesize)
self.assertEqual(1, pool.size)
self.assertEqual(1, pool.freesize)
self.loop.run_until_complete(go())
def test_create_pool_with_timeout(self):
@asyncio.coroutine
def go():
timeout = 0.1
pool = yield from self.create_pool(timeout=timeout)
self.assertEqual(timeout, pool.timeout)
conn = yield from pool.acquire()
self.assertEqual(timeout, conn.timeout)
pool.release(conn)
self.loop.run_until_complete(go())
def test_cursor_with_timeout(self):
@asyncio.coroutine
def go():
timeout = 0.1
pool = yield from self.create_pool()
with (yield from pool.cursor(timeout=timeout)) as cur:
self.assertEqual(timeout, cur.timeout)
self.loop.run_until_complete(go())
def test_concurrency(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool(minsize=2, maxsize=4)
c1 = yield from pool.acquire()
c2 = yield from pool.acquire()
self.assertEqual(0, pool.freesize)
self.assertEqual(2, pool.size)
pool.release(c1)
pool.release(c2)
self.loop.run_until_complete(go())
def test_invalid_minsize_and_maxsize(self):
@asyncio.coroutine
def go():
with self.assertRaises(ValueError):
yield from self.create_pool(minsize=-1)
with self.assertRaises(ValueError):
yield from self.create_pool(minsize=5, maxsize=2)
self.loop.run_until_complete(go())
def test_true_parallel_tasks(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool(minsize=0, maxsize=1)
self.assertEqual(1, pool.maxsize)
self.assertEqual(0, pool.minsize)
self.assertEqual(0, pool.size)
self.assertEqual(0, pool.freesize)
maxsize = 0
minfreesize = 100
def inner():
nonlocal maxsize, minfreesize
maxsize = max(maxsize, pool.size)
minfreesize = min(minfreesize, pool.freesize)
conn = yield from pool.acquire()
maxsize = max(maxsize, pool.size)
minfreesize = min(minfreesize, pool.freesize)
yield from asyncio.sleep(0.01, loop=self.loop)
pool.release(conn)
maxsize = max(maxsize, pool.size)
minfreesize = min(minfreesize, pool.freesize)
yield from asyncio.gather(inner(), inner(),
loop=self.loop)
self.assertEqual(1, maxsize)
self.assertEqual(0, minfreesize)
self.loop.run_until_complete(go())
def test_cannot_acquire_after_closing(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
pool.close()
with self.assertRaises(RuntimeError):
yield from pool.acquire()
self.loop.run_until_complete(go())
def test_wait_closed(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
c1 = yield from pool.acquire()
c2 = yield from pool.acquire()
self.assertEqual(10, pool.size)
self.assertEqual(8, pool.freesize)
ops = []
@asyncio.coroutine
def do_release(conn):
yield from asyncio.sleep(0, loop=self.loop)
pool.release(conn)
ops.append('release')
@asyncio.coroutine
def wait_closed():
yield from pool.wait_closed()
ops.append('wait_closed')
pool.close()
yield from asyncio.gather(wait_closed(),
do_release(c1),
do_release(c2),
loop=self.loop)
self.assertEqual(['release', 'release', 'wait_closed'], ops)
self.assertEqual(0, pool.freesize)
self.loop.run_until_complete(go())
def test_echo(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool(echo=True)
self.assertTrue(pool.echo)
with (yield from pool) as conn:
self.assertTrue(conn.echo)
self.loop.run_until_complete(go())
def test_terminate_with_acquired_connections(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
conn = yield from pool.acquire()
pool.terminate()
yield from pool.wait_closed()
self.assertTrue(conn.closed)
self.loop.run_until_complete(go())
def test_release_closed_connection(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
conn = yield from pool.acquire()
conn.close()
pool.release(conn)
self.loop.run_until_complete(go())
def test_wait_closing_on_not_closed(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
with self.assertRaises(RuntimeError):
yield from pool.wait_closed()
self.loop.run_until_complete(go())
def test_release_terminated_pool(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
conn = yield from pool.acquire()
pool.terminate()
yield from pool.wait_closed()
pool.release(conn)
self.loop.run_until_complete(go())
def test_close_with_acquired_connections(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
yield from pool.acquire()
pool.close()
with self.assertRaises(asyncio.TimeoutError):
yield from asyncio.wait_for(pool.wait_closed(),
0.1, loop=self.loop)
self.loop.run_until_complete(go())
@unittest.skipIf(sys.version_info < (3, 4),
"Python 3.3 doesnt support __del__ calls from GC")
def test___del__(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
self.pool = None # drop reference
with self.assertWarns(ResourceWarning):
del pool
self.loop.run_until_complete(go())
def test_unlimited_size(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool(maxsize=0)
self.assertEqual(10, pool.minsize)
self.assertIsNone(pool._free.maxlen)
self.loop.run_until_complete(go())
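# These tests assume a PostgreSQL server reachable on 127.0.0.1 with the
# database, user and password used in create_pool() above ('aiopg', 'aiopg',
# 'passwd'). A typical (hedged) invocation, where "test_pool" is whatever
# module name this file is saved under:
#
#   python -m unittest test_pool -v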
|
|
# -----------------------------------------------------------------------------
# A module that builds core objects, i.e. stacks and laminates
# Stack() : a dict of the ordered laminate layers.
# Laminate() : pandas objects including laminate dimensions and calculations
# flake8 constructs.py --ignore=E265,E501,N802,H806
import traceback
import itertools as it
import collections as ct
import pandas as pd
import numpy as np
from lamana import theories
from lamana.utils import tools as ut
from lamana.lt_exceptions import IndeterminateError
# =============================================================================
# STACK -----------------------------------------------------------------------
# =============================================================================
# Classes related to stack creation, layer ordering. Precursor to Snapshot.
class Stack(object):
'''Build a StackTuple object containing stack-related methods.
We need to go from a 3 item Geometry object to n-sized stack of labeled
layers. Two operations are performed:
1. Decode the Geometry object into a list of lamina thicknesses and
types "unfolded" (or mirrored) across the physical neutral axis.
2. Identify the unfolded geometry to build a StackTuple - a namedtuple
       containing a dict of the stacking order, the number of plies, the
official stack name and alias.
Variables
=========
FeatureInput : dict or Geometry object
        Use the 'Geometry' key to extract the GeometryTuple (converted geometry string).
Can directly accept a Geometry object
Methods
=======
decode_geometry --> generator
Iterating forward and backward over the Geometry object.
identify_geometry --> namedtuple
Lists are converted to dicts.
add_materials --> dict
Materials can be later added to a stack dict.
stack_to_df --> DataFrame
Convert the stack to a DataFrame.
Object
======
StackTuple : namedtuple; (dict, int, str, str)
Contains order, nplies, name, alias.
See Also
========
collections.namedtuple : special tuple in the Python Standard Library.
'''
def __init__(self, FeatureInput):
try:
# If passed 'Geometry' is actually a dict (FeatureInput)
self.Geometry = FeatureInput['Geometry']
except(TypeError):
'''TEST geometry object'''
self.Geometry = FeatureInput # if a Geometry object
decoded = self.decode_geometry(self.Geometry)
self.unfolded = list(decoded) # used in tests
'''Recalled because generator exhausts decoded. Improve.'''
decoded = self.decode_geometry(self.Geometry)
self.StackTuple = self.identify_geometry(decoded) # namedtuple of (stack, nplies, name, alias)
def decode_geometry(self, Geometry):
'''Return a generator that decodes the Geometry object.
Interprets the stacking order and yields a tuple of the lamina type
(ltype) and thickness.
A Geometry object has a .geometry attribute returning a namedtuple of
laminae thicknesses labeled:
- ['outer', 'inner', 'middle', 'symmetric'] # symmetry convention
- ['outer', 'inner', 'middle'] # general conventions
This function makes a generator that checks for symmetry, then iterates
forward and backward over the Geometry tuple ("unfolding" the tuple).
If the Symmetric Convention is detected, the tuples are converted
to General Convention by popping the 'symmetric' list element.
Performance
===========
%timeit decode_geometry(G) (with lists) : 10.3 us
%timeit decode_geometry(G) (with generators): 529 ns
%timeit decode_geometry(G) (with generators; 2nd refactor): 1.4 us
%timeit [layer_ for layer_ in decode_geometry(G)] (generators to lists): 57.1 us
%timeit [layer_ for layer_ in decode_geometry(G)] (generators to lists; 2nd refactor): 107 us
Example
=======
>>> G = la.input_.Geometry('400-[200]-800')
>>> G
Geometry object('400-[200]-800')
>>> decoded = decode_geometry(G)
>>> decoded
<generator object>
>>>[tupled for tupled in decode_geometry(G)]
[('outer', 400.0),
('inner', 200.0),
('middle', 800.0),
('inner', 200.0),
('outer', 400.0)]
'''
def get_decoded(): # procedure
'''Iterate forward and backward for each type of layer.'''
# Forward: outer, inner_i, middle ...
for ltype, thickness in listify_layer(Geometry):
#yield from process_layer(ltype, thickness)
'''DEV: Converted for Python 2.7; see latter for Py 3.x'''
for layer_ in process_layer(ltype, thickness):
yield layer_
# Reverse: ... inner_i, outer
for ltype, thickness in reversed(listify_layer(Geometry)[:-1]):
#yield from process_layer(ltype, thickness, reverse=True)
'''DEV: Converted for Python 2.7; see latter for Py 3.x'''
for layer_ in process_layer(ltype, thickness, reverse=True):
yield layer_
def listify_layer(Geometry): # pure function 1
'''Convert Geometry namedtuple to a list of tuples; pops symmetric entry'''
layers = list(Geometry.geometry._asdict().items())
if Geometry.is_symmetric: # clean up last element; see namedtuple of symmetric Geometry
layers.pop()
'''Add to verbose mode.'''
#print('Symmetry detected in Geometry object. Conforming to General Convention...')
return layers
def process_layer(ltype, thickness, reverse=False): # pure function 2
'''Get items out of inner_i thickness list and unfold Geometry stack.
Reverse inner_i list if set True.'''
if isinstance(thickness, list) & (reverse is False): # parse inner_i list for forward iteration
for inner in thickness:
yield (ltype, inner)
elif isinstance(thickness, list) & (reverse is True): # reverse inner_i list for reverse iteration
for inner in reversed(thickness):
yield (ltype, inner)
elif ltype == 'middle' and Geometry.is_symmetric:
yield (ltype, thickness * 2)
else:
yield (ltype, thickness)
return get_decoded()
def identify_geometry(self, decoded):
'''Return a namedtuple containing preliminary stack information.
This function iterates a generator of decoded geometry information.
Specifically, this information is the lamina type (ltype) and thickness.
A stack is built from this info by exclusion principle: only include
non-zero thick laminae to the stack. The number of plies (nplies),
name and alias (if special) are then determined.
Variables
=========
decoded : generator; tuples
Decoded Geometry object, containing tuples of thickness and layer_ type.
Stacking order is preserved; result of Stack.decode_geometry().
Returns
=======
StackTuple : namedtuple; (dict, int, str, str)
- order: (dict) of the layer_ number as keys and decoded geometry values
- nplies: (int) number of plies
- name: (str) name of laminate
- alias: (str) common name
Performance
===========
        geo_input = '400-200-800'
G = la.input_.Geometry(geo_input)
decoded = decode_geometry(G)
%timeit identify_geometry(decoded) (with lists): 950 us
%timeit identify_geometry(decoded) (with generators): 935 us ?
Example
=======
>>> geo_input = ('400-[200]-800')
>>> G = la.input_.Geometry(geo_input)
>>> identify_geometry(decode_geometry(G))
StackTuple(order=defaultdict(<class 'list'>,
{1: ['outer', 400.0], 2: ['inner', 200.0],
3: ['middle', 800.0], 4: ['inner', 200.0],
5: ['outer', 400.0]}),
nplies=5, name='5-ply', alias='Standard')
'''
# Dict of Aliases for Special Geometries
alias_dict = {
1: 'Monolith',
2: 'Bilayer',
3: 'Trilayer',
4: 'Quadlayer',
5: 'Standard',
}
StackTuple = ct.namedtuple('StackTuple', ['order', 'nplies', 'name', 'alias'])
order = ct.defaultdict(list) # subs empty {}
'''Is there a way to replace this nested counter with something pythonic?'''
layer_ = 0 # nested counter
for (ltype, thickness) in decoded:
#print(ltype, thickness)
# Exclude Zero layers from the Stack
if thickness != 0.0:
layer_ += 1 # updates only for non-zero thickness laminae
order[layer_].append(ltype) # adds tuple elements into the defaultdicts list
order[layer_].append(thickness) # ...
nplies = layer_ # updates, but last layer_ is retained in nplies
name = '{0}{1}'.format(nplies, '-ply')
if nplies in alias_dict.keys():
alias = alias_dict[nplies]
else:
alias = None
return StackTuple(order, nplies, name, alias)
@classmethod
def add_materials(cls, stack, materials):
'''Return a defaultdict of the stack with extended material values.
Uses the Cycler which alternates while iterating the materials list,
keeping count. Once the counter reaches the number of plies,
the loop breaks.
Variables
=========
stack : dict
Layer numbers as keys and layer type/thickness as values.
Material are appended to list values.
materials : list
User input materials either parsed by default in distributions
module or overridden by the user.
Examples
========
>>> import lamana as la
>>> from lamana.models import Wilson_LT as wlt
>>> dft = wlt.Defaults()
>>> # Get a stack dict
>>> stack_object = la.constructs.Stack(dft.FeatureInput)
>>> stack_dict = stack_object.StackTuple.order
>>> stack_dict
defaultdict(<class 'list'>,
{1: ['outer', 400.0], 2: ['inner', 200.0],
3: ['middle', 800.0], 4: ['inner', 200.0],
5: ['outer', 400.0]})
>>> # Extend the stack dict by adding materials to the list values
>>> stack = la.constructs.Stack(dft.FeatureInput)
>>> stack_extended = stack_object.add_materials(stack_dict, ['HA', 'PSu'])
>>> stack_extended
defaultdict(<class 'list'>,
{1: ['outer', 400.0, 'HA'], 2: ['inner', 200.0, 'PSu'],
3: ['middle', 800.0, 'HA'], 4: ['inner', 200.0, 'PSu'],
5: ['outer', 400.0, 'HA']})
'''
'''Move this handling and df conversion/extraction to get_FeatureInput'''
##n_materials = len(materials)
nplies = len(stack)
#print('stack materials ', materials)
# Cycler : alternate while iterating a list and add to a dict
for ind, material in enumerate(it.cycle(materials), 1):
#print('material index:', ind)
#print('materials:', material)
clean_values = []
clean_values.extend(stack[ind]) # take extant stack
clean_values.append(material) # add new value
stack[ind] = clean_values
if ind == nplies:
'''Add to verbose mode.'''
#print('Stack materials have been updated.')
return stack
@classmethod
def stack_to_df(cls, stack):
'''Return a DataFrame of converted stacks with materials (list of dicts).'''
df = pd.DataFrame(stack).T
df.reset_index(level=0, inplace=True) # reset index; make new column
df.columns = ['layer', 'type', 't(um)', 'matl'] # rename columns
recolumned = ['layer', 'matl', 'type', 't(um)']
df = ut.set_column_sequence(df, recolumned) # uses ext. f(x)
df[['t(um)']] = df[['t(um)']].astype(float) # reset numeric dtypes
return df
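# A compact end-to-end sketch of the Stack pipeline documented above. It
# mirrors the docstring examples and assumes the package layout they use
# (lamana.models.Wilson_LT.Defaults providing a FeatureInput). Defined only;
# never called at import time.
def _example_stack_pipeline():
    from lamana.models import Wilson_LT as wlt
    dft = wlt.Defaults()
    stack = Stack(dft.FeatureInput)                       # decode + identify
    order = stack.StackTuple.order                        # {layer: [type, t]}
    extended = Stack.add_materials(order, ['HA', 'PSu'])  # alternate materials
    return Stack.stack_to_df(extended)                    # DataFrame of layers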
# =============================================================================
# LAMINATES -------------------------------------------------------------------
# =============================================================================
# Create LaminateModel objects
class Laminate(Stack):
'''Generate a LaminateModel object. Stores several representations.
Laminate inherits from Stack. A FeatureInput is passed in from a certain
"Feature" module and exchanged between constructs and theories modules.
Changes from legacy definitions marked with "*".
Objects
=======
- Snapshot : stack of unique layers (1, 2, ..., n), single rows and ID columns.
- LFrame : snapshot with multiple rows including Dimensional Data.
    - LMFrame : LFrame with Dimensional and Data variables via theories.Model data.
Variable Types
==============
ID
--
layer_ : int
Enumerates layers from bottom, tensile side up.
side_ : str
Side of the stress state; tensile (bottom) or compressive (top).
type_ : str
Type of layer; outer, inner or middle.
*matl_ : str
Type of material.
t_ : float
Total thickness per layer.
Dimensional
-----------
label_ : str
Type of point; *interfacial, internal or discontinuity.
h_ : float
Lamina thickness for all lamina except middle layers (half thickness).
d_ : float
            Distance from the bottom layer; used (via handshake) in calculations
            in `theories.Model` and in testing.  Units (m).
intf_ : int
            Enumerates interfaces from the tensile side up.
*k_ : float
Lamina height level used in `Wilson_LT` - 1.
Z_ : float
Distance from the neutral axis to an interface (or sub-interface p).
z_ : float
Distance from the neutral axis to the lamina midplane (or sub-midplane_p).
Model
-----
... : ...
Defined by the user in a models module; related to Laminate Theory,
i.e. Q11, Q12, D11, D12, ..., stress, strain, etc.
Properties
==========
    p : int
        Number of rows per layer for a given laminate.
total : float
Total laminate thickness (in m).
max_stress : Series
View of max principal stresses per layer.
min_stress : Series
View of min principal stresses per layer.
is_special : bool
        Return True if nplies < 5, i.e. Monolith, Bilayer, Trilayer, 4-ply.
has_discont : bool
Return True if discontinuity points are found in a DataFrame.
has_neutaxis : bool
Return True if a row is found labeled 'neut. axis'.
Example
=======
>>> from lamana.models import Wilson_LT as wlt
>>> import lamana as la
>>> dft = wlt.Defaults()
>>> FeatureInput = dft.FeatureInput
>>> FeatureInput['Geometry'] = la.input_.Geometry('400-[200]-800')
>>> la.constructs.Laminate(FeatureInput)
<lamana LaminateModel object (400.0-[200.0]-800.0)>
See Also
--------
theories.Model : handles user defined Laminate Theory models
models : directory containing package models
'''
def __init__(self, FeatureInput):
super(Laminate, self).__init__(FeatureInput)
# Parse FeatureInput
self.FeatureInput = FeatureInput.copy() # for preserving FI in each Case
self.Geometry = FeatureInput['Geometry']
self.load_params = FeatureInput['Parameters']
self.mat_props = FeatureInput['Properties']
self.materials = FeatureInput['Materials']
self.model = FeatureInput['Model']
#print('constructs material attr:', self.materials)
# Parse Stack Object
st = Stack(FeatureInput)
self.stack_order = st.StackTuple.order
self.nplies = st.StackTuple.nplies
self.name = st.StackTuple.name
self.alias = st.StackTuple.alias # new in 0.4.3c5a
# Laminate Objects
self.Snapshot = [] # df object; stack
self.LFrame = [] # df of IDs; formerly Laminate_
#self.Model = theories.Model() # Model object
##self.Model = None
self.LMFrame = [] # df object; modded stack
self._type_cache = []
##self._dict_trim = {} # holder of pandas-less __dict__
#-----------------------------------------------------------------------
# LaminateModel Instance Updates # the heart of Laminate()
self._build_snapshot()
self._build_laminate() # Phase 1
self._update_columns() # Phase 2 & 3
# LaminateModel Attributes
if type(self.LMFrame) != list:
self.Middle = self.LMFrame[self.LMFrame['type'] == 'middle']
self.Inner_i = self.LMFrame[self.LMFrame['type'] == 'inner']
self.Outer = self.LMFrame[self.LMFrame['type'] == 'outer']
self.compressive = self.LMFrame[self.LMFrame['side'] == 'Comp.']
self.tensile = self.LMFrame[self.LMFrame['side'] == 'Tens.']
else:
raise AttributeError("Unable to set attributes to LMFrame.")
def __repr__(self):
return '<lamana LaminateModel object ({}), p={}>'.format(self.Geometry.__str__(),
self.p)
def __eq__(self, other):
if isinstance(other, self.__class__):
# Auto check attrs if assigned to DataFrames/Series, then add to list
blacklisted = [attr for attr in self.__dict__ if
isinstance(getattr(self, attr), (pd.DataFrame, pd.Series))]
            # Check DataFrames and Series; every pandas attribute must match
            ndf_eq = all(ut.ndframe_equal(getattr(self, attrname),
                                          getattr(other, attrname))
                         for attrname in blacklisted)
# Ignore pandas objects; check rest of __dict__ and build trimmed dicts
# Important to blacklist the trimmed dict from looping in __dict__
blacklisted.append('_dict_trim') # prevent infinite loop
self._dict_trim = {
key: value
for key, value in self.__dict__.items()
if key not in blacklisted}
other._dict_trim = {
key: value
for key, value in other.__dict__.items()
if key not in blacklisted}
return ndf_eq and self._dict_trim == other._dict_trim # order is important
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
def __hash__(self):
'''Allow set comparisons.
The only required property is that objects which compare equal
have the same hash value (REF 035). self.__dict__ is unhashable
due to the inner list. So a copy is made called _geometry_hash
of GeometryTuple with tupled inner instead.'''
return hash((self.Geometry, self.p))
def _build_snapshot(self):
'''Build a quick, skeletal view of the stack (Snapshot).
Assign materials and stress states to self.stack_order.
'''
stack_extended = Stack.add_materials(self.stack_order, self.materials)
#print(stack_extended)
self.Snapshot = Stack.stack_to_df(stack_extended)
self.Snapshot = Laminate._set_stresses(self.Snapshot)
# PHASE 1
def _build_laminate(self):
'''Build a primitive laminate from a stack. Three steps:
1. Adopt the Snapshot and extend it with more rows.
2. Define Lamina layers by types and multiple rows.
3. Glue lamina together to make one DataFrame.
'''
df_snap = self.Snapshot.copy()
p = self.FeatureInput['Parameters']['p']
# Replicate Multiple Rows by p
df = pd.concat([df_snap]*p)
df.sort(axis=0, inplace=True)
df.reset_index(drop=True, inplace=True)
df = Laminate._set_stresses(df)
#print(df)
# Build Laminate with Classes
layers = df.groupby('layer')
self._type_cache = layers['type'].unique()
self._type_cache.apply(str) # converts to str class, not str alone
self.LFrame = df # retains copy of partial Laminate (IDs & Dimensionals)
def _update_columns(self):
'''Update LFrame with columns of Dimensional and Data values.'''
# PHASE 2
def _update_dimensions(LFrame):
'''Update Laminate DataFrame with new dimensional columns.
This function takes a primitive LFrame (converted Stack) and adds
columns: label, h(m), d(m), intf, k, Z(m), z(m), z(m)*
            A number of pandas-style operations are performed to achieve this,
            so the code has a different approach and feel.
            Variables
            =========
            LFrame : DataFrame
                A primitive Laminate DataFrame containing ID columns.
'''
# For Implementation
nplies = self.nplies
p = self.p
t_total = self.total
#print('nplies: {}, p: {}, t_total (m): {}'.format(nplies, p, t_total))
df = LFrame.copy()
# WRANGLINGS --------------------------------------------------------------
# Indexers ----------------------------------------------------------------
# Many dimensional values are determined by index positions.
# Revised Indexer
df['idx'] = df.index # temp. index column for idxmin & idxmax
interface_tens = df[df['side'] == 'Tens.'].groupby('layer')['idx'].idxmin()
discontinuity_tens = df[(df['side'] == 'Tens.')
& (df['type'] != 'middle')].groupby('layer')['idx'].idxmax()
discontinuity_comp = df[(df['side'] == 'Comp.')
& (df['type'] != 'middle')].groupby('layer')['idx'].idxmin()
interface_comp = df[df['side'] == 'Comp.'].groupby('layer')['idx'].idxmax()
interface_idx = pd.concat([interface_tens, interface_comp])
discont_idx = pd.concat([discontinuity_tens, discontinuity_comp])
#print(discontinuity_tens.values)
if nplies > 1:
pseudomid = [discontinuity_tens.values[-1],
discontinuity_comp.values[0]] # get disconts indices near neutral axis; for even plies
mid_idx = len(df.index) // 2
#print('middle index: ', mid_idx)
# Indexer dict of outside and inside Indices
idxs = {
'interfaces': interface_idx.values.tolist(), # for interfaces
'disconts': discont_idx.values.tolist(), # for disconts.
'middle': mid_idx, # for neut. axis
'intfTens': interface_tens.values.tolist(), # for side_ interfaces
'intfComp': interface_comp.values.tolist(),
'unboundIntfT': interface_tens.values.tolist()[1:],
'unboundIntfC': interface_comp.values.tolist()[:-1],
'disTens': discontinuity_tens.values.tolist(), # for disconts
'disComp': discontinuity_comp.values.tolist(),
}
# Masks -------------------------------------------------------------------
# Interface Mask
s = df['idx'].copy()
s[:] = False # convert series to bool values
s.loc[idxs['interfaces']] = True
mask = s # boolean mask for interfaces
# COLUMNS -----------------------------------------------------------------
# label_ ------------------------------------------------------------------
# Gives name for point types
df['label'] = np.where(mask, 'interface', 'internal') # yes!; applies values if interface, else internal
if p != 1:
df.loc[idxs['disconts'], 'label'] = 'discont.' # yes!; post-fix for disconts.
if (p % 2 != 0) & ('middle' in df['type'].values):
df.loc[idxs['middle'], 'label'] = 'neut. axis'
internal_idx = df[df['label'] == 'internal'].index.tolist() # additional indexer
# '''Add neut. axis in the middle'''
# h_ ----------------------------------------------------------------------
            # Gives the thickness (in m) and height w.r.t. the neut. axis (for middle)
df['h(m)'] = df['t(um)'] * 1e-6
df.loc[df['type'] == 'middle', 'h(m)'] = df['t(um)'] * 1e-6 / 2.
if p != 1: # at disconts.
df.loc[idxs['disTens'], 'h(m)'] = df['h(m)'].shift(-1)
df.loc[idxs['disComp'], 'h(m)'] = df['h(m)'].shift(1)
# d_ ----------------------------------------------------------------------
# Gives the height for interfaces, neutral axes, disconts and internal points
# Assign Laminate Surfaces and Neutral Axis to odd p, odd nply laminates
df.loc[0, 'd(m)'] = 0 # first
df.loc[idxs['middle'], 'd(m)'] = t_total / 2. # middle
df.iloc[-1, df.columns.get_loc('d(m)')] = t_total # last
# Assign Interfaces
# Uses cumsum() for selected interfaces thickness to get d
innerhTens = df.loc[df['label'] == 'interface',
'h(m)'].shift(1)[idxs['unboundIntfT']] # shift h down, select inner interfaces
df.loc[idxs['unboundIntfT'], 'd(m)'] = 0 + np.cumsum(innerhTens)
#print(np.cumsum(innerhTens))
innerhComp = df.loc[df['label'] == 'interface',
'h(m)'].shift(-1)[idxs['unboundIntfC']] # shift h up, select inner interfaces
df.loc[idxs['unboundIntfC'],
'd(m)'] = t_total - np.cumsum(innerhComp[::-1])[::-1]
#print(t_total - np.cumsum(innerhComp[::-1])[::-1]) # inverted cumsum()
# Assign Other Points
if p > 1: # at disconts.
df.loc[idxs['disTens'], 'd(m)'] = df['d(m)'].shift(-1)
df.loc[idxs['disComp'], 'd(m)'] = df['d(m)'].shift(1)
if p > 2:
df = Laminate._make_internals(df, p, column='d(m)') # at internals
##df = _make_internals(df, p, column='d(m)') # at internals
# intf_ -------------------------------------------------------------------
            # Enumerates proximal interfaces; n layers, but n+1 interfaces
df['intf'] = df.loc[:, 'layer']
df.loc[df['side'] == 'Comp.', 'intf'] += 1
if (p % 2 != 0) & (nplies % 2 != 0):
                '''Need an INDET for numeric dtype. Default to NaN for now'''
##df.loc[df['label'] == 'neut. axis', 'intf'] = 'INDET'
df.loc[idxs['middle'], 'intf'] = np.nan # using indep. indexer vs. label_
# Reset the dtype to float
df[['intf']] = df[['intf']].astype(np.float64)
# k_ ----------------------------------------------------------------------
            # Normally the layer number, but now tracks the ith fractional level per layer
            # See definition in (Staab 197); k is the region between the k and k-1 levels
# Like intf_, k_ is aware of neutral axis
# k_ == intf_ (proximal interface)
df.loc[df['label'] == 'interface',
'k'] = df.loc[df['label'] == 'interface', 'intf'] # at interfaces
##'k'] = df.loc[df['label'] == 'interface', 'intf']-1 # at interfaces
# if (p != 1) & (nplies%2 == 0):
# df.loc[pseudomid, 'k'] = (nplies/2.)+1 # hack for even mids
# #df.loc[pseudomid, 'k'] = nplies/2. # replace middle values
# Interfaces and discontinuities share the same k_
if p > 1: # at disconts.
df.loc[idxs['disTens'], 'k'] = df['k'].shift(-1)
df.loc[idxs['disComp'], 'k'] = df['k'].shift(1)
# Even plies have adjacent discontinuities at the neutral axis
if nplies % 2 == 0:
df.loc[pseudomid, 'k'] = (nplies / 2.) + 1 # hack for even mids
##df.loc[pseudomid, 'k'] = nplies / 2. # replace middle values
# Auto calculate internal divisions
if p > 2: # at internals
df = Laminate._make_internals(df, p, column='k')
##df = _make_internals(df, p, column='k')
            '''Need an INDET. for numeric dtype. Default to NaN for now'''
#df.loc[df['label'] == 'neut. axis', 'k'] = 'INDET'
#df.loc[df['label'] == 'neut. axis', 'k'] = np.nan
            # Odd plies have neutral axes
if (p % 2 != 0) & (nplies % 2 != 0): # using indep. indexer vs. label_
df.loc[idxs['middle'], 'k'] = (df['k'].max() + df['k'].min()) / 2.
##df.loc[idxs['middle'], 'k'] = (df['k'].max()-df['k'].min())/2
##df.loc[idxs['middle'], 'k'] = np.nan # using indep. indexer vs. label_
# Z_ ----------------------------------------------------------------------
            # Distance from the ith level to the neutral axis
middle = t_total / 2.
df['Z(m)'] = middle - df['d(m)']
if (nplies == 1) & (p == 1): # d_ = t_total here, so must amend
df['Z(m)'] = t_total / 2.
# z_ ----------------------------------------------------------------------
            # Distance from the ith Z-midplane level to the neutral axis
            # Two flavors are implemented: equi-partitioned z(m) and non-equi-partitioned ("travelling midplane") z(m)*
t_mid = df.loc[df['label'] == 'interface', 'h(m)'] / 2. # for midplane calc.
df.loc[(df['label'] == 'interface') & (df['side'] == 'Tens.'),
'z(m)'] = df.loc[df['label'] == 'interface',
'Z(m)'] - t_mid # at interfaces
df.loc[(df['label'] == 'interface') & (df['side'] == 'Comp.'),
'z(m)'] = df.loc[df['label'] == 'interface',
'Z(m)'] + t_mid # at interfaces
if nplies % 2 == 0:
df.loc[pseudomid, 'z(m)'] = 0 # replace middle values
if p > 1: # at disconts.
df.loc[idxs['disTens'], 'z(m)'] = df['z(m)'].shift(-1)
df.loc[idxs['disComp'], 'z(m)'] = df['z(m)'].shift(1)
if p > 2:
# Equi-partitioned, Linear Intervals (legacy code); z(m)
df = Laminate._make_internals(df, p, column='z(m)')
##df = _make_internals(df, p, column='z(m)')
if p % 2 != 0:
##df.loc[df['label'] == 'neut. axis', 'z(m)'] = 0
df.loc[idxs['middle'], 'z(m)'] = 0 # using indep. indexer vs. label_
####
# Non-equi-partitioned Intervals; "Travelling" Midplanes; z(m)*
'''Possibly offer user options to use either method'''
lastT = df[(df['side'] == 'Tens.') & (df['type'] != 'middle')].groupby('layer')['Z(m)'].last()
lastC = df[(df['side'] == 'Comp.') & (df['type'] != 'middle')].groupby('layer')['Z(m)'].first()
last = pd.concat([lastT, lastC])
last.name = 'lasts'
joined = df.join(last, on='layer')
joined['z_intervals'] = (joined['Z(m)'] - joined['lasts']) / 2.
#print(joined)
#print(last)
df['z(m)*'] = joined['Z(m)'] - joined['z_intervals']
df.loc[df['type'] == 'middle', 'z(m)*'] = df['Z(m)'] / 2.
if (p == 1) & (nplies == 1):
df.loc[0, 'z(m)*'] = 0
####
del df['idx']
sort_columns = ['layer', 'side', 'type', 'matl', 'label', 't(um)',
'h(m)', 'd(m)', 'intf', 'k', 'Z(m)', 'z(m)', 'z(m)*']
self.LFrame = ut.set_column_sequence(df, sort_columns)
# PHASE 3
'''Remove LFrame and FeatureInput'''
def _update_calculations():
'''Update LaminateModel DataFrame and FeatureInput.
- populates stress data calculations from the selected model.
- may add Globals dict to FeatureInput.
Tries to update LaminateModel. If an exception is raised
(on the model side), no update is made, and the Laminate
(without Data columns) is set as the default LFrame.
'''
'''Need to handle general INDET detection. Roll-back to LFrame if detected.'''
try:
self.LMFrame, self.FeatureInput = theories.handshake(self,
adjusted_z=False)
except(IndeterminateError) as e:
'''Improve selecting exact Exceptions.'''
##if err in (AttributeError, ValueError, ZeroDivisionError):
print('The model raised an exception. LaminateModel not updated. LMFrame defaulting to LFrame.')
print(traceback.format_exc())
self.LMFrame = self.LFrame.copy()
'''The args are a bit awkward; replace with empty or comment dependencies'''
_update_dimensions(self.LFrame)
_update_calculations()
# Methods+ ----------------------------------------------------------------
# These methods support Phase 1
def _check_layer_order(self):
'''Cross-check stacking order with layers of the snapshot object.
Returns an abbreviated list of layer orders.
Example
=======
>>> case = la.distributions.Case(load_params, mat_props)
        >>> laminate = case.apply(['400-200-800'])
>>> laminate._check_layer_order()
['O','I','M','I','O']
NOTE: Since 0.4.3c4d, _type_cache type list is replaced with ndarray.
'''
stack_types = [row for row in self.Snapshot['type']] # control
#print(stack_types)
abbrev = [letters[0][0].upper() # use if type_cache is ndarray
for letters in self._type_cache] # easier to see
assert self._type_cache.tolist() == stack_types, \
'Lamina mismatch with stack types, \
\n {} instead of \n {}'.format(self._type_cache, stack_types)
return abbrev
    '''Find a way to replace staticmethods with classmethods.'''
@classmethod
def _set_stresses(cls, df): # == side_()
'''Return updated DataFrame with stresses per side_ of neutral axis.'''
#print('Assigning stress states to sides for a given stack.')
cols = ['layer', 'side', 'matl', 'type', 't(um)']
n_rows = len(df.index)
half_the_stack = n_rows // 2
#print(half_the_stack)
n_middles = df['type'].str.contains(r'middle').sum()
#print(n_middles)
# Default
df.loc[:, 'side'] = 'None'
side_loc = df.columns.get_loc('side')
# Middle for Snapshot
if n_middles == 1:
df.iloc[half_the_stack, side_loc] = 'INDET'
# For the neutral axis
elif n_rows % 2 != 0 and n_rows != 1:
df.iloc[half_the_stack, side_loc] = 'None' # for odd p
# Other plies
'''Replace with p'''
if n_rows > 1:
df.iloc[:half_the_stack, side_loc] = 'Tens.' # applies to latest column 'side'
df.iloc[-half_the_stack:, side_loc] = 'Comp.'
df = ut.set_column_sequence(df, cols)
return df
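    # A minimal sketch of the side assignment above, assuming a 5-row snapshot
    # with one middle layer (half_the_stack = 5 // 2 = 2):
    #
    # >>> snap = Laminate._set_stresses(snapshot_df)    # hypothetical 5-row df
    # >>> snap['side'].tolist()
    # ['Tens.', 'Tens.', 'INDET', 'Comp.', 'Comp.']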
@classmethod
def _make_internals(cls, df_mod, p, column):
'''Populate internals between a first and last index per group.
        This determines the interval. See df_mod for a caution. Steps:
        - Make a series comprising the interval for each group (layer)
        - Make a temp df joining intervals of d (d_intervals) to replicate values
        - Add the prior (first) d_ value to the corresponding cumulative interval for internal d_
        Formulas
        ========
        delta = (x_n - x_0) / (p - 1)
        x_i = x_0 + i * delta,    for internal points i = 1, ..., p - 2
Variables
=========
df_mod : DataFrame
Passed in modified DataFrame. CAUTION: Assumes label_ column is
present. Also assumes interface and discont. rows are correctly
populated.
column: str
Column to assign internals.
'''
df = df_mod.copy()
internal_idx = df[df['label'] == 'internal'].index.tolist()
#print(internal_idx)
# Intervals
first = df.groupby('layer').first() # make series of intervals
last = df.groupby('layer').last()
if p == 1:
raise ZeroDivisionError('p-1. Interval cannot be calculated.')
else:
intervals = (last[column] - first[column]) / (p - 1)
intervals.name = 'intervals'
#print(intervals)
# Join Column of firsts and intervals to df
s_first = first[column]
s_first.name = 'firsts'
joined = df.join(s_first, on='layer') # x_0; join long df with short s to get equal lengths
joined = joined.join(intervals, on='layer') # join long df with short intervals for internal_sums
#print(joined)
# Calc. Interval Sums
trunc = joined[(joined['label'] != 'interface') & (
joined['label'] != 'discont.')] # remove firsts and lasts from cumsum
internal_sums = np.cumsum(
trunc.groupby('layer')['intervals']) # delta; apply sigma from algo
#print(clipped)
#print(internal_sums)
# Apply Internals to df
df.loc[internal_idx, column] = joined.loc[
internal_idx, 'firsts'] + internal_sums # although shorter, internals added to joined_df by index
if p % 2 != 0:
df.loc[df['label'] == 'neut. axis', column] = df[column].mean()
return df
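    # A worked example of the interval formula above, assuming p = 5 and a layer
    # whose first and last 'd(m)' values are 0.0 and 400e-6:
    #
    #   delta = (400e-6 - 0.0) / (5 - 1) = 100e-6
    #   internals = 0.0 + 1*delta, 0.0 + 2*delta, 0.0 + 3*delta
    #             = 100e-6, 200e-6, 300e-6
    #
    # which is what the cumulative sum of 'intervals' over the internal rows
    # reproduces.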
###
# Attributes --------------------------------------------------------------
'''Need Laminate info property to display on repr().'''
@property
def p(self):
'''Return number of rows per layer for a given laminate; default LFrame.'''
df = self.LFrame
return df.groupby('layer').size().unique()[0]
@property
def total(self):
'''Return the total laminate thickness (in m); default LFrame.'''
df = self.LFrame
return df.groupby('layer')['t(um)'].unique().sum()[0] * 1e-6
@property
def max_stress(self):
'''Return Series view of max principal stresses per layer, ~ p = 1.'''
df = self.LMFrame
return df.loc[df['label'] == 'interface', 'stress_f (MPa/N)']
@property
def min_stress(self):
'''Return Series view of min principal stresses per layer, ~ p = 1.'''
df = self.LMFrame
if df['label'].str.contains('discont.').any():
return df.loc[df['label'] == 'discont.', 'stress_f (MPa/N)']
else:
print('Only maxima detected.')
return None
@property
def extrema(self):
'''Return DataFrame excluding internals, showing only maxima and minima.'''
df = self.LMFrame
maxima = (df['label'] == 'interface')
minima = (df['label'] == 'discont.')
return df.loc[maxima | minima, :]
'''or name recap'''
@property
def summary(self):
'''Print a summary of Laminate properties.
Variables
=========
nplies : int
number of plies
p : int
number of points per layer
...
'''
pass
# Checks ------------------------------------------------------------------
# Read from DataFrames
@property
def is_special(self):
        '''Return True if nplies < 5; Monolith, Bilayer, Trilayer, 4-ply.'''
return self.nplies < 5
    @property
    def has_discont(self):
        '''Return True if discontinuity points are found in the LaminateModel.'''
        return self.LMFrame['label'].str.contains('discont.').any()
    @property
    def has_neutaxis(self):
        '''Return True if a row labeled 'neut. axis' is found.'''
        return self.LMFrame['label'].str.contains('neut. axis').any()
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pytest
from google.rpc import status_pb2
from google.cloud import redis_v1beta1
from google.cloud.redis_v1beta1 import enums
from google.cloud.redis_v1beta1.proto import cloud_redis_pb2
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
def __init__(self, responses=[]):
self.responses = responses
self.requests = []
def unary_unary(self, method, request_serializer=None, response_deserializer=None):
return MultiCallableStub(method, self)
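# A minimal sketch (illustrative names only) of how these stubs drive the tests
# below: ChannelStub queues canned responses, MultiCallableStub pops one per
# call (from the end of the list) and records every (method, request) pair.
#
#   channel = ChannelStub(responses=[expected_response])
#   call = channel.unary_unary("/some.Service/SomeMethod")
#   call(request)              # returns expected_response, or raises it if it is an Exception
#   channel.requests[0]        # -> ("/some.Service/SomeMethod", request)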
class CustomException(Exception):
pass
class TestCloudRedisClient(object):
def test_list_instances(self):
# Setup Expected Response
next_page_token = ""
instances_element = {}
instances = [instances_element]
expected_response = {"next_page_token": next_page_token, "instances": instances}
expected_response = cloud_redis_pb2.ListInstancesResponse(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = redis_v1beta1.CloudRedisClient()
# Setup Request
parent = client.location_path("[PROJECT]", "[LOCATION]")
paged_list_response = client.list_instances(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.instances[0] == resources[0]
assert len(channel.requests) == 1
expected_request = cloud_redis_pb2.ListInstancesRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_instances_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = redis_v1beta1.CloudRedisClient()
# Setup request
parent = client.location_path("[PROJECT]", "[LOCATION]")
paged_list_response = client.list_instances(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_get_instance(self):
# Setup Expected Response
name_2 = "name2-1052831874"
display_name = "displayName1615086568"
location_id = "locationId552319461"
alternative_location_id = "alternativeLocationId-718920621"
redis_version = "redisVersion-685310444"
reserved_ip_range = "reservedIpRange-1082940580"
host = "host3208616"
port = 3446913
current_location_id = "currentLocationId1312712735"
status_message = "statusMessage-239442758"
memory_size_gb = 34199707
authorized_network = "authorizedNetwork-1733809270"
persistence_iam_identity = "persistenceIamIdentity1061944584"
expected_response = {
"name": name_2,
"display_name": display_name,
"location_id": location_id,
"alternative_location_id": alternative_location_id,
"redis_version": redis_version,
"reserved_ip_range": reserved_ip_range,
"host": host,
"port": port,
"current_location_id": current_location_id,
"status_message": status_message,
"memory_size_gb": memory_size_gb,
"authorized_network": authorized_network,
"persistence_iam_identity": persistence_iam_identity,
}
expected_response = cloud_redis_pb2.Instance(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = redis_v1beta1.CloudRedisClient()
# Setup Request
name = client.instance_path("[PROJECT]", "[LOCATION]", "[INSTANCE]")
response = client.get_instance(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = cloud_redis_pb2.GetInstanceRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_instance_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = redis_v1beta1.CloudRedisClient()
# Setup request
name = client.instance_path("[PROJECT]", "[LOCATION]", "[INSTANCE]")
with pytest.raises(CustomException):
client.get_instance(name)
def test_create_instance(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
location_id = "locationId552319461"
alternative_location_id = "alternativeLocationId-718920621"
redis_version = "redisVersion-685310444"
reserved_ip_range = "reservedIpRange-1082940580"
host = "host3208616"
port = 3446913
current_location_id = "currentLocationId1312712735"
status_message = "statusMessage-239442758"
memory_size_gb_2 = 1493816946
authorized_network = "authorizedNetwork-1733809270"
persistence_iam_identity = "persistenceIamIdentity1061944584"
expected_response = {
"name": name,
"display_name": display_name,
"location_id": location_id,
"alternative_location_id": alternative_location_id,
"redis_version": redis_version,
"reserved_ip_range": reserved_ip_range,
"host": host,
"port": port,
"current_location_id": current_location_id,
"status_message": status_message,
"memory_size_gb": memory_size_gb_2,
"authorized_network": authorized_network,
"persistence_iam_identity": persistence_iam_identity,
}
expected_response = cloud_redis_pb2.Instance(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_create_instance", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = redis_v1beta1.CloudRedisClient()
# Setup Request
parent = client.location_path("[PROJECT]", "[LOCATION]")
instance_id = "test_instance"
tier = enums.Instance.Tier.BASIC
memory_size_gb = 1
instance = {"tier": tier, "memory_size_gb": memory_size_gb}
response = client.create_instance(parent, instance_id, instance)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = cloud_redis_pb2.CreateInstanceRequest(
parent=parent, instance_id=instance_id, instance=instance
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_instance_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_create_instance_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = redis_v1beta1.CloudRedisClient()
# Setup Request
parent = client.location_path("[PROJECT]", "[LOCATION]")
instance_id = "test_instance"
tier = enums.Instance.Tier.BASIC
memory_size_gb = 1
instance = {"tier": tier, "memory_size_gb": memory_size_gb}
response = client.create_instance(parent, instance_id, instance)
exception = response.exception()
assert exception.errors[0] == error
def test_update_instance(self):
# Setup Expected Response
name = "name3373707"
display_name_2 = "displayName21615000987"
location_id = "locationId552319461"
alternative_location_id = "alternativeLocationId-718920621"
redis_version = "redisVersion-685310444"
reserved_ip_range = "reservedIpRange-1082940580"
host = "host3208616"
port = 3446913
current_location_id = "currentLocationId1312712735"
status_message = "statusMessage-239442758"
memory_size_gb_2 = 1493816946
authorized_network = "authorizedNetwork-1733809270"
persistence_iam_identity = "persistenceIamIdentity1061944584"
expected_response = {
"name": name,
"display_name": display_name_2,
"location_id": location_id,
"alternative_location_id": alternative_location_id,
"redis_version": redis_version,
"reserved_ip_range": reserved_ip_range,
"host": host,
"port": port,
"current_location_id": current_location_id,
"status_message": status_message,
"memory_size_gb": memory_size_gb_2,
"authorized_network": authorized_network,
"persistence_iam_identity": persistence_iam_identity,
}
expected_response = cloud_redis_pb2.Instance(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_update_instance", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = redis_v1beta1.CloudRedisClient()
# Setup Request
paths_element = "display_name"
paths_element_2 = "memory_size_gb"
paths = [paths_element, paths_element_2]
update_mask = {"paths": paths}
display_name = "UpdatedDisplayName"
memory_size_gb = 4
instance = {"display_name": display_name, "memory_size_gb": memory_size_gb}
response = client.update_instance(update_mask, instance)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = cloud_redis_pb2.UpdateInstanceRequest(
update_mask=update_mask, instance=instance
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_instance_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_update_instance_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = redis_v1beta1.CloudRedisClient()
# Setup Request
paths_element = "display_name"
paths_element_2 = "memory_size_gb"
paths = [paths_element, paths_element_2]
update_mask = {"paths": paths}
display_name = "UpdatedDisplayName"
memory_size_gb = 4
instance = {"display_name": display_name, "memory_size_gb": memory_size_gb}
response = client.update_instance(update_mask, instance)
exception = response.exception()
assert exception.errors[0] == error
def test_import_instance(self):
# Setup Expected Response
name_2 = "name2-1052831874"
display_name = "displayName1615086568"
location_id = "locationId552319461"
alternative_location_id = "alternativeLocationId-718920621"
redis_version = "redisVersion-685310444"
reserved_ip_range = "reservedIpRange-1082940580"
host = "host3208616"
port = 3446913
current_location_id = "currentLocationId1312712735"
status_message = "statusMessage-239442758"
memory_size_gb = 34199707
authorized_network = "authorizedNetwork-1733809270"
persistence_iam_identity = "persistenceIamIdentity1061944584"
expected_response = {
"name": name_2,
"display_name": display_name,
"location_id": location_id,
"alternative_location_id": alternative_location_id,
"redis_version": redis_version,
"reserved_ip_range": reserved_ip_range,
"host": host,
"port": port,
"current_location_id": current_location_id,
"status_message": status_message,
"memory_size_gb": memory_size_gb,
"authorized_network": authorized_network,
"persistence_iam_identity": persistence_iam_identity,
}
expected_response = cloud_redis_pb2.Instance(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_import_instance", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = redis_v1beta1.CloudRedisClient()
# Setup Request
name = "name3373707"
input_config = {}
response = client.import_instance(name, input_config)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = cloud_redis_pb2.ImportInstanceRequest(
name=name, input_config=input_config
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_import_instance_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_import_instance_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = redis_v1beta1.CloudRedisClient()
# Setup Request
name = "name3373707"
input_config = {}
response = client.import_instance(name, input_config)
exception = response.exception()
assert exception.errors[0] == error
def test_export_instance(self):
# Setup Expected Response
name_2 = "name2-1052831874"
display_name = "displayName1615086568"
location_id = "locationId552319461"
alternative_location_id = "alternativeLocationId-718920621"
redis_version = "redisVersion-685310444"
reserved_ip_range = "reservedIpRange-1082940580"
host = "host3208616"
port = 3446913
current_location_id = "currentLocationId1312712735"
status_message = "statusMessage-239442758"
memory_size_gb = 34199707
authorized_network = "authorizedNetwork-1733809270"
persistence_iam_identity = "persistenceIamIdentity1061944584"
expected_response = {
"name": name_2,
"display_name": display_name,
"location_id": location_id,
"alternative_location_id": alternative_location_id,
"redis_version": redis_version,
"reserved_ip_range": reserved_ip_range,
"host": host,
"port": port,
"current_location_id": current_location_id,
"status_message": status_message,
"memory_size_gb": memory_size_gb,
"authorized_network": authorized_network,
"persistence_iam_identity": persistence_iam_identity,
}
expected_response = cloud_redis_pb2.Instance(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_export_instance", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = redis_v1beta1.CloudRedisClient()
# Setup Request
name = "name3373707"
output_config = {}
response = client.export_instance(name, output_config)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = cloud_redis_pb2.ExportInstanceRequest(
name=name, output_config=output_config
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_export_instance_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_export_instance_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = redis_v1beta1.CloudRedisClient()
# Setup Request
name = "name3373707"
output_config = {}
response = client.export_instance(name, output_config)
exception = response.exception()
assert exception.errors[0] == error
def test_failover_instance(self):
# Setup Expected Response
name_2 = "name2-1052831874"
display_name = "displayName1615086568"
location_id = "locationId552319461"
alternative_location_id = "alternativeLocationId-718920621"
redis_version = "redisVersion-685310444"
reserved_ip_range = "reservedIpRange-1082940580"
host = "host3208616"
port = 3446913
current_location_id = "currentLocationId1312712735"
status_message = "statusMessage-239442758"
memory_size_gb = 34199707
authorized_network = "authorizedNetwork-1733809270"
persistence_iam_identity = "persistenceIamIdentity1061944584"
expected_response = {
"name": name_2,
"display_name": display_name,
"location_id": location_id,
"alternative_location_id": alternative_location_id,
"redis_version": redis_version,
"reserved_ip_range": reserved_ip_range,
"host": host,
"port": port,
"current_location_id": current_location_id,
"status_message": status_message,
"memory_size_gb": memory_size_gb,
"authorized_network": authorized_network,
"persistence_iam_identity": persistence_iam_identity,
}
expected_response = cloud_redis_pb2.Instance(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_failover_instance", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = redis_v1beta1.CloudRedisClient()
# Setup Request
name = client.instance_path("[PROJECT]", "[LOCATION]", "[INSTANCE]")
response = client.failover_instance(name)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = cloud_redis_pb2.FailoverInstanceRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_failover_instance_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_failover_instance_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = redis_v1beta1.CloudRedisClient()
# Setup Request
name = client.instance_path("[PROJECT]", "[LOCATION]", "[INSTANCE]")
response = client.failover_instance(name)
exception = response.exception()
assert exception.errors[0] == error
def test_delete_instance(self):
# Setup Expected Response
expected_response = {}
expected_response = empty_pb2.Empty(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_delete_instance", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = redis_v1beta1.CloudRedisClient()
# Setup Request
name = client.instance_path("[PROJECT]", "[LOCATION]", "[INSTANCE]")
response = client.delete_instance(name)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = cloud_redis_pb2.DeleteInstanceRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_instance_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_delete_instance_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = redis_v1beta1.CloudRedisClient()
# Setup Request
name = client.instance_path("[PROJECT]", "[LOCATION]", "[INSTANCE]")
response = client.delete_instance(name)
exception = response.exception()
assert exception.errors[0] == error
|
|
__author__ = 'Vinayak Marali, Pavan Prabhakar Bhat'
"""
CSCI-603: Lab 2 (week 2)
Section 03
Author: Pavan Prabhakar Bhat (pxb8715@rit.edu)
Vinayak Marali (vkm7895@rit.edu)
This is a program that draws a forest with trees and a house.
"""
# Imports required by the program
import turtle
import random
import math
# global constants
UNIT = 50
# to store the height of trees
treeHt=[]
def init():
"""
    Initializes the window for drawing; (1200, 800) is the window size.
:pre: (relative) pos(0,0), heading (east), up
:post: relative position, heading (east), up
:param: None
:return: None
"""
turtle.setup(1200, 800)
turtle.penup()
turtle.left(180)
turtle.forward(10 * UNIT)
turtle.left(90)
turtle.forward(4 * UNIT)
turtle.left(90)
def drawTrunk(size):
"""
    Draws the trunk of a tree on the screen
:pre: (relative) position, heading (east), down
:post: (relative) position, heading (east), up
:param size: length of the tree trunk to be drawn
:return: None
"""
turtle.pendown()
turtle.left(90)
turtle.forward(size)
turtle.penup()
turtle.right(180)
turtle.forward(size)
turtle.left(90)
def drawSpace():
"""
Draws a space between the trees or between a house and a tree.
:pre: (relative) position, heading (east), up
:post: (relative) position, heading (east), up
:param: None
:return: None
"""
turtle.penup()
turtle.forward(2 * UNIT)
def drawTree(treeNo, isHouse, houseNo):
"""
Draws the tree on the screen
:pre: (relative) position, heading (east), up
:post: (relative) position, heading (east), up
    :param treeNo: number of trees to be drawn
    :param isHouse: a 'y'/'n' string which determines whether the user wants a house
    :param houseNo: a value required to determine the position of the house
    :return: total lumber obtained from the tree trunks
"""
# counts the number of trees that are printed
count = 0
flag = 0
while treeNo > 0:
# Required to generate a random type of tree
randomtree = random.randint(1, 3)
if randomtree == 1:
trunkheight = UNIT * random.randint(1, 2)
treeHt.append(trunkheight)
drawTrunk(trunkheight)
turtle.left(90)
turtle.forward(trunkheight)
turtle.left(90)
turtle.penup()
turtle.forward(0.5 * UNIT)
turtle.pendown()
turtle.right(120)
turtle.forward(UNIT)
turtle.right(120)
turtle.forward(UNIT)
turtle.right(120)
turtle.penup()
turtle.forward(0.5 * UNIT)
turtle.penup()
turtle.left(90)
turtle.forward(trunkheight)
turtle.left(90)
drawSpace()
count = count + 1
elif randomtree == 2:
trunkheight = UNIT * random.randint(1, 3)
treeHt.append(trunkheight)
drawTrunk(trunkheight)
turtle.left(90)
turtle.forward(trunkheight)
turtle.right(90)
turtle.pendown()
turtle.circle(0.5 * UNIT)
turtle.penup()
turtle.right(90)
turtle.forward(trunkheight)
turtle.left(90)
drawSpace()
count = count + 1
elif randomtree == 3:
trunkheight = UNIT * random.randint(1, 4)
treeHt.append(trunkheight)
drawTrunk(trunkheight)
turtle.pendown()
turtle.left(90)
turtle.forward(trunkheight)
turtle.left(90)
turtle.forward(0.5 * UNIT)
turtle.right(120)
turtle.forward(UNIT)
turtle.right(120)
turtle.forward(UNIT)
turtle.right(120)
turtle.forward(0.5 * UNIT)
turtle.penup()
turtle.left(90)
turtle.forward(trunkheight)
turtle.left(90)
drawSpace()
count = count + 1
if isHouse == 'y' and count == houseNo and flag == 0:
flag = 1
hlumber = drawHouse(50)
drawSpace()
treeNo = treeNo - 1
return sum(treeHt)
def drawHouse(unit):
"""
Draws the house on the screen
:pre: (relative) pos (0,0), heading (east), down
:post: (relative) pos (0,0), heading (east), up
:param unit: constant required to build the walls and roof of the house
:return: wood required to build the house
"""
turtle.pendown()
turtle.left(90)
turtle.forward(2 * unit)
turtle.right(45)
turtle.forward(unit * math.sqrt(2))
turtle.right(90)
turtle.forward(unit * math.sqrt(2))
turtle.right(45)
turtle.forward(2 * unit)
turtle.left(90)
turtle.penup()
return 2 * unit + unit * math.sqrt(2) + unit * math.sqrt(2) + 2 * unit
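# A quick check of the lumber arithmetic (based on the geometry drawn above):
# drawHouse(unit) uses 2*unit + unit*sqrt(2) + unit*sqrt(2) + 2*unit
# = (4 + 2*sqrt(2)) * unit of wood.  main() solves lumber = wallht * (2 + sqrt(2))
# for wallht, so calling drawHouse(wallht / 2) consumes
# (4 + 2*sqrt(2)) * wallht / 2 = (2 + sqrt(2)) * wallht = lumber, i.e. exactly
# the lumber gathered from the forest.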
def drawstar(hStar):
"""
Draws the star on the screen
:pre: (relative) pos (0,0), heading (east), down
:post: (relative) pos (0,0), heading (east), down
:param hStar: height of star
:return: None
"""
turtle.left(90)
turtle.forward(hStar)
turtle.pendown()
turtle.forward(20)
turtle.right(180)
turtle.forward(10)
turtle.left(90)
turtle.forward(10)
turtle.right(180)
turtle.forward(20)
turtle.right(180)
turtle.forward(10)
turtle.left(45)
turtle.forward(10)
turtle.right(180)
turtle.forward(20)
turtle.right(180)
turtle.forward(10)
turtle.left(90)
turtle.forward(10)
turtle.right(180)
turtle.forward(20)
def drawSun():
"""
Draws the sun on the screen
:pre: (relative) position, heading (east), down
:post: (relative) position, heading (east), down
:param: None
:return: None
"""
turtle.pendown()
turtle.circle(15)
def main():
"""
The main function.
:pre: (relative) pos (0,0), heading (east)
:post: (relative) pos (0,0), heading (east)
:return: None
"""
# the lumber required by the house
hlumber = 0
turtle.bgcolor('black')
turtle.pencolor('white')
init()
# Number of trees required by the user
treeNo = int(input('Enter the number of trees '))
isHouse = input('Is there a house in the forest (y/n)? ')
# generates the house at random locations
if isHouse == 'y':
        if treeNo >= 2:
            houseNo = random.randint(1, treeNo-1)
        else:
            print('There have to be at least 2 trees for the house to be printed')
houseNo = 0
tlumber = drawTree(treeNo, isHouse, houseNo)
hlumber = 2 * 50 + 50 * math.sqrt(2) + 50 * math.sqrt(2) + 2 * 50
else:
tlumber = drawTree(treeNo, isHouse, 0)
    # draws the star at least 10 pixels above the tallest tree (max trunk height + UNIT canopy allowance)
hStar = max(treeHt) + UNIT + 10
drawstar(hStar)
# Total lumber from the trees and the house
lumber = hlumber + tlumber
wallht = lumber/(2 + math.sqrt(2))
input('Night is done, press enter for day')
turtle.reset()
init()
turtle.bgcolor('white')
turtle.pencolor('black')
input('We have ' + str(lumber) + ' units of lumber for the building.')
input('We will build a house with walls ' + str(wallht) + ' tall.')
drawHouse(wallht/2)
drawSpace()
turtle.left(90)
turtle.forward(wallht * 2)
drawSun()
input('Day is done, house is built, press enter to quit')
# Calling the main function
if __name__ == '__main__':
main()
|
|
#A place for code to be called from C-code
# that implements more complicated stuff.
import re
import sys
#from _mx_datetime_parser import *
if (sys.byteorder == 'little'):
_nbo = '<'
else:
_nbo = '>'
def _makenames_list(adict):
from multiarray import dtype
allfields = []
fnames = adict.keys()
for fname in fnames:
obj = adict[fname]
n = len(obj)
if not isinstance(obj, tuple) or n not in [2,3]:
raise ValueError, "entry not a 2- or 3- tuple"
if (n > 2) and (obj[2] == fname):
continue
num = int(obj[1])
if (num < 0):
raise ValueError, "invalid offset."
format = dtype(obj[0])
if (format.itemsize == 0):
raise ValueError, "all itemsizes must be fixed."
if (n > 2):
title = obj[2]
else:
title = None
allfields.append((fname, format, num, title))
# sort by offsets
allfields.sort(lambda x,y: cmp(x[2],y[2]))
names = [x[0] for x in allfields]
formats = [x[1] for x in allfields]
offsets = [x[2] for x in allfields]
titles = [x[3] for x in allfields]
return names, formats, offsets, titles
# Called in PyArray_DescrConverter function when
# a dictionary without "names" and "formats"
# fields is used as a data-type descriptor.
def _usefields(adict, align):
from multiarray import dtype
try:
names = adict[-1]
except KeyError:
names = None
if names is None:
names, formats, offsets, titles = _makenames_list(adict)
else:
formats = []
offsets = []
titles = []
for name in names:
res = adict[name]
formats.append(res[0])
offsets.append(res[1])
if (len(res) > 2):
titles.append(res[2])
else:
titles.append(None)
return dtype({"names" : names,
"formats" : formats,
"offsets" : offsets,
"titles" : titles}, align)
# construct an array_protocol descriptor list
# from the fields attribute of a descriptor
# This calls itself recursively but should eventually hit
# a descriptor that has no fields and then return
# a simple typestring
def _array_descr(descriptor):
from multiarray import METADATA_DTSTR
fields = descriptor.fields
if fields is None:
subdtype = descriptor.subdtype
if subdtype is None:
if descriptor.metadata is None:
return descriptor.str
else:
new = descriptor.metadata.copy()
# Eliminate any key related to internal implementation
_ = new.pop(METADATA_DTSTR, None)
return (descriptor.str, new)
else:
return (_array_descr(subdtype[0]), subdtype[1])
names = descriptor.names
ordered_fields = [fields[x] + (x,) for x in names]
result = []
offset = 0
for field in ordered_fields:
if field[1] > offset:
num = field[1] - offset
result.append(('','|V%d' % num))
offset += num
if len(field) > 3:
name = (field[2],field[3])
else:
name = field[2]
if field[0].subdtype:
tup = (name, _array_descr(field[0].subdtype[0]),
field[0].subdtype[1])
else:
tup = (name, _array_descr(field[0]))
offset += field[0].itemsize
result.append(tup)
return result
# Build a new array from the information in a pickle.
# Note that the name numpy.core._internal._reconstruct is embedded in
# pickles of ndarrays made with NumPy before release 1.0
# so don't remove the name here, or you'll
# break backward compatibility.
def _reconstruct(subtype, shape, dtype):
from multiarray import ndarray
return ndarray.__new__(subtype, shape, dtype)
# format_re and _split were taken from numarray by J. Todd Miller
def _split(input):
"""Split the input formats string into field formats without splitting
the tuple used to specify multi-dimensional arrays."""
newlist = []
hold = ''
listinput = input.split(',')
for element in listinput:
if hold != '':
item = hold + ',' + element
else:
item = element
left = item.count('(')
right = item.count(')')
        # if the parentheses are not balanced, hold the string
if left > right :
hold = item
# when balanced, append to the output list and reset the hold
elif left == right:
newlist.append(item.strip())
hold = ''
        # too many closing parentheses are unacceptable
else:
raise SyntaxError, item
# if there is string left over in hold
if hold != '':
raise SyntaxError, hold
return newlist
format_datetime = re.compile(r"""(?P<typecode>M8|m8|datetime64|timedelta64)
([[]
((?P<num>\d+)?
(?P<baseunit>Y|M|W|B|D|h|m|s|ms|us|ns|ps|fs|as)
(/(?P<den>\d+))?
[]])
(//(?P<events>\d+))?)?""", re.X)
# Return (baseunit, num, den, events), datetime
# from date-time string
def _datetimestring(astr):
res = format_datetime.match(astr)
if res is None:
raise ValueError, "Incorrect date-time string."
typecode = res.group('typecode')
datetime = (typecode == 'M8' or typecode == 'datetime64')
defaults = ['us', 1, 1, 1]
names = ['baseunit', 'num', 'den', 'events']
func = [str, int, int, int]
dt_tuple = []
for i, name in enumerate(names):
value = res.group(name)
if value:
dt_tuple.append(func[i](value))
else:
dt_tuple.append(defaults[i])
return tuple(dt_tuple), datetime
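# Illustrative parses (assuming the format_datetime regex above):
#
#   _datetimestring('M8[us]')    ->  (('us', 1, 1, 1), True)
#   _datetimestring('m8[100ns]') ->  (('ns', 100, 1, 1), False)
#
# i.e. (baseunit, num, den, events) plus a flag that is True for datetime64
# and False for timedelta64.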
format_re = re.compile(r'(?P<order1>[<>|=]?)(?P<repeats> *[(]?[ ,0-9]*[)]? *)(?P<order2>[<>|=]?)(?P<dtype>[A-Za-z0-9.]*)')
# astr is a string (perhaps comma separated)
_convorder = {'=': _nbo,
'|': '|',
'>': '>',
'<': '<'}
def _commastring(astr):
res = _split(astr)
if (len(res)) < 1:
raise ValueError, "unrecognized formant"
result = []
for k,item in enumerate(res):
# convert item
try:
(order1, repeats, order2, dtype) = format_re.match(item).groups()
except (TypeError, AttributeError):
raise ValueError('format %s is not recognized' % item)
if order2 == '':
order = order1
elif order1 == '':
order = order2
else:
order1 = _convorder[order1]
order2 = _convorder[order2]
if (order1 != order2):
                raise ValueError('inconsistent byte-order specification %s and %s' % (order1, order2))
order = order1
if order in ['|', '=', _nbo]:
order = ''
dtype = '%s%s' % (order, dtype)
if (repeats == ''):
newitem = dtype
else:
newitem = (dtype, eval(repeats))
result.append(newitem)
return result
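# Illustrative inputs for the comma-string parsing above (a sketch, not an
# exhaustive spec):
#
#   _commastring('f8, 2i4')      ->  ['f8', ('i4', 2)]
#   _commastring('(2,3)f4, i2')  ->  [('f4', (2, 3)), 'i2']
#
# _split keeps the '(2,3)' shape tuple intact, and format_re separates the
# optional byte order, the repeat/shape, and the base dtype of each field.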
def _getintp_ctype():
from multiarray import dtype
val = _getintp_ctype.cache
if val is not None:
return val
char = dtype('p').char
import ctypes
if (char == 'i'):
val = ctypes.c_int
elif char == 'l':
val = ctypes.c_long
elif char == 'q':
val = ctypes.c_longlong
else:
val = ctypes.c_long
_getintp_ctype.cache = val
return val
_getintp_ctype.cache = None
# Used for .ctypes attribute of ndarray
class _missing_ctypes(object):
def cast(self, num, obj):
return num
def c_void_p(self, num):
return num
class _ctypes(object):
def __init__(self, array, ptr=None):
try:
import ctypes
self._ctypes = ctypes
except ImportError:
self._ctypes = _missing_ctypes()
self._arr = array
self._data = ptr
if self._arr.ndim == 0:
self._zerod = True
else:
self._zerod = False
def data_as(self, obj):
return self._ctypes.cast(self._data, obj)
def shape_as(self, obj):
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.shape)
def strides_as(self, obj):
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.strides)
def get_data(self):
return self._data
def get_shape(self):
if self._zerod:
return None
return (_getintp_ctype()*self._arr.ndim)(*self._arr.shape)
def get_strides(self):
if self._zerod:
return None
return (_getintp_ctype()*self._arr.ndim)(*self._arr.strides)
def get_as_parameter(self):
return self._ctypes.c_void_p(self._data)
data = property(get_data, None, doc="c-types data")
shape = property(get_shape, None, doc="c-types shape")
strides = property(get_strides, None, doc="c-types strides")
_as_parameter_ = property(get_as_parameter, None, doc="_as parameter_")
# Given a datatype and an order object
# return a new names tuple
# with the order indicated
def _newnames(datatype, order):
oldnames = datatype.names
nameslist = list(oldnames)
if isinstance(order, str):
order = [order]
if isinstance(order, (list, tuple)):
for name in order:
try:
nameslist.remove(name)
except ValueError:
raise ValueError, "unknown field name: %s" % (name,)
return tuple(list(order) + nameslist)
raise ValueError, "unsupported order value: %s" % (order,)
# Given an array with fields and a sequence of field names
# construct a new array with just those fields copied over
def _index_fields(ary, fields):
from multiarray import empty, dtype
dt = ary.dtype
new_dtype = [(name, dt[name]) for name in dt.names if name in fields]
if ary.flags.f_contiguous:
order = 'F'
else:
order = 'C'
newarray = empty(ary.shape, dtype=new_dtype, order=order)
for name in fields:
newarray[name] = ary[name]
return newarray
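# A short sketch with a hypothetical structured array `a` of dtype
# [('a', 'i4'), ('b', 'f8'), ('c', 'i2')]:
#
#   _index_fields(a, ['b', 'a'])
#
# builds an empty array whose dtype keeps only the requested fields, in the
# original dtype's field order (here [('a', 'i4'), ('b', 'f8')]), preserves C
# vs. Fortran memory order, and then copies each requested field over.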
|
|
from atila import Atila
import time, math
import json
from services import route_guide_pb2
app = Atila (__name__)
app.debug = True
app.use_reloader = True
def get_feature (feature_db, point):
for feature in feature_db:
if feature.location == point:
return feature
return None
def get_distance(start, end):
coord_factor = 10000000.0
lat_1 = start.latitude / coord_factor
lat_2 = end.latitude / coord_factor
lon_1 = start.longitude / coord_factor
lon_2 = end.longitude / coord_factor
lat_rad_1 = math.radians(lat_1)
lat_rad_2 = math.radians(lat_2)
delta_lat_rad = math.radians(lat_2 - lat_1)
delta_lon_rad = math.radians(lon_2 - lon_1)
a = (pow(math.sin(delta_lat_rad / 2), 2) +
(math.cos(lat_rad_1) * math.cos(lat_rad_2) *
pow(math.sin(delta_lon_rad / 2), 2)))
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    R = 6371000  # metres (Earth's mean radius)
return R * c
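# A rough sanity check of the haversine computation above; Point coordinates
# follow the same degrees * 1e7 scaling (coord_factor) used elsewhere in this
# file:
#
#   p1 = route_guide_pb2.Point(latitude=400000000, longitude=-740000000)  # 40.0, -74.0
#   p2 = route_guide_pb2.Point(latitude=410000000, longitude=-740000000)  # 41.0, -74.0
#   get_distance(p1, p2)   # ~ 111,195 m, i.e. about one degree of latitude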
@app.route ("/RouteChat")
def RouteChat (was, new_notes):
prev_notes = []
for new_note in new_notes:
for prev_note in prev_notes:
if prev_note.location == new_note.location:
yield prev_note
prev_notes.append(new_note)
@app.route ("/RecordRoute")
def RecordRoute (was, points):
point_count = 0
feature_count = 0
distance = 0.0
prev_point = None
start_time = time.time()
for point in points:
point_count += 1
if get_feature(db, point):
feature_count += 1
if prev_point:
distance += get_distance(prev_point, point)
prev_point = point
elapsed_time = time.time() - start_time
return route_guide_pb2.RouteSummary(
point_count=point_count,
feature_count=feature_count,
distance=int(distance),
elapsed_time=int(elapsed_time)
)
@app.route ("/GetFeature")
def GetFeature (was, point):
feature = get_feature(db, point)
if feature is None:
return route_guide_pb2.Feature(name="", location=point)
else:
return feature
@app.route ("/ListFeatures")
def ListFeatures (was, rectangle):
left = min(rectangle.lo.longitude, rectangle.hi.longitude)
right = max(rectangle.lo.longitude, rectangle.hi.longitude)
top = max(rectangle.lo.latitude, rectangle.hi.latitude)
bottom = min(rectangle.lo.latitude, rectangle.hi.latitude)
for feature in db:
        if (left <= feature.location.longitude <= right and
                bottom <= feature.location.latitude <= top):
yield feature
@app.route ("/test")
def test (was):
stub = was.grpc ("http://127.0.0.1:5000/routeguide.RouteGuide")
point = route_guide_pb2.Point (latitude=409146138, longitude=-746188906)
feature = stub.GetFeature (point)
rs = feature.dispatch ()
return str (rs.data)
@app.route ("/")
def index (was):
return "<h1>Route Guide<h1>"
_jsondb = """
[{
"location": {
"latitude": 407838351,
"longitude": -746143763
},
"name": "Patriots Path, Mendham, NJ 07945, USA"
}, {
"location": {
"latitude": 408122808,
"longitude": -743999179
},
"name": "101 New Jersey 10, Whippany, NJ 07981, USA"
}, {
"location": {
"latitude": 413628156,
"longitude": -749015468
},
"name": "U.S. 6, Shohola, PA 18458, USA"
}, {
"location": {
"latitude": 419999544,
"longitude": -740371136
},
"name": "5 Conners Road, Kingston, NY 12401, USA"
}, {
"location": {
"latitude": 414008389,
"longitude": -743951297
},
"name": "Mid Hudson Psychiatric Center, New Hampton, NY 10958, USA"
}, {
"location": {
"latitude": 419611318,
"longitude": -746524769
},
"name": "287 Flugertown Road, Livingston Manor, NY 12758, USA"
}, {
"location": {
"latitude": 406109563,
"longitude": -742186778
},
"name": "4001 Tremley Point Road, Linden, NJ 07036, USA"
}, {
"location": {
"latitude": 416802456,
"longitude": -742370183
},
"name": "352 South Mountain Road, Wallkill, NY 12589, USA"
}, {
"location": {
"latitude": 412950425,
"longitude": -741077389
},
"name": "Bailey Turn Road, Harriman, NY 10926, USA"
}, {
"location": {
"latitude": 412144655,
"longitude": -743949739
},
"name": "193-199 Wawayanda Road, Hewitt, NJ 07421, USA"
}, {
"location": {
"latitude": 415736605,
"longitude": -742847522
},
"name": "406-496 Ward Avenue, Pine Bush, NY 12566, USA"
}, {
"location": {
"latitude": 413843930,
"longitude": -740501726
},
"name": "162 Merrill Road, Highland Mills, NY 10930, USA"
}, {
"location": {
"latitude": 410873075,
"longitude": -744459023
},
"name": "Clinton Road, West Milford, NJ 07480, USA"
}, {
"location": {
"latitude": 412346009,
"longitude": -744026814
},
"name": "16 Old Brook Lane, Warwick, NY 10990, USA"
}, {
"location": {
"latitude": 402948455,
"longitude": -747903913
},
"name": "3 Drake Lane, Pennington, NJ 08534, USA"
}, {
"location": {
"latitude": 406337092,
"longitude": -740122226
},
"name": "6324 8th Avenue, Brooklyn, NY 11220, USA"
}, {
"location": {
"latitude": 406421967,
"longitude": -747727624
},
"name": "1 Merck Access Road, Whitehouse Station, NJ 08889, USA"
}, {
"location": {
"latitude": 416318082,
"longitude": -749677716
},
"name": "78-98 Schalck Road, Narrowsburg, NY 12764, USA"
}, {
"location": {
"latitude": 415301720,
"longitude": -748416257
},
"name": "282 Lakeview Drive Road, Highland Lake, NY 12743, USA"
}, {
"location": {
"latitude": 402647019,
"longitude": -747071791
},
"name": "330 Evelyn Avenue, Hamilton Township, NJ 08619, USA"
}, {
"location": {
"latitude": 412567807,
"longitude": -741058078
},
"name": "New York State Reference Route 987E, Southfields, NY 10975, USA"
}, {
"location": {
"latitude": 416855156,
"longitude": -744420597
},
"name": "103-271 Tempaloni Road, Ellenville, NY 12428, USA"
}, {
"location": {
"latitude": 404663628,
"longitude": -744820157
},
"name": "1300 Airport Road, North Brunswick Township, NJ 08902, USA"
}, {
"location": {
"latitude": 407113723,
"longitude": -749746483
},
"name": ""
}, {
"location": {
"latitude": 402133926,
"longitude": -743613249
},
"name": ""
}, {
"location": {
"latitude": 400273442,
"longitude": -741220915
},
"name": ""
}, {
"location": {
"latitude": 411236786,
"longitude": -744070769
},
"name": ""
}, {
"location": {
"latitude": 411633782,
"longitude": -746784970
},
"name": "211-225 Plains Road, Augusta, NJ 07822, USA"
}, {
"location": {
"latitude": 415830701,
"longitude": -742952812
},
"name": ""
}, {
"location": {
"latitude": 413447164,
"longitude": -748712898
},
"name": "165 Pedersen Ridge Road, Milford, PA 18337, USA"
}, {
"location": {
"latitude": 405047245,
"longitude": -749800722
},
"name": "100-122 Locktown Road, Frenchtown, NJ 08825, USA"
}, {
"location": {
"latitude": 418858923,
"longitude": -746156790
},
"name": ""
}, {
"location": {
"latitude": 417951888,
"longitude": -748484944
},
"name": "650-652 Willi Hill Road, Swan Lake, NY 12783, USA"
}, {
"location": {
"latitude": 407033786,
"longitude": -743977337
},
"name": "26 East 3rd Street, New Providence, NJ 07974, USA"
}, {
"location": {
"latitude": 417548014,
"longitude": -740075041
},
"name": ""
}, {
"location": {
"latitude": 410395868,
"longitude": -744972325
},
"name": ""
}, {
"location": {
"latitude": 404615353,
"longitude": -745129803
},
"name": ""
}, {
"location": {
"latitude": 406589790,
"longitude": -743560121
},
"name": "611 Lawrence Avenue, Westfield, NJ 07090, USA"
}, {
"location": {
"latitude": 414653148,
"longitude": -740477477
},
"name": "18 Lannis Avenue, New Windsor, NY 12553, USA"
}, {
"location": {
"latitude": 405957808,
"longitude": -743255336
},
"name": "82-104 Amherst Avenue, Colonia, NJ 07067, USA"
}, {
"location": {
"latitude": 411733589,
"longitude": -741648093
},
"name": "170 Seven Lakes Drive, Sloatsburg, NY 10974, USA"
}, {
"location": {
"latitude": 412676291,
"longitude": -742606606
},
"name": "1270 Lakes Road, Monroe, NY 10950, USA"
}, {
"location": {
"latitude": 409224445,
"longitude": -748286738
},
"name": "509-535 Alphano Road, Great Meadows, NJ 07838, USA"
}, {
"location": {
"latitude": 406523420,
"longitude": -742135517
},
"name": "652 Garden Street, Elizabeth, NJ 07202, USA"
}, {
"location": {
"latitude": 401827388,
"longitude": -740294537
},
"name": "349 Sea Spray Court, Neptune City, NJ 07753, USA"
}, {
"location": {
"latitude": 410564152,
"longitude": -743685054
},
"name": "13-17 Stanley Street, West Milford, NJ 07480, USA"
}, {
"location": {
"latitude": 408472324,
"longitude": -740726046
},
"name": "47 Industrial Avenue, Teterboro, NJ 07608, USA"
}, {
"location": {
"latitude": 412452168,
"longitude": -740214052
},
"name": "5 White Oak Lane, Stony Point, NY 10980, USA"
}, {
"location": {
"latitude": 409146138,
"longitude": -746188906
},
"name": "Berkshire Valley Management Area Trail, Jefferson, NJ, USA"
}, {
"location": {
"latitude": 404701380,
"longitude": -744781745
},
"name": "1007 Jersey Avenue, New Brunswick, NJ 08901, USA"
}, {
"location": {
"latitude": 409642566,
"longitude": -746017679
},
"name": "6 East Emerald Isle Drive, Lake Hopatcong, NJ 07849, USA"
}, {
"location": {
"latitude": 408031728,
"longitude": -748645385
},
"name": "1358-1474 New Jersey 57, Port Murray, NJ 07865, USA"
}, {
"location": {
"latitude": 413700272,
"longitude": -742135189
},
"name": "367 Prospect Road, Chester, NY 10918, USA"
}, {
"location": {
"latitude": 404310607,
"longitude": -740282632
},
"name": "10 Simon Lake Drive, Atlantic Highlands, NJ 07716, USA"
}, {
"location": {
"latitude": 409319800,
"longitude": -746201391
},
"name": "11 Ward Street, Mount Arlington, NJ 07856, USA"
}, {
"location": {
"latitude": 406685311,
"longitude": -742108603
},
"name": "300-398 Jefferson Avenue, Elizabeth, NJ 07201, USA"
}, {
"location": {
"latitude": 419018117,
"longitude": -749142781
},
"name": "43 Dreher Road, Roscoe, NY 12776, USA"
}, {
"location": {
"latitude": 412856162,
"longitude": -745148837
},
"name": "Swan Street, Pine Island, NY 10969, USA"
}, {
"location": {
"latitude": 416560744,
"longitude": -746721964
},
"name": "66 Pleasantview Avenue, Monticello, NY 12701, USA"
}, {
"location": {
"latitude": 405314270,
"longitude": -749836354
},
"name": ""
}, {
"location": {
"latitude": 414219548,
"longitude": -743327440
},
"name": ""
}, {
"location": {
"latitude": 415534177,
"longitude": -742900616
},
"name": "565 Winding Hills Road, Montgomery, NY 12549, USA"
}, {
"location": {
"latitude": 406898530,
"longitude": -749127080
},
"name": "231 Rocky Run Road, Glen Gardner, NJ 08826, USA"
}, {
"location": {
"latitude": 407586880,
"longitude": -741670168
},
"name": "100 Mount Pleasant Avenue, Newark, NJ 07104, USA"
}, {
"location": {
"latitude": 400106455,
"longitude": -742870190
},
"name": "517-521 Huntington Drive, Manchester Township, NJ 08759, USA"
}, {
"location": {
"latitude": 400066188,
"longitude": -746793294
},
"name": ""
}, {
"location": {
"latitude": 418803880,
"longitude": -744102673
},
"name": "40 Mountain Road, Napanoch, NY 12458, USA"
}, {
"location": {
"latitude": 414204288,
"longitude": -747895140
},
"name": ""
}, {
"location": {
"latitude": 414777405,
"longitude": -740615601
},
"name": ""
}, {
"location": {
"latitude": 415464475,
"longitude": -747175374
},
"name": "48 North Road, Forestburgh, NY 12777, USA"
}, {
"location": {
"latitude": 404062378,
"longitude": -746376177
},
"name": ""
}, {
"location": {
"latitude": 405688272,
"longitude": -749285130
},
"name": ""
}, {
"location": {
"latitude": 400342070,
"longitude": -748788996
},
"name": ""
}, {
"location": {
"latitude": 401809022,
"longitude": -744157964
},
"name": ""
}, {
"location": {
"latitude": 404226644,
"longitude": -740517141
},
"name": "9 Thompson Avenue, Leonardo, NJ 07737, USA"
}, {
"location": {
"latitude": 410322033,
"longitude": -747871659
},
"name": ""
}, {
"location": {
"latitude": 407100674,
"longitude": -747742727
},
"name": ""
}, {
"location": {
"latitude": 418811433,
"longitude": -741718005
},
"name": "213 Bush Road, Stone Ridge, NY 12484, USA"
}, {
"location": {
"latitude": 415034302,
"longitude": -743850945
},
"name": ""
}, {
"location": {
"latitude": 411349992,
"longitude": -743694161
},
"name": ""
}, {
"location": {
"latitude": 404839914,
"longitude": -744759616
},
"name": "1-17 Bergen Court, New Brunswick, NJ 08901, USA"
}, {
"location": {
"latitude": 414638017,
"longitude": -745957854
},
"name": "35 Oakland Valley Road, Cuddebackville, NY 12729, USA"
}, {
"location": {
"latitude": 412127800,
"longitude": -740173578
},
"name": ""
}, {
"location": {
"latitude": 401263460,
"longitude": -747964303
},
"name": ""
}, {
"location": {
"latitude": 412843391,
"longitude": -749086026
},
"name": ""
}, {
"location": {
"latitude": 418512773,
"longitude": -743067823
},
"name": ""
}, {
"location": {
"latitude": 404318328,
"longitude": -740835638
},
"name": "42-102 Main Street, Belford, NJ 07718, USA"
}, {
"location": {
"latitude": 419020746,
"longitude": -741172328
},
"name": ""
}, {
"location": {
"latitude": 404080723,
"longitude": -746119569
},
"name": ""
}, {
"location": {
"latitude": 401012643,
"longitude": -744035134
},
"name": ""
}, {
"location": {
"latitude": 404306372,
"longitude": -741079661
},
"name": ""
}, {
"location": {
"latitude": 403966326,
"longitude": -748519297
},
"name": ""
}, {
"location": {
"latitude": 405002031,
"longitude": -748407866
},
"name": ""
}, {
"location": {
"latitude": 409532885,
"longitude": -742200683
},
"name": ""
}, {
"location": {
"latitude": 416851321,
"longitude": -742674555
},
"name": ""
}, {
"location": {
"latitude": 406411633,
"longitude": -741722051
},
"name": "3387 Richmond Terrace, Staten Island, NY 10303, USA"
}, {
"location": {
"latitude": 413069058,
"longitude": -744597778
},
"name": "261 Van Sickle Road, Goshen, NY 10924, USA"
}, {
"location": {
"latitude": 418465462,
"longitude": -746859398
},
"name": ""
}, {
"location": {
"latitude": 411733222,
"longitude": -744228360
},
"name": ""
}, {
"location": {
"latitude": 410248224,
"longitude": -747127767
},
"name": "3 Hasta Way, Newton, NJ 07860, USA"
}]
"""
def read_route_guide_database():
feature_list = []
for item in json.loads(_jsondb):
feature = route_guide_pb2.Feature(
name=item["name"],
location=route_guide_pb2.Point(
latitude=item["location"]["latitude"],
longitude=item["location"]["longitude"]))
feature_list.append(feature)
return feature_list
db = read_route_guide_database ()
if __name__ == "__main__":
import skitai
skitai.mount = ("/routeguide.RouteGuide", app)
skitai.run (
address = "0.0.0.0",
port = 30371
)
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Glance documentation build configuration file, created by
# sphinx-quickstart on Tue May 18 13:50:15 2010.
#
# This file is execfile()'d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import subprocess
import sys
import warnings
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path = [
os.path.abspath('../..'),
os.path.abspath('../../bin')
] + sys.path
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.graphviz',
'oslosphinx',
'stevedore.sphinxext',
'oslo_config.sphinxext',
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'oslo_config.sphinxconfiggen',
]
config_generator_config_file = [
('../../etc/oslo-config-generator/glance-api.conf',
'_static/glance-api'),
('../../etc/oslo-config-generator/glance-cache.conf',
'_static/glance-cache'),
('../../etc/oslo-config-generator/glance-glare.conf',
'_static/glance-glare'),
('../../etc/oslo-config-generator/glance-manage.conf',
'_static/glance-manage'),
('../../etc/oslo-config-generator/glance-registry.conf',
'_static/glance-registry'),
('../../etc/oslo-config-generator/glance-scrubber.conf',
'_static/glance-scrubber'),
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = []
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Glance'
copyright = u'2010-present, OpenStack Foundation.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from glance.version import version_info as glance_version
# The full version, including alpha/beta/rc tags.
release = glance_version.version_string_with_vcs()
# The short X.Y version.
version = glance_version.canonical_version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
#exclude_trees = ['api']
exclude_patterns = [
# The man directory includes some snippet files that are included
# in other documents during the build but that should not be
# included in the toctree themselves, so tell Sphinx to ignore
# them when scanning for input files.
'man/footer.rst',
'man/general_options.rst',
'man/openstack_options.rst',
]
# The reST default role (for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['glance.']
# -- Options for man page output --------------------------------------------
# Grouping the document tree for man pages.
# List of tuples: (source file, target name, title, authors, manual section).
man_pages = [
('man/glanceapi', 'glance-api', u'Glance API Server',
[u'OpenStack'], 1),
('man/glancecachecleaner', 'glance-cache-cleaner', u'Glance Cache Cleaner',
[u'OpenStack'], 1),
('man/glancecachemanage', 'glance-cache-manage', u'Glance Cache Manager',
[u'OpenStack'], 1),
('man/glancecacheprefetcher', 'glance-cache-prefetcher',
u'Glance Cache Pre-fetcher', [u'OpenStack'], 1),
('man/glancecachepruner', 'glance-cache-pruner', u'Glance Cache Pruner',
[u'OpenStack'], 1),
    ('man/glancecontrol', 'glance-control', u'Glance Daemon Control Helper',
[u'OpenStack'], 1),
('man/glancemanage', 'glance-manage', u'Glance Management Utility',
[u'OpenStack'], 1),
('man/glanceregistry', 'glance-registry', u'Glance Registry Server',
[u'OpenStack'], 1),
('man/glancereplicator', 'glance-replicator', u'Glance Replicator',
[u'OpenStack'], 1),
('man/glancescrubber', 'glance-scrubber', u'Glance Scrubber Service',
[u'OpenStack'], 1)
]
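# Illustrative note (added here, not from the original config): the tuples
# above feed the Sphinx "man" builder, so a command such as
# `sphinx-build -b man <sourcedir> <outdir>` would render glance-api.1,
# glance-manage.1, etc. from the listed source documents.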
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
"-n1"]
try:
html_last_updated_fmt = subprocess.Popen(
git_cmd, stdout=subprocess.PIPE).communicate()[0]
except Exception:
warnings.warn('Cannot get last updated time from git repository. '
'Not setting "html_last_updated_fmt".')
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'glancedoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual]).
latex_documents = [
('index', 'Glance.tex', u'Glance Documentation',
u'Glance Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
|
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for summarization.
"""
# You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments.
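# Illustrative invocation (a sketch only; the script file name and every flag
# value below are examples, not requirements):
#
#   python run_summarization.py \
#       --model_name_or_path t5-small \
#       --dataset_name xsum \
#       --source_prefix "summarize: " \
#       --do_train --do_eval \
#       --per_device_train_batch_size 8 \
#       --output_dir /tmp/tst-summarization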
import logging
import os
import sys
from dataclasses import dataclass, field
from functools import partial
from typing import Optional
import datasets
import nltk # Here to have a nice missing dependency error message early on
import numpy as np
import tensorflow as tf
from datasets import load_dataset, load_metric
from tqdm import tqdm
import transformers
from filelock import FileLock
from transformers import (
AutoConfig,
AutoTokenizer,
HfArgumentParser,
TFAutoModelForSeq2SeqLM,
TFTrainingArguments,
create_optimizer,
set_seed,
)
from transformers.file_utils import is_offline_mode
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# region Checking dependencies
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.18.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")
logger = logging.getLogger(__name__)
try:
nltk.data.find("tokenizers/punkt")
except (LookupError, OSError):
if is_offline_mode():
raise LookupError(
"Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files"
)
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
# endregion
# region Arguments
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
text_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
)
summary_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the summaries (for summarization)."},
)
train_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."}
)
validation_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input evaluation data file to evaluate the metrics (rouge) on "
"(a jsonlines or csv file)."
},
)
test_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input test data file to evaluate the metrics (rouge) on " "(a jsonlines or csv file)."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_source_length: Optional[int] = field(
default=1024,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
max_target_length: Optional[int] = field(
default=128,
metadata={
"help": "The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
val_max_target_length: Optional[int] = field(
default=None,
metadata={
"help": "The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`."
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to model maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
},
)
num_beams: Optional[int] = field(
default=None,
metadata={
"help": "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
"which is used during ``evaluate`` and ``predict``."
},
)
ignore_pad_token_for_loss: bool = field(
default=True,
metadata={
"help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."
},
)
source_prefix: Optional[str] = field(
default=None, metadata={"help": "A prefix to add before every source text (useful for T5 models)."}
)
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if self.val_max_target_length is None:
self.val_max_target_length = self.max_target_length
# endregion
# region Dataset name mappings
summarization_name_mapping = {
"amazon_reviews_multi": ("review_body", "review_title"),
"big_patent": ("description", "abstract"),
"cnn_dailymail": ("article", "highlights"),
"orange_sum": ("text", "summary"),
"pn_summary": ("article", "summary"),
"psc": ("extract_text", "summary_text"),
"samsum": ("dialogue", "summary"),
"thaisum": ("body", "summary"),
"xglue": ("news_body", "news_title"),
"xsum": ("document", "summary"),
"wiki_summary": ("article", "highlights"),
}
# endregion
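# For example, with --dataset_name xsum and no explicit --text_column /
# --summary_column, the mapping above selects "document" as the input text
# column and "summary" as the target column (see the column-selection logic
# in main() below).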
# region Data generator
def sample_generator(dataset, model, tokenizer, shuffle, pad_to_multiple_of=None):
if shuffle:
sample_ordering = np.random.permutation(len(dataset))
else:
sample_ordering = np.arange(len(dataset))
for sample_idx in sample_ordering:
example = dataset[int(sample_idx)]
# Handle dicts with proper padding and conversion to tensor.
example = tokenizer.pad(example, return_tensors="np", pad_to_multiple_of=pad_to_multiple_of)
example = {key: tf.convert_to_tensor(arr, dtype_hint=tf.int32) for key, arr in example.items()}
if model is not None and hasattr(model, "prepare_decoder_input_ids_from_labels"):
decoder_input_ids = model.prepare_decoder_input_ids_from_labels(
labels=tf.expand_dims(example["labels"], 0)
)
example["decoder_input_ids"] = tf.squeeze(decoder_input_ids, 0)
yield example, example["labels"] # TF needs some kind of labels, even if we don't use them
return
# endregion
# region Helper functions
def dataset_to_tf(dataset, model, tokenizer, total_batch_size, num_epochs, shuffle):
if dataset is None:
return None
train_generator = partial(sample_generator, dataset, model, tokenizer, shuffle=shuffle)
train_signature = {
feature: tf.TensorSpec(shape=(None,), dtype=tf.int32)
for feature in dataset.features
if feature != "special_tokens_mask"
}
if (
model is not None
and "decoder_input_ids" not in train_signature
and hasattr(model, "prepare_decoder_input_ids_from_labels")
):
train_signature["decoder_input_ids"] = train_signature["labels"]
# This may need to be changed depending on your particular model or tokenizer!
padding_values = {
key: tf.convert_to_tensor(tokenizer.pad_token_id if tokenizer.pad_token_id is not None else 0, dtype=tf.int32)
for key in train_signature.keys()
}
padding_values["labels"] = tf.convert_to_tensor(-100, dtype=tf.int32)
train_signature["labels"] = train_signature["input_ids"]
train_signature = (train_signature, train_signature["labels"])
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
tf_dataset = (
tf.data.Dataset.from_generator(train_generator, output_signature=train_signature)
.with_options(options)
.padded_batch(
batch_size=total_batch_size,
drop_remainder=True,
padding_values=(padding_values, np.array(-100, dtype=np.int32)),
)
.repeat(int(num_epochs))
)
return tf_dataset
# endregion
def main():
# region Argument parsing
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# endregion
# region Logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO)
datasets.utils.logging.set_verbosity(logging.INFO)
transformers.utils.logging.set_verbosity(logging.INFO)
# Log on each process the small summary:
logger.info(f"Training/evaluation parameters {training_args}")
# endregion
# region T5 special-casing
if data_args.source_prefix is None and model_args.model_name_or_path in [
"t5-small",
"t5-base",
"t5-large",
"t5-3b",
"t5-11b",
]:
logger.warning(
"You're running a t5 model but didn't provide a source prefix, which is the expected, e.g. with "
"`--source_prefix 'summarize: ' `"
)
# endregion
# region Detecting last checkpoint
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# endregion
# Set seed before initializing model.
set_seed(training_args.seed)
# region Load datasets
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files this script will use the first column for the full texts and the second column for the
# summaries (unless you specify column names for this with the `text_column` and `summary_column` arguments).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.validation_file.split(".")[-1]
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# endregion
# region Load model config and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
prefix = data_args.source_prefix if data_args.source_prefix is not None else ""
# endregion
# region Dataset preprocessing
# We need to tokenize inputs and targets.
if training_args.do_train:
column_names = raw_datasets["train"].column_names
elif training_args.do_eval:
column_names = raw_datasets["validation"].column_names
else:
logger.info("There is nothing to do. Please pass `do_train`, and/or `do_eval`.")
return
# Get the column names for input/target.
dataset_columns = summarization_name_mapping.get(data_args.dataset_name, None)
if data_args.text_column is None:
text_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
else:
text_column = data_args.text_column
if text_column not in column_names:
raise ValueError(
f"--text_column' value '{data_args.text_column}' needs to be one of: {', '.join(column_names)}"
)
if data_args.summary_column is None:
summary_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
else:
summary_column = data_args.summary_column
if summary_column not in column_names:
raise ValueError(
f"--summary_column' value '{data_args.summary_column}' needs to be one of: {', '.join(column_names)}"
)
# Temporarily set max_target_length for training.
max_target_length = data_args.max_target_length
padding = "max_length" if data_args.pad_to_max_length else False
def preprocess_function(examples):
inputs = examples[text_column]
targets = examples[summary_column]
inputs = [prefix + inp for inp in inputs]
model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)
# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
if padding == "max_length" and data_args.ignore_pad_token_for_loss:
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
return model_inputs
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = raw_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
with training_args.main_process_first(desc="train dataset map pre-processing"):
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on train dataset",
)
else:
train_dataset = None
if training_args.do_eval:
max_target_length = data_args.val_max_target_length
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
with training_args.main_process_first(desc="validation dataset map pre-processing"):
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
else:
eval_dataset = None
# endregion
# region Text preprocessing
def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
labels = [label.strip() for label in labels]
# rougeLSum expects newline after each sentence
preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in preds]
labels = ["\n".join(nltk.sent_tokenize(label)) for label in labels]
return preds, labels
# endregion
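    # Illustrative example (not part of the original script): postprocess_text
    # splits each prediction into sentences and rejoins them with newlines, e.g.
    #   postprocess_text(["First sentence. Second one."], ["Reference one. Reference two."])
    #   -> (["First sentence.\nSecond one."], ["Reference one.\nReference two."])
    # because the rougeLSum metric expects one sentence per line.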
with training_args.strategy.scope():
# region Prepare model
model = TFAutoModelForSeq2SeqLM.from_pretrained(
model_args.model_name_or_path,
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model.resize_token_embeddings(len(tokenizer))
# endregion
# region Prepare TF Dataset objects
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
num_replicas = training_args.strategy.num_replicas_in_sync
total_train_batch_size = training_args.per_device_train_batch_size * num_replicas
total_eval_batch_size = training_args.per_device_eval_batch_size * num_replicas
tf_train_dataset = dataset_to_tf(
train_dataset,
model,
tokenizer,
total_batch_size=total_train_batch_size,
num_epochs=training_args.num_train_epochs,
shuffle=True,
)
tf_eval_dataset = dataset_to_tf(
eval_dataset,
model,
tokenizer,
total_eval_batch_size,
num_epochs=1,
shuffle=False,
)
# endregion
# region Optimizer, loss and LR scheduling
# Scheduler and math around the number of training steps.
num_update_steps_per_epoch = len(train_dataset) // total_train_batch_size
num_train_steps = training_args.num_train_epochs * num_update_steps_per_epoch
optimizer, lr_schedule = create_optimizer(
init_lr=training_args.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=0
)
def masked_sparse_categorical_crossentropy(y_true, y_pred):
# We clip the negative labels to 0 to avoid NaNs appearing in the output and
# fouling up everything that comes afterwards. The loss values corresponding to clipped values
# will be masked later anyway, but even masked NaNs seem to cause overflows for some reason.
# 1e6 is chosen as a reasonable upper bound for the number of token indices - in the unlikely
# event that you have more than 1 million tokens in your vocabulary, consider increasing this value.
# More pragmatically, consider redesigning your tokenizer.
losses = tf.keras.losses.sparse_categorical_crossentropy(
tf.clip_by_value(y_true, 0, int(1e6)), y_pred, from_logits=True
)
# Compute the per-sample loss only over the unmasked tokens
losses = tf.ragged.boolean_mask(losses, y_true != -100)
losses = tf.reduce_mean(losses, axis=-1)
return losses
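        # Illustrative note (not in the original script): for a label row like
        # [12, 7, -100, -100], only the first two token losses survive the
        # boolean mask above, so padded positions never dilute the per-sample
        # mean loss.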
# endregion
# region Metric
metric = load_metric("rouge")
# endregion
# region Training
model.compile(loss={"logits": masked_sparse_categorical_crossentropy}, optimizer=optimizer)
if training_args.do_train:
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {training_args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}")
logger.info(f" Total train batch size = {total_train_batch_size}")
logger.info(f" Total optimization steps = {num_train_steps}")
model.fit(
tf_train_dataset,
epochs=int(training_args.num_train_epochs),
steps_per_epoch=num_update_steps_per_epoch,
)
# endregion
# region Validation
if data_args.val_max_target_length is None:
data_args.val_max_target_length = data_args.max_target_length
gen_kwargs = {
"max_length": data_args.val_max_target_length if data_args is not None else config.max_length,
"num_beams": data_args.num_beams,
}
if training_args.do_eval:
logger.info("Evaluation...")
for batch, labels in tqdm(
tf_eval_dataset, total=len(eval_dataset) // training_args.per_device_eval_batch_size
):
batch.update(gen_kwargs)
generated_tokens = model.generate(**batch)
if isinstance(generated_tokens, tuple):
generated_tokens = generated_tokens[0]
decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
metric.add_batch(predictions=decoded_preds, references=decoded_labels)
result = metric.compute(use_stemmer=True)
# Extract a few results from ROUGE
result = {key: value.mid.fmeasure * 100 for key, value in result.items()}
result = {k: round(v, 4) for k, v in result.items()}
logger.info(result)
# endregion
if training_args.output_dir is not None:
model.save_pretrained(training_args.output_dir)
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
from __future__ import absolute_import
import os
import tct
import sys
from tct import deepget, logstamp_finegrained
params = tct.readjson(sys.argv[1])
facts = tct.readjson(params['factsfile'])
milestones = tct.readjson(params['milestonesfile'])
reason = ''
resultfile = params['resultfile']
result = tct.readjson(resultfile)
toolname = params['toolname']
toolname_pure = params['toolname_pure']
toolchain_name = facts['toolchain_name']
workdir = params['workdir']
loglist = result['loglist'] = result.get('loglist', [])
exitcode = CONTINUE = 0
# ==================================================
# Make a copy of milestones for later inspection?
# --------------------------------------------------
if 0 or milestones.get('debug_always_make_milestones_snapshot'):
tct.make_snapshot_of_milestones(params['milestonesfile'], sys.argv[1])
# ==================================================
# Helper functions
# --------------------------------------------------
def lookup(D, *keys, **kwdargs):
result = deepget(D, *keys, **kwdargs)
loglist.append((keys, result))
return result
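# Example (illustrative): lookup(milestones, 'buildsettings', 'project',
# default='PROJECT') walks the nested milestones dict (via tct.deepget) and
# appends the queried key path together with the value it found to loglist, so
# the saved result file records which milestone values this tool consulted.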
# ==================================================
# define
# --------------------------------------------------
import time
# This is the time that we really finish
time_finished_at_2_unixtime = time.time()
time_finished_at_2 = logstamp_finegrained(
unixtime=time_finished_at_2_unixtime, fmt='%Y-%m-%d %H:%M:%S %f')
age_message = ''
xeq_name_cnt = 0
# ==================================================
# Check params
# --------------------------------------------------
if exitcode == CONTINUE:
loglist.append('CHECK PARAMS')
checksum_ttl_seconds = lookup(milestones, 'checksum_ttl_seconds',
default=1)
if not checksum_ttl_seconds:
exitcode = 22
reason = 'Bad PARAMS or nothing to do'
if exitcode == CONTINUE:
loglist.append('PARAMS are ok')
else:
loglist.append('Bad PARAMS or nothing to do')
# ==================================================
# work
# --------------------------------------------------
achieved = milestones.get('assembled', [])[:]
checksum_time = milestones.get('checksum_time', 0)
masterdoc = milestones.get('masterdoc')
masterdoc_candidates = milestones.get('masterdoc_candidates', [])
masterdoc_selected = milestones.get('masterdoc_selected')
rebuild_needed = milestones.get('rebuild_needed')
talk = milestones.get('talk', 1)
time_started_at = milestones.get('time_started_at', '')
time_started_at_unixtime = milestones.get('time_started_at_unixtime', 0)
if milestones.get('package_file'):
achieved.append('package')
if milestones.get('publish_dir_buildinfo'):
achieved.append('buildinfo')
achieved.sort()
cmdline_reportlines = milestones.get('cmdline_reportlines', [])
if talk:
indent = ' '
print()
print(lookup(milestones, 'buildsettings', 'project', default='PROJECT'),
lookup(milestones, 'buildsettings', 'version', default='VERSION'),
os.path.split(milestones.get('makedir', 'MAKEDIR'))[1],
sep=' : ', end='\n')
print(indent,
'makedir ',
milestones.get('makedir', 'MAKEDIR'), sep='', end='\n')
print(indent,
time_started_at,
', took: ', '%4.2f seconds' % (time_finished_at_2_unixtime
- time_started_at_unixtime),
', toolchain: ', toolchain_name, sep='')
age_seconds = time_finished_at_2_unixtime - checksum_time
age_message = "age %3.1f of %3.1f hours" % (age_seconds / 3600.,
checksum_ttl_seconds / 3600.)
age_message += ", %3.1f of %3.1f days" % (age_seconds / 3600. / 24.,
checksum_ttl_seconds / 3600.
/ 24.)
if rebuild_needed:
cause = 'because of '
if milestones.get('rebuild_needed_because_of_change'):
cause += 'change'
elif milestones.get('rebuild_needed_because_of_age'):
cause += 'age'
elif milestones.get('rebuild_needed_run_command'):
cause += 'parameter'
elif milestones.get('rebuild_needed_tctconfig'):
cause += 'config'
else:
cause += '???'
print(indent, 'REBUILD_NEEDED ', cause, ', ', age_message, sep='')
print(indent, 'OK: ', ', '.join(achieved), sep='')
else:
print(indent, 'still ok, ', age_message, sep='')
print()
if cmdline_reportlines:
for line in cmdline_reportlines:
print(indent, line, sep='')
print()
if talk > 1:
if exitcode == CONTINUE:
if talk > 1:
if achieved:
s = ', '.join(sorted(achieved))
else:
s = 'nothing'
print("Produced: %s" % s)
if exitcode == CONTINUE:
if talk > 1:
duration = ''
if time_started_at_unixtime and time_finished_at_2_unixtime:
duration = 'duration: %4.2f seconds' % (
time_finished_at_2_unixtime - time_started_at_unixtime)
print(time_finished_at_2, duration)
if 1:
if not masterdoc:
print('ATTENTION:\n'
'\n'
' No documentation found! No documentation rendered!\n'
'\n'
' Reason: None of the possible starting files (called \n'
' "masterdoc") could not be found. Please provide at\n'
' least one of the following. They will be taken into\n'
' account in this order of preference:\n')
for i, masterdoc_name in enumerate(masterdoc_candidates):
print(' %s. %s' % (i+1, masterdoc_name))
print('\n'
' Find more information at '
'https://docs.typo3.org/typo3cms/HowToDocument/\n')
# ==================================================
# Set MILESTONE
# --------------------------------------------------
if time_finished_at_2:
result['MILESTONES'].append({'time_finished_at_2': time_finished_at_2})
if time_finished_at_2_unixtime:
result['MILESTONES'].append({
'time_finished_at_2_unixtime': time_finished_at_2_unixtime})
if 'html' in milestones.get('builds_successful', []):
    # 0 means: Toolchain did finish and 'html' was built
result['MILESTONES'].append({'FINAL_EXITCODE': 0})
print(
' ------------------------------------------------\n'
' FINAL STATUS is: SUCCESS (exitcode 0)\n'
' because HTML builder succeeded')
else:
print(
' ------------------------------------------------\n'
' FINAL STATUS is: FAILURE (exitcode 255)\n'
' because HTML builder failed')
if (not milestones.get("disable_include_files_check")
and not milestones.get("included_files_check_is_ok")
):
print(
' ------------------------------------------------\n'
' An attempt was made to include a file external to the project.\n'
' This prevents any build.')
print(' ------------------------------------------------')
# ==================================================
# save result
# --------------------------------------------------
tct.save_the_result(result, resultfile, params, facts, milestones, exitcode, CONTINUE, reason)
# ==================================================
# Return with proper exitcode
# --------------------------------------------------
sys.exit(exitcode)
|
|
#!/usr/bin/env python3
#
# Copyright (c) 2017 Weitian LI <weitian@aaronly.me>
# MIT License
#
"""
Make/simulate the X-ray photon list from the object's image and
spectral models.
The simulated X-ray photon list will be used to simulate the
Suzaku event observation by ``xissim`` tool.
This script is intended to replace and extend the abilities of the
``mkphlist`` tool.
NOTE
----
The environment variable ``HEADAS`` should be set in order to help
locate the ``PyXspec`` module and XSPEC shared libraries.
References
----------
* mkphlist: https://heasarc.gsfc.nasa.gov/lheasoft/ftools/headas/mkphlist.txt
* xissim: https://heasarc.gsfc.nasa.gov/lheasoft/ftools/headas/xissim.txt
* PyXspec: https://heasarc.gsfc.nasa.gov/xanadu/xspec/python/html/index.html
Example Configuration File
-----------------------------------------------------------------------
# image to determine the photon counts distribution
image: imgbox800_e500-7000_sm.fits
# region (annuli below) center; in "image" coordinate
center: [400, 399]
nh: 0.03 # 1e22 [cm^-2]
redshift: 0.0137
# simulated photon energy range [keV]
erange: [0.3, 10.0]
# number of energy bins (logarithmic)
ebins: 1000
# total photon counts that will be generated
counts: 300000
# exposure [ks]
exposure: 50
# a set of annular regions, with several pie regions inside each
# annulus; each pie region can have a different spectral model.
regions:
# annulus 1, with 3 pies
- radius: [0, 100]
angle: [0, 120, 200]
temperature: [1.0, 1.5, 2.0]
abundance: [0.5, 1.0, 1.5]
weight: [1, 2, 1.5]
# annulus 2, with 3 pies
- radius: [100, 200]
angle: [0, 90, 250]
temperature: [0.5, 1.0, 1.5]
abundance: [1.5, 2.0, 1.0]
weight: [0.5, 1, 1.5]
# annulus 3, with 4 pies
- radius: [200, 400]
angle: [50, 150, 220, 300]
temperature: [0.8, 1.2, 1.5, 1.3]
abundance: [1.1, 2.0, 1.5, 1.2]
weight: [0.2, 1.5, 0.7, 2]
clobber: True
outfiles:
photons_table: photons.fits
counts_map: counts_map.fits
temperature_map: temperature_map.fits
abundance_map: abundance_map.fits
-----------------------------------------------------------------------
"""
import os
import sys
try:
headas = os.environ["HEADAS"]
healib = os.path.join(headas, "lib")
except KeyError:
raise ValueError("env variable 'HEADAS' not set")
if ("LD_LIBRARY_PATH" not in os.environ) or (
os.environ["LD_LIBRARY_PATH"].find(healib) < 0):
os.environ["LD_LIBRARY_PATH"] = ":".join([
healib, os.environ.get("LD_LIBRARY_PATH", "")
])
try:
# Hack the ``LD_LIBRARY_PATH`` to import Xspec
# Credit: https://stackoverflow.com/a/25457751/4856091
print("sys.argv:", sys.argv)
os.execv(sys.argv[0], sys.argv)
except Exception:
print("ERROR: failed to re-exec with new LD_LIBRARY_PATH")
raise
sys.path.append(os.path.join(healib, "python"))
import xspec
print("Imported XSPEC!")
import argparse
import logging
from pprint import pprint
import yaml
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
logging.basicConfig(level=logging.INFO,
format="[%(levelname)s:%(lineno)d] %(message)s")
logger = logging.getLogger()
class Pie:
"""
Pie region
"""
def __init__(self, xc, yc, rin, rout, abegin, aend):
self.xc = xc
self.yc = yc
self.rin = rin
self.rout = rout
self.abegin = abegin # [deg] beginning angle
self.aend = aend # [deg] ending angle (may be > 360)
# spectral model parameters
self._modelpars = {}
@staticmethod
def cart2pol(x, y):
rho = np.sqrt(x**2 + y**2)
phi = 180 + np.rad2deg(np.arctan2(y, x)) # 0-360 [deg]
return (rho, phi)
def make_mask(self, shape):
try:
nrow, ncol = shape
except TypeError:
nrow = ncol = shape
# HACK: to make the masks consistent with ``rand_position()``
ix = self.xc - np.arange(ncol)
iy = self.yc - np.arange(nrow)
mx, my = np.meshgrid(ix, iy)
rho, phi = self.cart2pol(mx, my)
mask_rho = (rho >= self.rin) & (rho <= self.rout)
mask_phi = (phi >= self.abegin) & (phi <= self.aend)
if self.aend > 360:
mask_phi |= (phi <= (self.aend-360))
mask = mask_rho & mask_phi
return mask
def rand_position(self, n=None):
if n is None:
n = self.modelpar("counts")
theta = np.random.uniform(low=self.abegin, high=self.aend, size=n)
r = np.sqrt(np.random.uniform(low=self.rin**2, high=self.rout**2,
size=n))
x = r * np.cos(np.deg2rad(theta)) + self.xc
y = r * np.sin(np.deg2rad(theta)) + self.yc
return (x, y)
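    # Note (added for clarity): sampling r as sqrt(U(rin^2, rout^2)) rather
    # than U(rin, rout) makes the generated positions uniform per unit *area*
    # of the annular sector, since the enclosed area grows with r^2.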
def modelpar(self, key=None, value=None):
if key is None:
return self._modelpars
elif value is None:
return self._modelpars.get(key)
else:
self._modelpars[key] = value
def set_model(self, nh, redshift):
model = xspec.Model("wabs*apec")
model.wabs.nH = nh
model.apec.Redshift = redshift
model.apec.kT = self.modelpar("temperature")
model.apec.Abundanc = self.modelpar("abundance")
self._model = model
def rand_photons(self, n=None):
if n is None:
n = self.modelpar("counts")
model = self._model
mvalues = np.array(model.values(0), dtype=float) # len: ebins
p = mvalues / mvalues.sum()
menergies = np.array(model.energies(0), dtype=float) # len: ebins+1
mebins = np.sqrt(menergies[1:] * menergies[:-1])
photons = np.random.choice(mebins, size=n, p=p)
return photons # [keV]
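    # ``rand_photons()`` treats the normalised model spectrum as a discrete
    # probability distribution over the logarithmic energy grid and draws
    # photon energies with ``np.random.choice``; each returned energy is the
    # geometric mean of its bin edges.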
class Regions:
"""
Configured regions
"""
def __init__(self, configs):
self.configs = configs
self.xc, self.yc = configs["center"]
@property
def rmax(self):
rmax = 0
for annulus in self.configs["regions"]:
rin, rout = annulus["radius"]
if rmax < rout:
rmax = rout
return rmax
def make_mask(self, shape):
try:
nrow, ncol = shape
except TypeError:
nrow = ncol = shape
ix = np.arange(ncol) - self.xc
iy = np.arange(nrow) - self.yc
mx, my = np.meshgrid(ix, iy)
rho = np.sqrt(mx**2 + my**2)
mask = (rho <= self.rmax)
return mask
@property
def regions(self):
reg_all = []
for annulus in self.configs["regions"]:
reg_annulus = []
rin, rout = annulus["radius"]
abegin = annulus["angle"]
aend = abegin[1:] + [abegin[0]+360]
npie = len(abegin)
temperature = annulus["temperature"]
abundance = annulus["abundance"]
weight = annulus.get("weight", [1]*npie)
for i in range(npie):
pie = Pie(xc=self.xc, yc=self.yc, rin=rin, rout=rout,
abegin=abegin[i], aend=aend[i])
pie.modelpar("temperature", temperature[i])
pie.modelpar("abundance", abundance[i])
pie.modelpar("weight", weight[i])
reg_annulus.append(pie)
reg_all.append(reg_annulus)
return reg_all
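    # Note: within each annulus the i-th pie ends where the (i+1)-th begins,
    # and the last pie wraps around to ``abegin[0] + 360``; ending angles
    # greater than 360 deg are handled by the ``aend > 360`` branch in
    # ``Pie.make_mask()``.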
def pixel2world(x, y, wcs):
pix = np.column_stack([x, y])
world = wcs.wcs_pix2world(pix, 0)
ra = world[:, 0]
dec = world[:, 1]
return (ra, dec) # [deg]
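# Note: ``wcs_pix2world(..., 0)`` above uses 0-based (C-style) pixel
# coordinates, consistent with the 0-based array indices used for the masks
# and the histogram binning in main().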
def main():
parser = argparse.ArgumentParser(
description="Make/simulate X-ray photon list for Suzaku simulation")
parser.add_argument("config", help="configuration file in YAML format")
args = parser.parse_args()
    with open(args.config) as f:
        configs = yaml.safe_load(f)
logger.info("Load configuration file: %s" % args.config)
logger.info("Configurations:")
pprint(configs)
# Update XSPEC settings
emin, emax = configs["erange"] # [keV]
ebins = configs["ebins"]
xspec.AllModels.setEnergies("%.1f %.1f %d log" % (emin, emax, ebins))
logger.info("Energy range: [%.1f, %.1f] [keV]" % (emin, emax))
logger.info("Energy: %d logarithmic channels" % ebins)
with fits.open(configs["image"]) as f:
header = f[0].header
image = f[0].data
shape = image.shape
logger.info("Image size: %dx%d" % (shape[1], shape[0]))
wcs = WCS(header)
regions = Regions(configs)
reg_all = regions.regions
mask_all = regions.make_mask(shape=shape)
weight_all = np.sum(image[mask_all])
counts_all = configs["counts"]
logger.info("Total counts: %d" % counts_all)
logger.info("nH: %.4f [1e22 cm^-2]" % configs["nh"])
logger.info("Redshift: %.5f" % configs["redshift"])
exposure = configs["exposure"] * 1e3 # [s]
logger.info("Exposure time: %.1f [s]" % exposure)
logger.info("Determining photons counts in each region ...")
counts_sum = 0
for i, annulus in enumerate(reg_all):
for j, pie in enumerate(annulus):
label = "annu#%d/pie#%d" % (i+1, j+1)
mask = pie.make_mask(shape=shape)
pixels = np.sum(mask)
weight = np.sum(image[mask]) * pie.modelpar("weight")
counts = int(counts_all * weight / weight_all)
counts_sum += counts
pie.modelpar("pixels", pixels)
pie.modelpar("counts", counts)
logger.info("%s: %d pixels, %d photons" % (label, pixels, counts))
logger.info("Determined counts sum: %d" % counts_sum)
logger.info("Adjusting total counts -> %d" % counts_all)
for i, annulus in enumerate(reg_all):
for j, pie in enumerate(annulus):
label = "annu#%d/pie#%d" % (i+1, j+1)
counts_old = pie.modelpar("counts")
counts_new = round(counts_old * counts_all / counts_sum)
pie.modelpar("counts", counts_new)
logger.info("%s: adjusted photon counts: %d -> %d" %
(label, counts_old, counts_new))
# Output files
temp_map = np.zeros_like(image)
abund_map = np.zeros_like(image)
counts_map = np.zeros_like(image)
weights_map = np.zeros_like(image)
photonlist = []
for i, annulus in enumerate(reg_all):
for j, pie in enumerate(annulus):
label = "annu#%d/pie#%d" % (i+1, j+1)
pie.set_model(nh=configs["nh"], redshift=configs["redshift"])
mask = pie.make_mask(shape=shape)
temp = pie.modelpar("temperature")
abund = pie.modelpar("abundance")
counts = pie.modelpar("counts")
logger.info("%s: kT=%.2f, Z=%.2f, %d photons" %
(label, temp, abund, counts))
logger.info("%s: sampling photon positions ..." % label)
x, y = pie.rand_position(n=counts)
ra, dec = pixel2world(x, y, wcs=wcs)
logger.info("%s: sampling photon energies ..." % label)
energies = pie.rand_photons(n=counts)
time = np.random.uniform(low=0, high=exposure, size=counts)
photons = np.column_stack([time, energies, ra, dec])
photonlist.append(photons)
logger.info("%s: spatially binning photons ..." % label)
rbins = np.arange(shape[0]+1, dtype=int)
cbins = np.arange(shape[1]+1, dtype=int)
hist2d, __, __ = np.histogram2d(y, x, bins=(rbins, cbins))
counts_map += hist2d
temp_map[mask] = temp
abund_map[mask] = abund
weights_map[mask] = pie.modelpar("weight")
logger.info("Creating output FITS header ...")
header_out = fits.Header()
header_out.extend(wcs.to_header(), update=True)
header_out["CREATOR"] = os.path.basename(sys.argv[0])
header_out.add_history(" ".join(sys.argv))
logger.info("Creating photons table ...")
photons = np.row_stack(photonlist)
photons = photons[photons[:, 0].argsort()] # sort by time (1st column)
hdu = fits.BinTableHDU.from_columns([
fits.Column(name="PHOTON_TIME", format="D", unit="s",
array=photons[:, 0]),
fits.Column(name="PHOTON_ENERGY", format="E", unit="keV",
array=photons[:, 1]),
fits.Column(name="RA", format="E", unit="deg", array=photons[:, 2]),
fits.Column(name="DEC", format="E", unit="deg", array=photons[:, 3]),
], header=header_out)
hdu.name = "PHOTON_LIST"
outfile = configs["outfiles"]["photons_table"]
hdu.writeto(outfile, overwrite=configs["clobber"])
logger.info("Wrote photons table to: %s" % outfile)
data = np.stack([counts_map, weights_map], axis=0)
hdu = fits.PrimaryHDU(data=data, header=header_out)
outfile = configs["outfiles"]["counts_map"]
hdu.writeto(outfile, overwrite=configs["clobber"])
logger.info("Wrote counts/weights map to: %s" % outfile)
#
hdu = fits.PrimaryHDU(data=temp_map, header=header_out)
outfile = configs["outfiles"]["temperature_map"]
hdu.writeto(outfile, overwrite=configs["clobber"])
logger.info("Wrote temperature map to: %s" % outfile)
#
hdu = fits.PrimaryHDU(data=abund_map, header=header_out)
outfile = configs["outfiles"]["abundance_map"]
hdu.writeto(outfile, overwrite=configs["clobber"])
logger.info("Wrote abundance map to: %s" % outfile)
if __name__ == "__main__":
main()
|
|
"""Config flow to configure Philips Hue."""
import asyncio
import json
import os
from aiohue.discovery import discover_nupnp
import async_timeout
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client
from .bridge import get_bridge
from .const import DOMAIN, LOGGER
from .errors import AuthenticationRequired, CannotConnect
HUE_MANUFACTURERURL = "http://www.philips.com"
@callback
def configured_hosts(hass):
"""Return a set of the configured hosts."""
return set(
entry.data["host"] for entry in hass.config_entries.async_entries(DOMAIN)
)
def _find_username_from_config(hass, filename):
"""Load username from config.
This was a legacy way of configuring Hue until Home Assistant 0.67.
"""
path = hass.config.path(filename)
if not os.path.isfile(path):
return None
with open(path) as inp:
try:
return list(json.load(inp).values())[0]["username"]
except ValueError:
# If we get invalid JSON
return None
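# For reference, the legacy phue config file read above is JSON of the form
# (illustrative values): {"192.168.1.10": {"username": "registered-username"}},
# hence the ``list(json.load(inp).values())[0]["username"]`` lookup.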
@config_entries.HANDLERS.register(DOMAIN)
class HueFlowHandler(config_entries.ConfigFlow):
"""Handle a Hue config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Initialize the Hue flow."""
self.host = None
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
return await self.async_step_init(user_input)
async def async_step_init(self, user_input=None):
"""Handle a flow start."""
if user_input is not None:
self.host = user_input["host"]
return await self.async_step_link()
websession = aiohttp_client.async_get_clientsession(self.hass)
try:
with async_timeout.timeout(5):
bridges = await discover_nupnp(websession=websession)
except asyncio.TimeoutError:
return self.async_abort(reason="discover_timeout")
if not bridges:
return self.async_abort(reason="no_bridges")
# Find already configured hosts
configured = configured_hosts(self.hass)
hosts = [bridge.host for bridge in bridges if bridge.host not in configured]
if not hosts:
return self.async_abort(reason="all_configured")
if len(hosts) == 1:
self.host = hosts[0]
return await self.async_step_link()
return self.async_show_form(
step_id="init",
data_schema=vol.Schema({vol.Required("host"): vol.In(hosts)}),
)
async def async_step_link(self, user_input=None):
"""Attempt to link with the Hue bridge.
Given a configured host, will ask the user to press the link button
to connect to the bridge.
"""
errors = {}
# We will always try linking in case the user has already pressed
# the link button.
try:
bridge = await get_bridge(self.hass, self.host, username=None)
return await self._entry_from_bridge(bridge)
except AuthenticationRequired:
errors["base"] = "register_failed"
except CannotConnect:
LOGGER.error("Error connecting to the Hue bridge at %s", self.host)
errors["base"] = "linking"
except Exception: # pylint: disable=broad-except
LOGGER.exception(
"Unknown error connecting with Hue bridge at %s", self.host
)
errors["base"] = "linking"
# If there was no user input, do not show the errors.
if user_input is None:
errors = {}
return self.async_show_form(step_id="link", errors=errors)
async def async_step_ssdp(self, discovery_info):
"""Handle a discovered Hue bridge.
This flow is triggered by the SSDP component. It will check if the
host is already configured and delegate to the import step if not.
"""
from homeassistant.components.ssdp import ATTR_MANUFACTURERURL
if discovery_info[ATTR_MANUFACTURERURL] != HUE_MANUFACTURERURL:
return self.async_abort(reason="not_hue_bridge")
# Filter out emulated Hue
if "HASS Bridge" in discovery_info.get("name", ""):
return self.async_abort(reason="already_configured")
# pylint: disable=unsupported-assignment-operation
host = self.context["host"] = discovery_info.get("host")
if any(host == flow["context"]["host"] for flow in self._async_in_progress()):
return self.async_abort(reason="already_in_progress")
if host in configured_hosts(self.hass):
return self.async_abort(reason="already_configured")
# This value is based off host/description.xml and is, weirdly, missing
# 4 characters in the middle of the serial compared to results returned
# from the NUPNP API or when querying the bridge API for bridgeid.
# (on first gen Hue hub)
serial = discovery_info.get("serial")
return await self.async_step_import(
{
"host": host,
# This format is the legacy format that Hue used for discovery
"path": "phue-{}.conf".format(serial),
}
)
async def async_step_homekit(self, homekit_info):
"""Handle HomeKit discovery."""
# pylint: disable=unsupported-assignment-operation
host = self.context["host"] = homekit_info.get("host")
if any(host == flow["context"]["host"] for flow in self._async_in_progress()):
return self.async_abort(reason="already_in_progress")
if host in configured_hosts(self.hass):
return self.async_abort(reason="already_configured")
return await self.async_step_import({"host": host})
async def async_step_import(self, import_info):
"""Import a new bridge as a config entry.
Will read authentication from Phue config file if available.
This flow is triggered by `async_setup` for both configured and
discovered bridges. Triggered for any bridge that does not have a
config entry yet (based on host).
This flow is also triggered by `async_step_discovery`.
If an existing config file is found, we will validate the credentials
and create an entry. Otherwise we will delegate to `link` step which
will ask user to link the bridge.
"""
host = import_info["host"]
path = import_info.get("path")
if path is not None:
username = await self.hass.async_add_job(
_find_username_from_config, self.hass, self.hass.config.path(path)
)
else:
username = None
try:
bridge = await get_bridge(self.hass, host, username)
LOGGER.info("Imported authentication for %s from %s", host, path)
return await self._entry_from_bridge(bridge)
except AuthenticationRequired:
self.host = host
LOGGER.info("Invalid authentication for %s, requesting link.", host)
return await self.async_step_link()
except CannotConnect:
LOGGER.error("Error connecting to the Hue bridge at %s", host)
return self.async_abort(reason="cannot_connect")
except Exception: # pylint: disable=broad-except
LOGGER.exception("Unknown error connecting with Hue bridge at %s", host)
return self.async_abort(reason="unknown")
async def _entry_from_bridge(self, bridge):
"""Return a config entry from an initialized bridge."""
# Remove all other entries of hubs with same ID or host
host = bridge.host
bridge_id = bridge.config.bridgeid
same_hub_entries = [
entry.entry_id
for entry in self.hass.config_entries.async_entries(DOMAIN)
if entry.data["bridge_id"] == bridge_id or entry.data["host"] == host
]
if same_hub_entries:
await asyncio.wait(
[
self.hass.config_entries.async_remove(entry_id)
for entry_id in same_hub_entries
]
)
return self.async_create_entry(
title=bridge.config.name,
data={"host": host, "bridge_id": bridge_id, "username": bridge.username},
)
|
|
# Electrum - Lightweight Bitcoin Client
# Copyright (c) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import queue
import os
import random
import re
from collections import defaultdict
import threading
import socket
import json
import sys
import ipaddress
import asyncio
from typing import NamedTuple, Optional, Sequence, List, Dict, Tuple
import traceback
import dns
import dns.resolver
import aiorpcx
from aiorpcx import TaskGroup
from aiohttp import ClientResponse
from . import util
from .util import (log_exceptions, ignore_exceptions,
bfh, SilentTaskGroup, make_aiohttp_session, send_exception_to_crash_reporter,
is_hash256_str, is_non_negative_integer)
from .bitcoin import COIN
from . import constants
from . import blockchain
from . import bitcoin
from .blockchain import Blockchain, HEADER_SIZE
from .interface import (Interface, serialize_server, deserialize_server,
RequestTimedOut, NetworkTimeout, BUCKET_NAME_OF_ONION_SERVERS)
from .version import PROTOCOL_VERSION
from .simple_config import SimpleConfig
from .i18n import _
from .logging import get_logger, Logger
_logger = get_logger(__name__)
NODES_RETRY_INTERVAL = 60
SERVER_RETRY_INTERVAL = 10
NUM_TARGET_CONNECTED_SERVERS = 10
NUM_RECENT_SERVERS = 20
def parse_servers(result: Sequence[Tuple[str, str, List[str]]]) -> Dict[str, dict]:
""" parse servers list into dict format"""
servers = {}
for item in result:
host = item[1]
out = {}
version = None
pruning_level = '-'
if len(item) > 2:
for v in item[2]:
if re.match(r"[st]\d*", v):
protocol, port = v[0], v[1:]
if port == '': port = constants.net.DEFAULT_PORTS[protocol]
out[protocol] = port
elif re.match("v(.?)+", v):
version = v[1:]
elif re.match(r"p\d*", v):
pruning_level = v[1:]
if pruning_level == '': pruning_level = '0'
if out:
out['pruning'] = pruning_level
out['version'] = version
servers[host] = out
return servers
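# Illustrative call for ``parse_servers()`` (values assumed): an entry such as
#   ["192.0.2.1", "electrum.example.org", ["v1.4", "s50002", "t50001", "p10000"]]
# is parsed into
#   {"electrum.example.org": {"s": "50002", "t": "50001",
#                             "pruning": "10000", "version": "1.4"}}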
def filter_version(servers):
def is_recent(version):
try:
return util.versiontuple(version) >= util.versiontuple(PROTOCOL_VERSION)
except Exception as e:
return False
return {k: v for k, v in servers.items() if is_recent(v.get('version'))}
def filter_noonion(servers):
return {k: v for k, v in servers.items() if not k.endswith('.onion')}
def filter_protocol(hostmap, protocol='s'):
'''Filters the hostmap for those implementing protocol.
The result is a list in serialized form.'''
eligible = []
for host, portmap in hostmap.items():
port = portmap.get(protocol)
if port:
eligible.append(serialize_server(host, port, protocol))
return eligible
def pick_random_server(hostmap=None, protocol='s', exclude_set=None):
if hostmap is None:
hostmap = constants.net.DEFAULT_SERVERS
if exclude_set is None:
exclude_set = set()
eligible = list(set(filter_protocol(hostmap, protocol)) - exclude_set)
return random.choice(eligible) if eligible else None
class NetworkParameters(NamedTuple):
host: str
port: str
protocol: str
proxy: Optional[dict]
auto_connect: bool
oneserver: bool = False
proxy_modes = ['socks4', 'socks5']
def serialize_proxy(p):
if not isinstance(p, dict):
return None
return ':'.join([p.get('mode'), p.get('host'), p.get('port'),
p.get('user', ''), p.get('password', '')])
def deserialize_proxy(s: str) -> Optional[dict]:
if not isinstance(s, str):
return None
if s.lower() == 'none':
return None
proxy = { "mode":"socks5", "host":"localhost" }
# FIXME raw IPv6 address fails here
args = s.split(':')
n = 0
if proxy_modes.count(args[n]) == 1:
proxy["mode"] = args[n]
n += 1
if len(args) > n:
proxy["host"] = args[n]
n += 1
if len(args) > n:
proxy["port"] = args[n]
n += 1
else:
proxy["port"] = "8080" if proxy["mode"] == "http" else "1080"
if len(args) > n:
proxy["user"] = args[n]
n += 1
if len(args) > n:
proxy["password"] = args[n]
return proxy
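# Example round trip (illustrative): deserialize_proxy("socks5:localhost:9050::")
# gives {"mode": "socks5", "host": "localhost", "port": "9050", "user": "",
# "password": ""}, and serialize_proxy() of that dict returns the same string.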
class BestEffortRequestFailed(Exception): pass
class TxBroadcastError(Exception):
def get_message_for_gui(self):
raise NotImplementedError()
class TxBroadcastHashMismatch(TxBroadcastError):
def get_message_for_gui(self):
return "{}\n{}\n\n{}" \
.format(_("The server returned an unexpected transaction ID when broadcasting the transaction."),
_("Consider trying to connect to a different server, or updating Electrum."),
str(self))
class TxBroadcastServerReturnedError(TxBroadcastError):
def get_message_for_gui(self):
return "{}\n{}\n\n{}" \
.format(_("The server returned an error when broadcasting the transaction."),
_("Consider trying to connect to a different server, or updating Electrum."),
str(self))
class TxBroadcastUnknownError(TxBroadcastError):
def get_message_for_gui(self):
return "{}\n{}" \
.format(_("Unknown error when broadcasting the transaction."),
_("Consider trying to connect to a different server, or updating Electrum."))
class UntrustedServerReturnedError(Exception):
def __init__(self, *, original_exception):
self.original_exception = original_exception
def __str__(self):
return _("The server returned an error.")
def __repr__(self):
return f"<UntrustedServerReturnedError original_exception: {repr(self.original_exception)}>"
INSTANCE = None
class Network(Logger):
"""The Network class manages a set of connections to remote electrum
    servers; each connected socket is handled by an Interface() object.
"""
LOGGING_SHORTCUT = 'n'
def __init__(self, config: SimpleConfig=None):
global INSTANCE
INSTANCE = self
Logger.__init__(self)
self.asyncio_loop = asyncio.get_event_loop()
assert self.asyncio_loop.is_running(), "event loop not running"
self._loop_thread = None # type: threading.Thread # set by caller; only used for sanity checks
if config is None:
config = {} # Do not use mutables as default values!
self.config = SimpleConfig(config) if isinstance(config, dict) else config # type: SimpleConfig
blockchain.read_blockchains(self.config)
self.logger.info(f"blockchains {list(map(lambda b: b.forkpoint, blockchain.blockchains.values()))}")
self._blockchain_preferred_block = self.config.get('blockchain_preferred_block', None) # type: Optional[Dict]
self._blockchain = blockchain.get_best_chain()
# Server for addresses and transactions
self.default_server = self.config.get('server', None)
# Sanitize default server
if self.default_server:
try:
deserialize_server(self.default_server)
except:
self.logger.warning('failed to parse server-string; falling back to random.')
self.default_server = None
if not self.default_server:
self.default_server = pick_random_server()
self.main_taskgroup = None # type: TaskGroup
# locks
self.restart_lock = asyncio.Lock()
self.bhi_lock = asyncio.Lock()
self.callback_lock = threading.Lock()
self.recent_servers_lock = threading.RLock() # <- re-entrant
self.interfaces_lock = threading.Lock() # for mutating/iterating self.interfaces
self.server_peers = {} # returned by interface (servers that the main interface knows about)
self.recent_servers = self._read_recent_servers() # note: needs self.recent_servers_lock
self.banner = ''
self.donation_address = ''
self.relay_fee = None # type: Optional[int]
# callbacks set by the GUI
self.callbacks = defaultdict(list) # note: needs self.callback_lock
dir_path = os.path.join(self.config.path, 'certs')
util.make_dir(dir_path)
# retry times
self.server_retry_time = time.time()
self.nodes_retry_time = time.time()
# the main server we are currently communicating with
self.interface = None # type: Interface
# set of servers we have an ongoing connection with
self.interfaces = {} # type: Dict[str, Interface]
self.auto_connect = self.config.get('auto_connect', True)
self.connecting = set()
self.server_queue = None
self.proxy = None
# Dump network messages (all interfaces). Set at runtime from the console.
self.debug = False
self._set_status('disconnected')
def run_from_another_thread(self, coro):
assert self._loop_thread != threading.current_thread(), 'must not be called from network thread'
fut = asyncio.run_coroutine_threadsafe(coro, self.asyncio_loop)
return fut.result()
@staticmethod
def get_instance() -> Optional["Network"]:
return INSTANCE
def with_recent_servers_lock(func):
def func_wrapper(self, *args, **kwargs):
with self.recent_servers_lock:
return func(self, *args, **kwargs)
return func_wrapper
def register_callback(self, callback, events):
with self.callback_lock:
for event in events:
self.callbacks[event].append(callback)
def unregister_callback(self, callback):
with self.callback_lock:
for callbacks in self.callbacks.values():
if callback in callbacks:
callbacks.remove(callback)
def trigger_callback(self, event, *args):
with self.callback_lock:
callbacks = self.callbacks[event][:]
for callback in callbacks:
# FIXME: if callback throws, we will lose the traceback
if asyncio.iscoroutinefunction(callback):
asyncio.run_coroutine_threadsafe(callback(event, *args), self.asyncio_loop)
else:
self.asyncio_loop.call_soon_threadsafe(callback, event, *args)
def _read_recent_servers(self):
if not self.config.path:
return []
path = os.path.join(self.config.path, "recent_servers")
try:
with open(path, "r", encoding='utf-8') as f:
data = f.read()
return json.loads(data)
except:
return []
@with_recent_servers_lock
def _save_recent_servers(self):
if not self.config.path:
return
path = os.path.join(self.config.path, "recent_servers")
s = json.dumps(self.recent_servers, indent=4, sort_keys=True)
try:
with open(path, "w", encoding='utf-8') as f:
f.write(s)
except:
pass
def get_server_height(self):
interface = self.interface
return interface.tip if interface else 0
async def _server_is_lagging(self):
sh = self.get_server_height()
if not sh:
self.logger.info('no height for main interface')
return True
lh = self.get_local_height()
result = (lh - sh) > 1
if result:
self.logger.info(f'{self.default_server} is lagging ({sh} vs {lh})')
return result
def _set_status(self, status):
self.connection_status = status
self.notify('status')
def is_connected(self):
interface = self.interface
return interface is not None and interface.ready.done()
def is_connecting(self):
return self.connection_status == 'connecting'
async def _request_server_info(self, interface):
await interface.ready
session = interface.session
async def get_banner():
self.banner = await session.send_request('server.banner')
self.notify('banner')
async def get_donation_address():
addr = await session.send_request('server.donation_address')
if not bitcoin.is_address(addr):
if addr: # ignore empty string
self.logger.info(f"invalid donation address from server: {repr(addr)}")
addr = ''
self.donation_address = addr
async def get_server_peers():
server_peers = await session.send_request('server.peers.subscribe')
random.shuffle(server_peers)
max_accepted_peers = len(constants.net.DEFAULT_SERVERS) + NUM_RECENT_SERVERS
server_peers = server_peers[:max_accepted_peers]
self.server_peers = parse_servers(server_peers)
self.notify('servers')
async def get_relay_fee():
relayfee = await session.send_request('blockchain.relayfee')
if relayfee is None:
self.relay_fee = None
else:
relayfee = int(relayfee * COIN)
self.relay_fee = max(0, relayfee)
async with TaskGroup() as group:
await group.spawn(get_banner)
await group.spawn(get_donation_address)
await group.spawn(get_server_peers)
await group.spawn(get_relay_fee)
await group.spawn(self._request_fee_estimates(interface))
async def _request_fee_estimates(self, interface):
session = interface.session
from .simple_config import FEE_ETA_TARGETS
self.config.requested_fee_estimates()
async with TaskGroup() as group:
histogram_task = await group.spawn(session.send_request('mempool.get_fee_histogram'))
fee_tasks = []
for i in FEE_ETA_TARGETS:
fee_tasks.append((i, await group.spawn(session.send_request('blockchain.estimatefee', [i]))))
self.config.mempool_fees = histogram = histogram_task.result()
self.logger.info(f'fee_histogram {histogram}')
self.notify('fee_histogram')
fee_estimates_eta = {}
for nblock_target, task in fee_tasks:
fee = int(task.result() * COIN)
fee_estimates_eta[nblock_target] = fee
if fee < 0: continue
self.config.update_fee_estimates(nblock_target, fee)
self.logger.info(f'fee_estimates {fee_estimates_eta}')
self.notify('fee')
def get_status_value(self, key):
if key == 'status':
value = self.connection_status
elif key == 'banner':
value = self.banner
elif key == 'fee':
value = self.config.fee_estimates
elif key == 'fee_histogram':
value = self.config.mempool_fees
elif key == 'servers':
value = self.get_servers()
else:
raise Exception('unexpected trigger key {}'.format(key))
return value
def notify(self, key):
if key in ['status', 'updated']:
self.trigger_callback(key)
else:
self.trigger_callback(key, self.get_status_value(key))
def get_parameters(self) -> NetworkParameters:
host, port, protocol = deserialize_server(self.default_server)
return NetworkParameters(host=host,
port=port,
protocol=protocol,
proxy=self.proxy,
auto_connect=self.auto_connect,
oneserver=self.oneserver)
def get_donation_address(self):
if self.is_connected():
return self.donation_address
def get_interfaces(self) -> List[str]:
"""The list of servers for the connected interfaces."""
with self.interfaces_lock:
return list(self.interfaces)
@with_recent_servers_lock
def get_servers(self):
# note: order of sources when adding servers here is crucial!
# don't let "server_peers" overwrite anything,
# otherwise main server can eclipse the client
out = dict()
# add servers received from main interface
server_peers = self.server_peers
if server_peers:
out.update(filter_version(server_peers.copy()))
# hardcoded servers
out.update(constants.net.DEFAULT_SERVERS)
# add recent servers
for s in self.recent_servers:
try:
host, port, protocol = deserialize_server(s)
except:
continue
if host in out:
out[host].update({protocol: port})
else:
out[host] = {protocol: port}
# potentially filter out some
if self.config.get('noonion'):
out = filter_noonion(out)
return out
def _start_interface(self, server: str):
if server not in self.interfaces and server not in self.connecting:
if server == self.default_server:
self.logger.info(f"connecting to {server} as new interface")
self._set_status('connecting')
self.connecting.add(server)
self.server_queue.put(server)
def _start_random_interface(self):
with self.interfaces_lock:
exclude_set = self.disconnected_servers | set(self.interfaces) | self.connecting
server = pick_random_server(self.get_servers(), self.protocol, exclude_set)
if server:
self._start_interface(server)
return server
def _set_proxy(self, proxy: Optional[dict]):
self.proxy = proxy
# Store these somewhere so we can un-monkey-patch
if not hasattr(socket, "_getaddrinfo"):
socket._getaddrinfo = socket.getaddrinfo
if proxy:
self.logger.info(f'setting proxy {proxy}')
# prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
else:
if sys.platform == 'win32':
# On Windows, socket.getaddrinfo takes a mutex, and might hold it for up to 10 seconds
# when dns-resolving. To speed it up drastically, we resolve dns ourselves, outside that lock.
# see #4421
socket.getaddrinfo = self._fast_getaddrinfo
else:
socket.getaddrinfo = socket._getaddrinfo
self.trigger_callback('proxy_set', self.proxy)
@staticmethod
def _fast_getaddrinfo(host, *args, **kwargs):
def needs_dns_resolving(host):
try:
ipaddress.ip_address(host)
return False # already valid IP
except ValueError:
pass # not an IP
if str(host) in ('localhost', 'localhost.',):
return False
return True
def resolve_with_dnspython(host):
addrs = []
# try IPv6
try:
answers = dns.resolver.query(host, dns.rdatatype.AAAA)
addrs += [str(answer) for answer in answers]
except dns.exception.DNSException as e:
pass
except BaseException as e:
_logger.info(f'dnspython failed to resolve dns (AAAA) with error: {e}')
# try IPv4
try:
answers = dns.resolver.query(host, dns.rdatatype.A)
addrs += [str(answer) for answer in answers]
except dns.exception.DNSException as e:
                # DNS failed for some reason, e.g. dns.resolver.NXDOMAIN; this is normal.
                # Simply report back failure, unless we already have some results.
if not addrs:
raise socket.gaierror(11001, 'getaddrinfo failed') from e
except BaseException as e:
# Possibly internal error in dnspython :( see #4483
_logger.info(f'dnspython failed to resolve dns (A) with error: {e}')
if addrs:
return addrs
# Fall back to original socket.getaddrinfo to resolve dns.
return [host]
addrs = [host]
if needs_dns_resolving(host):
addrs = resolve_with_dnspython(host)
list_of_list_of_socketinfos = [socket._getaddrinfo(addr, *args, **kwargs) for addr in addrs]
list_of_socketinfos = [item for lst in list_of_list_of_socketinfos for item in lst]
return list_of_socketinfos
@log_exceptions
async def set_parameters(self, net_params: NetworkParameters):
proxy = net_params.proxy
proxy_str = serialize_proxy(proxy)
host, port, protocol = net_params.host, net_params.port, net_params.protocol
server_str = serialize_server(host, port, protocol)
# sanitize parameters
try:
deserialize_server(serialize_server(host, port, protocol))
if proxy:
proxy_modes.index(proxy['mode']) + 1
int(proxy['port'])
except:
return
self.config.set_key('auto_connect', net_params.auto_connect, False)
self.config.set_key('oneserver', net_params.oneserver, False)
self.config.set_key('proxy', proxy_str, False)
self.config.set_key('server', server_str, True)
# abort if changes were not allowed by config
if self.config.get('server') != server_str \
or self.config.get('proxy') != proxy_str \
or self.config.get('oneserver') != net_params.oneserver:
return
async with self.restart_lock:
self.auto_connect = net_params.auto_connect
if self.proxy != proxy or self.protocol != protocol or self.oneserver != net_params.oneserver:
# Restart the network defaulting to the given server
await self._stop()
self.default_server = server_str
await self._start()
elif self.default_server != server_str:
await self.switch_to_interface(server_str)
else:
await self.switch_lagging_interface()
def _set_oneserver(self, oneserver: bool):
self.num_server = NUM_TARGET_CONNECTED_SERVERS if not oneserver else 0
self.oneserver = bool(oneserver)
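        # With oneserver enabled, num_server == 0, so the maintenance loop will
        # not queue any additional background interfaces beyond the main one.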
async def _switch_to_random_interface(self):
'''Switch to a random connected server other than the current one'''
servers = self.get_interfaces() # Those in connected state
if self.default_server in servers:
servers.remove(self.default_server)
if servers:
await self.switch_to_interface(random.choice(servers))
async def switch_lagging_interface(self):
'''If auto_connect and lagging, switch interface'''
if self.auto_connect and await self._server_is_lagging():
# switch to one that has the correct header (not height)
best_header = self.blockchain().read_header(self.get_local_height())
with self.interfaces_lock: interfaces = list(self.interfaces.values())
filtered = list(filter(lambda iface: iface.tip_header == best_header, interfaces))
if filtered:
chosen_iface = random.choice(filtered)
await self.switch_to_interface(chosen_iface.server)
async def switch_unwanted_fork_interface(self):
"""If auto_connect and main interface is not on preferred fork,
try to switch to preferred fork.
"""
if not self.auto_connect or not self.interface:
return
with self.interfaces_lock: interfaces = list(self.interfaces.values())
# try to switch to preferred fork
if self._blockchain_preferred_block:
pref_height = self._blockchain_preferred_block['height']
pref_hash = self._blockchain_preferred_block['hash']
if self.interface.blockchain.check_hash(pref_height, pref_hash):
return # already on preferred fork
filtered = list(filter(lambda iface: iface.blockchain.check_hash(pref_height, pref_hash),
interfaces))
if filtered:
self.logger.info("switching to preferred fork")
chosen_iface = random.choice(filtered)
await self.switch_to_interface(chosen_iface.server)
return
else:
self.logger.info("tried to switch to preferred fork but no interfaces are on it")
# try to switch to best chain
if self.blockchain().parent is None:
return # already on best chain
filtered = list(filter(lambda iface: iface.blockchain.parent is None,
interfaces))
if filtered:
self.logger.info("switching to best chain")
chosen_iface = random.choice(filtered)
await self.switch_to_interface(chosen_iface.server)
else:
# FIXME switch to best available?
self.logger.info("tried to switch to best chain but no interfaces are on it")
async def switch_to_interface(self, server: str):
"""Switch to server as our main interface. If no connection exists,
queue interface to be started. The actual switch will
happen when the interface becomes ready.
"""
self.default_server = server
old_interface = self.interface
old_server = old_interface.server if old_interface else None
# Stop any current interface in order to terminate subscriptions,
# and to cancel tasks in interface.group.
# However, for headers sub, give preference to this interface
# over unknown ones, i.e. start it again right away.
if old_server and old_server != server:
await self._close_interface(old_interface)
if len(self.interfaces) <= self.num_server:
self._start_interface(old_server)
if server not in self.interfaces:
self.interface = None
self._start_interface(server)
return
i = self.interfaces[server]
if old_interface != i:
self.logger.info(f"switching to {server}")
blockchain_updated = i.blockchain != self.blockchain()
self.interface = i
await i.group.spawn(self._request_server_info(i))
self.trigger_callback('default_server_changed')
self._set_status('connected')
self.trigger_callback('network_updated')
if blockchain_updated: self.trigger_callback('blockchain_updated')
async def _close_interface(self, interface):
if interface:
with self.interfaces_lock:
if self.interfaces.get(interface.server) == interface:
self.interfaces.pop(interface.server)
if interface.server == self.default_server:
self.interface = None
await interface.close()
@with_recent_servers_lock
def _add_recent_server(self, server):
# list is ordered
if server in self.recent_servers:
self.recent_servers.remove(server)
self.recent_servers.insert(0, server)
self.recent_servers = self.recent_servers[:NUM_RECENT_SERVERS]
self._save_recent_servers()
async def connection_down(self, interface: Interface):
        '''A connection to the server either went down or was never made.
We distinguish by whether it is in self.interfaces.'''
if not interface: return
server = interface.server
self.disconnected_servers.add(server)
if server == self.default_server:
self._set_status('disconnected')
await self._close_interface(interface)
self.trigger_callback('network_updated')
def get_network_timeout_seconds(self, request_type=NetworkTimeout.Generic) -> int:
if self.oneserver and not self.auto_connect:
return request_type.MOST_RELAXED
if self.proxy:
return request_type.RELAXED
return request_type.NORMAL
@ignore_exceptions # do not kill main_taskgroup
@log_exceptions
async def _run_new_interface(self, server):
interface = Interface(self, server, self.proxy)
# note: using longer timeouts here as DNS can sometimes be slow!
timeout = self.get_network_timeout_seconds(NetworkTimeout.Generic)
try:
await asyncio.wait_for(interface.ready, timeout)
except BaseException as e:
self.logger.info(f"couldn't launch iface {server} -- {repr(e)}")
await interface.close()
return
else:
with self.interfaces_lock:
assert server not in self.interfaces
self.interfaces[server] = interface
finally:
try: self.connecting.remove(server)
except KeyError: pass
if server == self.default_server:
await self.switch_to_interface(server)
self._add_recent_server(server)
self.trigger_callback('network_updated')
def check_interface_against_healthy_spread_of_connected_servers(self, iface_to_check) -> bool:
        # The main interface is exempt; this makes switching servers easier.
if iface_to_check.is_main_server():
return True
if not iface_to_check.bucket_based_on_ipaddress():
return True
# bucket connected interfaces
with self.interfaces_lock:
interfaces = list(self.interfaces.values())
if iface_to_check in interfaces:
interfaces.remove(iface_to_check)
buckets = defaultdict(list)
for iface in interfaces:
buckets[iface.bucket_based_on_ipaddress()].append(iface)
# check proposed server against buckets
onion_servers = buckets[BUCKET_NAME_OF_ONION_SERVERS]
if iface_to_check.is_tor():
# keep number of onion servers below half of all connected servers
if len(onion_servers) > NUM_TARGET_CONNECTED_SERVERS // 2:
return False
else:
bucket = iface_to_check.bucket_based_on_ipaddress()
if len(buckets[bucket]) > 0:
return False
return True
async def _init_headers_file(self):
b = blockchain.get_best_chain()
filename = b.path()
length = HEADER_SIZE * len(constants.net.CHECKPOINTS) * 2016
if not os.path.exists(filename) or os.path.getsize(filename) < length:
with open(filename, 'wb') as f:
if length > 0:
f.seek(length-1)
f.write(b'\x00')
util.ensure_sparse_file(filename)
with b.lock:
b.update_size()
def best_effort_reliable(func):
async def make_reliable_wrapper(self, *args, **kwargs):
for i in range(10):
iface = self.interface
# retry until there is a main interface
if not iface:
await asyncio.sleep(0.1)
continue # try again
# wait for it to be usable
iface_ready = iface.ready
iface_disconnected = iface.got_disconnected
await asyncio.wait([iface_ready, iface_disconnected], return_when=asyncio.FIRST_COMPLETED)
if not iface_ready.done() or iface_ready.cancelled():
await asyncio.sleep(0.1)
continue # try again
# try actual request
success_fut = asyncio.ensure_future(func(self, *args, **kwargs))
await asyncio.wait([success_fut, iface_disconnected], return_when=asyncio.FIRST_COMPLETED)
if success_fut.done() and not success_fut.cancelled():
if success_fut.exception():
try:
raise success_fut.exception()
except RequestTimedOut:
await iface.close()
await iface_disconnected
continue # try again
return success_fut.result()
# otherwise; try again
raise BestEffortRequestFailed('no interface to do request on... gave up.')
return make_reliable_wrapper
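    # ``best_effort_reliable`` wraps a request coroutine so it is retried (up
    # to 10 attempts): it waits for a usable main interface, restarts the
    # request if that interface disconnects or the request times out, and only
    # raises BestEffortRequestFailed once all attempts are exhausted.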
def catch_server_exceptions(func):
async def wrapper(self, *args, **kwargs):
try:
return await func(self, *args, **kwargs)
except aiorpcx.jsonrpc.CodeMessageError as e:
raise UntrustedServerReturnedError(original_exception=e) from e
return wrapper
@best_effort_reliable
@catch_server_exceptions
async def get_merkle_for_transaction(self, tx_hash: str, tx_height: int) -> dict:
if not is_hash256_str(tx_hash):
raise Exception(f"{repr(tx_hash)} is not a txid")
if not is_non_negative_integer(tx_height):
raise Exception(f"{repr(tx_height)} is not a block height")
return await self.interface.session.send_request('blockchain.transaction.get_merkle', [tx_hash, tx_height])
@best_effort_reliable
async def broadcast_transaction(self, tx, *, timeout=None) -> None:
if timeout is None:
timeout = self.get_network_timeout_seconds(NetworkTimeout.Urgent)
try:
out = await self.interface.session.send_request('blockchain.transaction.broadcast', [str(tx)], timeout=timeout)
# note: both 'out' and exception messages are untrusted input from the server
except (RequestTimedOut, asyncio.CancelledError, asyncio.TimeoutError):
raise # pass-through
except aiorpcx.jsonrpc.CodeMessageError as e:
self.logger.info(f"broadcast_transaction error [DO NOT TRUST THIS MESSAGE]: {repr(e)}")
raise TxBroadcastServerReturnedError(self.sanitize_tx_broadcast_response(e.message)) from e
except BaseException as e: # intentional BaseException for sanity!
self.logger.info(f"broadcast_transaction error2 [DO NOT TRUST THIS MESSAGE]: {repr(e)}")
send_exception_to_crash_reporter(e)
raise TxBroadcastUnknownError() from e
if out != tx.txid():
self.logger.info(f"unexpected txid for broadcast_transaction [DO NOT TRUST THIS MESSAGE]: {out} != {tx.txid()}")
raise TxBroadcastHashMismatch(_("Server returned unexpected transaction ID."))
@staticmethod
def sanitize_tx_broadcast_response(server_msg) -> str:
# Unfortunately, bitcoind and hence the Electrum protocol doesn't return a useful error code.
# So, we use substring matching to grok the error message.
# server_msg is untrusted input so it should not be shown to the user. see #4968
server_msg = str(server_msg)
server_msg = server_msg.replace("\n", r"\n")
# https://github.com/bitcoin/bitcoin/blob/cd42553b1178a48a16017eff0b70669c84c3895c/src/policy/policy.cpp
# grep "reason ="
policy_error_messages = {
r"version": _("Transaction uses non-standard version."),
r"tx-size": _("The transaction was rejected because it is too large (in bytes)."),
r"scriptsig-size": None,
r"scriptsig-not-pushonly": None,
r"scriptpubkey": None,
r"bare-multisig": None,
r"dust": _("Transaction could not be broadcast due to dust outputs."),
r"multi-op-return": _("The transaction was rejected because it contains multiple OP_RETURN outputs."),
}
for substring in policy_error_messages:
if substring in server_msg:
msg = policy_error_messages[substring]
return msg if msg else substring
# https://github.com/bitcoin/bitcoin/blob/cd42553b1178a48a16017eff0b70669c84c3895c/src/script/script_error.cpp
script_error_messages = {
r"Script evaluated without error but finished with a false/empty top stack element",
r"Script failed an OP_VERIFY operation",
r"Script failed an OP_EQUALVERIFY operation",
r"Script failed an OP_CHECKMULTISIGVERIFY operation",
r"Script failed an OP_CHECKSIGVERIFY operation",
r"Script failed an OP_NUMEQUALVERIFY operation",
r"Script is too big",
r"Push value size limit exceeded",
r"Operation limit exceeded",
r"Stack size limit exceeded",
r"Signature count negative or greater than pubkey count",
r"Pubkey count negative or limit exceeded",
r"Opcode missing or not understood",
r"Attempted to use a disabled opcode",
r"Operation not valid with the current stack size",
r"Operation not valid with the current altstack size",
r"OP_RETURN was encountered",
r"Invalid OP_IF construction",
r"Negative locktime",
r"Locktime requirement not satisfied",
r"Signature hash type missing or not understood",
r"Non-canonical DER signature",
r"Data push larger than necessary",
r"Only non-push operators allowed in signatures",
r"Non-canonical signature: S value is unnecessarily high",
r"Dummy CHECKMULTISIG argument must be zero",
r"OP_IF/NOTIF argument must be minimal",
r"Signature must be zero for failed CHECK(MULTI)SIG operation",
r"NOPx reserved for soft-fork upgrades",
r"Witness version reserved for soft-fork upgrades",
r"Public key is neither compressed or uncompressed",
r"Extra items left on stack after execution",
r"Witness program has incorrect length",
r"Witness program was passed an empty witness",
r"Witness program hash mismatch",
r"Witness requires empty scriptSig",
r"Witness requires only-redeemscript scriptSig",
r"Witness provided for non-witness script",
r"Using non-compressed keys in segwit",
r"Using OP_CODESEPARATOR in non-witness script",
r"Signature is found in scriptCode",
}
for substring in script_error_messages:
if substring in server_msg:
return substring
# https://github.com/bitcoin/bitcoin/blob/cd42553b1178a48a16017eff0b70669c84c3895c/src/validation.cpp
# grep "REJECT_"
# should come after script_error.cpp (due to e.g. non-mandatory-script-verify-flag)
validation_error_messages = {
r"coinbase",
r"tx-size-small",
r"non-final",
r"txn-already-in-mempool",
r"txn-mempool-conflict",
r"txn-already-known",
r"non-BIP68-final",
r"bad-txns-nonstandard-inputs",
r"bad-witness-nonstandard",
r"bad-txns-too-many-sigops",
r"mempool min fee not met",
r"min relay fee not met",
r"absurdly-high-fee",
r"too-long-mempool-chain",
r"bad-txns-spends-conflicting-tx",
r"insufficient fee",
r"too many potential replacements",
r"replacement-adds-unconfirmed",
r"mempool full",
r"non-mandatory-script-verify-flag",
r"mandatory-script-verify-flag-failed",
}
for substring in validation_error_messages:
if substring in server_msg:
return substring
# https://github.com/bitcoin/bitcoin/blob/cd42553b1178a48a16017eff0b70669c84c3895c/src/rpc/rawtransaction.cpp
# grep "RPC_TRANSACTION"
# grep "RPC_DESERIALIZATION_ERROR"
# https://github.com/bitcoin/bitcoin/blob/d7d7d315060620446bd363ca50f95f79d3260db7/src/util/error.cpp
rawtransaction_error_messages = {
r"Missing inputs",
r"transaction already in block chain",
r"Transaction already in block chain",
r"TX decode failed",
r"Peer-to-peer functionality missing or disabled",
r"Transaction rejected by AcceptToMemoryPool",
r"AcceptToMemoryPool failed",
}
for substring in rawtransaction_error_messages:
if substring in server_msg:
return substring
# https://github.com/bitcoin/bitcoin/blob/cd42553b1178a48a16017eff0b70669c84c3895c/src/consensus/tx_verify.cpp
# grep "REJECT_"
tx_verify_error_messages = {
r"bad-txns-vin-empty",
r"bad-txns-vout-empty",
r"bad-txns-oversize",
r"bad-txns-vout-negative",
r"bad-txns-vout-toolarge",
r"bad-txns-txouttotal-toolarge",
r"bad-txns-inputs-duplicate",
r"bad-cb-length",
r"bad-txns-prevout-null",
r"bad-txns-inputs-missingorspent",
r"bad-txns-premature-spend-of-coinbase",
r"bad-txns-inputvalues-outofrange",
r"bad-txns-in-belowout",
r"bad-txns-fee-outofrange",
}
for substring in tx_verify_error_messages:
if substring in server_msg:
return substring
# otherwise:
return _("Unknown error")
@best_effort_reliable
@catch_server_exceptions
async def request_chunk(self, height: int, tip=None, *, can_return_early=False):
if not is_non_negative_integer(height):
raise Exception(f"{repr(height)} is not a block height")
return await self.interface.request_chunk(height, tip=tip, can_return_early=can_return_early)
@best_effort_reliable
@catch_server_exceptions
async def get_transaction(self, tx_hash: str, *, timeout=None) -> str:
if not is_hash256_str(tx_hash):
raise Exception(f"{repr(tx_hash)} is not a txid")
return await self.interface.session.send_request('blockchain.transaction.get', [tx_hash],
timeout=timeout)
@best_effort_reliable
@catch_server_exceptions
async def get_history_for_scripthash(self, sh: str) -> List[dict]:
if not is_hash256_str(sh):
raise Exception(f"{repr(sh)} is not a scripthash")
return await self.interface.session.send_request('blockchain.scripthash.get_history', [sh])
@best_effort_reliable
@catch_server_exceptions
async def listunspent_for_scripthash(self, sh: str) -> List[dict]:
if not is_hash256_str(sh):
raise Exception(f"{repr(sh)} is not a scripthash")
return await self.interface.session.send_request('blockchain.scripthash.listunspent', [sh])
@best_effort_reliable
@catch_server_exceptions
async def get_balance_for_scripthash(self, sh: str) -> dict:
if not is_hash256_str(sh):
raise Exception(f"{repr(sh)} is not a scripthash")
return await self.interface.session.send_request('blockchain.scripthash.get_balance', [sh])
def blockchain(self) -> Blockchain:
interface = self.interface
if interface and interface.blockchain is not None:
self._blockchain = interface.blockchain
return self._blockchain
def get_blockchains(self):
out = {} # blockchain_id -> list(interfaces)
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
with self.interfaces_lock: interfaces_values = list(self.interfaces.values())
for chain_id, bc in blockchain_items:
r = list(filter(lambda i: i.blockchain==bc, interfaces_values))
if r:
out[chain_id] = r
return out
def _set_preferred_chain(self, chain: Blockchain):
height = chain.get_max_forkpoint()
header_hash = chain.get_hash(height)
self._blockchain_preferred_block = {
'height': height,
'hash': header_hash,
}
self.config.set_key('blockchain_preferred_block', self._blockchain_preferred_block)
async def follow_chain_given_id(self, chain_id: str) -> None:
bc = blockchain.blockchains.get(chain_id)
if not bc:
raise Exception('blockchain {} not found'.format(chain_id))
self._set_preferred_chain(bc)
# select server on this chain
with self.interfaces_lock: interfaces = list(self.interfaces.values())
interfaces_on_selected_chain = list(filter(lambda iface: iface.blockchain == bc, interfaces))
if len(interfaces_on_selected_chain) == 0: return
chosen_iface = random.choice(interfaces_on_selected_chain)
# switch to server (and save to config)
net_params = self.get_parameters()
host, port, protocol = deserialize_server(chosen_iface.server)
net_params = net_params._replace(host=host, port=port, protocol=protocol)
await self.set_parameters(net_params)
async def follow_chain_given_server(self, server_str: str) -> None:
# note that server_str should correspond to a connected interface
iface = self.interfaces.get(server_str)
if iface is None:
return
self._set_preferred_chain(iface.blockchain)
# switch to server (and save to config)
net_params = self.get_parameters()
host, port, protocol = deserialize_server(server_str)
net_params = net_params._replace(host=host, port=port, protocol=protocol)
await self.set_parameters(net_params)
def get_local_height(self):
return self.blockchain().height()
def export_checkpoints(self, path):
"""Run manually to generate blockchain checkpoints.
Kept for console use only.
"""
cp = self.blockchain().get_checkpoints()
with open(path, 'w', encoding='utf-8') as f:
f.write(json.dumps(cp, indent=4))
async def _start(self):
assert not self.main_taskgroup
self.main_taskgroup = main_taskgroup = SilentTaskGroup()
assert not self.interface and not self.interfaces
assert not self.connecting and not self.server_queue
self.logger.info('starting network')
self.disconnected_servers = set([])
self.protocol = deserialize_server(self.default_server)[2]
self.server_queue = queue.Queue()
self._set_proxy(deserialize_proxy(self.config.get('proxy')))
self._set_oneserver(self.config.get('oneserver', False))
self._start_interface(self.default_server)
async def main():
try:
await self._init_headers_file()
# note: if a task finishes with CancelledError, that
# will NOT raise, and the group will keep the other tasks running
async with main_taskgroup as group:
await group.spawn(self._maintain_sessions())
[await group.spawn(job) for job in self._jobs]
except Exception as e:
self.logger.exception('')
raise e
asyncio.run_coroutine_threadsafe(main(), self.asyncio_loop)
self.trigger_callback('network_updated')
def start(self, jobs: List=None):
self._jobs = jobs or []
asyncio.run_coroutine_threadsafe(self._start(), self.asyncio_loop)
@log_exceptions
async def _stop(self, full_shutdown=False):
self.logger.info("stopping network")
try:
await asyncio.wait_for(self.main_taskgroup.cancel_remaining(), timeout=2)
except (asyncio.TimeoutError, asyncio.CancelledError) as e:
self.logger.info(f"exc during main_taskgroup cancellation: {repr(e)}")
self.main_taskgroup = None # type: TaskGroup
self.interface = None # type: Interface
self.interfaces = {} # type: Dict[str, Interface]
self.connecting.clear()
self.server_queue = None
if not full_shutdown:
self.trigger_callback('network_updated')
def stop(self):
assert self._loop_thread != threading.current_thread(), 'must not be called from network thread'
fut = asyncio.run_coroutine_threadsafe(self._stop(full_shutdown=True), self.asyncio_loop)
try:
fut.result(timeout=2)
except (asyncio.TimeoutError, asyncio.CancelledError): pass
async def _ensure_there_is_a_main_interface(self):
if self.is_connected():
return
now = time.time()
# if auto_connect is set, try a different server
if self.auto_connect and not self.is_connecting():
await self._switch_to_random_interface()
# if auto_connect is not set, or still no main interface, retry current
if not self.is_connected() and not self.is_connecting():
if self.default_server in self.disconnected_servers:
if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
self.disconnected_servers.remove(self.default_server)
self.server_retry_time = now
else:
await self.switch_to_interface(self.default_server)
async def _maintain_sessions(self):
async def launch_already_queued_up_new_interfaces():
while self.server_queue.qsize() > 0:
server = self.server_queue.get()
await self.main_taskgroup.spawn(self._run_new_interface(server))
async def maybe_queue_new_interfaces_to_be_launched_later():
now = time.time()
for i in range(self.num_server - len(self.interfaces) - len(self.connecting)):
# FIXME this should try to honour "healthy spread of connected servers"
self._start_random_interface()
if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
self.logger.info('network: retrying connections')
self.disconnected_servers = set([])
self.nodes_retry_time = now
async def maintain_healthy_spread_of_connected_servers():
with self.interfaces_lock: interfaces = list(self.interfaces.values())
random.shuffle(interfaces)
for iface in interfaces:
if not self.check_interface_against_healthy_spread_of_connected_servers(iface):
self.logger.info(f"disconnecting from {iface.server}. too many connected "
f"servers already in bucket {iface.bucket_based_on_ipaddress()}")
await self._close_interface(iface)
async def maintain_main_interface():
await self._ensure_there_is_a_main_interface()
if self.is_connected():
if self.config.is_fee_estimates_update_required():
await self.interface.group.spawn(self._request_fee_estimates, self.interface)
while True:
try:
await launch_already_queued_up_new_interfaces()
await maybe_queue_new_interfaces_to_be_launched_later()
await maintain_healthy_spread_of_connected_servers()
await maintain_main_interface()
except asyncio.CancelledError:
# suppress spurious cancellations
group = self.main_taskgroup
if not group or group._closed:
raise
await asyncio.sleep(0.1)
@classmethod
async def _send_http_on_proxy(cls, method: str, url: str, params: str = None,
body: bytes = None, json: dict = None, headers=None,
on_finish=None, timeout=None):
async def default_on_finish(resp: ClientResponse):
resp.raise_for_status()
return await resp.text()
if headers is None:
headers = {}
if on_finish is None:
on_finish = default_on_finish
network = cls.get_instance()
proxy = network.proxy if network else None
async with make_aiohttp_session(proxy, timeout=timeout) as session:
if method == 'get':
async with session.get(url, params=params, headers=headers) as resp:
return await on_finish(resp)
elif method == 'post':
assert body is not None or json is not None, 'body or json must be supplied if method is post'
if body is not None:
async with session.post(url, data=body, headers=headers) as resp:
return await on_finish(resp)
elif json is not None:
async with session.post(url, json=json, headers=headers) as resp:
return await on_finish(resp)
else:
assert False
@classmethod
def send_http_on_proxy(cls, method, url, **kwargs):
network = cls.get_instance()
if network:
assert network._loop_thread is not threading.currentThread()
loop = network.asyncio_loop
else:
loop = asyncio.get_event_loop()
coro = asyncio.run_coroutine_threadsafe(cls._send_http_on_proxy(method, url, **kwargs), loop)
# note: _send_http_on_proxy has its own timeout, so no timeout here:
return coro.result()
# methods used in scripts
async def get_peers(self):
while not self.is_connected():
await asyncio.sleep(1)
session = self.interface.session
return parse_servers(await session.send_request('server.peers.subscribe'))
async def send_multiple_requests(self, servers: List[str], method: str, params: Sequence):
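        """Send the same request to several servers concurrently and return a dict
        mapping each reachable server to its response (or to the exception raised)."""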
responses = dict()
async def get_response(server):
interface = Interface(self, server, self.proxy)
timeout = self.get_network_timeout_seconds(NetworkTimeout.Urgent)
try:
await asyncio.wait_for(interface.ready, timeout)
except BaseException as e:
await interface.close()
return
try:
res = await interface.session.send_request(method, params, timeout=10)
except Exception as e:
res = e
responses[interface.server] = res
async with TaskGroup() as group:
for server in servers:
await group.spawn(get_response(server))
return responses
|
|
# -*- coding: utf-8 -*-
# TODO: This thing is a beast! Refactor.
import cmd
import os
import sys
import time
import traceback
from typing import Optional, List, Iterator, Tuple, Dict
from pyramids.categorization import Category
from pyramids.model import Model
from pyramids.rules.parse_rule import ParseRule
try:
from graphviz import Digraph
except ImportError:
Digraph = None
from pyramids.grammar import GrammarParser
from pyramids.trees import Parse, TreeNodeSet, ParseTreeUtils
try:
# noinspection PyPep8Naming
import cProfile as profile
except ImportError:
import profile
from pyramids.batching import Attempt, Result, ModelBatchController, FeedbackReceiver, Failure
from pyramids.config import ModelConfig
from pyramids.loader import ModelLoader
from pyramids.parsing import ParsingAlgorithm
from pyramids.generation import GenerationAlgorithm
from pyramids.sample_utils import Input, Target, SampleSet, SampleUtils
__author__ = 'Aaron Hosford'
__all__ = [
'ParserCmd',
'repl',
]
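# Module-level hook used by ParserCmd.do_profile, since only module-level
# functions can be profiled with profile.run(); see do_profile for details.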
function_to_profile = None
class ParserCmd(cmd.Cmd):
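    """Interactive command-line debugger for a pyramids parser model."""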
def __init__(self, model_loader: ModelLoader):
cmd.Cmd.__init__(self)
self._model_loader = model_loader
self._model = model_loader.load_model()
self.prompt = '% '
self._simple = True
self._show_broken = False
self._parser_state = None
self._parses = [] # type: List[Parse]
self._whole_parses = 0
self._parse_index = 0
self._fast = False
self._timeout_interval = 5
self._emergency_mode = False
self._benchmark_path = None
self._benchmark: Optional[SampleSet] = None
self._benchmark_dirty = False
self._benchmark_emergency_disambiguations = 0
self._benchmark_parse_timeouts = 0
self._benchmark_disambiguation_timeouts = 0
self._benchmark_time = 0.0
self._benchmark_tests_completed = 0
self._benchmark_update_time = time.time()
self._last_input_text = None
self.do_load()
@property
def model(self) -> Model:
return self._model
@property
def model_loader(self) -> ModelLoader:
return self._model_loader
@property
def max_parse_index(self) -> int:
if self._show_broken:
return len(self._parses) - 1 if self._parses else 0
return self._whole_parses - 1 if self._whole_parses else 0
@property
def parses_available(self) -> bool:
return bool(self._parser_state if self._show_broken else self._whole_parses)
@property
def last_input_text(self) -> Optional[str]:
return self._last_input_text
def onecmd(self, line: str) -> Optional[bool]:
# noinspection PyBroadException
try:
return cmd.Cmd.onecmd(self, line)
except Exception:
traceback.print_exc()
def precmd(self, line: str) -> str:
# Pre-processes command lines before they are executed.
line = line.strip()
if not line:
return line
command = line.split()[0]
if command == '+':
return 'good' + line[1:]
if command == '-':
return 'bad' + line[1:]
if command == '++':
return 'best' + line[2:]
if command == '--':
return 'worst' + line[2:]
return line
def postcmd(self, stop: Optional[bool], line: str) -> Optional[bool]:
# Post-processes command results before they are passed back to the
# command interpreter.
print('') # Print a blank line for clarity
return stop
def emptyline(self) -> Optional[bool]:
# Called when the user just hits enter with no input.
return self.do_next()
def default(self, line: str) -> Optional[bool]:
# Called when the command is unrecognized. By default, we assume
# it's a parse request.
return self.do_parse(line)
@staticmethod
def do_shell(line: str) -> None:
# Called when the command starts with "!".
try:
print(eval(line))
except SyntaxError:
exec(line)
def do_quit(self, line: str) -> Optional[bool]:
"""Save scoring features and exit the parser debugger."""
if line:
print("'quit' command does not accept arguments.")
return
self.do_save() # Save what we're doing first.
return True # Indicate we're ready to stop.
def do_exit(self, line: str) -> Optional[bool]:
"""Alias for quit."""
if line:
print("'exit' command does not accept arguments.")
return
return self.do_quit(line)
def do_bye(self, line: str) -> Optional[bool]:
"""Alias for quit."""
if line:
print("'bye' command does not accept arguments.")
return
return self.do_quit(line)
def do_done(self, line: str) -> Optional[bool]:
"""Alias for quit."""
if line:
print("'done' command does not accept arguments.")
return
return self.do_quit(line)
@staticmethod
def do_cls(_line: str) -> None:
"""Clears the screen."""
        if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
@staticmethod
def do_clear(_line: str) -> None:
"""Clears the screen."""
        if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
def do_standardize(self, line: str) -> None:
"""Standardizes the parser's files."""
if not line:
if self._model and self._model.config_info:
config_info = self._model.config_info
else:
config_info = self._model_loader.load_model_config()
else:
config_info = ModelConfig(line)
self._model_loader.standardize_model(config_info)
def do_short(self, line: str) -> None:
"""Causes parses to be printed in short form instead of long form."""
if line:
print("'short' command does not accept arguments.")
return
self._simple = True
print("Parses will now be printed in short form.")
def do_broken(self, line: str) -> None:
"""Causes parses that have more pieces or gaps than necessary to be
listed."""
if line:
print("'broken' command does not accept arguments.")
return
self._show_broken = True
print("Parses with more pieces or gaps than necessary will now be listed.")
def do_whole(self, line: str) -> None:
"""Causes only parses that have no more pieces or gaps than necessary to be listed."""
if line:
print("'whole' command does not accept arguments.")
return
self._show_broken = False
self._parse_index = min(self._parse_index, self.max_parse_index)
print("Only parses with no more pieces or gaps than necessary will now be listed.")
def do_long(self, line: str) -> None:
"""Causes parses to be printed in long form instead of short form."""
if line:
print("'long' command does not accept arguments.")
return
self._simple = False
print("Parses will now be printed in long form.")
def do_fast(self, line: str) -> None:
"""Causes parsing to stop as soon as a single parse is found."""
if line:
print("'fast' command does not accept arguments.")
return
self._fast = True
print("Parsing will now stop as soon as a single parse is found.")
def do_complete(self, line: str) -> None:
"""Causes parsing to continue until all parses have been identified."""
if line:
print("'complete' command does not accept arguments.")
return
self._fast = False
print("Parsing will now continue until all parses have been identified.")
def do_load(self, line: str = '') -> None:
"""Save scoring features and load a parser from the given configuration file."""
self.do_save()
if not line:
line = self._model.config_info.config_file_path
if not os.path.isfile(line):
print("File not found: " + line)
return
config_info = ModelConfig(line)
self._model = self._model_loader.load_model(config_info)
self._parser_state = None
self._benchmark = (SampleUtils.load(config_info.benchmark_file)
if os.path.isfile(config_info.benchmark_file)
else None)
self._benchmark_dirty = False
def do_reload(self, line: str = '') -> None:
"""Save scoring features and reload the last configuration file provided."""
if line:
print("'reload' command does not accept arguments.")
return
self.do_save()
self.do_load(self._model.config_info.config_file_path
if self._model and self._model.config_info
else '')
def do_save(self, line: str = '') -> None:
"""Save scoring features."""
if line:
print("'save' command does not accept arguments.")
return
if self._model is not None:
self._model_loader.save_scoring_features(self._model)
if self._benchmark_dirty:
SampleUtils.save(self._benchmark, self._model.config_info.benchmark_file)
self._benchmark_dirty = False
def do_discard(self, line: str = '') -> None:
"""Discard scoring features."""
if line:
print("'discard' command does not accept arguments.")
return
self._model_loader.load_scoring_features(self._model)
config_info = self._model.config_info
if os.path.isfile(config_info.benchmark_file):
self._benchmark = SampleUtils.load(config_info.benchmark_file)
else:
self._benchmark = None
self._benchmark_dirty = False
@staticmethod
def do_compare(line: str) -> None:
"""Compare two categories to determine if either contains the other."""
definitions = [definition for definition in line.split() if definition]
if len(definitions) == 0:
print("Nothing to compare.")
return
if len(definitions) == 1:
print("Nothing to compare with.")
return
categories = set()
for definition in definitions:
categories.add(GrammarParser.parse_category(definition,
offset=line.find(definition) + 1))
categories = sorted(categories, key=str)
for category1 in categories:
for category2 in categories:
if category1 is not category2:
contains_phrase = [" does not contain ", " contains "][category2 in category1]
print(str(category1) + contains_phrase + str(category2))
def do_timeout(self, line: str) -> None:
"""Set (or display) the timeout duration for parsing."""
if not line:
print("Parsing timeout duration is currently " + str(self._timeout_interval) +
" seconds")
return
try:
try:
# Only bother with this because an integer looks prettier
# when printed.
self._timeout_interval = int(line)
except ValueError:
self._timeout_interval = float(line)
except ValueError:
print("Timeout duration could not be set to this value.")
else:
print("Set parsing timeout duration to " + str(self._timeout_interval) + " seconds.")
def _do_parse(self, line: str, timeout: float, new_parser_state: bool = True,
restriction_category: Category = None, fast: bool = None,
emergency: bool = False) -> Tuple[bool, bool, bool]:
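        """Parse the line, populating self._parses, and return a tuple of flags:
        (emergency disambiguation used, parse timed out, disambiguation timed out)."""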
if fast is None:
fast = self._fast
if new_parser_state or self._parser_state is None:
self._parser_state = ParsingAlgorithm.new_parser_state(self._model)
parse = ParsingAlgorithm.parse(self._parser_state, line, fast, timeout, emergency)
parse_timed_out = time.time() >= timeout
emergency_disambiguation = False
if restriction_category:
parse = parse.restrict(restriction_category)
self._parses = [disambiguation
for (disambiguation, rank)
in parse.get_sorted_disambiguations(None, None, timeout)]
if not self._parses:
emergency_disambiguation = True
self._parses = [parse.disambiguate()]
disambiguation_timed_out = time.time() >= timeout
self._whole_parses = len([disambiguation
for disambiguation in self._parses
if ((len(disambiguation.parse_trees)
== len(self._parses[0].parse_trees)) and
(disambiguation.total_gap_size()
== self._parses[0].total_gap_size()))])
self._parse_index = 0
self._last_input_text = line
return emergency_disambiguation, parse_timed_out, disambiguation_timed_out
def _handle_parse(self, line: str, new_parser_state: bool = True,
restriction_category: Category = None, fast: bool = None,
emergency: bool = False) -> None:
"""Handle parsing on behalf of do_parse, do_as, and do_extend."""
if not line:
print("Nothing to parse.")
return
start_time = time.time()
timeout = start_time + self._timeout_interval
emergency_disambig, parse_timed_out, disambig_timed_out = \
self._do_parse(line, timeout, new_parser_state, restriction_category, fast, emergency)
end_time = time.time()
print('')
if parse_timed_out:
print("*** Parse timed out. ***")
if disambig_timed_out:
print("*** Disambiguation timed out. ***")
if emergency_disambig:
print("*** Using emergency (non-optimal) disambiguation. ***")
print('')
print("Total parse time: " + str(
round(end_time - start_time, 3)) + " seconds")
print("Total number of parses: " + str(len(self._parses)))
print("Total number of whole parses: " + str(self._whole_parses))
print('')
self.do_current()
def do_parse(self, line: str) -> None:
"""Parse an input string and print the highest-scoring parse for it."""
self._handle_parse(line, emergency=self._emergency_mode)
def do_as(self, line: str) -> None:
"""Parse an input string as a particular category and print the
highest-scoring parse for it."""
if not line:
print("No category specified.")
return
category_definition = line.split()[0]
category = GrammarParser.parse_category(category_definition)
line = line[len(category_definition):].strip()
self._handle_parse(line, restriction_category=category, emergency=self._emergency_mode)
def do_extend(self, line: str) -> None:
"""Extend the previous input string with additional text and print the
highest-scoring parse for the combined input strings."""
self._handle_parse(line, new_parser_state=False, emergency=self._emergency_mode)
def do_files(self, line: str) -> None:
"""List the word list files containing a given word."""
if not line:
print("No word specified.")
return
if len(line.split()) > 1:
print("Expected only one word.")
return
word = line.strip()
config_info = (self._model.config_info
if self._model and self._model.config_info
else self._model_loader.load_model_config())
found = False
for folder_path in config_info.word_sets_folders:
for filename in os.listdir(folder_path):
if not filename.lower().endswith('.ctg'):
continue
file_path = os.path.join(folder_path, filename)
with open(file_path) as word_set_file:
words = set(word_set_file.read().split())
if word in words:
print(repr(word) + " found in " + file_path + ".")
found = True
if not found:
print(repr(word) + " not found in any word list files.")
def do_add(self, line: str) -> None:
"""Adds a word to a given category's word list file."""
if not line:
print("No category specified.")
return
category_definition = line.split()[0]
category = GrammarParser.parse_category(category_definition)
words_to_add = sorted(set(line[len(category_definition):].strip().split()))
if not words_to_add:
print("No words specified.")
return
config_info = (self._model.config_info
if self._model and self._model.config_info
else self._model_loader.load_model_config())
found = False
for folder_path in config_info.word_sets_folders:
for filename in os.listdir(folder_path):
if not filename.lower().endswith('.ctg'):
continue
file_category = GrammarParser.parse_category(filename[:-4])
if file_category != category:
continue
file_path = os.path.join(folder_path, filename)
with open(file_path) as word_set_file:
words = set(word_set_file.read().split())
for w in words_to_add:
if w in words:
print(repr(w) + " was already in " + file_path + ".")
else:
print("Adding " + repr(w) + " to " + file_path + ".")
words.add(w)
with open(file_path, 'w') as word_set_file:
word_set_file.write('\n'.join(sorted(words)))
found = True
if not found:
for folder_path in config_info.word_sets_folders:
file_path = os.path.join(folder_path, str(category) + '.ctg')
print("Creating " + file_path + ".")
with open(file_path, 'w') as word_set_file:
word_set_file.write('\n'.join(sorted(words_to_add)))
break
else:
print("No word sets folder identified. Cannot add words.")
return
self.do_reload()
def do_remove(self, line: str) -> None:
"""Removes a word from a given category's word list file."""
if not line:
print("No category specified.")
return
category_definition = line.split()[0]
words_to_remove = set(line[len(category_definition):].strip().split())
if not words_to_remove:
print("No words specified.")
return
category = GrammarParser.parse_category(category_definition)
config_info = (self._model.config_info
if self._model and self._model.config_info
else self._model_loader.load_model_config())
found = set()
for folder_path in config_info.word_sets_folders:
for filename in os.listdir(folder_path):
if not filename.lower().endswith('.ctg'):
continue
file_category = GrammarParser.parse_category(filename[:-4])
if file_category != category:
continue
file_path = os.path.join(folder_path, filename)
with open(file_path) as words_file:
words = set(words_file.read().split())
for w in sorted(words_to_remove):
if w in words:
print("Removing " + repr(w) + " from " + file_path + ".")
words.remove(w)
found.add(w)
else:
print(repr(w) + " not found in " + file_path + ".")
if words:
with open(file_path, 'w') as words_file:
words_file.write('\n'.join(sorted(words)))
else:
print("Deleting empty word list file " + file_path + ".")
os.remove(file_path)
if words_to_remove - found:
print("No file(s) found containing the following words: " +
' '.join(repr(word) for word in sorted(words_to_remove - found)) + ".")
return
self.do_reload()
def do_profile(self, line: str) -> None:
"""Profiles the execution of a command, printing the profile statistics."""
# Only a function at the module level can be profiled. To get
# around this limitation, we define a temporary module-level
# function that calls the method we want to profile.
global function_to_profile
def _function_to_profile():
self.onecmd(line)
function_to_profile = _function_to_profile
profile.run('function_to_profile()')
def do_analyze(self, line: str) -> None:
"""Analyzes the last parse and prints statistics useful for debugging."""
if line:
print("'analyze' command does not accept arguments.")
return
if self._parser_state is None:
print("Nothing to analyze.")
return
print('Covered: ' + repr(self._parser_state.is_covered()))
rule_counts, rule_nodes = self._get_rule_map()
counter = 0
for rule in sorted(rule_counts, key=rule_counts.get, reverse=True):
print(str(rule) + " (" + str(rule_counts[rule]) + " nodes)")
for node_str in sorted(ParseTreeUtils.to_str(node, simplify=True)
for node in rule_nodes[rule]):
print(' ' + node_str.replace('\n', '\n '))
counter += node_str.count('\n') + 1
if counter >= 100:
break
print("Rules in waiting:")
rule_counts = {}
for node in self._parser_state.insertion_queue:
rule_counts[node.rule] = rule_counts.get(node.rule, 0) + 1
for rule in sorted(rule_counts, key=rule_counts.get, reverse=True):
print(str(rule) + " (" + str(rule_counts[rule]) + " nodes)")
def _get_rule_map(self) -> Tuple[Dict[ParseRule, int],
Dict[ParseRule, List[TreeNodeSet]]]:
"""Collect and count the nodes, organized by rule, from the latest parse's category map."""
cat_map = self._parser_state.category_map
rule_counts = {}
rule_nodes = {}
for start, category, end in cat_map:
for node_set in cat_map.iter_node_sets(start, category, end):
for node in node_set:
rule_counts[node.rule] = rule_counts.get(node.rule, 0) + 1
rule_nodes[node.rule] = rule_nodes.get(node.rule, []) + [node]
return rule_counts, rule_nodes
def do_links(self, line: str) -> None:
"""Display the semantic net links for the current parse."""
if line:
print("'links' command does not accept arguments.")
return
if self.parses_available:
parse = self._parses[self._parse_index]
for sentence in parse.get_parse_graphs():
print(sentence)
print('')
else:
print("No parses found.")
def do_reverse(self, line: str) -> None:
"""Display token sequences that produce the same semantic net links as the current parse."""
if line:
print("'reverse' command does not accept arguments.")
return
if self.parses_available:
parse = self._parses[self._parse_index]
start_time = time.time()
sentences = list(parse.get_parse_graphs())
results = [GenerationAlgorithm().generate(self._model, sentence)
for sentence in sentences]
end_time = time.time()
for sentence, result in zip(sentences, results):
print(sentence)
print('')
for tree in sorted(result, key=lambda t: t.tokens):
text = ' '.join(tree.tokens)
text = text[:1].upper() + text[1:]
for punctuation in ',.?!:;)]}':
text = text.replace(' ' + punctuation, punctuation)
for punctuation in '([{':
text = text.replace(punctuation + ' ', punctuation)
print('"' + text + '"')
print(tree)
print('')
print('')
print("Total time: " + str(end_time - start_time) + " seconds")
print('')
else:
print("No parses found.")
def do_current(self, line: str = '') -> None:
"""Reprint the current parse for the most recent input string."""
if line:
print("'current' command does not accept arguments.")
return
if self.parses_available:
parse = self._parses[self._parse_index]
gaps = parse.total_gap_size()
size = len(parse.parse_trees)
score, confidence = parse.get_weighted_score()
            print("Parse #" + str(self._parse_index + 1) + " of " +
str(self.max_parse_index + 1) + ":")
print(parse.to_str(self._simple))
print("Gaps: " + str(gaps))
print("Size: " + str(size))
print("Score: " + str(score))
print("Confidence: " + str(confidence))
print("Coverage: " + str(parse.coverage))
else:
print("No parses found.")
def do_next(self, line: str = '') -> None:
"""Print the next parse for the most recent input string."""
if line:
print("'next' command does not accept arguments.")
return
if self.parses_available:
if self._parse_index >= self.max_parse_index:
print("No more parses available.")
return
self._parse_index += 1
self.do_current()
def do_previous(self, line: str) -> None:
"""Print the previous parse for the most recent input string."""
if line:
            print("'previous' command does not accept arguments.")
return
if self.parses_available:
if self._parse_index <= 0:
print("No more parses available.")
return
self._parse_index -= 1
self.do_current()
def do_first(self, line: str) -> None:
"""Print the first parse for the most recent input string."""
if line:
print("'first' command does not accept arguments.")
return
self._parse_index = 0
self.do_current()
def do_last(self, line: str) -> None:
"""Print the last parse for the most recent input string."""
if line:
print("'last' command does not accept arguments.")
return
self._parse_index = self.max_parse_index
self.do_current()
def do_show(self, line: str) -> None:
"""Print the requested parse for the most recent input string."""
if len(line.split()) != 1:
print("'show' command requires a single integer argument.")
return
try:
index = int(line.strip())
except ValueError:
print("'show' command requires a single integer argument.")
return
        if not (index and (-(self.max_parse_index + 1) <= index <= self.max_parse_index + 1)):
print("Index out of range.")
return
if index < 0:
            index += self.max_parse_index + 1
else:
index -= 1
self._parse_index = index
self.do_current()
def do_gaps(self, line: str) -> None:
"""Print the gaps in the current parse."""
if line:
print("'gaps' command does not accept arguments.")
return
if self.parses_available:
parse = self._parses[self._parse_index]
print("Gaps: " + str(parse.total_gap_size()))
for start, end in parse.iter_gaps():
print(' ' + str(start) + ' to ' + str(end) + ': ' +
' '.join(parse.tokens[start:end]))
else:
print("No parses found.")
def do_best(self, line: str) -> None:
"""Adjust the score upward for the most recently printed parse until it
is higher than all others returned."""
if line:
print("'best' command does not accept arguments.")
return
if not self._parses:
print("No parses available for adjustment.")
return
best_parse = self._parses[self._parse_index]
for _ in range(100):
self._parses[self._parse_index].adjust_score(True)
ranks = {}
for parse in self._parses:
ranks[parse] = parse.get_rank()
self._parses.sort(key=ranks.get, reverse=True)
self._parse_index = [id(parse) for parse in self._parses].index(id(best_parse))
if (self._parses[0] is best_parse or
(len(self._parses[self._parse_index - 1].parse_trees)
!= len(best_parse.parse_trees)) or
(self._parses[self._parse_index - 1].total_gap_size()
!= best_parse.total_gap_size())):
break
if self._parse_index == 0:
print("Successfully made this parse the highest ranked.")
else:
print("Failed to make this parse the highest ranked.")
def do_worst(self, line: str) -> None:
"""Adjust the score downward for the most recently printed parse until
it is lower than all others returned."""
if line:
print("'worst' command does not accept arguments.")
return
if not self._parses:
print("No parses available for adjustment.")
return
worst_parse = self._parses[self._parse_index]
for _ in range(100):
self._parses[self._parse_index].adjust_score(False)
ranks = {}
for parse in self._parses:
ranks[parse] = parse.get_rank()
self._parses.sort(key=ranks.get, reverse=True)
self._parse_index = [id(parse) for parse in self._parses].index(id(worst_parse))
if (self._parses[-1] is worst_parse or
(len(self._parses[self._parse_index + 1].parse_trees)
!= len(worst_parse.parse_trees)) or
(self._parses[self._parse_index + 1].total_gap_size()
!= worst_parse.total_gap_size())):
break
if self._parse_index == self.max_parse_index:
print("Successfully made this parse the lowest ranked.")
else:
print("Failed to make this parse the lowest ranked.")
def do_good(self, line: str) -> None:
"""Adjust the score upward for the most recently printed parse."""
if line:
            print("'good' command does not accept arguments.")
return
if not self._parses:
print("No parses available for adjustment.")
return
self._parses[self._parse_index].adjust_score(True)
def do_bad(self, line: str) -> None:
"""Adjust the score downward for the most recently printed parse."""
if line:
            print("'bad' command does not accept arguments.")
return
if not self._parses:
print("No parses available for adjustment.")
return
self._parses[self._parse_index].adjust_score(False)
def _get_benchmark_parser_output(self) -> str:
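        """Return the current parse's semantic graphs as a sorted, newline-joined
        string (the format stored as a benchmark target)."""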
parse = self._parses[self._parse_index]
result = set()
for sentence in parse.get_parse_graphs():
result.add(str(sentence))
return '\n'.join(sorted(result))
def do_keep(self, line: str) -> None:
"""Save the current parse as benchmark case."""
if line:
print("'keep' command does not accept arguments.")
return
if not self._parses:
print("No parses available.")
return
assert self._benchmark is not None
self._benchmark[Input(self.last_input_text)] = Target(self._get_benchmark_parser_output())
self._benchmark_dirty = True
# noinspection PyUnusedLocal
def _test_attempt_iterator(self, text: Input, target: Target) -> Iterator[Tuple[Attempt, None]]:
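        """Parse the sample text once, update benchmark statistics, and yield a
        single (attempt, feedback) pair with no feedback receiver."""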
start_time = time.time()
emergency_disambig, parse_timed_out, disambig_timed_out = \
self._do_parse(text, start_time + self._timeout_interval)
end_time = time.time()
self._benchmark_emergency_disambiguations += int(emergency_disambig)
self._benchmark_parse_timeouts += int(parse_timed_out)
self._benchmark_disambiguation_timeouts += int(disambig_timed_out)
self._benchmark_time += end_time - start_time
yield Attempt(self._get_benchmark_parser_output()), None
# noinspection PyUnusedLocal
def _report_benchmark_progress(self, result: Result) -> None:
assert self._benchmark is not None
self._benchmark_tests_completed += 1
if time.time() >= self._benchmark_update_time + 1:
print("Benchmark " +
str(round((100 * self._benchmark_tests_completed / float(len(self._benchmark))),
ndigits=1)) +
"% complete...")
self._benchmark_update_time = time.time()
def do_benchmark(self, line: str) -> None:
"""Parse all benchmark samples and report statistics on them as a batch."""
if line:
print("'benchmark' command does not accept arguments.")
return
if not self._benchmark:
print("No benchmarking samples.")
return
self._benchmark_emergency_disambiguations = 0
self._benchmark_parse_timeouts = 0
self._benchmark_disambiguation_timeouts = 0
self._benchmark_time = 0.0
self._benchmark_tests_completed = 0
self._benchmark_update_time = time.time()
failures = [] # type: List[Failure]
tally = ModelBatchController(self._validate_output)\
.run_batch(self._benchmark, self._test_attempt_iterator,
self._report_benchmark_progress, failures.append)
print("")
if failures:
print('')
print("Failures:")
for failure in failures:
print(failure.input)
print(failure.first_attempt)
print(failure.target)
print('')
print("Score: " + str(round(100 * tally.avg_first_attempt_score, 1)) + "%")
print("Average Parse Time: " + str(round(self._benchmark_time / float(len(self._benchmark)),
ndigits=1)) +
' seconds per parse')
print("Samples Evaluated: " + str(len(self._benchmark)))
print("Emergency Disambiguations: " + str(self._benchmark_emergency_disambiguations) +
" (" + str(round(100 * self._benchmark_emergency_disambiguations
/ float(len(self._benchmark)), ndigits=1)) + '%)')
print("Parse Timeouts: " + str(self._benchmark_parse_timeouts) + " (" +
str(round(100 * self._benchmark_parse_timeouts / float(len(self._benchmark)),
ndigits=1)) + '%)')
print("Disambiguation Timeouts: " + str(self._benchmark_disambiguation_timeouts) + " (" +
str(round(100 * self._benchmark_disambiguation_timeouts / float(len(self._benchmark)),
ndigits=1)) + '%)')
def _scoring_function(self, target: float) -> None:
# NOTE: It is important that positive reinforcement not
# occur if the first try gives the right answer and
# the score is already >= .9; otherwise, it will
# throw off the relative scoring of other parses.
if (not target or self._parse_index or
self._parses[self._parse_index].get_weighted_score()[0] < .9):
self._parses[self._parse_index].adjust_score(target)
def _training_attempt_iterator(self, text: Input,
target: Target) -> Iterator[Tuple[Attempt, FeedbackReceiver]]:
print(text)
        # Restrict it to the correct category and start from there. This gives the
# parser a leg up when it's far from the correct response.
split_index = target.index(':')
target_category = GrammarParser.parse_category(target[:split_index])
start_time = time.time()
end_time = start_time + self._timeout_interval
emergency_disambig, parse_timed_out, disambig_timed_out = \
self._do_parse(text, end_time, restriction_category=target_category)
end_time = time.time()
self._benchmark_emergency_disambiguations += int(emergency_disambig)
self._benchmark_parse_timeouts += int(parse_timed_out)
self._benchmark_disambiguation_timeouts += int(disambig_timed_out)
self._benchmark_time += end_time - start_time
# We shouldn't keep going if there are no parses of the correct category. This most likely
# indicates a change in the grammar, not a problem with the model.
assert self.parses_available
while self._parse_index <= self.max_parse_index:
# (benchmark target, scoring function)
yield self._get_benchmark_parser_output(), self._scoring_function
self._parse_index += 1
        # Now try it without any help.
start_time = time.time()
end_time = start_time + self._timeout_interval
emergency_disambig, parse_timed_out, disambig_timed_out = self._do_parse(text, end_time)
end_time = time.time()
self._benchmark_emergency_disambiguations += int(emergency_disambig)
self._benchmark_parse_timeouts += int(parse_timed_out)
self._benchmark_disambiguation_timeouts += int(disambig_timed_out)
self._benchmark_time += end_time - start_time
if self.parses_available:
while self._parse_index <= self.max_parse_index:
# (benchmark target, scoring function)
yield self._get_benchmark_parser_output(), self._scoring_function
self._parse_index += 1
@staticmethod
def _validate_output(output_val: str, target: str) -> bool:
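        """An output matches the target if its structure is identical and its
        category is contained by the target's category."""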
if ':' not in output_val:
return False
split_index = target.index(':')
target_category = GrammarParser.parse_category(target[:split_index])
target_structure = target[split_index:]
split_index = output_val.index(':')
output_category = GrammarParser.parse_category(output_val[:split_index])
output_structure = output_val[split_index:]
return output_category in target_category and target_structure == output_structure
def do_train(self, line: str) -> None:
"""Automatically adjust scoring to improve benchmark statistics."""
if line:
print("'train' command does not accept arguments.")
return
if not self._benchmark:
print("No benchmarking samples.")
return
self._benchmark_emergency_disambiguations = 0
self._benchmark_parse_timeouts = 0
self._benchmark_disambiguation_timeouts = 0
self._benchmark_time = 0.0
self._benchmark_tests_completed = 0
self._benchmark_update_time = time.time()
failures = [] # type: List[Failure]
tally = ModelBatchController(self._validate_output)\
.run_batch(self._benchmark, self._training_attempt_iterator,
self._report_benchmark_progress, failures.append)
print("")
if failures:
print('')
print("Failures:")
for failure in failures:
print(failure.input)
print(failure.first_attempt)
print(failure.target)
print('')
print("Score: " + str(round(100 * tally.avg_first_attempt_score, 1)) + "%")
print("Average Parse Time: " + str(round(self._benchmark_time / float(len(self._benchmark)),
ndigits=1)) +
' seconds per parse')
print("Samples Evaluated: " + str(len(self._benchmark)))
print("Emergency Disambiguations: " + str(self._benchmark_emergency_disambiguations) +
" (" + str(round(100 * self._benchmark_emergency_disambiguations
/ float(len(self._benchmark)), ndigits=1)) + '%)')
print("Parse Timeouts: " + str(self._benchmark_parse_timeouts) + " (" +
str(round(100 * self._benchmark_parse_timeouts / float(len(self._benchmark)),
ndigits=1)) + '%)')
print("Disambiguation Timeouts: " + str(self._benchmark_disambiguation_timeouts) + " (" +
str(round(100 * self._benchmark_disambiguation_timeouts / float(len(self._benchmark)),
ndigits=1)) + '%)')
def do_training(self, line: str) -> None:
"""Repeatedly train and save until user hits Ctrl-C."""
if line:
print("'training' command does not accept arguments.")
return
if not self._benchmark:
print("No benchmarking samples.")
return
iteration = 0
while True:
try:
iteration += 1
print("Iteration:", iteration)
self.do_train('')
self.do_save('')
except KeyboardInterrupt:
self.do_save('')
break
def do_list(self, line: str) -> None:
"""List all benchmark samples."""
if line:
print("'list' command does not accept arguments.")
return
if not self._benchmark:
print("No benchmarking samples.")
return
print(str(len(self._benchmark)) + " recorded benchmark samples:")
max_tokens = 0
total_tokens = 0
for input_val in sorted(self._benchmark):
print(" " + input_val)
count = len(list(self._model.tokenizer.tokenize(input_val)))
total_tokens += count
if count > max_tokens:
max_tokens = count
print('')
print("Longest benchmark sample: " + str(max_tokens) + " tokens")
print("Average benchmark sample length: " +
str(round(total_tokens / float(len(self._benchmark)), ndigits=1)) + " tokens")
def do_visualize(self, line: str) -> None:
"""Visualize the most recent parse."""
if line:
print("'visualize' command does not accept arguments.")
return
if Digraph is None:
print('The graphviz library is not installed.')
return
if self.parses_available:
parse = self._parses[self._parse_index]
gv_graph = Digraph()
for graph in parse.get_parse_graphs():
with gv_graph.subgraph() as subgraph:
graph.visualize(subgraph)
gv_graph.view()
else:
print("No parses found.")
def do_emergency(self, line: str) -> None:
"""Set, clear, or display the emergency parsing mode flag."""
line = line.strip()
if line == 'on':
self._emergency_mode = True
print("Emergency mode is on.")
elif line == 'off':
self._emergency_mode = False
print("Emergency mode is off.")
elif not line:
print('Emergency mode is %s.' % ('on' if self._emergency_mode else 'off'))
else:
print('Unexpected argument: ' + line)
def repl(model_loader: ModelLoader) -> None:
"""Run the interactive command line interface to the parser."""
parser_cmd = ParserCmd(model_loader)
print('')
parser_cmd.cmdloop()
|
|
import sys
from cps.base import BaseClient
import base64
class Client(BaseClient):
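    """Command-line client for an XtreemFS-style storage service (DIR/MRC/OSD nodes)."""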
def info(self, service_id):
service = BaseClient.info(self, service_id)
print 'persistent:', service['persistent']
print 'osd_volume_size:', service['osd_volume_size']
nodes = self.callmanager(service['sid'], "list_nodes", False, {})
if 'error' in nodes:
return
print nodes
for role in ( 'dir', 'mrc', 'osd' ):
print "\n", role.upper(), "nodes:"
for node in nodes[role]:
params = { 'serviceNodeId': node }
details = self.callmanager(service['sid'], "get_node_info", False, params)
if 'error' in details:
print node, details['error']
continue
                if role == 'dir':
                    port = 30638
                elif role == 'osd':
                    port = 30640
                elif role == 'mrc':
                    port = 30636
print "http://%s:%s" % (details['serviceNode']['ip'], port)
def usage(self, cmdname):
BaseClient.usage(self, cmdname)
print " add_nodes serviceid count [cloud] # add the specified number of osd nodes"
print " remove_nodes serviceid count [cloud] # remove the specified number of osd nodes"
print " list_volumes serviceid"
print " create_volume serviceid vol_name"
print " delete_volume serviceid vol_name"
print " get_client_cert serviceid passphrase adminflag filename"
print " get_user_cert serviceid user group passphrase adminflag filename"
print " list_policies serviceid policy_type # [ osd_sel | replica_sel | replication ]"
print " set_policy serviceid policy_type vol_name policy [factor]"
print " toggle_persistent serviceid"
print " set_osd_size serviceid vol_size"
# TODO: add when there is more than one striping policy
# print " list_striping_policies serviceid"
# print " set_striping_policy serviceid vol_name policy width stripe-size"
def main(self, argv):
command = argv[1]
if command in ( 'add_nodes', 'remove_nodes', 'list_volumes',
'create_volume', 'delete_volume', 'get_client_cert',
'get_user_cert', 'list_policies', 'set_policy',
'toggle_persistent', 'set_osd_size' ):
try:
sid = int(argv[2])
except (IndexError, ValueError):
self.usage(argv[0])
sys.exit(0)
self.check_service_id(sid)
if command in ( 'add_nodes', 'remove_nodes' ):
try:
params = {
'osd': int(argv[3]),
#'dir': int(argv[4]),
#'mrc': int(argv[5])
}
except (IndexError, ValueError):
self.usage(argv[0])
sys.exit(0)
if len(argv) == 4:
params['cloud'] = 'default'
else:
params['cloud'] = argv[4]
# call the method
res = self.callmanager(sid, command, True, params)
if 'error' in res:
print res['error']
else:
print "Service", sid, "is performing the requested operation (%s)" % command
if command == 'create_volume':
try:
params = { 'volumeName': argv[3] }
except IndexError:
self.usage(argv[0])
sys.exit(0)
res = self.callmanager(sid, 'createVolume', True, params)
if 'error' in res:
print res['error']
else:
print "Volume", params['volumeName'], "created"
if command == 'delete_volume':
try:
params = { 'volumeName': argv[3] }
except IndexError:
self.usage(argv[0])
sys.exit(0)
res = self.callmanager(sid, 'deleteVolume', True, params)
if 'error' in res:
print res['error']
else:
print "Volume", params['volumeName'], "deleted"
if command == 'get_client_cert':
try:
params = { 'passphrase': argv[3],
'adminflag': str(argv[4]).lower() in ("yes", "y", "true", "t", "1") }
filename = argv[5]
except IndexError:
self.usage(argv[0])
sys.exit(0)
res = self.callmanager(sid, 'get_client_cert', True, params)
if 'error' in res:
print res['error']
else:
open(filename, 'wb').write(base64.b64decode(res['cert']))
if command == 'get_user_cert':
try:
params = { 'user': argv[3],
'group': argv[4],
'passphrase': argv[5],
'adminflag': str(argv[6]).lower() in ("yes", "y", "true", "t", "1") }
filename = argv[7]
except IndexError:
self.usage(argv[0])
sys.exit(0)
res = self.callmanager(sid, 'get_user_cert', True, params)
if 'error' in res:
print res['error']
else:
open(filename, 'wb').write(base64.b64decode(res['cert']))
if command == 'list_volumes':
res = self.callmanager(sid, 'listVolumes', False, {})
if 'error' in res:
print res['error']
else:
print res['volumes']
if command == 'list_policies':
try:
policy_type = argv[3]
except IndexError:
self.usage(argv[0])
sys.exit(0)
if policy_type == 'osd_sel':
command = 'list_osd_sel_policies'
elif policy_type == 'replica_sel':
command = 'list_replica_sel_policies'
elif policy_type == 'replication':
command = 'list_replication_policies'
else:
self.usage(argv[0])
sys.exit(0)
res = self.callmanager(sid, command, False, {})
if 'error' in res:
print res['error']
else:
print res['policies']
if command == 'set_policy':
try:
policy_type = argv[3]
except IndexError:
self.usage(argv[0])
sys.exit(0)
try:
params = { 'volumeName': argv[4], 'policy': argv[5] }
except IndexError:
self.usage(argv[0])
sys.exit(0)
if policy_type == 'osd_sel':
command = 'set_osd_sel_policy'
elif policy_type == 'replica_sel':
command = 'set_replica_sel_policy'
elif policy_type == 'replication':
command = 'set_replication_policy'
try:
# set_replication_policy requires a 'factor'
params['factor'] = argv[6]
except IndexError:
self.usage(argv[0])
sys.exit(0)
else:
self.usage(argv[0])
sys.exit(0)
res = self.callmanager(sid, command, True, params)
if 'error' in res:
print res['error']
else:
print res['stdout']
print "Policy set."
if command == 'toggle_persistent':
res = self.callmanager(sid, command, True, {})
print "This service is now",
if not res['persistent']:
print "not",
print "persistent"
if command == 'set_osd_size':
params = {}
try:
params['size'] = int(argv[3])
except (IndexError, ValueError):
self.usage(argv[0])
sys.exit(0)
res = self.callmanager(sid, command, True, params)
print "OSD volume size is now %s MBs" % res['osd_volume_size']
# if command in 'set_striping_policy':
# try:
# params = { 'volumeName': argv[3], 'policy': argv[4], 'width': argv[5], 'stripe-size': argv[6] }
# except IndexError:
# self.usage(argv[0])
# sys.exit(0)
#
# res = self.callmanager(sid, command, True, params)
# if 'error' in res:
# print res['error']
# else:
# print res['stdout']
# print "Policy set."
|
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import numbers
from collections import namedtuple, OrderedDict
from typing import List
import numpy as np
import pytest
import torch
from pytorch_lightning.utilities.apply_func import apply_to_collection, apply_to_collections, move_data_to_device
def test_recursive_application_to_collection():
ntc = namedtuple("Foo", ["bar"])
@dataclasses.dataclass
class Feature:
input_ids: torch.Tensor
segment_ids: np.ndarray
@dataclasses.dataclass
class ModelExample:
example_ids: List[str]
feature: Feature
label: torch.Tensor
to_reduce = {
"a": torch.tensor([1.0]), # Tensor
"b": [torch.tensor([2.0])], # list
"c": (torch.tensor([100.0]),), # tuple
"d": ntc(bar=5.0), # named tuple
"e": np.array([10.0]), # numpy array
"f": "this_is_a_dummy_str", # string
"g": 12.0, # number
"h": Feature(input_ids=torch.tensor([1.0, 2.0, 3.0]), segment_ids=np.array([4.0, 5.0, 6.0])), # dataclass
"i": ModelExample(
example_ids=["i-1", "i-2", "i-3"],
feature=Feature(input_ids=torch.tensor([1.0, 2.0, 3.0]), segment_ids=np.array([4.0, 5.0, 6.0])),
label=torch.tensor([7.0, 8.0, 9.0]),
), # nested dataclass
}
expected_result = {
"a": torch.tensor([2.0]),
"b": [torch.tensor([4.0])],
"c": (torch.tensor([200.0]),),
"d": ntc(bar=torch.tensor([10.0])),
"e": np.array([20.0]),
"f": "this_is_a_dummy_str",
"g": 24.0,
"h": Feature(input_ids=torch.tensor([2.0, 4.0, 6.0]), segment_ids=np.array([8.0, 10.0, 12.0])),
"i": ModelExample(
example_ids=["i-1", "i-2", "i-3"],
feature=Feature(input_ids=torch.tensor([2.0, 4.0, 6.0]), segment_ids=np.array([8.0, 10.0, 12.0])),
label=torch.tensor([14.0, 16.0, 18.0]),
),
}
reduced = apply_to_collection(to_reduce, (torch.Tensor, numbers.Number, np.ndarray), lambda x: x * 2)
assert isinstance(reduced, dict), " Type Consistency of dict not preserved"
assert all(x in reduced for x in to_reduce), "Not all entries of the dict were preserved"
assert all(
isinstance(reduced[k], type(expected_result[k])) for k in to_reduce
), "At least one type was not correctly preserved"
assert isinstance(reduced["a"], torch.Tensor), "Reduction Result of a Tensor should be a Tensor"
assert torch.allclose(expected_result["a"], reduced["a"]), "Reduction of a tensor does not yield the expected value"
assert isinstance(reduced["b"], list), "Reduction Result of a list should be a list"
assert all(
torch.allclose(x, y) for x, y in zip(reduced["b"], expected_result["b"])
), "At least one value of list reduction did not come out as expected"
assert isinstance(reduced["c"], tuple), "Reduction Result of a tuple should be a tuple"
assert all(
torch.allclose(x, y) for x, y in zip(reduced["c"], expected_result["c"])
), "At least one value of tuple reduction did not come out as expected"
assert isinstance(reduced["d"], ntc), "Type Consistency for named tuple not given"
assert isinstance(
reduced["d"].bar, numbers.Number
), "Failure in type promotion while reducing fields of named tuples"
assert reduced["d"].bar == expected_result["d"].bar
assert isinstance(reduced["e"], np.ndarray), "Type Promotion in reduction of numpy arrays failed"
assert reduced["e"] == expected_result["e"], "Reduction of numpy array did not yield the expected result"
assert isinstance(reduced["f"], str), "A string should not be reduced"
assert reduced["f"] == expected_result["f"], "String not preserved during reduction"
assert isinstance(reduced["g"], numbers.Number), "Reduction of a number should result in a number"
assert reduced["g"] == expected_result["g"], "Reduction of a number did not yield the desired result"
assert dataclasses.is_dataclass(reduced["h"]) and not isinstance(
reduced["h"], type
), "Reduction of a dataclass should result in a dataclass"
assert torch.allclose(
reduced["h"].input_ids, expected_result["h"].input_ids
), "Reduction of a dataclass did not yield the desired result"
assert np.allclose(
reduced["h"].segment_ids, expected_result["h"].segment_ids
), "Reduction of a dataclass did not yield the desired result"
assert dataclasses.is_dataclass(reduced["i"]) and not isinstance(
reduced["i"], type
), "Reduction of a dataclass should result in a dataclass"
assert dataclasses.is_dataclass(reduced["i"].feature) and not isinstance(
reduced["i"].feature, type
), "Reduction of a nested dataclass should result in a nested dataclass"
assert (
reduced["i"].example_ids == expected_result["i"].example_ids
), "Reduction of a nested dataclass did not yield the desired result"
assert torch.allclose(
reduced["i"].label, expected_result["i"].label
), "Reduction of a nested dataclass did not yield the desired result"
assert torch.allclose(
reduced["i"].feature.input_ids, expected_result["i"].feature.input_ids
), "Reduction of a nested dataclass did not yield the desired result"
assert np.allclose(
reduced["i"].feature.segment_ids, expected_result["i"].feature.segment_ids
), "Reduction of a nested dataclass did not yield the desired result"
# mapping support
reduced = apply_to_collection({"a": 1, "b": 2}, int, lambda x: str(x))
assert reduced == {"a": "1", "b": "2"}
reduced = apply_to_collection(OrderedDict([("b", 2), ("a", 1)]), int, lambda x: str(x))
assert reduced == OrderedDict([("b", "2"), ("a", "1")])
# custom mappings
class _CustomCollection(dict):
def __init__(self, initial_dict):
super().__init__(initial_dict)
to_reduce = _CustomCollection({"a": 1, "b": 2, "c": 3})
reduced = apply_to_collection(to_reduce, int, lambda x: str(x))
assert reduced == _CustomCollection({"a": "1", "b": "2", "c": "3"})
def test_apply_to_collection_include_none():
to_reduce = [1, 2, 3.4, 5.6, 7, (8, 9.1, {10: 10})]
def fn(x):
if isinstance(x, float):
return x
reduced = apply_to_collection(to_reduce, (int, float), fn)
assert reduced == [None, None, 3.4, 5.6, None, (None, 9.1, {10: None})]
reduced = apply_to_collection(to_reduce, (int, float), fn, include_none=False)
assert reduced == [3.4, 5.6, (9.1, {})]
def test_apply_to_collections():
to_reduce_1 = {"a": {"b": [1, 2]}, "c": 5}
to_reduce_2 = {"a": {"b": [3, 4]}, "c": 6}
def fn(a, b):
return a + b
# basic test
reduced = apply_to_collections(to_reduce_1, to_reduce_2, int, fn)
assert reduced == {"a": {"b": [4, 6]}, "c": 11}
with pytest.raises(KeyError):
# strict mode - if a key does not exist in both we fail
apply_to_collections({**to_reduce_2, "d": "foo"}, to_reduce_1, float, fn)
# multiple dtypes
reduced = apply_to_collections(to_reduce_1, to_reduce_2, (list, int), fn)
assert reduced == {"a": {"b": [1, 2, 3, 4]}, "c": 11}
# wrong dtype
reduced = apply_to_collections(to_reduce_1, to_reduce_2, (list, int), fn, wrong_dtype=int)
assert reduced == {"a": {"b": [1, 2, 3, 4]}, "c": 5}
# list takes precedence because it is the type of data1
reduced = apply_to_collections([1, 2, 3], [4], (int, list), fn)
assert reduced == [1, 2, 3, 4]
# different sizes
with pytest.raises(AssertionError, match="Sequence collections have different sizes"):
apply_to_collections([[1, 2], [3]], [4], int, fn)
def fn(a, b):
return a.keys() | b.keys()
# base case
reduced = apply_to_collections(to_reduce_1, to_reduce_2, dict, fn)
assert reduced == {"a", "c"}
# type conversion
to_reduce = [(1, 2), (3, 4)]
reduced = apply_to_collections(to_reduce, to_reduce, int, lambda *x: sum(x))
assert reduced == [(2, 4), (6, 8)]
# named tuple
foo = namedtuple("Foo", ["bar"])
to_reduce = [foo(1), foo(2), foo(3)]
reduced = apply_to_collections(to_reduce, to_reduce, int, lambda *x: sum(x))
assert reduced == [foo(2), foo(4), foo(6)]
# passing none
reduced1 = apply_to_collections([1, 2, 3], None, int, lambda x: x * x)
reduced2 = apply_to_collections(None, [1, 2, 3], int, lambda x: x * x)
assert reduced1 == reduced2 == [1, 4, 9]
reduced = apply_to_collections(None, None, int, lambda x: x * x)
assert reduced is None
@pytest.mark.parametrize("should_return", [False, True])
def test_wrongly_implemented_transferable_data_type(should_return):
class TensorObject:
def __init__(self, tensor: torch.Tensor, should_return: bool = True):
self.tensor = tensor
self.should_return = should_return
def to(self, device):
self.tensor.to(device)
# simulate a user forgets to return self
if self.should_return:
return self
tensor = torch.tensor(0.1)
obj = TensorObject(tensor, should_return)
assert obj == move_data_to_device(obj, torch.device("cpu"))
|
|
"""
PHYLIP wrapper for python
author: Matt Rasmussen
date: 2/4/2007
The following programs should be in your PATH
dnaml -- Maximum likelihood (nucleotide)
proml -- Maximum likelihood (peptide)
dnapars -- Parsimony (nucleotide)
protpars -- Parsimony (peptide)
neighbor -- Neighbor Joining
dnadist -- Distance estimation (nucleotide)
protdist -- Distance estimation (peptide)
seqboot -- Sequence bootstrapping
consense -- Consensus tree building
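
Example (a sketch; assumes this module is importable as `compbio.phylip`,
that `fasta.read_fasta` loads an aligned FASTA file into a FastaDict, and
that the binaries above are on PATH):

    from compbio import phylip, fasta
    seqs = fasta.read_fasta("aligned.fa")     # hypothetical input alignment
    tree = phylip.dnaml(seqs, verbose=False)  # maximum likelihood tree
    tree.write("out.tree")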
"""
# python imports
import os
import shutil
import sys
# rasmus imports
from rasmus import util
from rasmus import treelib
# compbio imports
from . import fasta
#=============================================================================
# managing input, output, and execution of PHYLIP-like programs
#
def validate_seqs(seqs):
"""Ensures sequences are all same size"""
sizes = map(len, seqs.values())
assert util.equal(* sizes), "sequences are not same length"
def check_temp_files(force=False):
"""Ensure PHYLIP tempfiles do not already exist in current directory"""
if force:
os.system("rm -f infile outfile outtree")
elif (os.path.isfile("infile") or
os.path.isfile("outfile") or
os.path.isfile("outtree")):
raise Exception(
"Can't run phylip, "
"'infile'/'outfile'/'outtree' is in current dir!")
def exec_phylip(cmd, args, verbose=False):
"""Execute a phylip-like program that expects arguments from stdin"""
util.logger("exec: %s" % cmd)
util.logger("args: %s" % args)
    if verbose:
assert os.system("""cat <<EOF | %s
%s""" % (cmd, args)) == 0
else:
assert os.system("""cat <<EOF | %s >/dev/null 2>&1
%s""" % (cmd, args)) == 0
def create_temp_dir(prefix="tmpphylip_"):
"""Create a temporary directory for executing PHYLIP"""
directory = os.path.split(util.tempfile(".", prefix, ""))[1]
os.mkdir(directory)
os.chdir(directory)
return directory
def cleanup_temp_dir(directory):
"""Exit and delete a temporary directory for executing PHYLIP"""
os.chdir("..")
assert "/" not in directory
assert os.path.isdir(directory)
util.deldir(directory)
def save_temp_dir(directory, newname):
"""Exit and save a temporary directory for executing PHYLIP"""
os.chdir("..")
assert "/" not in directory
assert os.path.isdir(directory)
if os.path.exists(newname):
util.deldir(newname)
os.rename(directory, newname)
#=============================================================================
# common input/output
#
def read_phylip_align(filename):
"""
Read a PHYLIP alignment. Can be interleaved or not.
    Returns a FastaDict object.
"""
infile = util.open_stream(filename)
seqs = fasta.FastaDict()
# read sequences and length
nseq, seqlen = infile.next().split()
nseq = int(nseq)
i = 0
first = True
names = []
# parse remaining lines
for line in infile:
line = line.rstrip()
if len(line) > 0:
if first:
name = line[:10].strip()
seq = line[10:].strip().replace(" ", "")
names.append(name)
else:
seq = line.strip().replace(" ", "")
name = names[i]
i += 1
if name not in seqs:
seqs[name] = seq
else:
seqs[name] += seq
else:
i = 0
first = False
return seqs
def write_phylip_align(out, seqs, strip_names=True):
"""
Write a PHYLIP alignment
"""
validate_seqs(seqs)
if strip_names:
print >>out, len(seqs), len(seqs.values()[0])
for i, name in enumerate(seqs.keys()):
print >>out, "%8s %s" % (phylip_padding(str(i), 8), seqs[name])
else:
print >>out, len(seqs), len(seqs.values()[0])
for i, name in enumerate(seqs.keys()):
            if len(name) <= 8:
print >>out, "%8s %s" % (name, seqs[name])
else:
print >>out, "%s %s" % (name, seqs[name])
return seqs.keys()
def read_logl(filename):
# parse logl
logl = None
for line in file(filename):
if line.startswith("Ln Likelihood ="):
logl = float(line.replace("Ln Likelihood =", ""))
assert logl is not None, "could not find logl in outfile"
return logl
def read_out_tree(filename, labels, iters=1):
infile = file(filename)
# skip any numbers that may appear on the first line
line = infile.readline()
if not line[0].isdigit():
# reopen file
infile = file(filename)
if iters == 1:
# parse output
tree = treelib.Tree()
tree.read_newick(infile)
        rename_tree_with_names(tree, labels)
return tree
else:
trees = []
for i in xrange(iters):
tree = treelib.Tree()
tree.read_newick(infile)
            rename_tree_with_names(tree, labels)
trees.append(tree)
infile.close()
return trees
def write_in_tree(filename, tree, labels):
tree2 = tree.copy()
rename_tree_with_ids(tree2, labels)
for node in tree2.nodes.values():
node.dist = 0
tree2.write(filename)
def write_boot_trees(filename, trees, counts=None):
out = util.open_stream(filename, "w")
if counts is None:
counts = [1] * len(trees)
for tree, count in zip(trees, counts):
for i in range(count):
out.write(tree.get_one_line_newick() + "\n")
def read_dist_matrix(filename):
infile = util.open_stream(filename)
size = int(util.read_word(infile))
mat = util.make_matrix(size, size)
names = []
"""
I must be able to read all of these matrices
11
_______0 0.00000 0.60810 0.46709 0.57693 0.67485 0.62632 0.64763
0.67709 0.70192 0.70949 0.68634
_______1 0.60810 0.00000 0.45522 0.49033 0.47842 0.47278 0.47224
0.47160 0.52655 0.50293 0.49679
_______2 0.46709 0.45522 0.00000 0.57586 0.57433 0.57300 0.56020
0.57763 0.54225 0.58722 0.58559
_______3 0.57693 0.49033 0.57586 0.00000 0.20713 0.20357 0.21252
0.46120 0.49081 0.50956 0.49340
_______4 0.67485 0.47842 0.57433 0.20713 0.00000 0.11210 0.13503
0.45915 0.46692 0.48844 0.47421
_______5 0.62632 0.47278 0.57300 0.20357 0.11210 0.00000 0.10037
0.45525 0.50959 0.48943 0.49588
_______6 0.64763 0.47224 0.56020 0.21252 0.13503 0.10037 0.00000
0.46078 0.49727 0.53117 0.51126
_______7 0.67709 0.47160 0.57763 0.46120 0.45915 0.45525 0.46078
0.00000 0.20980 0.21216 0.20121
_______8 0.70192 0.52655 0.54225 0.49081 0.46692 0.50959 0.49727
0.20980 0.00000 0.18209 0.13265
_______9 0.70949 0.50293 0.58722 0.50956 0.48844 0.48943 0.53117
0.21216 0.18209 0.00000 0.08389
______10 0.68634 0.49679 0.58559 0.49340 0.47421 0.49588 0.51126
0.20121 0.13265 0.08389 0.00000
As well as
11
_______0
_______1 0.60810
_______2 0.46709 0.45522
_______3 0.57693 0.49033 0.57586
_______4 0.67485 0.47842 0.57433 0.20713
_______5 0.62632 0.47278 0.57300 0.20357 0.11210
_______6 0.64763 0.47224 0.56020 0.21252 0.13503 0.10037
_______7 0.67709 0.47160 0.57763 0.46120 0.45915 0.45525 0.46078
_______8 0.70192 0.52655 0.54225 0.49081 0.46692 0.50959 0.49727
0.20980
_______9 0.70949 0.50293 0.58722 0.50956 0.48844 0.48943 0.53117
0.21216 0.18209
______10 0.68634 0.49679 0.58559 0.49340 0.47421 0.49588 0.51126
0.20121 0.13265 0.08389
"""
def isName(token):
try:
float(token)
return False
except:
return True
i = -1
j = 0
for line in infile:
row = line.split()
if len(row) == 0:
continue
if isName(row[0]):
names.append(row[0])
row = row[1:]
i += 1
j = 0
assert i != -1
for val in row:
if val == "nan" or val == "inf":
val = None
else:
val = float(val)
mat[i][j] = val
mat[j][i] = val
j += 1
# remove nasty infinities
top = util.max2(mat)
for i in range(size):
for j in range(size):
if mat[i][j] is None:
mat[i][j] = 10 * top
"""
for i in xrange(size):
names.append(util.read_word(infile))
for j in xrange(size):
mat[i][j] = float(util.read_word(infile))
"""
return names, mat
def write_dist_matrix(mat, labels=None, out=sys.stdout):
out = util.open_stream(out, "w")
out.write("%d\n" % len(mat))
for i in range(len(mat)):
if labels is None:
out.write("%8s " % phylip_padding(str(i)))
else:
out.write("%8s " % labels[i])
for val in mat[i]:
out.write("%10f " % val)
out.write("\n")
#=============================================================================
# common conversions
#
def phylip_padding(name, width=8):
return "_" * (width - len(name)) + name
def rename_tree_with_names(tree, labels):
names = tree.nodes.keys()
for name in names:
if tree.nodes[name].is_leaf():
num = int(name.replace("_", ""))
tree.rename(name, labels[num])
def rename_tree_with_ids(tree, labels):
lookup = util.list2lookup(labels)
names = tree.nodes.keys()
for name in names:
if tree.nodes[name].is_leaf():
tree.rename(name, phylip_padding(str(lookup[name])))
#=============================================================================
# tree building programs
#
def align2tree(prog, seqs, verbose=True, force=False, args=None,
usertree=None, saveOutput="",
bootiter=1,
seed=1,
jumble=1):
validate_seqs(seqs)
cwd = create_temp_dir()
util.tic("%s on %d of length %d" %
(prog, len(seqs), len(seqs.values()[0])))
# create input
labels = write_phylip_align(file("infile", "w"), seqs)
util.write_list(file("labels", "w"), labels)
# initialize default arguments
if args is None:
args = "y"
# create user tree if given
if usertree is not None:
write_in_tree("intree", usertree, labels)
args = "u\n" + args # add user tree option
# bootstrap alignment if needed
if bootiter > 1:
exec_phylip("seqboot", "r\n%d\ny\n%d" % (bootiter, seed), verbose)
os.rename("outfile", "infile")
# add bootstrap arguments
args = "m\nD\n%d\n%d\n%d\n%s" % (bootiter, seed, jumble, args)
# run phylip
exec_phylip(prog, args, verbose)
# check for PHYLIP GIVE UP
if is_phylip_give_up("outfile"):
tree = treelib.Tree()
tree.make_root()
# make star tree
for key in seqs:
tree.add_child(tree.root, treelib.TreeNode(key))
else:
# parse tree
if bootiter == 1:
tree = read_out_tree("outtree", labels, bootiter)
# parse likelihood
if prog in ["dnaml", "proml"]:
tree.data["logl"] = read_logl("outfile")
else:
trees = read_out_tree("outtree", labels, bootiter)
if saveOutput != "":
save_temp_dir(cwd, saveOutput)
else:
cleanup_temp_dir(cwd)
util.toc()
if bootiter == 1:
return tree
else:
return trees
def is_phylip_give_up(filename):
for line in file(filename):
if "0 trees in all found" in line:
return True
return False
def protpars(seqs, verbose=True, force=False, args="y",
usertree=None, saveOutput="", bootiter=1):
return align2tree("protpars", seqs,
verbose=verbose,
force=force,
args=args,
usertree=usertree,
saveOutput=saveOutput,
bootiter=bootiter)
def proml(seqs, verbose=True, force=False, args="y",
usertree=None, saveOutput="", bootiter=1):
return align2tree("proml", seqs,
verbose=verbose,
force=force,
args=args,
usertree=usertree,
saveOutput=saveOutput,
bootiter=bootiter)
def dnaml(seqs, verbose=True, force=False, args="y",
usertree=None, saveOutput="", bootiter=1):
return align2tree("dnaml", seqs,
verbose=verbose,
force=force,
args=args,
usertree=usertree,
saveOutput=saveOutput,
bootiter=bootiter)
def dnapars(seqs, verbose=True, force=False, args="y",
usertree=None, saveOutput="", bootiter=1):
return align2tree("dnapars", seqs,
verbose=verbose,
force=force,
args=args,
usertree=usertree,
saveOutput=saveOutput,
bootiter=bootiter)
def proml_treelk(aln, tree, verbose=True, force=False, args="u\ny"):
validate_seqs(aln)
cwd = create_temp_dir()
util.tic("proml on %d of length %d" % (len(aln), len(aln.values()[0])))
# create input
labels = write_phylip_align(file("infile", "w"), aln)
write_in_tree("intree", tree, labels)
# run phylip
exec_phylip("proml", args, verbose)
# parse logl
logl = read_logl("outfile")
# parse tree
tree = read_out_tree("outtree", labels)
cleanup_temp_dir(cwd)
util.toc()
return logl, tree
def draw_tree(tree, plotfile, verbose=False, args=None, saveOutput=""):
cwd = create_temp_dir()
fontfile = os.popen("which font4", "r").read().rstrip()
# create input
tree.write("intree")
# initialize default arguments
if args is None:
args = "%s\nv\nn\ny" % fontfile
# run phylip
exec_phylip("drawgram", args, verbose)
os.rename("plotfile", "../" + plotfile)
if saveOutput != "":
save_temp_dir(cwd, saveOutput)
else:
cleanup_temp_dir(cwd)
#=============================================================================
# distance estimation programs
#
def protdist(seqs, output=None, verbose=True, force=False, args=None):
if args is None:
args = "y"
validate_seqs(seqs)
cwd = create_temp_dir()
util.tic("protdist on %d of length %d" %
(len(seqs), len(seqs.values()[0])))
# create input
labels = write_phylip_align(file("infile", "w"), seqs)
# run phylip
exec_phylip("protdist", args, verbose)
util.toc()
# parse output
if output is not None:
os.rename("outfile", "../" + output)
cleanup_temp_dir(cwd)
return labels
else:
name, mat = read_dist_matrix("outfile")
cleanup_temp_dir(cwd)
return labels, mat
def dnadist(seqs, output=None, verbose=True, force=False, args=None):
if args is None:
args = "y"
validate_seqs(seqs)
cwd = create_temp_dir()
util.tic("dnadist on %d of length %d" % (len(seqs), len(seqs.values()[0])))
# create input
labels = write_phylip_align(file("infile", "w"), seqs)
# run phylip
exec_phylip("dnadist", args, verbose)
util.toc()
# parse output
if output is not None:
os.rename("outfile", "../" + output)
cleanup_temp_dir(cwd)
return labels
else:
name, mat = read_dist_matrix("outfile")
cleanup_temp_dir(cwd)
return labels, mat
def correct_dist_matrix(distmat, maxdist=40, fardist=None):
"""remove -1 and extremely large distances (>maxdist), replace them with
fatdist (defaults to maximum distance in matrix)"""
if fardist is None:
fardist = 0
for row in distmat:
for x in row:
if x < maxdist:
fardist = max(fardist, x)
distmat2 = []
for row in distmat:
distmat2.append([])
for x in row:
if x == -1 or x > maxdist:
distmat2[-1].append(fardist)
else:
distmat2[-1].append(x)
return distmat2
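# Hedged example: with the default maxdist=40, a -1 entry or a distance above
# 40 is replaced by the largest remaining finite distance:
#   correct_dist_matrix([[0.0, 1.2], [50.0, 0.0]]) -> [[0.0, 1.2], [1.2, 0.0]]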
def boot_neighbor(seqs, iters=100, seed=None, output=None,
verbose=True, force=False):
if seed is None:
seed = random.randint(0, 1000) * 2 + 1
validate_seqs(seqs)
cwd = create_temp_dir()
util.tic("boot_neighbor on %d of length %d" %
(len(seqs), len(seqs.values()[0])))
# create input
labels = write_phylip_align(file("infile", "w"), seqs)
exec_phylip("seqboot", "r\n%d\ny\n%d" % (iters, seed), verbose)
os.rename("outfile", "infile")
exec_phylip("protdist", "m\nd\n%d\ny" % iters, verbose)
os.rename("outfile", "infile")
exec_phylip("neighbor", "m\n%d\n%d\ny" % (iters, seed), verbose)
util.toc()
# read tree samples
if output is not None:
os.rename("outtree", "../" + output)
cleanup_temp_dir(cwd)
return labels
else:
trees = []
infile = file("outtree")
for i in xrange(iters):
tree = treelib.Tree()
tree.read_newick(infile)
rename_tree_with_names(tree, labels)
trees.append(tree)
infile.close()
cleanup_temp_dir(cwd)
return trees
def boot_proml(seqs, iters=100, seed=1, jumble=5, output=None,
verbose=True, force=False):
validate_seqs(seqs)
cwd = create_temp_dir()
util.tic("bootProml on %d of length %d" %
(len(seqs), len(seqs.values()[0])))
# create input
labels = write_phylip_align(file("infile", "w"), seqs)
exec_phylip("seqboot", "y\n%d" % seed, verbose)
os.rename("outfile", "infile")
exec_phylip("proml", "m\nD\n%d\n%d\n%d\ny" %
(iters, seed, jumble), verbose)
util.toc()
# read tree samples
if output is not None:
os.rename("outtree", "../" + output)
cleanup_temp_dir(cwd)
return labels
else:
trees = []
infile = file("outtree")
for i in xrange(iters):
tree = treelib.Tree()
tree.read_newick(infile)
rename_tree_with_names(tree, labels)
trees.append(tree)
infile.close()
cleanup_temp_dir(cwd)
return trees
def consense_from_file(intrees, verbose=True, args="y"):
# read all trees
trees = util.open_stream(intrees).readlines()
ntrees = len(trees)
cwd = create_temp_dir()
out = open("intree", "w")
for tree in trees:
out.write(tree)
out.close()
exec_phylip("consense", args, verbose)
tree = treelib.read_tree("outtree")
cleanup_temp_dir(cwd)
return tree, ntrees
def consense(trees, counts=None, verbose=True, args="y"):
cwd = create_temp_dir()
write_boot_trees("intree", trees, counts=counts)
exec_phylip("consense", args, verbose)
tree = treelib.Tree()
tree.read_newick("outtree")
cleanup_temp_dir(cwd)
return tree
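# Hedged usage sketch: given bootstrap trees (e.g. from boot_neighbor above),
#   cons = consense(trees)
# runs PHYLIP's consense with default answers ("y") and returns the consensus
# tree parsed from its "outtree" file.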
# testing
if __name__ == "__main__":
seqs = fasta.read_fasta("test/dna-align.fa")
del seqs["target"]
tree = protpars(seqs, force=True, verbose=False)
tree.write()
|
|
from __future__ import division, print_function, absolute_import
import tensorflow as tf
import functools
import numpy as np
from selfsup import extra
from selfsup.moving_averages import ExponentialMovingAverageExtended
from selfsup.util import DummyDict
from selfsup import ops, caffe
import sys
def conv(input, kernel, biases, k_h, k_w, c_o, s_h, s_w, padding="VALID", group=1):
'''From https://github.com/ethereon/caffe-tensorflow
'''
c_i = input.get_shape()[-1]
assert c_i%group==0
assert c_o%group==0
convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
if group==1:
conv = convolve(input, kernel)
else:
input_groups = tf.split(input, group, 3)
kernel_groups = tf.split(kernel, group, 3)
output_groups = [convolve(i, k) for i,k in zip(input_groups, kernel_groups)]
conv = tf.concat(output_groups, 3)
return tf.reshape(tf.nn.bias_add(conv, biases), [-1]+conv.get_shape().as_list()[1:])
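# Hedged usage sketch (shapes are assumptions chosen to match conv2 below):
# with group=2 the input and the kernel are each split in half along the
# channel axis and the two independent convolutions are concatenated,
# mirroring Caffe's grouped layers.
#   W = tf.get_variable('demo/weights', [5, 5, 48, 256])
#   b = tf.get_variable('demo/biases', [256])
#   y = conv(pool1, W, b, 5, 5, 256, 1, 1, padding='SAME', group=2)
# where pool1 is assumed to be a [N, H, W, 96] tensor.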
def init_batch_norm_vars(name, sh):
bn_mean = tf.get_variable(name + '/bn_mean', shape=sh, trainable=False,
dtype=tf.float32, initializer=tf.constant_initializer(0.0))
bn_var = tf.get_variable(name + '/bn_var', shape=sh, trainable=False,
dtype=tf.float32, initializer=tf.constant_initializer(1.0))
return bn_mean, bn_var
def batch_norm(z, global_step, phase_test, name, bn_mean=None, bn_var=None):
mm, vv = extra.moments(z, list(range(z.get_shape().ndims-1)), keep_dims=False, name=name + '_moments')
beta = 0.0
gamma = 1.0
sh = mm.get_shape().as_list()[-1:]
if bn_mean is None and bn_var is None:
bn_mean, bn_var = init_batch_norm_vars(name, sh)
alpha0 = 0.999
N = 1000
alpha = tf.to_float(tf.minimum(global_step, N) / N * alpha0)
def mean_var_train():
apply_op_mm = tf.assign(bn_mean, bn_mean * alpha + mm * (1 - alpha))
apply_op_vv = tf.assign(bn_var, bn_var * alpha + vv * (1 - alpha))
with tf.control_dependencies([apply_op_mm, apply_op_vv]):
return tf.identity(mm), tf.identity(vv)
#return tf.identity(bn_mean), tf.identity(bn_var)
def mean_var_test():
return bn_mean, bn_var
mean, var = tf.cond(tf.logical_not(phase_test),
mean_var_train,
mean_var_test)
z = tf.nn.batch_normalization(z, mean, var, beta, gamma, 1e-5)
return z
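# Hedged usage sketch (placeholder names are assumptions):
#   phase_test = tf.placeholder(tf.bool, [], name='phase_test')
#   global_step = tf.Variable(0, trainable=False)
#   z_bn = batch_norm(z, global_step, phase_test, name='conv1')
# While phase_test is False the batch moments are used and the stored running
# mean/variance are updated (alpha ramps towards alpha0 over the first N
# steps); while phase_test is True the stored running statistics are used.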
def build_network(x, info=DummyDict(), parameters={},
phase_test=None, convolutional=False, final_layer=True,
activation=tf.nn.relu,
well_behaved_size=False,
global_step=None,
use_lrn=True, prefix='', use_dropout=True):
# Set up AlexNet
#conv = functools.partial(alex_conv, size=3, parameters=parameters,
#info=info, pre_adjust_batch_norm=pre_adjust_batch_norm)
#pool = functools.partial(ops.max_pool, info=info)
if use_dropout:
dropout = functools.partial(ops.dropout, phase_test=phase_test, info=info)
else:
def dropout(x, *args, **kwargs):
return x
def add_info(name, z, pre=None, info=DummyDict()):
info['activations'][name] = z
if info['config'].get('save_pre'):
info['activations']['pre:' + name] = pre
if info.get('scale_summary'):
with tf.name_scope('activation'):
tf.summary.scalar('activation/' + name, tf.sqrt(tf.reduce_mean(z**2)))
if activation is None:
activation = lambda x: x
W_init = tf.contrib.layers.xavier_initializer_conv2d()
W_init_fc = tf.contrib.layers.xavier_initializer()
b_init = tf.constant_initializer(0.0)
k_h = 11; k_w = 11; c_o = 96; s_h = 4; s_w = 4; padding='VALID'
if convolutional or well_behaved_size:
padding = 'SAME'
name = prefix + 'conv1'
with tf.variable_scope(name):
sh = [k_h, k_w, x.get_shape().as_list()[3], c_o]
conv1W = tf.get_variable('weights', sh, dtype=tf.float32,
initializer=W_init)
conv1b = tf.get_variable('biases', [c_o], dtype=tf.float32,
initializer=b_init)
if 'weights' in info:
info['weights'][name + ':weights'] = conv1W
info['weights'][name + ':biases'] = conv1b
conv1 = conv(x, conv1W, conv1b, k_h, k_w, c_o, s_h, s_w, padding=padding, group=1)
conv1 = batch_norm(conv1, global_step=global_step, phase_test=phase_test, name=name)
pre = conv1
conv1 = activation(conv1)
add_info(name, conv1, pre=pre, info=info)
c_o_old = c_o
#lrn1
#lrn(2, 2e-05, 0.75, name='norm1')
radius = 2; alpha = 2e-05; beta = 0.75; bias = 1.0
if use_lrn:
lrn1 = tf.nn.local_response_normalization(conv1,
depth_radius=radius,
alpha=alpha,
beta=beta,
bias=bias)
info['activations']['lrn1'] = lrn1
else:
lrn1 = conv1
#maxpool1
#max_pool(3, 3, 2, 2, padding='VALID', name='pool1')
k_h = 3; k_w = 3; s_h = 2; s_w = 2; padding = 'VALID'
if convolutional or well_behaved_size:
padding = 'SAME'
maxpool1 = tf.nn.max_pool(lrn1, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding)
info['activations']['maxpool1'] = maxpool1
#conv2
#conv(5, 5, 256, 1, 1, group=2, name='conv2')
k_h = 5; k_w = 5; c_o = 256; s_h = 1; s_w = 1; group = 2
#conv2W = tf.Variable(net_data["conv2"][0])
#conv2b = tf.Variable(net_data["conv2"][1])
name = prefix + 'conv2'
with tf.variable_scope(name):
sh = [k_h, k_w, c_o_old // group, c_o]
conv2W = tf.get_variable('weights', sh, dtype=tf.float32,
initializer=W_init)
conv2b = tf.get_variable('biases', [c_o], dtype=tf.float32,
initializer=b_init)
if 'weights' in info:
info['weights'][name + ':weights'] = conv2W
info['weights'][name + ':biases'] = conv2b
conv2 = conv(maxpool1, conv2W, conv2b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group)
conv2 = batch_norm(conv2, global_step=global_step, phase_test=phase_test, name=name)
pre = conv2
conv2 = activation(conv2)
add_info(name, conv2, pre=pre, info=info)
#lrn2
#lrn(2, 2e-05, 0.75, name='norm2')
radius = 2; alpha = 2e-05; beta = 0.75; bias = 1.0
if use_lrn:
lrn2 = tf.nn.local_response_normalization(conv2,
depth_radius=radius,
alpha=alpha,
beta=beta,
bias=bias)
else:
lrn2 = conv2
#maxpool2
#max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
k_h = 3; k_w = 3; s_h = 2; s_w = 2; padding = 'VALID'
if convolutional or well_behaved_size:
padding = 'SAME'
maxpool2 = tf.nn.max_pool(lrn2, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding)
info['activations'][prefix+'pool2'] = maxpool2
c_o_old = c_o
k_h = 3; k_w = 3; c_o = 384; s_h = 1; s_w = 1; group = 1
name = prefix + 'conv3'
with tf.variable_scope(name):
sh = [k_h, k_w, c_o_old // group, c_o]
conv3W = tf.get_variable('weights', sh, dtype=tf.float32,
initializer=W_init)
conv3b = tf.get_variable('biases', [c_o], dtype=tf.float32,
initializer=b_init)
if 'weights' in info:
info['weights'][name + ':weights'] = conv3W
info['weights'][name + ':biases'] = conv3b
conv3 = conv(maxpool2, conv3W, conv3b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group)
conv3 = batch_norm(conv3, global_step=global_step, phase_test=phase_test, name=name)
pre = conv3
conv3 = activation(conv3)
add_info(name, conv3, pre=pre, info=info)
c_o_old = c_o
k_h = 3; k_w = 3; c_o = 384; s_h = 1; s_w = 1; group = 2
name = prefix + 'conv4'
with tf.variable_scope(name):
sh = [k_h, k_w, c_o_old // group, c_o]
conv4W = tf.get_variable('weights', sh, dtype=tf.float32,
initializer=W_init)
conv4b = tf.get_variable('biases', [c_o], dtype=tf.float32,
initializer=b_init)
if 'weights' in info:
info['weights'][name + ':weights'] = conv4W
info['weights'][name + ':biases'] = conv4b
conv4 = conv(conv3, conv4W, conv4b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group)
conv4 = batch_norm(conv4, global_step=global_step, phase_test=phase_test, name=name)
pre = conv4
conv4 = activation(conv4)
add_info(name, conv4, pre=pre, info=info)
c_o_old = c_o
k_h = 3; k_w = 3; c_o = 256; s_h = 1; s_w = 1; group = 2
name = prefix + 'conv5'
with tf.variable_scope(name):
sh = [k_h, k_w, c_o_old // group, c_o]
conv5W = tf.get_variable('weights', sh, dtype=tf.float32,
initializer=W_init)
conv5b = tf.get_variable('biases', [c_o], dtype=tf.float32,
initializer=b_init)
if 'weights' in info:
info['weights'][name + ':weights'] = conv5W
info['weights'][name + ':biases'] = conv5b
conv5 = conv(conv4, conv5W, conv5b, k_h, k_w, c_o, s_h, s_w, padding="SAME", group=group)
conv5 = batch_norm(conv5, global_step=global_step, phase_test=phase_test, name=name)
pre = conv5
conv5 = activation(conv5)
add_info(name, conv5, pre=pre, info=info)
#maxpool5
#max_pool(3, 3, 2, 2, padding='VALID', name='pool5')
k_h = 3; k_w = 3; s_h = 2; s_w = 2; padding = 'VALID'
if convolutional or well_behaved_size:
padding = 'SAME'
maxpool5 = tf.nn.max_pool(conv5, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding)
info['activations']['pool5'] = maxpool5
c_o_old = np.prod(maxpool5.get_shape().as_list()[1:])
channels = maxpool5.get_shape().as_list()[-1]
info['activations'][prefix+'conv1'] = conv1
info['activations'][prefix+'conv2'] = conv2
info['activations'][prefix+'conv3'] = conv3
info['activations'][prefix+'conv4'] = conv4
info['activations'][prefix+'conv5'] = conv5
# Set up weights and biases for fc6/fc7, so that if they are not used, they
# are still set up (otherwise reuse=True will fail)
name = prefix + 'fc6'
with tf.variable_scope(name):
c_o = 4096
sh = [6, 6, channels, c_o]
fc6W = tf.get_variable('weights', sh, dtype=tf.float32,
initializer=W_init_fc)
fc6b = tf.get_variable('biases', [c_o], dtype=tf.float32,
initializer=b_init)
fc6_bn_mean, fc6_bn_var = init_batch_norm_vars(name, [c_o])
if 'weights' in info:
info['weights'][name + ':weights'] = fc6W
info['weights'][name + ':biases'] = fc6b
name = prefix + 'fc7'
with tf.variable_scope(name):
c_old_o = c_o
c_o = 4096
sh = [1, 1, c_old_o, c_o]
fc7W = tf.get_variable('weights', sh, dtype=tf.float32,
initializer=W_init_fc)
fc7b = tf.get_variable('biases', [c_o], dtype=tf.float32,
initializer=b_init)
fc7_bn_mean, fc7_bn_var = init_batch_norm_vars(name, [c_o])
if 'weights' in info:
info['weights'][name + ':weights'] = fc7W
info['weights'][name + ':biases'] = fc7b
if maxpool5.get_shape().as_list()[1:3] != [6, 6] and not convolutional:
print('RETURNING PRE-FULLY-CONNECTED')
return maxpool5
if convolutional:
name = prefix + 'fc6'
#fc6 = tf.nn.relu_layer(tf.reshape(maxpool5, [-1, int(np.prod(maxpool5.get_shape()[1:]))]), fc6W, fc6b)
#fc6 = tf.nn.relu_layer(tf.reshape(maxpool5, [-1, int(np.prod(maxpool5.get_shape()[1:]))]), fc6W, fc6b)
conv6 = tf.nn.conv2d(maxpool5, fc6W, strides=[1, 1, 1, 1], padding='SAME')
fc6_in = tf.nn.bias_add(conv6, fc6b)
fc6 = fc6_in
fc6 = batch_norm(fc6, global_step=global_step, phase_test=phase_test, name=name,
bn_mean=fc6_bn_mean, bn_var=fc6_bn_var)
pre = fc6
fc6 = tf.nn.relu(fc6)
add_info(name+':nodropout', fc6, pre=fc6_in, info=info)
fc6 = dropout(fc6, 0.5)
add_info(name, fc6, pre=pre, info=info)
c_o_old = c_o
c_o = 4096
name = prefix + 'fc7'
#fc7 = tf.nn.relu_layer(fc6, fc7W, fc7b)
conv7 = tf.nn.conv2d(fc6, fc7W, strides=[1, 1, 1, 1], padding='SAME')
fc7_in = tf.nn.bias_add(conv7, fc7b)
fc7 = fc7_in
fc7 = batch_norm(fc7, global_step=global_step, phase_test=phase_test, name=name,
bn_mean=fc7_bn_mean, bn_var=fc7_bn_var)
pre = fc7
fc7 = tf.nn.relu(fc7)
add_info(name+':nodropout', fc7, pre=fc7_in, info=info)
fc7 = dropout(fc7, 0.5)
add_info(name, fc7, pre=pre, info=info)
c_o_old = c_o
if final_layer:
c_o = 1000
name = prefix + 'fc8'
with tf.variable_scope(name):
sh = [1, 1, c_o_old, c_o]
fc8W = tf.get_variable('weights', sh, dtype=tf.float32,
initializer=W_init_fc)
fc8b = tf.get_variable('biases', [c_o], dtype=tf.float32,
initializer=b_init)
if 'weights' in info:
info['weights'][name + ':weights'] = fc8W
info['weights'][name + ':biases'] = fc8b
#fc8 = tf.nn.xw_plus_b(fc7, fc8W, fc8b)
conv8 = tf.nn.conv2d(fc7, fc8W, strides=[1, 1, 1, 1], padding='SAME')
fc8 = tf.nn.bias_add(conv8, fc8b)
info['activations'][name] = fc8
else:
fc8 = fc7
else:
sh_fc = [c_o_old, c_o]
fc6W = tf.reshape(fc6W, sh_fc)
name = prefix + 'fc6'
maxpool5_flat = tf.reshape(maxpool5, [-1, int(np.prod(maxpool5.get_shape()[1:]))])
#fc6 = tf.nn.relu_layer(maxpool5_flat, fc6W, fc6b, name=name)
fc6_in = tf.nn.bias_add(tf.matmul(maxpool5_flat, fc6W), fc6b)
fc6 = fc6_in
fc6 = batch_norm(fc6, global_step=global_step, phase_test=phase_test, name=name,
bn_mean=fc6_bn_mean, bn_var=fc6_bn_var)
pre = fc6
fc6 = tf.nn.relu(fc6, name=name)
add_info(name+':nodropout', fc6, pre=fc6_in, info=info)
fc6 = dropout(fc6, 0.5)
add_info(name, fc6, pre=pre, info=info)
c_o_old = c_o
c_o = 4096
name = prefix + 'fc7'
fc7W = tf.squeeze(fc7W, [0, 1])
#fc7 = tf.nn.relu_layer(fc6, fc7W, fc7b, name=name)
fc7_in = tf.nn.bias_add(tf.matmul(fc6, fc7W), fc7b)
fc7 = fc7_in
fc7 = batch_norm(fc7, global_step=global_step, phase_test=phase_test, name=name,
bn_mean=fc7_bn_mean, bn_var=fc7_bn_var)
pre = fc7
fc7 = tf.nn.relu(fc7, name=name)
add_info(name+':nodropout', fc7, pre=fc7_in, info=info)
fc7 = dropout(fc7, 0.5)
add_info(name, fc7, pre=pre, info=info)
c_o_old = c_o
c_o = 1000
if final_layer:
name = prefix+'fc8'
with tf.variable_scope(name):
sh = [c_o_old, c_o]
fc8W = tf.get_variable('weights', sh, dtype=tf.float32,
initializer=W_init_fc)
fc8b = tf.get_variable('biases', [c_o], dtype=tf.float32,
initializer=b_init)
if 'weights' in info:
info['weights'][name + ':weights'] = fc8W
info['weights'][name + ':biases'] = fc8b
fc8 = tf.nn.xw_plus_b(fc7, fc8W, fc8b)
info['activations'][name] = fc8
else:
fc8 = fc7
info['activations'][prefix+'fc6'] = fc6
info['activations'][prefix+'fc7'] = fc7
return fc8
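# Hedged usage sketch (placeholder names and the 227x227 input size are
# assumptions, not taken from this module):
#   x = tf.placeholder(tf.float32, [None, 227, 227, 3])
#   phase_test = tf.placeholder(tf.bool, [])
#   global_step = tf.Variable(0, trainable=False)
#   info = {'activations': {}, 'weights': {}, 'config': {}}
#   logits = build_network(x, info=info, phase_test=phase_test,
#                          global_step=global_step)
# With a 227x227 input, pool5 comes out 6x6 and the fully connected head
# (fc6/fc7/fc8) is built; other sizes fall back to returning pool5.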
LAYERS = [
'conv1', 'conv2', 'conv3', 'conv4', 'conv5',
'fc6', 'fc7'
]
CONV_FC_TRANSITIONALS = {
'fc6': (4096, 256, 6, 6)
}
def save_caffemodel(path, session, prefix='', verbose=True, extra_layers=[]):
return caffe.save_caffemodel(path, session, LAYERS+extra_layers,
prefix=prefix,
save_batch_norm=True,
#lax_naming=True,
conv_fc_transitionals=CONV_FC_TRANSITIONALS,
verbose=verbose)
def load_caffemodel(path, session, prefix='', ignore=set(), verbose=True):
return caffe.load_caffemodel(path, session,
prefix=prefix,
ignore=ignore,
conv_fc_transitionals=CONV_FC_TRANSITIONALS,
verbose=verbose)
|
|
"""
A "smart compression" replacement for savez, assuming data is quantised.
The quantum is found, and the data replaced by a product of and integer
sequence and quantum with offset. delta encoding is optional and often
saves space. The efficiency is better than bz2 of ascii data for
individual channels, and a little worse if many channels are lumped
together with a common timebase in the bz2 ascii format, because
save_compress stores individual timebases.
$ wc -c /f/python/local_data/027/27999_P* 2300176 total
At the moment (2010), save_compress is not explicitly implemented - it
is effected by calling discretise_signal() with a filename argument.
July 2009 - long-standing error in delta_encode_signal fixed (had not
been usable before)
Original Author: Boyd Blackwell.
"""
import inspect
import numpy as np
from django.utils.importlib import import_module
from django.conf import settings
from h1ds.base import BaseBackendShotManager
def discretise_array(arr, eps=0, bits=0, maxcount=0, delta_encode=False):
"""
Return an integer array and scales etc. in a dictionary - the
dictionary form allows for added functionality. If bits=0, find
the natural accuracy. eps defaults to 3e-6, and is the error
relative to the largest element, as is maxerror.
"""
if eps == 0:
eps = 3e-6
if maxcount == 0:
maxcount = 10
count = 1
ans = try_discretise_array(arr, eps=eps, bits=bits,
delta_encode=delta_encode)
initial_deltar = ans['deltar']
# look for a timebase, because timebases have the largest ratio of value
# to step size, and are the hardest to discretise in the presence of
# representation error. Better check positive! Could add code to handle
# negative values later.
if initial_deltar > 0:
# find the largest power of 10 smaller than initial_deltar
p10r = np.log10(initial_deltar)
p10int = int(100 + p10r) - 100 # always round down
ratiop10 = initial_deltar / 10 ** p10int
eps10 = abs(round(ratiop10) - ratiop10)
if eps10 < 3e-3 * ratiop10:
initial_deltar = round(ratiop10) * 10 ** p10int
ans = try_discretise_array(arr, eps=eps, bits=bits,
deltar=initial_deltar,
delta_encode=delta_encode)
initial_deltar = ans['deltar']
while (ans['maxerror'] > eps) and (count < maxcount):
count += 1
# have faith in our guess, assume problem is that step is
# not the minimum. e.g. arr=[1,3,5,8]
# - min step is 2, natural step is 1
ans = try_discretise_array(arr, eps=eps, bits=bits,
deltar=initial_deltar / count,
delta_encode=delta_encode)
return ans
# return(ans.update({'count':count})) # need to add in count
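# Hedged usage sketch (variable names are illustrative, not from this module):
#   ans = discretise_array(np.array([0.0, 0.1, 0.2, 0.4, 0.5]))
#   # the signal is recovered, to within ans['maxerror'], as an integer
#   # sequence times the quantum plus an offset:
#   recovered = ans['iarr'] * ans['deltar'] + ans['minarr']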
def try_discretise_array(arr, eps=0, bits=0, deltar=None, delta_encode=False):
"""
Return an integer array and scales etc in a dictionary
- the dictionary form allows for added functionality.
If bits=0, find the natural accuracy. eps defaults to 1e-6
"""
if eps == 0:
eps = 1e-6
if deltar is not None:
data_sort = np.unique(arr)
# don't want uniques because of noise
diff_sort = np.sort(np.diff(data_sort))
if np.size(diff_sort) == 0:
diff_sort = [0] # in case all the same
# with real representation, there will be many diffs ~ eps -
# 1e-8 or 1e-15*max - try to skip over these
# will have at least three cases
# - timebase with basically one diff and all diffdiffs in the
# noise
# - data with lots of diffs and lots of diffdiffs at a much
# lower level
min_real_diff_ind = (diff_sort > np.max(diff_sort) / 1e4).nonzero()
if np.size(min_real_diff_ind) == 0:
min_real_diff_ind = [[0]]
# min_real_diff_ind[0] is the array of indices satisfying that
# condition
# discard all preceding this
diff_sort = diff_sort[min_real_diff_ind[0][0]:]
deltar = diff_sort[0]
diff_diff_sort = np.diff(diff_sort)
# now look for the point where the diff of differences first
# exceeds half the current estimate of difference
# the diff of differences should just be the discretization
# noise by looking further down the sorted diff array and
# averaging over elements which are close in value to the min
# real difference, we can reduce the effect of discretization
# error.
large_diff_diffs_ind = (abs(diff_diff_sort) > deltar / 2).nonzero()
if np.size(large_diff_diffs_ind) == 0:
last_small_diff_diffs_ind = len(diff_sort) - 1
else:
first_large_diff_diffs_ind = large_diff_diffs_ind[0][0]
last_small_diff_diffs_ind = first_large_diff_diffs_ind - 1
# When the step size is within a few orders of representation
# accuracy, problems appear if there is a systematic component in
# the representational noise.
# Could try to limit the number of samples averaged over, which
# would be very effective when the timebase starts from zero.
# MUST NOT sort the difference first in this case! Better IF we
# can reliably detect single rate timebase, then take
# (end-start)/(N-1) if last_small_diff_diffs_ind>10:
# last_small_diff_diffs_ind=2 This limit would only work if time
# started at zero. A smarter way would be to find times near
# zero, and get the difference there - this would work with
# variable sampling rates provided the different rates were
# integer multiples. another trick is to try a power of 10
# times an integer. (which is now implemented in the calling
# routine)
# Apr 2010 - fixed bug for len(diff_sort) == 1 +1 in four places
# like [0:last_small_diff_diffs_ind+1] - actually a bug for all,
# only obvious for len(diff_sort) == 1
deltar = np.mean(diff_sort[0:last_small_diff_diffs_ind + 1])
iarr = (0.5 + (arr - np.min(arr)) / deltar).astype('i')
remain = iarr - ((arr - np.min(arr)) / deltar)
# remain is relative to unit step, need to scale back down, over
# whole array
maxerr = np.max(abs(remain)) * deltar / np.max(arr)
# not clear what the max expected error is - small for 12 bits, gets
# larger quickly
# only use unsigned ints if we are NOT delta_encoding and signal >0
if delta_encode is False and np.min(iarr) >= 0:
if np.max(iarr) < 256:
iarr = iarr.astype(np.uint8)
elif np.max(iarr) < 16384:
iarr = iarr.astype(np.uint16)
else:
if np.max(iarr) < 128:
iarr = iarr.astype(np.int8)
elif np.max(iarr) < 8192:
iarr = iarr.astype(np.int16)
ret_value = {'iarr': iarr, 'maxerror': maxerr, 'deltar': deltar,
'minarr': np.min(arr), 'intmax': np.max(iarr)}
return ret_value
def find_subclasses(module, requested_class):
subclasses = []
for name, class_ in inspect.getmembers(module):
if inspect.isclass(class_) and issubclass(class_, requested_class) and class_ != BaseBackendShotManager:
subclasses.append(class_)
return subclasses
def get_backend_shot_manager(tree):
data_backend_module = tree.get_backend_module()
candidate_classes = find_subclasses(data_backend_module, BaseBackendShotManager)
if len(candidate_classes) == 1:
return candidate_classes[0]
if len(candidate_classes) == 0:
msg = "Data backend module {} does not contain a subclass of BaseBackendShotManager".format(
data_backend_module)
else:
msg = "Data backend module {} contains multiple subclasses of BaseBackendShotManager".format(
data_backend_module)
raise ImportError(msg)
def get_backend_shot_manager_for_device(device):
data_backend_module = device.get_backend_module()
candidate_classes = find_subclasses(data_backend_module, BaseBackendShotManager)
if len(candidate_classes) == 1:
return candidate_classes[0]
if len(candidate_classes) == 0:
msg = "Data backend module {} does not contain a subclass of BaseBackendShotManager".format(
data_backend_module)
else:
msg = "Data backend module {} contains multiple subclasses of BaseBackendShotManager".format(
data_backend_module)
raise ImportError(msg)
|
|
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from nova import exception
from nova.i18n import _LI
from nova.network import base_api
from nova.network import floating_ips
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.objects import base as obj_base
from nova import policy
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def wrap_check_policy(func):
"""Check policy corresponding to the wrapped methods prior to execution."""
@functools.wraps(func)
def wrapped(self, context, *args, **kwargs):
action = func.__name__
if not self.skip_policy_check:
check_policy(context, action)
return func(self, context, *args, **kwargs)
return wrapped
def check_policy(context, action):
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
_action = 'network:%s' % action
policy.enforce(context, _action, target)
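# Hedged example: a call to API.get_all(context) below is wrapped by
# wrap_check_policy, so it enforces the rule "network:get_all" against the
# target {'project_id': context.project_id, 'user_id': context.user_id}.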
class API(base_api.NetworkAPI):
"""API for doing networking via the nova-network network manager.
This is a pluggable module - other implementations do networking via
other services (such as Neutron).
"""
def __init__(self, **kwargs):
self.network_rpcapi = network_rpcapi.NetworkAPI()
helper = utils.ExceptionHelper
# NOTE(vish): this local version of floating_manager has to convert
# ClientExceptions back since they aren't going over rpc.
self.floating_manager = helper(floating_ips.LocalManager())
super(API, self).__init__(**kwargs)
@wrap_check_policy
def get_all(self, context):
"""Get all the networks.
If it is an admin user then api will return all the
networks. If it is a normal user and nova Flat or FlatDHCP
networking is being used then api will return all
networks. Otherwise api will only return the networks which
belong to the user's project.
"""
if "nova.network.manager.Flat" in CONF.network_manager:
project_only = "allow_none"
else:
project_only = True
try:
return objects.NetworkList.get_all(context,
project_only=project_only)
except exception.NoNetworksFound:
return []
@wrap_check_policy
def get(self, context, network_uuid):
return objects.Network.get_by_uuid(context, network_uuid)
@wrap_check_policy
def create(self, context, **kwargs):
return self.network_rpcapi.create_networks(context, **kwargs)
@wrap_check_policy
def delete(self, context, network_uuid):
network = self.get(context, network_uuid)
if network.project_id is not None:
raise exception.NetworkInUse(network_id=network_uuid)
return self.network_rpcapi.delete_network(context, network_uuid, None)
@wrap_check_policy
def disassociate(self, context, network_uuid):
network = self.get(context, network_uuid)
objects.Network.disassociate(context, network.id,
host=True, project=True)
@wrap_check_policy
def get_fixed_ip(self, context, id):
return objects.FixedIP.get_by_id(context, id)
@wrap_check_policy
def get_fixed_ip_by_address(self, context, address):
return objects.FixedIP.get_by_address(context, address)
@wrap_check_policy
def get_floating_ip(self, context, id):
if not strutils.is_int_like(id):
raise exception.InvalidID(id=id)
return objects.FloatingIP.get_by_id(context, id)
@wrap_check_policy
def get_floating_ip_pools(self, context):
return objects.FloatingIP.get_pool_names(context)
@wrap_check_policy
def get_floating_ip_by_address(self, context, address):
return objects.FloatingIP.get_by_address(context, address)
@wrap_check_policy
def get_floating_ips_by_project(self, context):
return objects.FloatingIPList.get_by_project(context,
context.project_id)
@wrap_check_policy
def get_instance_id_by_floating_address(self, context, address):
fixed_ip = objects.FixedIP.get_by_floating_address(context, address)
if fixed_ip is None:
return None
else:
return fixed_ip.instance_uuid
@wrap_check_policy
def get_vifs_by_instance(self, context, instance):
vifs = objects.VirtualInterfaceList.get_by_instance_uuid(context,
instance.uuid)
for vif in vifs:
if vif.network_id is not None:
network = objects.Network.get_by_id(context, vif.network_id,
project_only='allow_none')
vif.net_uuid = network.uuid
return vifs
@wrap_check_policy
def get_vif_by_mac_address(self, context, mac_address):
vif = objects.VirtualInterface.get_by_address(context,
mac_address)
if vif.network_id is not None:
network = objects.Network.get_by_id(context, vif.network_id,
project_only='allow_none')
vif.net_uuid = network.uuid
return vif
@wrap_check_policy
def allocate_floating_ip(self, context, pool=None):
"""Adds (allocates) a floating IP to a project from a pool."""
return self.floating_manager.allocate_floating_ip(context,
context.project_id, False, pool)
@wrap_check_policy
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Removes (deallocates) a floating IP with address from a project."""
return self.floating_manager.deallocate_floating_ip(context, address,
affect_auto_assigned)
def disassociate_and_release_floating_ip(self, context, instance,
floating_ip):
"""Removes (deallocates) and deletes the floating IP.
This api call was added to allow this to be done in one operation
if using neutron.
"""
address = floating_ip['address']
if floating_ip.get('fixed_ip_id'):
try:
self.disassociate_floating_ip(context, instance, address)
except exception.FloatingIpNotAssociated:
msg = ("Floating IP %s has already been disassociated, "
"perhaps by another concurrent action.") % address
LOG.debug(msg)
# release ip from project
return self.release_floating_ip(context, address)
@wrap_check_policy
@base_api.refresh_cache
def associate_floating_ip(self, context, instance,
floating_address, fixed_address,
affect_auto_assigned=False):
"""Associates a floating IP with a fixed IP.
Ensures floating IP is allocated to the project in context.
Does not verify ownership of the fixed IP. Caller is assumed to have
checked that the instance is properly owned.
"""
orig_instance_uuid = self.floating_manager.associate_floating_ip(
context, floating_address, fixed_address, affect_auto_assigned)
if orig_instance_uuid:
msg_dict = dict(address=floating_address,
instance_id=orig_instance_uuid)
LOG.info(_LI('re-assign floating IP %(address)s from '
'instance %(instance_id)s'), msg_dict)
orig_instance = objects.Instance.get_by_uuid(
context, orig_instance_uuid, expected_attrs=['flavor'])
# purge cached nw info for the original instance
base_api.update_instance_cache_with_nw_info(self, context,
orig_instance)
@wrap_check_policy
@base_api.refresh_cache
def disassociate_floating_ip(self, context, instance, address,
affect_auto_assigned=False):
"""Disassociates a floating IP from fixed IP it is associated with."""
return self.floating_manager.disassociate_floating_ip(context, address,
affect_auto_assigned)
@wrap_check_policy
@base_api.refresh_cache
def allocate_for_instance(self, context, instance, vpn,
requested_networks, macs=None,
security_groups=None,
dhcp_options=None,
bind_host_id=None):
"""Allocates all network structures for an instance.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param vpn: A boolean, if True, indicate a vpn to access the instance.
:param requested_networks: A dictionary of requested_networks,
Optional value containing network_id, fixed_ip, and port_id.
:param macs: None or a set of MAC addresses that the instance
should use. macs is supplied by the hypervisor driver (contrast
with requested_networks which is user supplied).
:param security_groups: None or security groups to allocate for
instance.
:param dhcp_options: None or a set of key/value pairs that should
determine the DHCP BOOTP response, eg. for PXE booting an instance
configured with the baremetal hypervisor. It is expected that these
are already formatted for the neutron v2 api.
See nova/virt/driver.py:dhcp_options_for_instance for an example.
:param bind_host_id: ignored by this driver.
:returns: network info as from get_instance_nw_info() below
"""
# NOTE(vish): We can't do the floating ip allocation here because
# this is called from compute.manager which shouldn't
# have db access so we do it on the other side of the
# rpc.
flavor = instance.get_flavor()
args = {}
args['vpn'] = vpn
args['requested_networks'] = requested_networks
args['instance_id'] = instance.uuid
args['project_id'] = instance.project_id
args['host'] = instance.host
args['rxtx_factor'] = flavor['rxtx_factor']
args['macs'] = macs
args['dhcp_options'] = dhcp_options
nw_info = self.network_rpcapi.allocate_for_instance(context, **args)
return network_model.NetworkInfo.hydrate(nw_info)
@wrap_check_policy
def deallocate_for_instance(self, context, instance,
requested_networks=None):
"""Deallocates all network structures related to instance."""
# NOTE(vish): We can't do the floating ip deallocation here because
# this is called from compute.manager which shouldn't
# have db access so we do it on the other side of the
# rpc.
if not isinstance(instance, obj_base.NovaObject):
instance = objects.Instance._from_db_object(context,
objects.Instance(), instance)
self.network_rpcapi.deallocate_for_instance(context, instance=instance,
requested_networks=requested_networks)
# NOTE(danms): Here for neutron compatibility
def allocate_port_for_instance(self, context, instance, port_id,
network_id=None, requested_ip=None,
bind_host_id=None):
raise NotImplementedError()
# NOTE(danms): Here for neutron compatibility
def deallocate_port_for_instance(self, context, instance, port_id):
raise NotImplementedError()
# NOTE(danms): Here for neutron compatibility
def list_ports(self, *args, **kwargs):
raise NotImplementedError()
# NOTE(danms): Here for neutron compatibility
def show_port(self, *args, **kwargs):
raise NotImplementedError()
@wrap_check_policy
@base_api.refresh_cache
def add_fixed_ip_to_instance(self, context, instance, network_id):
"""Adds a fixed IP to instance from specified network."""
flavor = instance.get_flavor()
args = {'instance_id': instance.uuid,
'rxtx_factor': flavor['rxtx_factor'],
'host': instance.host,
'network_id': network_id}
nw_info = self.network_rpcapi.add_fixed_ip_to_instance(
context, **args)
return network_model.NetworkInfo.hydrate(nw_info)
@wrap_check_policy
@base_api.refresh_cache
def remove_fixed_ip_from_instance(self, context, instance, address):
"""Removes a fixed IP from instance from specified network."""
flavor = instance.get_flavor()
args = {'instance_id': instance.uuid,
'rxtx_factor': flavor['rxtx_factor'],
'host': instance.host,
'address': address}
nw_info = self.network_rpcapi.remove_fixed_ip_from_instance(
context, **args)
return network_model.NetworkInfo.hydrate(nw_info)
@wrap_check_policy
def add_network_to_project(self, context, project_id, network_uuid=None):
"""Force adds another network to a project."""
self.network_rpcapi.add_network_to_project(context, project_id,
network_uuid)
@wrap_check_policy
def associate(self, context, network_uuid, host=base_api.SENTINEL,
project=base_api.SENTINEL):
"""Associate or disassociate host or project to network."""
network = self.get(context, network_uuid)
if host is not base_api.SENTINEL:
if host is None:
objects.Network.disassociate(context, network.id,
host=True, project=False)
else:
network.host = host
network.save()
if project is not base_api.SENTINEL:
if project is None:
objects.Network.disassociate(context, network.id,
host=False, project=True)
else:
objects.Network.associate(context, project,
network_id=network.id, force=True)
@wrap_check_policy
def get_instance_nw_info(self, context, instance, **kwargs):
"""Returns all network info related to an instance."""
return super(API, self).get_instance_nw_info(context, instance,
**kwargs)
def _get_instance_nw_info(self, context, instance, **kwargs):
"""Returns all network info related to an instance."""
flavor = instance.get_flavor()
args = {'instance_id': instance.uuid,
'rxtx_factor': flavor['rxtx_factor'],
'host': instance.host,
'project_id': instance.project_id}
nw_info = self.network_rpcapi.get_instance_nw_info(context, **args)
return network_model.NetworkInfo.hydrate(nw_info)
@wrap_check_policy
def validate_networks(self, context, requested_networks, num_instances):
"""validate the networks passed at the time of creating
the server.
Return the number of instances that can be successfully allocated
with the requested network configuration.
"""
if requested_networks:
self.network_rpcapi.validate_networks(context,
requested_networks)
# Neutron validation checks and returns how many of num_instances
# instances can be supported by the quota. For Nova network
# this is part of the subsequent quota check, so we just return
# the requested number in this case.
return num_instances
def create_pci_requests_for_sriov_ports(self, context,
pci_requests,
requested_networks):
"""Check requested networks for any SR-IOV port request.
Create a PCI request object for each SR-IOV port, and add it to the
pci_requests object that contains a list of PCI request object.
"""
# This is NOOP for Nova network since it doesn't support SR-IOV.
pass
@wrap_check_policy
def get_dns_domains(self, context):
"""Returns a list of available dns domains.
These can be used to create DNS entries for floating IPs.
"""
return self.network_rpcapi.get_dns_domains(context)
@wrap_check_policy
def add_dns_entry(self, context, address, name, dns_type, domain):
"""Create specified DNS entry for address."""
args = {'address': address,
'name': name,
'dns_type': dns_type,
'domain': domain}
return self.network_rpcapi.add_dns_entry(context, **args)
@wrap_check_policy
def modify_dns_entry(self, context, name, address, domain):
"""Create specified DNS entry for address."""
args = {'address': address,
'name': name,
'domain': domain}
return self.network_rpcapi.modify_dns_entry(context, **args)
@wrap_check_policy
def delete_dns_entry(self, context, name, domain):
"""Delete the specified dns entry."""
args = {'name': name, 'domain': domain}
return self.network_rpcapi.delete_dns_entry(context, **args)
@wrap_check_policy
def delete_dns_domain(self, context, domain):
"""Delete the specified dns domain."""
return self.network_rpcapi.delete_dns_domain(context, domain=domain)
@wrap_check_policy
def get_dns_entries_by_address(self, context, address, domain):
"""Get entries for address and domain."""
args = {'address': address, 'domain': domain}
return self.network_rpcapi.get_dns_entries_by_address(context, **args)
@wrap_check_policy
def get_dns_entries_by_name(self, context, name, domain):
"""Get entries for name and domain."""
args = {'name': name, 'domain': domain}
return self.network_rpcapi.get_dns_entries_by_name(context, **args)
@wrap_check_policy
def create_private_dns_domain(self, context, domain, availability_zone):
"""Create a private DNS domain with nova availability zone."""
args = {'domain': domain, 'av_zone': availability_zone}
return self.network_rpcapi.create_private_dns_domain(context, **args)
@wrap_check_policy
def create_public_dns_domain(self, context, domain, project=None):
"""Create a public DNS domain with optional nova project."""
args = {'domain': domain, 'project': project}
return self.network_rpcapi.create_public_dns_domain(context, **args)
@wrap_check_policy
def setup_networks_on_host(self, context, instance, host=None,
teardown=False):
"""Setup or teardown the network structures on hosts related to
instance.
"""
host = host or instance.host
# NOTE(tr3buchet): host is passed in cases where we need to setup
# or teardown the networks on a host which has been migrated to/from
# and instance.host is not yet or is no longer equal to that host
args = {'instance_id': instance.id,
'host': host,
'teardown': teardown,
'instance': instance}
self.network_rpcapi.setup_networks_on_host(context, **args)
def _get_multi_addresses(self, context, instance):
try:
fixed_ips = objects.FixedIPList.get_by_instance_uuid(
context, instance.uuid)
except exception.FixedIpNotFoundForInstance:
return False, []
addresses = []
for fixed in fixed_ips:
for floating in fixed.floating_ips:
addresses.append(floating.address)
return fixed_ips[0].network.multi_host, addresses
@wrap_check_policy
def migrate_instance_start(self, context, instance, migration):
"""Start to migrate the network of an instance."""
flavor = instance.get_flavor()
args = dict(
instance_uuid=instance.uuid,
rxtx_factor=flavor['rxtx_factor'],
project_id=instance.project_id,
source_compute=migration['source_compute'],
dest_compute=migration['dest_compute'],
floating_addresses=None,
)
multi_host, addresses = self._get_multi_addresses(context, instance)
if multi_host:
args['floating_addresses'] = addresses
args['host'] = migration['source_compute']
self.network_rpcapi.migrate_instance_start(context, **args)
@wrap_check_policy
def migrate_instance_finish(self, context, instance, migration):
"""Finish migrating the network of an instance."""
flavor = instance.get_flavor()
args = dict(
instance_uuid=instance.uuid,
rxtx_factor=flavor['rxtx_factor'],
project_id=instance.project_id,
source_compute=migration['source_compute'],
dest_compute=migration['dest_compute'],
floating_addresses=None,
)
multi_host, addresses = self._get_multi_addresses(context, instance)
if multi_host:
args['floating_addresses'] = addresses
args['host'] = migration['dest_compute']
self.network_rpcapi.migrate_instance_finish(context, **args)
def setup_instance_network_on_host(self, context, instance, host):
"""Setup network for specified instance on host."""
self.migrate_instance_finish(context, instance,
{'source_compute': None,
'dest_compute': host})
def cleanup_instance_network_on_host(self, context, instance, host):
"""Cleanup network for specified instance on host."""
self.migrate_instance_start(context, instance,
{'source_compute': host,
'dest_compute': None})
|
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Escaping/unescaping methods for HTML, JSON, URLs, and others.
Also includes a few other miscellaneous string manipulation functions that
have crept in over time.
"""
from __future__ import absolute_import, division, print_function, with_statement
import re
import sys
from tornado.util import bytes_type, unicode_type, basestring_type, u
try:
from urllib.parse import parse_qs as _parse_qs # py3
except ImportError:
from urlparse import parse_qs as _parse_qs # Python 2.6+
try:
import htmlentitydefs # py2
except ImportError:
import html.entities as htmlentitydefs # py3
try:
import urllib.parse as urllib_parse # py3
except ImportError:
import urllib as urllib_parse # py2
import json
try:
unichr
except NameError:
unichr = chr
_XHTML_ESCAPE_RE = re.compile('[&<>"]')
_XHTML_ESCAPE_DICT = {'&': '&', '<': '<', '>': '>', '"': '"'}
def xhtml_escape(value):
"""Escapes a string so it is valid within HTML or XML."""
return _XHTML_ESCAPE_RE.sub(lambda match: _XHTML_ESCAPE_DICT[match.group(0)],
to_basestring(value))
def xhtml_unescape(value):
"""Un-escapes an XML-escaped string."""
return re.sub(r"&(#?)(\w+?);", _convert_entity, _unicode(value))
# The fact that json_encode wraps json.dumps is an implementation detail.
# Please see https://github.com/facebook/tornado/pull/706
# before sending a pull request that adds **kwargs to this function.
def json_encode(value):
"""JSON-encodes the given Python object."""
# JSON permits but does not require forward slashes to be escaped.
# This is useful when json data is emitted in a <script> tag
# in HTML, as it prevents </script> tags from prematurely terminating
# the javascript. Some json libraries do this escaping by default,
# although python's standard library does not, so we do it here.
# http://stackoverflow.com/questions/1580647/json-why-are-forward-slashes-escaped
return json.dumps(value).replace("</", "<\\/")
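# Hedged example of the forward-slash escaping described above:
#   json_encode({"html": "</script>"}) == '{"html": "<\\/script>"}'
# i.e. the closing tag is emitted as <\/script>, so it cannot terminate an
# enclosing <script> block.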
def json_decode(value):
"""Returns Python objects for the given JSON string."""
return json.loads(to_basestring(value))
def squeeze(value):
"""Replace all sequences of whitespace chars with a single space."""
return re.sub(r"[\x00-\x20]+", " ", value).strip()
def url_escape(value, plus=True):
"""Returns a URL-encoded version of the given value.
If ``plus`` is true (the default), spaces will be represented
as "+" instead of "%20". This is appropriate for query strings
but not for the path component of a URL. Note that this default
is the reverse of Python's urllib module.
.. versionadded:: 3.1
The ``plus`` argument
"""
quote = urllib_parse.quote_plus if plus else urllib_parse.quote
return quote(utf8(value))
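# Hedged example of the ``plus`` behaviour:
#   url_escape("a b&c")              -> "a+b%26c"
#   url_escape("a b&c", plus=False)  -> "a%20b%26c"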
# python 3 changed things around enough that we need two separate
# implementations of url_unescape. We also need our own implementation
# of parse_qs since python 3's version insists on decoding everything.
if sys.version_info[0] < 3:
def url_unescape(value, encoding='utf-8', plus=True):
"""Decodes the given value from a URL.
The argument may be either a byte or unicode string.
If encoding is None, the result will be a byte string. Otherwise,
the result is a unicode string in the specified encoding.
If ``plus`` is true (the default), plus signs will be interpreted
as spaces (literal plus signs must be represented as "%2B"). This
is appropriate for query strings and form-encoded values but not
for the path component of a URL. Note that this default is the
reverse of Python's urllib module.
.. versionadded:: 3.1
The ``plus`` argument
"""
unquote = (urllib_parse.unquote_plus if plus else urllib_parse.unquote)
if encoding is None:
return unquote(utf8(value))
else:
return unicode_type(unquote(utf8(value)), encoding)
parse_qs_bytes = _parse_qs
else:
def url_unescape(value, encoding='utf-8', plus=True):
"""Decodes the given value from a URL.
The argument may be either a byte or unicode string.
If encoding is None, the result will be a byte string. Otherwise,
the result is a unicode string in the specified encoding.
If ``plus`` is true (the default), plus signs will be interpreted
as spaces (literal plus signs must be represented as "%2B"). This
is appropriate for query strings and form-encoded values but not
for the path component of a URL. Note that this default is the
reverse of Python's urllib module.
.. versionadded:: 3.1
The ``plus`` argument
"""
if encoding is None:
if plus:
# unquote_to_bytes doesn't have a _plus variant
value = to_basestring(value).replace('+', ' ')
return urllib_parse.unquote_to_bytes(value)
else:
unquote = (urllib_parse.unquote_plus if plus
else urllib_parse.unquote)
return unquote(to_basestring(value), encoding=encoding)
def parse_qs_bytes(qs, keep_blank_values=False, strict_parsing=False):
"""Parses a query string like urlparse.parse_qs, but returns the
values as byte strings.
Keys still become type str (interpreted as latin1 in python3!)
because it's too painful to keep them as byte strings in
python3 and in practice they're nearly always ascii anyway.
"""
# This is gross, but python3 doesn't give us another way.
# Latin1 is the universal donor of character encodings.
result = _parse_qs(qs, keep_blank_values, strict_parsing,
encoding='latin1', errors='strict')
encoded = {}
for k, v in result.items():
encoded[k] = [i.encode('latin1') for i in v]
return encoded
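# Hedged example: keys come back as str, values as byte strings:
#   parse_qs_bytes("a=b&a=c") == {'a': [b'b', b'c']}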
_UTF8_TYPES = (bytes_type, type(None))
def utf8(value):
"""Converts a string argument to a byte string.
If the argument is already a byte string or None, it is returned unchanged.
Otherwise it must be a unicode string and is encoded as utf8.
"""
if isinstance(value, _UTF8_TYPES):
return value
assert isinstance(value, unicode_type), \
"Expected bytes, unicode, or None; got %r" % type(value)
return value.encode("utf-8")
_TO_UNICODE_TYPES = (unicode_type, type(None))
def to_unicode(value):
"""Converts a string argument to a unicode string.
If the argument is already a unicode string or None, it is returned
unchanged. Otherwise it must be a byte string and is decoded as utf8.
"""
if isinstance(value, _TO_UNICODE_TYPES):
return value
assert isinstance(value, bytes_type), \
"Expected bytes, unicode, or None; got %r" % type(value)
return value.decode("utf-8")
# to_unicode was previously named _unicode not because it was private,
# but to avoid conflicts with the built-in unicode() function/type
_unicode = to_unicode
# When dealing with the standard library across python 2 and 3 it is
# sometimes useful to have a direct conversion to the native string type
if str is unicode_type:
native_str = to_unicode
else:
native_str = utf8
_BASESTRING_TYPES = (basestring_type, type(None))
def to_basestring(value):
"""Converts a string argument to a subclass of basestring.
In python2, byte and unicode strings are mostly interchangeable,
so functions that deal with a user-supplied argument in combination
with ascii string constants can use either and should return the type
the user supplied. In python3, the two types are not interchangeable,
so this method is needed to convert byte strings to unicode.
"""
if isinstance(value, _BASESTRING_TYPES):
return value
assert isinstance(value, bytes_type), \
"Expected bytes, unicode, or None; got %r" % type(value)
return value.decode("utf-8")
def recursive_unicode(obj):
"""Walks a simple data structure, converting byte strings to unicode.
Supports lists, tuples, and dictionaries.
"""
if isinstance(obj, dict):
return dict((recursive_unicode(k), recursive_unicode(v)) for (k, v) in obj.items())
elif isinstance(obj, list):
return list(recursive_unicode(i) for i in obj)
elif isinstance(obj, tuple):
return tuple(recursive_unicode(i) for i in obj)
elif isinstance(obj, bytes_type):
return to_unicode(obj)
else:
return obj
# I originally used the regex from
# http://daringfireball.net/2010/07/improved_regex_for_matching_urls
# but it gets all exponential on certain patterns (such as too many trailing
# dots), causing the regex matcher to never return.
# This regex should avoid those problems.
# Use to_unicode instead of tornado.util.u - we don't want backslashes getting
# processed as escapes.
_URL_RE = re.compile(to_unicode(r"""\b((?:([\w-]+):(/{1,3})|www[.])(?:(?:(?:[^\s&()]|&|")*(?:[^!"#$%&'()*+,.:;<=>?@\[\]^`{|}~\s]))|(?:\((?:[^\s&()]|&|")*\)))+)"""))
def linkify(text, shorten=False, extra_params="",
require_protocol=False, permitted_protocols=["http", "https"]):
"""Converts plain text into HTML with links.
For example: ``linkify("Hello http://tornadoweb.org!")`` would return
``Hello <a href="http://tornadoweb.org">http://tornadoweb.org</a>!``
Parameters:
* ``shorten``: Long urls will be shortened for display.
* ``extra_params``: Extra text to include in the link tag, or a callable
taking the link as an argument and returning the extra text
e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``,
or::
def extra_params_cb(url):
if url.startswith("http://example.com"):
return 'class="internal"'
else:
return 'class="external" rel="nofollow"'
linkify(text, extra_params=extra_params_cb)
* ``require_protocol``: Only linkify urls which include a protocol. If
this is False, urls such as www.facebook.com will also be linkified.
* ``permitted_protocols``: List (or set) of protocols which should be
linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp",
"mailto"])``. It is very unsafe to include protocols such as
``javascript``.
"""
if extra_params and not callable(extra_params):
extra_params = " " + extra_params.strip()
def make_link(m):
url = m.group(1)
proto = m.group(2)
if require_protocol and not proto:
            return url # no protocol, no linkify
if proto and proto not in permitted_protocols:
return url # bad protocol, no linkify
href = m.group(1)
if not proto:
href = "http://" + href # no proto specified, use http
if callable(extra_params):
params = " " + extra_params(href).strip()
else:
params = extra_params
# clip long urls. max_len is just an approximation
max_len = 30
if shorten and len(url) > max_len:
before_clip = url
if proto:
proto_len = len(proto) + 1 + len(m.group(3) or "") # +1 for :
else:
proto_len = 0
parts = url[proto_len:].split("/")
if len(parts) > 1:
# Grab the whole host part plus the first bit of the path
# The path is usually not that interesting once shortened
# (no more slug, etc), so it really just provides a little
# extra indication of shortening.
url = url[:proto_len] + parts[0] + "/" + \
parts[1][:8].split('?')[0].split('.')[0]
if len(url) > max_len * 1.5: # still too long
url = url[:max_len]
if url != before_clip:
amp = url.rfind('&')
# avoid splitting html char entities
if amp > max_len - 5:
url = url[:amp]
url += "..."
if len(url) >= len(before_clip):
url = before_clip
else:
# full url is visible on mouse-over (for those who don't
# have a status bar, such as Safari by default)
params += ' title="%s"' % href
return u('<a href="%s"%s>%s</a>') % (href, params, url)
# First HTML-escape so that our strings are all safe.
    # The regex is modified to avoid character entities other than &amp; so
    # that we won't pick up &quot;, etc.
text = _unicode(xhtml_escape(text))
return _URL_RE.sub(make_link, text)
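# A short usage sketch mirroring the docstring above (the helper name and URLs
# are illustrative only):
def _example_linkify():
    def extra_params_cb(url):
        if url.startswith("http://example.com"):
            return 'class="internal"'
        return 'class="external" rel="nofollow"'
    return linkify(u"Docs at http://example.com/docs and www.tornadoweb.org",
                   extra_params=extra_params_cb, shorten=True)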
def _convert_entity(m):
if m.group(1) == "#":
try:
return unichr(int(m.group(2)))
except ValueError:
return "&#%s;" % m.group(2)
try:
return _HTML_UNICODE_MAP[m.group(2)]
except KeyError:
return "&%s;" % m.group(2)
def _build_unicode_map():
unicode_map = {}
for name, value in htmlentitydefs.name2codepoint.items():
unicode_map[name] = unichr(value)
return unicode_map
_HTML_UNICODE_MAP = _build_unicode_map()
|
|
import os
import tempfile
from OpenSSL.crypto import dump_certificate, load_certificate, FILETYPE_PEM
from OpenSSL.SSL import Error as SSLError
from OpenSSL.version import __version__ as pyopenssl_version
from twisted.internet import reactor
from twisted.internet.ssl import DefaultOpenSSLContextFactory
from twisted.protocols.policies import WrappingFactory
from twisted.python import log
from twisted.python.filepath import FilePath
from twisted.test.test_sslverify import makeCertificate
from twisted.web import server, static
try:
from twisted.web.client import ResponseFailed
except ImportError:
from twisted.web._newclient import ResponseFailed
from txaws import exception
from txaws.client import ssl
from txaws.client.base import BaseQuery
from txaws.service import AWSServiceEndpoint
from txaws.testing.base import TXAWSTestCase
def sibpath(path):
return FilePath(__file__).sibling(path).path
PRIVKEY = sibpath("private.ssl")
PUBKEY = sibpath("public.ssl")
BADPRIVKEY = sibpath("badprivate.ssl")
BADPUBKEY = sibpath("badpublic.ssl")
PRIVSANKEY = sibpath("private_san.ssl")
PUBSANKEY = sibpath("public_san.ssl")
class WebDefaultOpenSSLContextFactory(DefaultOpenSSLContextFactory):
def getContext(self, hostname=None, port=None):
return DefaultOpenSSLContextFactory.getContext(self)
class BaseQuerySSLTestCase(TXAWSTestCase):
def setUp(self):
self.cleanupServerConnections = 0
name = self.mktemp()
os.mkdir(name)
FilePath(name).child("file").setContent("0123456789")
r = static.File(name)
self.site = server.Site(r, timeout=None)
self.wrapper = WrappingFactory(self.site)
pub_key = file(PUBKEY)
pub_key_data = pub_key.read()
pub_key.close()
pub_key_san = file(PUBSANKEY)
pub_key_san_data = pub_key_san.read()
pub_key_san.close()
ssl._ca_certs = [load_certificate(FILETYPE_PEM, pub_key_data),
load_certificate(FILETYPE_PEM, pub_key_san_data)]
def tearDown(self):
ssl._ca_certs = None
# If the test indicated it might leave some server-side connections
# around, clean them up.
connections = self.wrapper.protocols.keys()
# If there are fewer server-side connections than requested,
# that's okay. Some might have noticed that the client closed
# the connection and cleaned up after themselves.
for n in range(min(len(connections), self.cleanupServerConnections)):
proto = connections.pop()
log.msg("Closing %r" % (proto,))
proto.transport.loseConnection()
if connections:
log.msg("Some left-over connections; this test is probably buggy.")
return self.port.stopListening()
def _get_url(self, path):
return "https://localhost:%d/%s" % (self.portno, path)
def test_ssl_verification_positive(self):
"""
        The L{VerifyingContextFactory} allows connecting to the endpoint if
        the certificates match.
"""
context_factory = WebDefaultOpenSSLContextFactory(PRIVKEY, PUBKEY)
self.port = reactor.listenSSL(
0, self.site, context_factory, interface="127.0.0.1")
self.portno = self.port.getHost().port
endpoint = AWSServiceEndpoint(ssl_hostname_verification=True)
query = BaseQuery("an action", "creds", endpoint)
d = query.get_page(self._get_url("file"))
return d.addCallback(self.assertEquals, "0123456789")
def test_ssl_verification_negative(self):
"""
        The L{VerifyingContextFactory} fails with an SSL error if the
        certificates can't be checked.
"""
context_factory = WebDefaultOpenSSLContextFactory(BADPRIVKEY, BADPUBKEY)
self.port = reactor.listenSSL(
0, self.site, context_factory, interface="127.0.0.1")
self.portno = self.port.getHost().port
endpoint = AWSServiceEndpoint(ssl_hostname_verification=True)
query = BaseQuery("an action", "creds", endpoint)
d = query.get_page(self._get_url("file"))
def fail(ignore):
self.fail('Expected SSLError')
def check_exception(why):
# XXX kind of a mess here ... need to unwrap the
# exception and check
root_exc = why.value[0][0].value
self.assert_(isinstance(root_exc, SSLError))
return d.addCallbacks(fail, check_exception)
def test_ssl_verification_bypassed(self):
"""
        L{BaseQuery} doesn't use L{VerifyingContextFactory} if
        C{ssl_hostname_verification} is C{False}, thus allowing connections to
        endpoints whose certificates cannot be verified.
"""
context_factory = DefaultOpenSSLContextFactory(BADPRIVKEY, BADPUBKEY)
self.port = reactor.listenSSL(
0, self.site, context_factory, interface="127.0.0.1")
self.portno = self.port.getHost().port
endpoint = AWSServiceEndpoint(ssl_hostname_verification=False)
query = BaseQuery("an action", "creds", endpoint)
d = query.get_page(self._get_url("file"))
return d.addCallback(self.assertEquals, "0123456789")
def test_ssl_subject_alt_name(self):
"""
L{VerifyingContextFactory} supports checking C{subjectAltName} in the
certificate if it's available.
"""
context_factory = WebDefaultOpenSSLContextFactory(PRIVSANKEY, PUBSANKEY)
self.port = reactor.listenSSL(
0, self.site, context_factory, interface="127.0.0.1")
self.portno = self.port.getHost().port
endpoint = AWSServiceEndpoint(ssl_hostname_verification=True)
query = BaseQuery("an action", "creds", endpoint)
d = query.get_page("https://127.0.0.1:%d/file" % (self.portno,))
return d.addCallback(self.assertEquals, "0123456789")
if pyopenssl_version < "0.12":
test_ssl_subject_alt_name.skip = (
"subjectAltName not supported by older PyOpenSSL")
class CertsFilesTestCase(TXAWSTestCase):
def setUp(self):
super(CertsFilesTestCase, self).setUp()
# set up temp dir with no certs
self.no_certs_dir = tempfile.mkdtemp()
# create certs
cert1 = makeCertificate(O="Server Certificate 1", CN="cn1")
cert2 = makeCertificate(O="Server Certificate 2", CN="cn2")
cert3 = makeCertificate(O="Server Certificate 3", CN="cn3")
# set up temp dir with one cert
self.one_cert_dir = tempfile.mkdtemp()
self.cert1 = self._write_pem(cert1, self.one_cert_dir, "cert1.pem")
# set up temp dir with two certs
self.two_certs_dir = tempfile.mkdtemp()
self.cert2 = self._write_pem(cert2, self.two_certs_dir, "cert2.pem")
self.cert3 = self._write_pem(cert3, self.two_certs_dir, "cert3.pem")
def tearDown(self):
super(CertsFilesTestCase, self).tearDown()
os.unlink(self.cert1)
os.unlink(self.cert2)
os.unlink(self.cert3)
os.removedirs(self.no_certs_dir)
os.removedirs(self.one_cert_dir)
os.removedirs(self.two_certs_dir)
def _write_pem(self, cert, dir, filename):
data = dump_certificate(FILETYPE_PEM, cert[1])
full_path = os.path.join(dir, filename)
fh = open(full_path, "w")
fh.write(data)
fh.close()
return full_path
def test_get_ca_certs_no_certs(self):
os.environ["TXAWS_CERTS_PATH"] = self.no_certs_dir
self.patch(ssl, "DEFAULT_CERTS_PATH", self.no_certs_dir)
self.assertRaises(exception.CertsNotFoundError, ssl.get_ca_certs)
def test_get_ca_certs_with_default_path(self):
self.patch(ssl, "DEFAULT_CERTS_PATH", self.two_certs_dir)
certs = ssl.get_ca_certs()
self.assertEqual(len(certs), 2)
def test_get_ca_certs_with_env_path(self):
os.environ["TXAWS_CERTS_PATH"] = self.one_cert_dir
certs = ssl.get_ca_certs()
self.assertEqual(len(certs), 1)
def test_get_ca_certs_multiple_paths(self):
os.environ["TXAWS_CERTS_PATH"] = "%s:%s" % (
self.one_cert_dir, self.two_certs_dir)
certs = ssl.get_ca_certs()
self.assertEqual(len(certs), 3)
def test_get_ca_certs_one_empty_path(self):
os.environ["TXAWS_CERTS_PATH"] = "%s:%s" % (
self.no_certs_dir, self.one_cert_dir)
certs = ssl.get_ca_certs()
self.assertEqual(len(certs), 1)
def test_get_ca_certs_no_current_dir(self):
"""
Do not include the current directory if the TXAWS_CERTS_PATH
environment variable ends with a ":".
"""
self.addCleanup(os.chdir, os.getcwd())
os.chdir(self.one_cert_dir)
os.environ["TXAWS_CERTS_PATH"] = "%s:" % self.no_certs_dir
self.assertRaises(exception.CertsNotFoundError, ssl.get_ca_certs)
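# A minimal sketch (hypothetical helper, not used by the tests above) of how a
# caller points txAWS at its own CA directory before issuing verified queries:
def _example_configure_ca_path(cert_dir):
    """Load CA certificates from cert_dir via the TXAWS_CERTS_PATH hook."""
    os.environ["TXAWS_CERTS_PATH"] = cert_dir
    return ssl.get_ca_certs()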
|
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Code to validate and convert settings of the Microsoft build tools.
This file contains code to validate and convert settings of the Microsoft
build tools. The functions ConvertToMSBuildSettings(), ValidateMSVSSettings(),
and ValidateMSBuildSettings() are the entry points.
This file was created by comparing the projects created by Visual Studio 2008
and Visual Studio 2010 for all available settings through the user interface.
The MSBuild schemas were also considered. They are typically found in the
MSBuild install directory, e.g. c:\Program Files (x86)\MSBuild
"""
import sys
import re
# Dictionaries of settings validators. The key is the tool name, the value is
# a dictionary mapping setting names to validation functions.
_msvs_validators = {}
_msbuild_validators = {}
# A dictionary of settings converters. The key is the tool name, the value is
# a dictionary mapping setting names to conversion functions.
_msvs_to_msbuild_converters = {}
# Tool name mapping from MSVS to MSBuild.
_msbuild_name_of_tool = {}
class _Tool(object):
"""Represents a tool used by MSVS or MSBuild.
Attributes:
msvs_name: The name of the tool in MSVS.
msbuild_name: The name of the tool in MSBuild.
"""
def __init__(self, msvs_name, msbuild_name):
self.msvs_name = msvs_name
self.msbuild_name = msbuild_name
def _AddTool(tool):
"""Adds a tool to the four dictionaries used to process settings.
This only defines the tool. Each setting also needs to be added.
Args:
tool: The _Tool object to be added.
"""
_msvs_validators[tool.msvs_name] = {}
_msbuild_validators[tool.msbuild_name] = {}
_msvs_to_msbuild_converters[tool.msvs_name] = {}
_msbuild_name_of_tool[tool.msvs_name] = tool.msbuild_name
def _GetMSBuildToolSettings(msbuild_settings, tool):
"""Returns an MSBuild tool dictionary. Creates it if needed."""
return msbuild_settings.setdefault(tool.msbuild_name, {})
class _Type(object):
"""Type of settings (Base class)."""
def ValidateMSVS(self, value):
"""Verifies that the value is legal for MSVS.
Args:
value: the value to check for this type.
Raises:
ValueError if value is not valid for MSVS.
"""
def ValidateMSBuild(self, value):
"""Verifies that the value is legal for MSBuild.
Args:
value: the value to check for this type.
Raises:
ValueError if value is not valid for MSBuild.
"""
def ConvertToMSBuild(self, value):
"""Returns the MSBuild equivalent of the MSVS value given.
Args:
value: the MSVS value to convert.
Returns:
the MSBuild equivalent.
Raises:
ValueError if value is not valid.
"""
return value
class _String(_Type):
"""A setting that's just a string."""
def ValidateMSVS(self, value):
if not isinstance(value, basestring):
raise ValueError('expected string; got %r' % value)
def ValidateMSBuild(self, value):
if not isinstance(value, basestring):
raise ValueError('expected string; got %r' % value)
def ConvertToMSBuild(self, value):
# Convert the macros
return ConvertVCMacrosToMSBuild(value)
class _StringList(_Type):
"""A settings that's a list of strings."""
def ValidateMSVS(self, value):
if not isinstance(value, basestring) and not isinstance(value, list):
raise ValueError('expected string list; got %r' % value)
def ValidateMSBuild(self, value):
if not isinstance(value, basestring) and not isinstance(value, list):
raise ValueError('expected string list; got %r' % value)
def ConvertToMSBuild(self, value):
# Convert the macros
if isinstance(value, list):
return [ConvertVCMacrosToMSBuild(i) for i in value]
else:
return ConvertVCMacrosToMSBuild(value)
class _Boolean(_Type):
"""Boolean settings, can have the values 'false' or 'true'."""
def _Validate(self, value):
if value != 'true' and value != 'false':
raise ValueError('expected bool; got %r' % value)
def ValidateMSVS(self, value):
self._Validate(value)
def ValidateMSBuild(self, value):
self._Validate(value)
def ConvertToMSBuild(self, value):
self._Validate(value)
return value
class _Integer(_Type):
"""Integer settings."""
def __init__(self, msbuild_base=10):
_Type.__init__(self)
self._msbuild_base = msbuild_base
def ValidateMSVS(self, value):
# Try to convert, this will raise ValueError if invalid.
self.ConvertToMSBuild(value)
def ValidateMSBuild(self, value):
# Try to convert, this will raise ValueError if invalid.
int(value, self._msbuild_base)
def ConvertToMSBuild(self, value):
    msbuild_format = '%d' if self._msbuild_base == 10 else '0x%04x'
return msbuild_format % int(value)
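# A brief sketch of the _Integer conversions (the helper below is illustrative
# only): base-10 settings pass through as decimal text, while base-16 settings
# such as the resource compiler's Culture are rendered as 0x-prefixed hex.
def _ExampleIntegerUsage():
  assert _Integer().ConvertToMSBuild('1033') == '1033'
  assert _Integer(msbuild_base=16).ConvertToMSBuild('1033') == '0x0409'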
class _Enumeration(_Type):
"""Type of settings that is an enumeration.
In MSVS, the values are indexes like '0', '1', and '2'.
MSBuild uses text labels that are more representative, like 'Win32'.
Constructor args:
label_list: an array of MSBuild labels that correspond to the MSVS index.
In the rare cases where MSVS has skipped an index value, None is
used in the array to indicate the unused spot.
new: an array of labels that are new to MSBuild.
"""
def __init__(self, label_list, new=None):
_Type.__init__(self)
self._label_list = label_list
self._msbuild_values = set(value for value in label_list
if value is not None)
if new is not None:
self._msbuild_values.update(new)
def ValidateMSVS(self, value):
# Try to convert. It will raise an exception if not valid.
self.ConvertToMSBuild(value)
def ValidateMSBuild(self, value):
if value not in self._msbuild_values:
raise ValueError('unrecognized enumerated value %s' % value)
def ConvertToMSBuild(self, value):
index = int(value)
if index < 0 or index >= len(self._label_list):
raise ValueError('index value (%d) not in expected range [0, %d)' %
(index, len(self._label_list)))
label = self._label_list[index]
if label is None:
raise ValueError('converted value for %s not specified.' % value)
return label
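# A brief sketch of _Enumeration (illustrative helper; the labels mirror the
# WarningLevel mapping defined further below): MSVS stores the index, MSBuild
# stores the label.
def _ExampleEnumerationUsage():
  warning_level = _Enumeration(['TurnOffAllWarnings', 'Level1', 'Level2',
                                'Level3', 'Level4'],
                               new=['EnableAllWarnings'])
  assert warning_level.ConvertToMSBuild('3') == 'Level3'
  warning_level.ValidateMSBuild('EnableAllWarnings') # MSBuild-only label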
# Instantiate the various generic types.
_boolean = _Boolean()
_integer = _Integer()
# For now, we don't do any special validation on these types:
_string = _String()
_file_name = _String()
_folder_name = _String()
_file_list = _StringList()
_folder_list = _StringList()
_string_list = _StringList()
# Some boolean settings went from numerical values to boolean. The
# mapping is 0: default, 1: false, 2: true.
_newly_boolean = _Enumeration(['', 'false', 'true'])
def _Same(tool, name, setting_type):
"""Defines a setting that has the same name in MSVS and MSBuild.
Args:
    tool: a _Tool object giving the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
_Renamed(tool, name, name, setting_type)
def _Renamed(tool, msvs_name, msbuild_name, setting_type):
"""Defines a setting for which the name has changed.
Args:
    tool: a _Tool object giving the names of the tool for MSVS and MSBuild.
msvs_name: the name of the MSVS setting.
msbuild_name: the name of the MSBuild setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
msbuild_tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
msbuild_tool_settings[msbuild_name] = setting_type.ConvertToMSBuild(value)
_msvs_validators[tool.msvs_name][msvs_name] = setting_type.ValidateMSVS
_msbuild_validators[tool.msbuild_name][msbuild_name] = (
setting_type.ValidateMSBuild)
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _Moved(tool, settings_name, msbuild_tool_name, setting_type):
_MovedAndRenamed(tool, settings_name, msbuild_tool_name, settings_name,
setting_type)
def _MovedAndRenamed(tool, msvs_settings_name, msbuild_tool_name,
msbuild_settings_name, setting_type):
"""Defines a setting that may have moved to a new section.
Args:
    tool: a _Tool object giving the names of the tool for MSVS and MSBuild.
msvs_settings_name: the MSVS name of the setting.
msbuild_tool_name: the name of the MSBuild tool to place the setting under.
msbuild_settings_name: the MSBuild name of the setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
tool_settings = msbuild_settings.setdefault(msbuild_tool_name, {})
tool_settings[msbuild_settings_name] = setting_type.ConvertToMSBuild(value)
_msvs_validators[tool.msvs_name][msvs_settings_name] = (
setting_type.ValidateMSVS)
validator = setting_type.ValidateMSBuild
_msbuild_validators[msbuild_tool_name][msbuild_settings_name] = validator
_msvs_to_msbuild_converters[tool.msvs_name][msvs_settings_name] = _Translate
def _MSVSOnly(tool, name, setting_type):
"""Defines a setting that is only found in MSVS.
Args:
    tool: a _Tool object giving the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
def _Translate(unused_value, unused_msbuild_settings):
# Since this is for MSVS only settings, no translation will happen.
pass
_msvs_validators[tool.msvs_name][name] = setting_type.ValidateMSVS
_msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate
def _MSBuildOnly(tool, name, setting_type):
"""Defines a setting that is only found in MSBuild.
Args:
    tool: a _Tool object giving the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
_msbuild_validators[tool.msbuild_name][name] = setting_type.ValidateMSBuild
def _ConvertedToAdditionalOption(tool, msvs_name, flag):
"""Defines a setting that's handled via a command line option in MSBuild.
Args:
    tool: a _Tool object giving the names of the tool for MSVS and MSBuild.
    msvs_name: the name of the MSVS setting that, if 'true', becomes a flag.
    flag: the flag to append to the AdditionalOptions setting.
"""
def _Translate(value, msbuild_settings):
if value == 'true':
tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
if 'AdditionalOptions' in tool_settings:
new_flags = '%s %s' % (tool_settings['AdditionalOptions'], flag)
else:
new_flags = flag
tool_settings['AdditionalOptions'] = new_flags
_msvs_validators[tool.msvs_name][msvs_name] = _boolean.ValidateMSVS
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _CustomGeneratePreprocessedFile(tool, msvs_name):
def _Translate(value, msbuild_settings):
tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
if value == '0':
tool_settings['PreprocessToFile'] = 'false'
tool_settings['PreprocessSuppressLineNumbers'] = 'false'
elif value == '1': # /P
tool_settings['PreprocessToFile'] = 'true'
tool_settings['PreprocessSuppressLineNumbers'] = 'false'
elif value == '2': # /EP /P
tool_settings['PreprocessToFile'] = 'true'
tool_settings['PreprocessSuppressLineNumbers'] = 'true'
else:
raise ValueError('value must be one of [0, 1, 2]; got %s' % value)
# Create a bogus validator that looks for '0', '1', or '2'
msvs_validator = _Enumeration(['a', 'b', 'c']).ValidateMSVS
_msvs_validators[tool.msvs_name][msvs_name] = msvs_validator
msbuild_validator = _boolean.ValidateMSBuild
msbuild_tool_validators = _msbuild_validators[tool.msbuild_name]
msbuild_tool_validators['PreprocessToFile'] = msbuild_validator
msbuild_tool_validators['PreprocessSuppressLineNumbers'] = msbuild_validator
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
fix_vc_macro_slashes_regex_list = ('IntDir', 'OutDir')
fix_vc_macro_slashes_regex = re.compile(
r'(\$\((?:%s)\))(?:[\\/]+)' % "|".join(fix_vc_macro_slashes_regex_list)
)
def FixVCMacroSlashes(s):
"""Replace macros which have excessive following slashes.
These macros are known to have a built-in trailing slash. Furthermore, many
scripts hiccup on processing paths with extra slashes in the middle.
This list is probably not exhaustive. Add as needed.
"""
if '$' in s:
s = fix_vc_macro_slashes_regex.sub(r'\1', s)
return s
def ConvertVCMacrosToMSBuild(s):
"""Convert the the MSVS macros found in the string to the MSBuild equivalent.
This list is probably not exhaustive. Add as needed.
"""
if '$' in s:
replace_map = {
'$(ConfigurationName)': '$(Configuration)',
'$(InputDir)': '%(RootDir)%(Directory)',
'$(InputExt)': '%(Extension)',
'$(InputFileName)': '%(Filename)%(Extension)',
'$(InputName)': '%(Filename)',
'$(InputPath)': '%(FullPath)',
'$(ParentName)': '$(ProjectFileName)',
'$(PlatformName)': '$(Platform)',
'$(SafeInputName)': '%(Filename)',
}
for old, new in replace_map.iteritems():
s = s.replace(old, new)
s = FixVCMacroSlashes(s)
return s
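# A short sketch of the macro conversions above (hypothetical input path):
def _ExampleMacroConversion():
  converted = ConvertVCMacrosToMSBuild('$(IntDir)\\$(InputFileName)')
  # '$(IntDir)\' drops its redundant trailing slash and the per-file macro is
  # rewritten in MSBuild item-metadata syntax.
  assert converted == '$(IntDir)%(Filename)%(Extension)'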
def ConvertToMSBuildSettings(msvs_settings, stderr=sys.stderr):
"""Converts MSVS settings (VS2008 and earlier) to MSBuild settings (VS2010+).
Args:
msvs_settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
Returns:
A dictionary of MSBuild settings. The key is either the MSBuild tool name
or the empty string (for the global settings). The values are themselves
dictionaries of settings and their values.
"""
msbuild_settings = {}
for msvs_tool_name, msvs_tool_settings in msvs_settings.iteritems():
if msvs_tool_name in _msvs_to_msbuild_converters:
msvs_tool = _msvs_to_msbuild_converters[msvs_tool_name]
for msvs_setting, msvs_value in msvs_tool_settings.iteritems():
if msvs_setting in msvs_tool:
# Invoke the translation function.
try:
msvs_tool[msvs_setting](msvs_value, msbuild_settings)
except ValueError, e:
print >> stderr, ('Warning: while converting %s/%s to MSBuild, '
'%s' % (msvs_tool_name, msvs_setting, e))
else:
# We don't know this setting. Give a warning.
print >> stderr, ('Warning: unrecognized setting %s/%s '
'while converting to MSBuild.' %
(msvs_tool_name, msvs_setting))
else:
print >> stderr, ('Warning: unrecognized tool %s while converting to '
'MSBuild.' % msvs_tool_name)
return msbuild_settings
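# A small end-to-end sketch of the conversion entry point (the settings
# dictionary is hypothetical; it relies on mappings registered further below):
def _ExampleConvertToMSBuild():
  msvs = {'VCCLCompilerTool': {'WarningLevel': '3', 'WarnAsError': 'true'}}
  # Returns {'ClCompile': {'WarningLevel': 'Level3',
  #                        'TreatWarningAsError': 'true'}}
  return ConvertToMSBuildSettings(msvs)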
def ValidateMSVSSettings(settings, stderr=sys.stderr):
"""Validates that the names of the settings are valid for MSVS.
Args:
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
_ValidateSettings(_msvs_validators, settings, stderr)
def ValidateMSBuildSettings(settings, stderr=sys.stderr):
"""Validates that the names of the settings are valid for MSBuild.
Args:
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
_ValidateSettings(_msbuild_validators, settings, stderr)
def _ValidateSettings(validators, settings, stderr):
"""Validates that the settings are valid for MSBuild or MSVS.
We currently only validate the names of the settings, not their values.
Args:
validators: A dictionary of tools and their validators.
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
for tool_name in settings:
if tool_name in validators:
tool_validators = validators[tool_name]
for setting, value in settings[tool_name].iteritems():
if setting in tool_validators:
try:
tool_validators[setting](value)
except ValueError, e:
print >> stderr, ('Warning: for %s/%s, %s' %
(tool_name, setting, e))
else:
print >> stderr, ('Warning: unrecognized setting %s/%s' %
(tool_name, setting))
else:
print >> stderr, ('Warning: unrecognized tool %s' % tool_name)
# MSVS and MSBuild names of the tools.
_compile = _Tool('VCCLCompilerTool', 'ClCompile')
_link = _Tool('VCLinkerTool', 'Link')
_midl = _Tool('VCMIDLTool', 'Midl')
_rc = _Tool('VCResourceCompilerTool', 'ResourceCompile')
_lib = _Tool('VCLibrarianTool', 'Lib')
_manifest = _Tool('VCManifestTool', 'Manifest')
_AddTool(_compile)
_AddTool(_link)
_AddTool(_midl)
_AddTool(_rc)
_AddTool(_lib)
_AddTool(_manifest)
# Add sections only found in the MSBuild settings.
_msbuild_validators[''] = {}
_msbuild_validators['ProjectReference'] = {}
_msbuild_validators['ManifestResourceCompile'] = {}
# Descriptions of the compiler options, i.e. VCCLCompilerTool in MSVS and
# ClCompile in MSBuild.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\cl.xml" for
# the schema of the MSBuild ClCompile settings.
# Options that have the same name in MSVS and MSBuild
_Same(_compile, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_compile, 'AdditionalOptions', _string_list)
_Same(_compile, 'AdditionalUsingDirectories', _folder_list) # /AI
_Same(_compile, 'AssemblerListingLocation', _file_name) # /Fa
_Same(_compile, 'BrowseInformationFile', _file_name)
_Same(_compile, 'BufferSecurityCheck', _boolean) # /GS
_Same(_compile, 'DisableLanguageExtensions', _boolean) # /Za
_Same(_compile, 'DisableSpecificWarnings', _string_list) # /wd
_Same(_compile, 'EnableFiberSafeOptimizations', _boolean) # /GT
_Same(_compile, 'EnablePREfast', _boolean) # /analyze Visible='false'
_Same(_compile, 'ExpandAttributedSource', _boolean) # /Fx
_Same(_compile, 'FloatingPointExceptions', _boolean) # /fp:except
_Same(_compile, 'ForceConformanceInForLoopScope', _boolean) # /Zc:forScope
_Same(_compile, 'ForcedIncludeFiles', _file_list) # /FI
_Same(_compile, 'ForcedUsingFiles', _file_list) # /FU
_Same(_compile, 'GenerateXMLDocumentationFiles', _boolean) # /doc
_Same(_compile, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_compile, 'MinimalRebuild', _boolean) # /Gm
_Same(_compile, 'OmitDefaultLibName', _boolean) # /Zl
_Same(_compile, 'OmitFramePointers', _boolean) # /Oy
_Same(_compile, 'PreprocessorDefinitions', _string_list) # /D
_Same(_compile, 'ProgramDataBaseFileName', _file_name) # /Fd
_Same(_compile, 'RuntimeTypeInfo', _boolean) # /GR
_Same(_compile, 'ShowIncludes', _boolean) # /showIncludes
_Same(_compile, 'SmallerTypeCheck', _boolean) # /RTCc
_Same(_compile, 'StringPooling', _boolean) # /GF
_Same(_compile, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_compile, 'TreatWChar_tAsBuiltInType', _boolean) # /Zc:wchar_t
_Same(_compile, 'UndefineAllPreprocessorDefinitions', _boolean) # /u
_Same(_compile, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_compile, 'UseFullPaths', _boolean) # /FC
_Same(_compile, 'WholeProgramOptimization', _boolean) # /GL
_Same(_compile, 'XMLDocumentationFileName', _file_name)
_Same(_compile, 'AssemblerOutput',
_Enumeration(['NoListing',
'AssemblyCode', # /FA
'All', # /FAcs
'AssemblyAndMachineCode', # /FAc
'AssemblyAndSourceCode'])) # /FAs
_Same(_compile, 'BasicRuntimeChecks',
_Enumeration(['Default',
'StackFrameRuntimeCheck', # /RTCs
'UninitializedLocalUsageCheck', # /RTCu
'EnableFastChecks'])) # /RTC1
_Same(_compile, 'BrowseInformation',
_Enumeration(['false',
'true', # /FR
'true'])) # /Fr
_Same(_compile, 'CallingConvention',
_Enumeration(['Cdecl', # /Gd
'FastCall', # /Gr
'StdCall'])) # /Gz
_Same(_compile, 'CompileAs',
_Enumeration(['Default',
'CompileAsC', # /TC
'CompileAsCpp'])) # /TP
_Same(_compile, 'DebugInformationFormat',
_Enumeration(['', # Disabled
'OldStyle', # /Z7
None,
'ProgramDatabase', # /Zi
'EditAndContinue'])) # /ZI
_Same(_compile, 'EnableEnhancedInstructionSet',
_Enumeration(['NotSet',
'StreamingSIMDExtensions', # /arch:SSE
'StreamingSIMDExtensions2'])) # /arch:SSE2
_Same(_compile, 'ErrorReporting',
_Enumeration(['None', # /errorReport:none
'Prompt', # /errorReport:prompt
'Queue'], # /errorReport:queue
                   new=['Send'])) # /errorReport:send
_Same(_compile, 'ExceptionHandling',
_Enumeration(['false',
'Sync', # /EHsc
'Async'], # /EHa
new=['SyncCThrow'])) # /EHs
_Same(_compile, 'FavorSizeOrSpeed',
_Enumeration(['Neither',
'Speed', # /Ot
'Size'])) # /Os
_Same(_compile, 'FloatingPointModel',
_Enumeration(['Precise', # /fp:precise
'Strict', # /fp:strict
'Fast'])) # /fp:fast
_Same(_compile, 'InlineFunctionExpansion',
_Enumeration(['Default',
'OnlyExplicitInline', # /Ob1
'AnySuitable'], # /Ob2
new=['Disabled'])) # /Ob0
_Same(_compile, 'Optimization',
_Enumeration(['Disabled', # /Od
'MinSpace', # /O1
'MaxSpeed', # /O2
'Full'])) # /Ox
_Same(_compile, 'RuntimeLibrary',
_Enumeration(['MultiThreaded', # /MT
'MultiThreadedDebug', # /MTd
'MultiThreadedDLL', # /MD
'MultiThreadedDebugDLL'])) # /MDd
_Same(_compile, 'StructMemberAlignment',
_Enumeration(['Default',
'1Byte', # /Zp1
'2Bytes', # /Zp2
'4Bytes', # /Zp4
'8Bytes', # /Zp8
'16Bytes'])) # /Zp16
_Same(_compile, 'WarningLevel',
_Enumeration(['TurnOffAllWarnings', # /W0
'Level1', # /W1
'Level2', # /W2
'Level3', # /W3
'Level4'], # /W4
new=['EnableAllWarnings'])) # /Wall
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_compile, 'EnableFunctionLevelLinking', 'FunctionLevelLinking',
_boolean) # /Gy
_Renamed(_compile, 'EnableIntrinsicFunctions', 'IntrinsicFunctions',
_boolean) # /Oi
_Renamed(_compile, 'KeepComments', 'PreprocessKeepComments', _boolean) # /C
_Renamed(_compile, 'ObjectFile', 'ObjectFileName', _file_name) # /Fo
_Renamed(_compile, 'OpenMP', 'OpenMPSupport', _boolean) # /openmp
_Renamed(_compile, 'PrecompiledHeaderThrough', 'PrecompiledHeaderFile',
_file_name) # Used with /Yc and /Yu
_Renamed(_compile, 'PrecompiledHeaderFile', 'PrecompiledHeaderOutputFile',
_file_name) # /Fp
_Renamed(_compile, 'UsePrecompiledHeader', 'PrecompiledHeader',
_Enumeration(['NotUsing', # VS recognized '' for this value too.
'Create', # /Yc
'Use'])) # /Yu
_Renamed(_compile, 'WarnAsError', 'TreatWarningAsError', _boolean) # /WX
_ConvertedToAdditionalOption(_compile, 'DefaultCharIsUnsigned', '/J')
# MSVS options not found in MSBuild.
_MSVSOnly(_compile, 'Detect64BitPortabilityProblems', _boolean)
_MSVSOnly(_compile, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_compile, 'BuildingInIDE', _boolean)
_MSBuildOnly(_compile, 'CompileAsManaged',
_Enumeration([], new=['false',
'true', # /clr
'Pure', # /clr:pure
'Safe', # /clr:safe
'OldSyntax'])) # /clr:oldSyntax
_MSBuildOnly(_compile, 'CreateHotpatchableImage', _boolean) # /hotpatch
_MSBuildOnly(_compile, 'MultiProcessorCompilation', _boolean) # /MP
_MSBuildOnly(_compile, 'PreprocessOutputPath', _string) # /Fi
_MSBuildOnly(_compile, 'ProcessorNumber', _integer) # the number of processors
_MSBuildOnly(_compile, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_compile, 'TreatSpecificWarningsAsErrors', _string_list) # /we
_MSBuildOnly(_compile, 'UseUnicodeForAssemblerListing', _boolean) # /FAu
# Defines a setting that needs very customized processing
_CustomGeneratePreprocessedFile(_compile, 'GeneratePreprocessedFile')
# Directives for converting MSVS VCLinkerTool to MSBuild Link.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\link.xml" for
# the schema of the MSBuild Link settings.
# Options that have the same name in MSVS and MSBuild
_Same(_link, 'AdditionalDependencies', _file_list)
_Same(_link, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
# /MANIFESTDEPENDENCY:
_Same(_link, 'AdditionalManifestDependencies', _file_list)
_Same(_link, 'AdditionalOptions', _string_list)
_Same(_link, 'AddModuleNamesToAssembly', _file_list) # /ASSEMBLYMODULE
_Same(_link, 'AllowIsolation', _boolean) # /ALLOWISOLATION
_Same(_link, 'AssemblyLinkResource', _file_list) # /ASSEMBLYLINKRESOURCE
_Same(_link, 'BaseAddress', _string) # /BASE
_Same(_link, 'CLRUnmanagedCodeCheck', _boolean) # /CLRUNMANAGEDCODECHECK
_Same(_link, 'DelayLoadDLLs', _file_list) # /DELAYLOAD
_Same(_link, 'DelaySign', _boolean) # /DELAYSIGN
_Same(_link, 'EmbedManagedResourceFile', _file_list) # /ASSEMBLYRESOURCE
_Same(_link, 'EnableUAC', _boolean) # /MANIFESTUAC
_Same(_link, 'EntryPointSymbol', _string) # /ENTRY
_Same(_link, 'ForceSymbolReferences', _file_list) # /INCLUDE
_Same(_link, 'FunctionOrder', _file_name) # /ORDER
_Same(_link, 'GenerateDebugInformation', _boolean) # /DEBUG
_Same(_link, 'GenerateMapFile', _boolean) # /MAP
_Same(_link, 'HeapCommitSize', _string)
_Same(_link, 'HeapReserveSize', _string) # /HEAP
_Same(_link, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_link, 'IgnoreEmbeddedIDL', _boolean) # /IGNOREIDL
_Same(_link, 'ImportLibrary', _file_name) # /IMPLIB
_Same(_link, 'KeyContainer', _file_name) # /KEYCONTAINER
_Same(_link, 'KeyFile', _file_name) # /KEYFILE
_Same(_link, 'ManifestFile', _file_name) # /ManifestFile
_Same(_link, 'MapExports', _boolean) # /MAPINFO:EXPORTS
_Same(_link, 'MapFileName', _file_name)
_Same(_link, 'MergedIDLBaseFileName', _file_name) # /IDLOUT
_Same(_link, 'MergeSections', _string) # /MERGE
_Same(_link, 'MidlCommandFile', _file_name) # /MIDL
_Same(_link, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_link, 'OutputFile', _file_name) # /OUT
_Same(_link, 'PerUserRedirection', _boolean)
_Same(_link, 'Profile', _boolean) # /PROFILE
_Same(_link, 'ProfileGuidedDatabase', _file_name) # /PGD
_Same(_link, 'ProgramDatabaseFile', _file_name) # /PDB
_Same(_link, 'RegisterOutput', _boolean)
_Same(_link, 'SetChecksum', _boolean) # /RELEASE
_Same(_link, 'StackCommitSize', _string)
_Same(_link, 'StackReserveSize', _string) # /STACK
_Same(_link, 'StripPrivateSymbols', _file_name) # /PDBSTRIPPED
_Same(_link, 'SupportUnloadOfDelayLoadedDLL', _boolean) # /DELAY:UNLOAD
_Same(_link, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_link, 'SwapRunFromCD', _boolean) # /SWAPRUN:CD
_Same(_link, 'TurnOffAssemblyGeneration', _boolean) # /NOASSEMBLY
_Same(_link, 'TypeLibraryFile', _file_name) # /TLBOUT
_Same(_link, 'TypeLibraryResourceID', _integer) # /TLBID
_Same(_link, 'UACUIAccess', _boolean) # /uiAccess='true'
_Same(_link, 'Version', _string) # /VERSION
_Same(_link, 'EnableCOMDATFolding', _newly_boolean) # /OPT:ICF
_Same(_link, 'FixedBaseAddress', _newly_boolean) # /FIXED
_Same(_link, 'LargeAddressAware', _newly_boolean) # /LARGEADDRESSAWARE
_Same(_link, 'OptimizeReferences', _newly_boolean) # /OPT:REF
_Same(_link, 'RandomizedBaseAddress', _newly_boolean) # /DYNAMICBASE
_Same(_link, 'TerminalServerAware', _newly_boolean) # /TSAWARE
_subsystem_enumeration = _Enumeration(
['NotSet',
'Console', # /SUBSYSTEM:CONSOLE
'Windows', # /SUBSYSTEM:WINDOWS
'Native', # /SUBSYSTEM:NATIVE
'EFI Application', # /SUBSYSTEM:EFI_APPLICATION
'EFI Boot Service Driver', # /SUBSYSTEM:EFI_BOOT_SERVICE_DRIVER
'EFI ROM', # /SUBSYSTEM:EFI_ROM
'EFI Runtime', # /SUBSYSTEM:EFI_RUNTIME_DRIVER
'WindowsCE'], # /SUBSYSTEM:WINDOWSCE
new=['POSIX']) # /SUBSYSTEM:POSIX
_target_machine_enumeration = _Enumeration(
['NotSet',
'MachineX86', # /MACHINE:X86
None,
'MachineARM', # /MACHINE:ARM
'MachineEBC', # /MACHINE:EBC
'MachineIA64', # /MACHINE:IA64
None,
'MachineMIPS', # /MACHINE:MIPS
'MachineMIPS16', # /MACHINE:MIPS16
'MachineMIPSFPU', # /MACHINE:MIPSFPU
'MachineMIPSFPU16', # /MACHINE:MIPSFPU16
None,
None,
None,
'MachineSH4', # /MACHINE:SH4
None,
'MachineTHUMB', # /MACHINE:THUMB
'MachineX64']) # /MACHINE:X64
_Same(_link, 'AssemblyDebug',
_Enumeration(['',
'true', # /ASSEMBLYDEBUG
'false'])) # /ASSEMBLYDEBUG:DISABLE
_Same(_link, 'CLRImageType',
_Enumeration(['Default',
'ForceIJWImage', # /CLRIMAGETYPE:IJW
                    'ForcePureILImage', # /CLRIMAGETYPE:PURE
                    'ForceSafeILImage'])) # /CLRIMAGETYPE:SAFE
_Same(_link, 'CLRThreadAttribute',
_Enumeration(['DefaultThreadingAttribute', # /CLRTHREADATTRIBUTE:NONE
'MTAThreadingAttribute', # /CLRTHREADATTRIBUTE:MTA
'STAThreadingAttribute'])) # /CLRTHREADATTRIBUTE:STA
_Same(_link, 'DataExecutionPrevention',
_Enumeration(['',
'false', # /NXCOMPAT:NO
'true'])) # /NXCOMPAT
_Same(_link, 'Driver',
_Enumeration(['NotSet',
'Driver', # /Driver
'UpOnly', # /DRIVER:UPONLY
'WDM'])) # /DRIVER:WDM
_Same(_link, 'LinkTimeCodeGeneration',
_Enumeration(['Default',
'UseLinkTimeCodeGeneration', # /LTCG
'PGInstrument', # /LTCG:PGInstrument
'PGOptimization', # /LTCG:PGOptimize
'PGUpdate'])) # /LTCG:PGUpdate
_Same(_link, 'ShowProgress',
_Enumeration(['NotSet',
'LinkVerbose', # /VERBOSE
'LinkVerboseLib'], # /VERBOSE:Lib
new=['LinkVerboseICF', # /VERBOSE:ICF
'LinkVerboseREF', # /VERBOSE:REF
'LinkVerboseSAFESEH', # /VERBOSE:SAFESEH
'LinkVerboseCLR'])) # /VERBOSE:CLR
_Same(_link, 'SubSystem', _subsystem_enumeration)
_Same(_link, 'TargetMachine', _target_machine_enumeration)
_Same(_link, 'UACExecutionLevel',
_Enumeration(['AsInvoker', # /level='asInvoker'
'HighestAvailable', # /level='highestAvailable'
'RequireAdministrator'])) # /level='requireAdministrator'
_Same(_link, 'MinimumRequiredVersion', _string)
_Same(_link, 'TreatLinkerWarningAsErrors', _boolean) # /WX
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_link, 'ErrorReporting', 'LinkErrorReporting',
_Enumeration(['NoErrorReport', # /ERRORREPORT:NONE
'PromptImmediately', # /ERRORREPORT:PROMPT
'QueueForNextLogin'], # /ERRORREPORT:QUEUE
new=['SendErrorReport'])) # /ERRORREPORT:SEND
_Renamed(_link, 'IgnoreDefaultLibraryNames', 'IgnoreSpecificDefaultLibraries',
_file_list) # /NODEFAULTLIB
_Renamed(_link, 'ResourceOnlyDLL', 'NoEntryPoint', _boolean) # /NOENTRY
_Renamed(_link, 'SwapRunFromNet', 'SwapRunFromNET', _boolean) # /SWAPRUN:NET
_Moved(_link, 'GenerateManifest', '', _boolean)
_Moved(_link, 'IgnoreImportLibrary', '', _boolean)
_Moved(_link, 'LinkIncremental', '', _newly_boolean)
_Moved(_link, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
_Moved(_link, 'UseLibraryDependencyInputs', 'ProjectReference', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_link, 'OptimizeForWindows98', _newly_boolean)
_MSVSOnly(_link, 'UseUnicodeResponseFiles', _boolean)
# These settings generate correctly in the MSVS output files when using
# e.g. DelayLoadDLLs! or AdditionalDependencies! to exclude files from
# configuration entries, but result in spurious artifacts which can be
# safely ignored here. See crbug.com/246570
_MSVSOnly(_link, 'AdditionalLibraryDirectories_excluded', _folder_list)
_MSVSOnly(_link, 'DelayLoadDLLs_excluded', _file_list)
_MSVSOnly(_link, 'AdditionalDependencies_excluded', _file_list)
# MSBuild options not found in MSVS.
_MSBuildOnly(_link, 'BuildingInIDE', _boolean)
_MSBuildOnly(_link, 'ImageHasSafeExceptionHandlers', _boolean) # /SAFESEH
_MSBuildOnly(_link, 'LinkDLL', _boolean) # /DLL Visible='false'
_MSBuildOnly(_link, 'LinkStatus', _boolean) # /LTCG:STATUS
_MSBuildOnly(_link, 'PreventDllBinding', _boolean) # /ALLOWBIND
_MSBuildOnly(_link, 'SupportNobindOfDelayLoadedDLL', _boolean) # /DELAY:NOBIND
_MSBuildOnly(_link, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_link, 'MSDOSStubFileName', _file_name) # /STUB Visible='false'
_MSBuildOnly(_link, 'SectionAlignment', _integer) # /ALIGN
_MSBuildOnly(_link, 'SpecifySectionAttributes', _string) # /SECTION
_MSBuildOnly(_link, 'ForceFileOutput',
_Enumeration([], new=['Enabled', # /FORCE
# /FORCE:MULTIPLE
'MultiplyDefinedSymbolOnly',
'UndefinedSymbolOnly'])) # /FORCE:UNRESOLVED
_MSBuildOnly(_link, 'CreateHotPatchableImage',
_Enumeration([], new=['Enabled', # /FUNCTIONPADMIN
'X86Image', # /FUNCTIONPADMIN:5
'X64Image', # /FUNCTIONPADMIN:6
'ItaniumImage'])) # /FUNCTIONPADMIN:16
_MSBuildOnly(_link, 'CLRSupportLastError',
_Enumeration([], new=['Enabled', # /CLRSupportLastError
'Disabled', # /CLRSupportLastError:NO
# /CLRSupportLastError:SYSTEMDLL
'SystemDlls']))
# Directives for converting VCResourceCompilerTool to ResourceCompile.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\rc.xml" for
# the schema of the MSBuild ResourceCompile settings.
_Same(_rc, 'AdditionalOptions', _string_list)
_Same(_rc, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_rc, 'Culture', _Integer(msbuild_base=16))
_Same(_rc, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_rc, 'PreprocessorDefinitions', _string_list) # /D
_Same(_rc, 'ResourceOutputFileName', _string) # /fo
_Same(_rc, 'ShowProgress', _boolean) # /v
# There is no UI in VisualStudio 2008 to set the following properties.
# However they are found in CL and other tools. Include them here for
# completeness, as they are very likely to have the same usage pattern.
_Same(_rc, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_rc, 'UndefinePreprocessorDefinitions', _string_list) # /u
# MSBuild options not found in MSVS.
_MSBuildOnly(_rc, 'NullTerminateStrings', _boolean) # /n
_MSBuildOnly(_rc, 'TrackerLogDirectory', _folder_name)
# Directives for converting VCMIDLTool to Midl.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\midl.xml" for
# the schema of the MSBuild Midl settings.
_Same(_midl, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_midl, 'AdditionalOptions', _string_list)
_Same(_midl, 'CPreprocessOptions', _string) # /cpp_opt
_Same(_midl, 'ErrorCheckAllocations', _boolean) # /error allocation
_Same(_midl, 'ErrorCheckBounds', _boolean) # /error bounds_check
_Same(_midl, 'ErrorCheckEnumRange', _boolean) # /error enum
_Same(_midl, 'ErrorCheckRefPointers', _boolean) # /error ref
_Same(_midl, 'ErrorCheckStubData', _boolean) # /error stub_data
_Same(_midl, 'GenerateStublessProxies', _boolean) # /Oicf
_Same(_midl, 'GenerateTypeLibrary', _boolean)
_Same(_midl, 'HeaderFileName', _file_name) # /h
_Same(_midl, 'IgnoreStandardIncludePath', _boolean) # /no_def_idir
_Same(_midl, 'InterfaceIdentifierFileName', _file_name) # /iid
_Same(_midl, 'MkTypLibCompatible', _boolean) # /mktyplib203
_Same(_midl, 'OutputDirectory', _string) # /out
_Same(_midl, 'PreprocessorDefinitions', _string_list) # /D
_Same(_midl, 'ProxyFileName', _file_name) # /proxy
_Same(_midl, 'RedirectOutputAndErrors', _file_name) # /o
_Same(_midl, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_midl, 'TypeLibraryName', _file_name) # /tlb
_Same(_midl, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_midl, 'WarnAsError', _boolean) # /WX
_Same(_midl, 'DefaultCharType',
_Enumeration(['Unsigned', # /char unsigned
'Signed', # /char signed
'Ascii'])) # /char ascii7
_Same(_midl, 'TargetEnvironment',
_Enumeration(['NotSet',
'Win32', # /env win32
'Itanium', # /env ia64
'X64'])) # /env x64
_Same(_midl, 'EnableErrorChecks',
_Enumeration(['EnableCustom',
'None', # /error none
'All'])) # /error all
_Same(_midl, 'StructMemberAlignment',
_Enumeration(['NotSet',
'1', # Zp1
'2', # Zp2
'4', # Zp4
'8'])) # Zp8
_Same(_midl, 'WarningLevel',
_Enumeration(['0', # /W0
'1', # /W1
'2', # /W2
'3', # /W3
'4'])) # /W4
_Renamed(_midl, 'DLLDataFileName', 'DllDataFileName', _file_name) # /dlldata
_Renamed(_midl, 'ValidateParameters', 'ValidateAllParameters',
_boolean) # /robust
# MSBuild options not found in MSVS.
_MSBuildOnly(_midl, 'ApplicationConfigurationMode', _boolean) # /app_config
_MSBuildOnly(_midl, 'ClientStubFile', _file_name) # /cstub
_MSBuildOnly(_midl, 'GenerateClientFiles',
_Enumeration([], new=['Stub', # /client stub
'None'])) # /client none
_MSBuildOnly(_midl, 'GenerateServerFiles',
_Enumeration([], new=['Stub', # /client stub
'None'])) # /client none
_MSBuildOnly(_midl, 'LocaleID', _integer) # /lcid DECIMAL
_MSBuildOnly(_midl, 'ServerStubFile', _file_name) # /sstub
_MSBuildOnly(_midl, 'SuppressCompilerWarnings', _boolean) # /no_warn
_MSBuildOnly(_midl, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_midl, 'TypeLibFormat',
_Enumeration([], new=['NewFormat', # /newtlb
'OldFormat'])) # /oldtlb
# Directives for converting VCLibrarianTool to Lib.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\lib.xml" for
# the schema of the MSBuild Lib settings.
_Same(_lib, 'AdditionalDependencies', _file_list)
_Same(_lib, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
_Same(_lib, 'AdditionalOptions', _string_list)
_Same(_lib, 'ExportNamedFunctions', _string_list) # /EXPORT
_Same(_lib, 'ForceSymbolReferences', _string) # /INCLUDE
_Same(_lib, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_lib, 'IgnoreSpecificDefaultLibraries', _file_list) # /NODEFAULTLIB
_Same(_lib, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_lib, 'OutputFile', _file_name) # /OUT
_Same(_lib, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_lib, 'UseUnicodeResponseFiles', _boolean)
_Same(_lib, 'LinkTimeCodeGeneration', _boolean) # /LTCG
_Same(_lib, 'TargetMachine', _target_machine_enumeration)
# TODO(jeanluc) _link defines the same value that gets moved to
# ProjectReference. We may want to validate that they are consistent.
_Moved(_lib, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
# TODO(jeanluc) I don't think these are genuine settings but byproducts of Gyp.
_MSVSOnly(_lib, 'AdditionalLibraryDirectories_excluded', _folder_list)
_MSBuildOnly(_lib, 'DisplayLibrary', _string) # /LIST Visible='false'
_MSBuildOnly(_lib, 'ErrorReporting',
_Enumeration([], new=['PromptImmediately', # /ERRORREPORT:PROMPT
'QueueForNextLogin', # /ERRORREPORT:QUEUE
'SendErrorReport', # /ERRORREPORT:SEND
'NoErrorReport'])) # /ERRORREPORT:NONE
_MSBuildOnly(_lib, 'MinimumRequiredVersion', _string)
_MSBuildOnly(_lib, 'Name', _file_name) # /NAME
_MSBuildOnly(_lib, 'RemoveObjects', _file_list) # /REMOVE
_MSBuildOnly(_lib, 'SubSystem', _subsystem_enumeration)
_MSBuildOnly(_lib, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_lib, 'TreatLibWarningAsErrors', _boolean) # /WX
_MSBuildOnly(_lib, 'Verbose', _boolean)
# Directives for converting VCManifestTool to Mt.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\mt.xml" for
# the schema of the MSBuild Manifest settings.
# Options that have the same name in MSVS and MSBuild
_Same(_manifest, 'AdditionalManifestFiles', _file_list) # /manifest
_Same(_manifest, 'AdditionalOptions', _string_list)
_Same(_manifest, 'AssemblyIdentity', _string) # /identity:
_Same(_manifest, 'ComponentFileName', _file_name) # /dll
_Same(_manifest, 'GenerateCatalogFiles', _boolean) # /makecdfs
_Same(_manifest, 'InputResourceManifests', _string) # /inputresource
_Same(_manifest, 'OutputManifestFile', _file_name) # /out
_Same(_manifest, 'RegistrarScriptFile', _file_name) # /rgs
_Same(_manifest, 'ReplacementsFile', _file_name) # /replacements
_Same(_manifest, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_manifest, 'TypeLibraryFile', _file_name) # /tlb:
_Same(_manifest, 'UpdateFileHashes', _boolean) # /hashupdate
_Same(_manifest, 'UpdateFileHashesSearchPath', _file_name)
_Same(_manifest, 'VerboseOutput', _boolean) # /verbose
# Options that have moved location.
_MovedAndRenamed(_manifest, 'ManifestResourceFile',
'ManifestResourceCompile',
'ResourceOutputFileName',
_file_name)
_Moved(_manifest, 'EmbedManifest', '', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_manifest, 'DependencyInformationFile', _file_name)
_MSVSOnly(_manifest, 'UseFAT32Workaround', _boolean)
_MSVSOnly(_manifest, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_manifest, 'EnableDPIAwareness', _boolean)
_MSBuildOnly(_manifest, 'GenerateCategoryTags', _boolean) # /category
_MSBuildOnly(_manifest, 'ManifestFromManagedAssembly',
_file_name) # /managedassemblyname
_MSBuildOnly(_manifest, 'OutputResourceManifests', _string) # /outputresource
_MSBuildOnly(_manifest, 'SuppressDependencyElement', _boolean) # /nodependency
_MSBuildOnly(_manifest, 'TrackerLogDirectory', _folder_name)
|
|
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
e400
'learn_init': False
independently_centre_inputs : True
e401
input is in range [0,1]
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer']
# 'hair straighteners',
# 'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
# max_input_power=100,
    max_diff=100,
on_power_thresholds=[5] * 5,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
# random_window=64,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=1,
skip_probability_for_first_appliance=0,
one_target_per_seq=False,
n_seq_per_batch=64,
# subsample_target=4,
include_diff=True,
include_power=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs=True,
# standardise_input=True,
# standardise_targets=True,
# unit_variance_targets=False,
# input_padding=2,
lag=0,
clip_input=False
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-4,
learning_rate_changes_by_iteration={
# 1000: 1e-4,
# 4000: 1e-5
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
# auto_reshape=False,
# plotter=CentralOutputPlotter
plotter=Plotter(n_seq_to_plot=10)
)
def exp_a(name):
# ReLU hidden layers
# linear output
# output one appliance
# 0% skip prob for first appliance
# 100% skip prob for other appliances
# input is diff
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
    net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': 50,
'W_in_to_hid': Normal(std=1),
'W_hid_to_hid': Identity(scale=0.9),
'nonlinearity': rectify,
'learn_init': False,
'precompute_input': True
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=1/sqrt(50))
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
# EXPERIMENTS = list('abcdefghi')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=5000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
else:
del net.source.train_activations
gc.collect()
finally:
logging.shutdown()
if __name__ == "__main__":
main()
|
|
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GCITask logic methods."""
import datetime
import logging
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.ext import db
from google.appengine.ext import ndb
from django.utils.translation import ugettext
from soc.modules.gci.logic import comment as comment_logic
from soc.modules.gci.logic import profile as profile_logic
from soc.modules.gci.logic import org_score as org_score_logic
from soc.modules.gci.models.comment import GCIComment
from soc.modules.gci.models import task as task_model
from soc.modules.gci.models.work_submission import GCIWorkSubmission
DEF_ACTION_NEEDED_TITLE = ugettext('Initial Deadline passed')
DEF_ACTION_NEEDED = ugettext(
'Melange has detected that the initial deadline has passed and it has '
'set the task status to ActionNeeded. The student has 24 hours to submit '
'the work before the task is reopened and sent back to the pool for '
'other students to claim.')
DEF_ASSIGNED_TITLE = ugettext('Task Assigned')
DEF_ASSIGNED = ugettext(
'This task has been assigned to %s. '
'You have %i hours to complete this task, good luck!')
DEF_CLAIM_REQUEST_TITLE = ugettext('Task Claimed')
DEF_CLAIM_REQUEST = ugettext('I would like to work on this task.')
DEF_CLOSED_TITLE = ugettext('Task Closed')
DEF_CLOSED = ugettext(
'Congratulations, this task has been completed successfully.')
DEF_NEEDS_WORK_TITLE = ugettext('Task Needs More Work')
DEF_NEEDS_WORK = ugettext(
'One of the mentors has sent this task back for more work. Talk to '
'the mentor(s) assigned to this task to satisfy the requirements needed '
'to complete this task, submit your work again and mark the task as '
'complete once you re-submit your work.')
DEF_EXTEND_DEADLINE_TITLE = ugettext('Deadline extended')
DEF_EXTEND_DEADLINE = ugettext(
    'The deadline of the task has been extended by %i days and %i hours.')
DEF_NO_MORE_WORK_TITLE = ugettext('No more Work can be submitted')
DEF_NO_MORE_WORK = ugettext(
'Melange has detected that the deadline has passed and no more work can '
'be submitted. The submitted work should be reviewed.')
DEF_PUBLISHED_TITLE = ugettext('Task Published')
DEF_PUBLISHED = ugettext('This task is open and can be claimed.')
DEF_REOPENED_TITLE = ugettext('Task Reopened')
DEF_REOPENED = ugettext(
'Melange has detected that the final deadline has passed and it has '
'reopened the task.')
DEF_SEND_FOR_REVIEW_TITLE = ugettext('Ready for review')
DEF_SEND_FOR_REVIEW = ugettext(
'The work on this task is ready to be reviewed.')
DEF_UNASSIGNED_TITLE = ugettext('Task Reopened')
DEF_UNASSIGNED = ugettext('This task has been Reopened.')
DEF_UNCLAIMED_TITLE = ugettext('Claim Removed')
DEF_UNCLAIMED = ugettext(
'The claim on this task has been removed, someone else can claim it now.')
DEF_UNPUBLISHED_TITLE = ugettext('Task Unpublished')
DEF_UNPUBLISHED = ugettext('The task is unpublished.')
DELETE_EXPIRATION = datetime.timedelta(minutes=10)
# TODO(ljvderijk): Add basic subscribers when task is created
def _spawnUpdateTask(entity, transactional=False):
"""Spawns a task to update the state of the task."""
update_url = '/tasks/gci/task/update/%s' % entity.key().id()
new_task = taskqueue.Task(eta=entity.deadline, url=update_url)
new_task.add('gci-update', transactional=transactional)
def hasTaskEditableStatus(task):
"""Reports whether or not a task is in one of the editable states.
Args:
task: Any task_model.GCITask.
"""
editable_statuses = task_model.UNAVAILABLE[:]
editable_statuses.append(task_model.OPEN)
return task.status in editable_statuses
def isOwnerOfTask(task, profile):
"""Returns true if the given profile is owner/student of the task.
Args:
task: The task_model.GCITask entity
profile: The GCIProfile which might be the owner of the task
"""
if not (task_model.GCITask.student.get_value_for_datastore(task) and profile):
return False
else:
student_key = ndb.Key.from_old_key(
task_model.GCITask.student.get_value_for_datastore(task))
return student_key == profile.key
def canClaimRequestTask(task, profile):
"""Returns true if the given profile is allowed to claim the task.
Args:
task: The task_model.GCITask entity
profile: The GCIProfile which we check whether it can claim the task.
"""
# check if the task can be claimed at all
if task.status not in task_model.CLAIMABLE:
return False
# check if the user is allowed to claim this task
q = task_model.GCITask.all()
q.filter('student', profile.key.to_old_key())
q.filter('program', task.program)
q.filter('status IN', task_model.ACTIVE_CLAIMED_TASK)
max_tasks = task.program.nr_simultaneous_tasks
count = q.count(max_tasks)
has_forms = profile_logic.hasStudentFormsUploaded(profile)
return count < max_tasks and has_forms
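# Worked example (illustrative only, not part of the original module): with
# program.nr_simultaneous_tasks == 1 and one task already in
# ACTIVE_CLAIMED_TASK for this student, q.count(1) returns 1, so "1 < 1" is
# False and the claim request is rejected even if the student forms have been
# uploaded.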
def canSubmitWork(task, profile):
"""Returns true if the given profile can submit work to this task.
Args:
task: The task_model.GCITask entity
profile: The GCIProfile to check
"""
return (task.deadline and
datetime.datetime.utcnow() <= task.deadline and
isOwnerOfTask(task, profile) and
task.status in task_model.TASK_IN_PROGRESS)
def publishTask(task, publisher):
"""Publishes the task.
This will put the task in the Open state. A comment will also be generated
to record this event.
Args:
task: GCITask entity.
publisher: GCIProfile of the user that publishes the task.
"""
task.status = task_model.OPEN
comment_props = {
'parent': task,
'title': DEF_PUBLISHED_TITLE,
'content': DEF_PUBLISHED,
'created_by': publisher.key.parent().to_old_key(),
}
comment = GCIComment(**comment_props)
comment_txn = comment_logic.storeAndNotifyTxn(comment)
def publishTaskTxn():
task.put()
comment_txn()
_spawnUpdateTask(task, transactional=True)
return db.run_in_transaction(publishTaskTxn)
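# Note (illustrative): publishTask and the other mutators below share the same
# pattern -- the comment and its notification closure are prepared with
# comment_logic.storeAndNotifyTxn() before the datastore transaction, and then
# the task.put(), the comment storage and any follow-up taskqueue work are
# committed together inside db.run_in_transaction.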
def unpublishTask(task, unpublisher):
"""Unpublishes the task.
This will put the task in the Unpublished state. A comment will also be
generated to record this event.
Args:
task: GCITask entity.
    unpublisher: GCIProfile of the user that unpublishes the task.
"""
task.status = task_model.UNPUBLISHED
comment_props = {
'parent': task,
'title': DEF_UNPUBLISHED_TITLE,
'content': DEF_UNPUBLISHED,
'created_by': unpublisher.key.parent().to_old_key(),
}
comment = GCIComment(**comment_props)
comment_txn = comment_logic.storeAndNotifyTxn(comment)
def unpublishTaskTxn():
task.put()
comment_txn()
_spawnUpdateTask(task, transactional=True)
return db.run_in_transaction(unpublishTaskTxn)
def assignTask(task, student_key, assigner):
"""Assigns the task to the student.
This will put the task in the Claimed state and set the student and deadline
property. A comment will also be generated to record this event.
Args:
task: task_model.GCITask entity.
student_key: Key of the student to assign
assigner: GCIProfile of the user that assigns the student.
"""
task.student = student_key.to_old_key()
task.status = 'Claimed'
task.deadline = datetime.datetime.now() + \
datetime.timedelta(hours=task.time_to_complete)
student = student_key.get()
comment_props = {
'parent': task,
'title': DEF_ASSIGNED_TITLE,
'content': DEF_ASSIGNED %(
student.public_name, task.time_to_complete),
'created_by': assigner.key.parent().to_old_key(),
}
comment = GCIComment(**comment_props)
comment_txn = comment_logic.storeAndNotifyTxn(comment)
def assignTaskTxn():
task.put()
comment_txn()
_spawnUpdateTask(task, transactional=True)
return db.run_in_transaction(assignTaskTxn)
def unassignTask(task, profile):
"""Unassigns a task.
This will put the task in the Reopened state and reset the student and
deadline property. A comment will also be generated to record this event.
Args:
task: task_model.GCITask entity.
profile: GCIProfile of the user that unassigns the task.
"""
task.student = None
task.status = task_model.REOPENED
task.deadline = None
comment_props = {
'parent': task,
'title': DEF_UNASSIGNED_TITLE,
'content': DEF_UNASSIGNED,
'created_by': profile.key.parent().to_old_key()
}
comment = GCIComment(**comment_props)
comment_txn = comment_logic.storeAndNotifyTxn(comment)
def unassignTaskTxn():
task.put()
comment_txn()
return db.run_in_transaction(unassignTaskTxn)
def closeTask(task, profile):
"""Closes the task.
Args:
task: task_model.GCITask entity.
profile: GCIProfile of the user that closes the task.
"""
from soc.modules.gci.tasks.ranking_update import startUpdatingTask
task.status = 'Closed'
task.closed_on = datetime.datetime.now()
task.deadline = None
comment_props = {
'parent': task,
'title': DEF_CLOSED_TITLE,
'content': DEF_CLOSED,
'created_by': profile.key.parent().to_old_key()
}
comment = GCIComment(**comment_props)
comment_txn = comment_logic.storeAndNotifyTxn(comment)
student_key = ndb.Key.from_old_key(
task_model.GCITask.student.get_value_for_datastore(task))
student = student_key.get()
  # The student who worked on the task should receive a confirmation
  # for having submitted his or her first task.
query = queryAllTasksClosedByStudent(student, keys_only=True)
if query.get() is None: # this is the first task
confirmation = profile_logic.sendFirstTaskConfirmationTxn(student, task)
else:
confirmation = lambda: None
org_score_txn = org_score_logic.updateOrgScoreTxn(task, student)
@db.transactional(xg=True)
def closeTaskTxn():
task.put()
comment_txn()
startUpdatingTask(task, transactional=True)
confirmation()
org_score_txn()
# TODO(daniel): move this to a transaction when other models are NDB
student = student_key.get()
student.student_data.number_of_completed_tasks += 1
student.put()
return closeTaskTxn()
def needsWorkTask(task, profile):
"""Closes the task.
Args:
task: task_model.GCITask entity.
profile: GCIProfile of the user that marks this task as needs more work.
"""
task.status = 'NeedsWork'
comment_props = {
'parent': task,
'title': DEF_NEEDS_WORK_TITLE,
'content': DEF_NEEDS_WORK,
'created_by': profile.key.parent().to_old_key()
}
comment = GCIComment(**comment_props)
comment_txn = comment_logic.storeAndNotifyTxn(comment)
def needsWorkTaskTxn():
task.put()
comment_txn()
return db.run_in_transaction(needsWorkTaskTxn)
def extendDeadline(task, delta, profile):
"""Extends the deadline of a task.
Args:
task: The task to extend the deadline for.
delta: The timedelta object to be added to the current deadline.
profile: GCIProfile of the user that extends the deadline.
"""
if task.deadline:
deadline = task.deadline + delta
else:
deadline = datetime.datetime.utcnow() + delta
task.deadline = deadline
comment_props = {
'parent': task,
'title': DEF_EXTEND_DEADLINE_TITLE,
'content': DEF_EXTEND_DEADLINE %(delta.days, delta.seconds/3600),
'created_by': profile.key.parent().to_old_key()
}
comment = GCIComment(**comment_props)
comment_txn = comment_logic.storeAndNotifyTxn(comment)
def extendDeadlineTxn():
task.put()
comment_txn()
return db.run_in_transaction(extendDeadlineTxn)
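# Worked example (illustrative only): for delta = datetime.timedelta(days=2,
# hours=6), delta.days == 2 and delta.seconds == 21600, so delta.seconds/3600
# evaluates to 6 and the comment reads "extended by 2 days and 6 hours";
# delta.seconds only holds the sub-day remainder, whole days are reported
# separately.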
def claimRequestTask(task, student):
"""Used when a student requests to claim a task.
  Updates the status of the task and places a comment notifying the org
that someone wants to work on this task.
Args:
task: The task to claim.
student: Profile of the student that wants to claim the task.
"""
task.status = 'ClaimRequested'
task.student = student.key.to_old_key()
if student.key.to_old_key() not in task.subscribers:
task.subscribers.append(student.key.to_old_key())
comment_props = {
'parent': task,
'title': DEF_CLAIM_REQUEST_TITLE,
'content': DEF_CLAIM_REQUEST,
'created_by': student.key.parent().to_old_key()
}
comment = GCIComment(**comment_props)
comment_txn = comment_logic.storeAndNotifyTxn(comment)
def claimRequestTaskTxn():
task.put()
comment_txn()
return db.run_in_transaction(claimRequestTaskTxn)
def unclaimTask(task):
"""Used when a student requests to unclaim a task.
Args:
task: The task to unclaim.
"""
student_key = task_model.GCITask.student.get_value_for_datastore(task)
task.student = None
task.status = task_model.REOPENED
task.deadline = None
comment_props = {
'parent': task,
'title': DEF_UNCLAIMED_TITLE,
'content': DEF_UNCLAIMED,
'created_by': student_key.parent()
}
comment = GCIComment(**comment_props)
comment_txn = comment_logic.storeAndNotifyTxn(comment)
def unclaimTaskTxn():
task.put()
comment_txn()
return db.run_in_transaction(unclaimTaskTxn)
def sendForReview(task, student):
"""Send in a task for review.
Args:
task: The task to send for review.
student: Profile of the student that is sending in the work.
"""
task.status = 'NeedsReview'
comment_props = {
'parent': task,
'title': DEF_SEND_FOR_REVIEW_TITLE,
'content': DEF_SEND_FOR_REVIEW,
'created_by': student.key.parent().to_old_key(),
}
comment = GCIComment(**comment_props)
comment_txn = comment_logic.storeAndNotifyTxn(comment)
def sendForReviewTxn():
task.put()
comment_txn()
return db.run_in_transaction(sendForReviewTxn)
def updateTaskStatus(task):
"""Method used to transit a task from a state to another state
depending on the context. Whenever the deadline has passed.
To be called by the automated system running on Appengine tasks or
whenever the public page for the task is loaded in case the Appengine task
framework is running late.
Args:
task: The task_model.GCITask entity
Returns:
Boolean indicating whether the task has been updated.
"""
if not task.deadline or datetime.datetime.now() < task.deadline:
# do nothing if there is no deadline or it hasn't passed yet
return False
if task.program.timeline.stop_all_work_deadline < datetime.datetime.now():
# do not change the status of the task after the work deadline ends
return False
# the transition depends on the current state of the task
transit_func = STATE_TRANSITIONS.get(task.status, None)
if not transit_func:
logging.warning('Invalid state to transfer from %s', task.status)
return False
# update the task and create a comment
task, comment = transit_func(task)
_storeTaskAndComment(task, comment)
if task.deadline:
# only if there is a deadline set we should schedule another task
_spawnUpdateTask(task)
return True
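# Illustrative flow (sketch, mirroring the code above): a task in 'Claimed'
# state whose deadline has passed is dispatched via STATE_TRANSITIONS to
# transitFromClaimed, which moves it to 'ActionNeeded' and extends the deadline
# by 24 hours; because a deadline is still set, _spawnUpdateTask schedules
# another update for the new deadline.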
def _storeTaskAndComment(task, comment):
"""Stores the task and comment and notifies those that are interested in a
single transaction.
"""
comment_txn = comment_logic.storeAndNotifyTxn(comment)
def updateTaskAndCreateCommentTxn():
db.put(task)
comment_txn()
db.run_in_transaction(updateTaskAndCreateCommentTxn)
def transitFromClaimed(task):
"""Makes a state transition of a GCI Task from Claimed state
to ActionNeeded.
Args:
task: The task_model.GCITask entity
"""
# deadline is extended by 24 hours.
task.status = 'ActionNeeded'
task.deadline = task.deadline + datetime.timedelta(hours=24)
comment_props = {
'parent': task,
'title': DEF_ACTION_NEEDED_TITLE,
'content': DEF_ACTION_NEEDED,
}
comment = GCIComment(**comment_props)
return task, comment
def transitFromNeedsReview(task):
"""Makes a state transition of a GCI Task that is in NeedsReview state.
This state transition is special since it actually only clears the deadline
  field and does not change the value of the status field. A task is in this state
when work has been submitted and it has not been reviewed before the original
deadline runs out.
Args:
task: The task_model.GCITask entity
"""
# Clear the deadline since mentors are not forced to review work within a
# certain period.
task.deadline = None
comment_props = {
'parent': task,
'title': DEF_NO_MORE_WORK_TITLE,
'content': DEF_NO_MORE_WORK,
}
comment = GCIComment(**comment_props)
return task, comment
def transitFromActionNeeded(task):
"""Makes a state transition of a GCI Task from ActionNeeded state
to Reopened state.
Args:
task: The task_model.GCITask entity
"""
# reopen the task
task.student = None
task.status = task_model.REOPENED
task.deadline = None
comment_props = {
'parent': task,
'title': DEF_REOPENED_TITLE,
'content': DEF_REOPENED,
}
comment = GCIComment(**comment_props)
return task, comment
def transitFromNeedsWork(task):
"""Makes a state transition of a GCI Task from NeedsWork state
to Reopened state.
A task that has been marked as Needs(more)Work will NOT get a deadline
extension and will be reopened immediately.
Args:
task: The task_model.GCITask entity
"""
task.student = None
task.status = task_model.REOPENED
task.deadline = None
comment_props = {
'parent': task,
'title': DEF_REOPENED_TITLE,
'content': DEF_REOPENED,
}
comment = GCIComment(**comment_props)
return task, comment
def delete(task):
"""Delete existing task from datastore.
"""
def task_delete_txn(task):
"""Performs all necessary operations in a single transaction when a task
is deleted.
"""
to_delete = []
to_delete += GCIComment.all(keys_only=True).ancestor(task)
to_delete += GCIWorkSubmission.all(keys_only=True).ancestor(task)
to_delete += [task.key()]
db.delete(to_delete)
db.run_in_transaction(task_delete_txn, task)
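# Note (illustrative): comments and work submissions are stored as children of
# the task (see the 'parent': task comment properties above), so their keys can
# be collected with ancestor queries and deleted together with the task key in
# a single transaction.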
def getFeaturedTask(program):
"""Return a featured task for a given program.
Args:
program: entity representing the program from which the featured
tasks should be fetched
"""
# expiry time to fetch the new featured gci task entity
# the current expiry time is 2 hours.
expiry_time = datetime.timedelta(seconds=7200)
def queryForTask():
query = task_model.GCITask.all()
query.filter('is_featured', True)
query.filter('program', program)
return query
q = queryForTask()
# the cache stores a 3-tuple in the order gci task entity,
# cursor and the last time the cache was updated
fgt_cache = memcache.get('featured_gci_task' + program.key().name())
if fgt_cache:
cached_task, cached_cursor, cache_expiry_time = fgt_cache
if (cached_task and not
datetime.datetime.now() > cache_expiry_time + expiry_time):
return cached_task
else:
q.with_cursor(cached_cursor)
if q.count() == 0:
q = queryForTask()
for task in q:
if task.status in task_model.CLAIMABLE + task_model.ACTIVE_CLAIMED_TASK:
new_task = task
break
else:
return None
new_cursor = q.cursor()
memcache.set(
      key='featured_gci_task' + program.key().name(),
value=(new_task, new_cursor, datetime.datetime.now()))
return new_task
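# Note (illustrative): the featured-task cache rotates through claimable tasks
# with a datastore cursor. Once the cached entry is older than expiry_time
# (two hours), the query resumes from the cached cursor and the next claimable
# or actively claimed task is cached, so the featured task changes over time
# instead of always being the first query result.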
def setTaskStatus(task_key, status):
"""Set the status of the specified task in a transaction.
"""
def setTaskStatusTxn():
task = task_model.GCITask.get(task_key)
task.status = status
task.put()
db.run_in_transaction(setTaskStatusTxn)
# define the state transition functions
STATE_TRANSITIONS = {
'Claimed': transitFromClaimed,
'NeedsReview': transitFromNeedsReview,
'ActionNeeded': transitFromActionNeeded,
'NeedsWork': transitFromNeedsWork,
}
# useful queries for tasks
def queryClaimableTasksForProgram(program):
q = task_model.GCITask.all()
q.filter('program', program)
q.filter('status IN', task_model.CLAIMABLE)
return q
def queryAllTasksClosedByStudent(profile, keys_only=False):
"""Returns a query for all the tasks that have been closed by the
specified profile.
"""
if not profile.is_student:
raise ValueError('Only students can be queried for closed tasks.')
return task_model.GCITask.all(keys_only=keys_only).filter(
'student', profile.key.to_old_key()).filter('status', 'Closed')
def queryCurrentTaskForStudent(profile, keys_only=False):
"""Returns a query for the task that the specified student
is currently working on.
"""
if not profile.is_student:
raise ValueError('Only students can be queried for their current task.')
return task_model.GCITask.all(keys_only=keys_only).filter(
'student', profile.key.to_old_key()).filter('status != ', 'Closed')
def querySubscribedTasksForProfile(profile_key, keys_only=False):
"""Returns a query for tasks that the specified profile is subscribed to."""
return task_model.GCITask.all(keys_only=keys_only).filter(
'subscribers', profile_key.to_old_key())
def queryForStudentAndOrganizationAndStatus(student_key, org_key, status,
keys_only=False):
"""Returns a query for all tasks for the specified student,
organization and status.
Args:
student_key: Student key.
org_key: Organization key.
    status: A status or a list of allowed statuses of the tasks to query.
Returns:
A query to fetch all tasks with the specified properties.
"""
  query = task_model.GCITask.all(keys_only=keys_only)
query.filter('student', student_key)
query.filter('org', org_key)
if isinstance(status, list):
query.filter('status IN', status)
else:
query.filter('status', status)
return query
def queryForOrganization(org, keys_only=False):
"""Returns a query for all tasks for the specified organization.
"""
  return task_model.GCITask.all(keys_only=keys_only).filter('org', org)
|
|
"""AuthZ Adapter implementations of grading managers."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from . import sessions
from ..osid import managers as osid_managers
from ..osid.osid_errors import Unimplemented, OperationFailed, Unsupported
from ..primitives import Id
from ..utilities import raise_null_argument
from dlkit.manager_impls.grading import managers as grading_managers
class GradingProfile(osid_managers.OsidProfile, grading_managers.GradingProfile):
"""Adapts underlying GradingProfile methodswith authorization checks."""
def __init__(self):
osid_managers.OsidProfile.__init__(self)
def _get_hierarchy_session(self, proxy=None):
if proxy is not None:
try:
return self._provider_manager.get_gradebook_hierarchy_session(proxy)
except Unimplemented:
return None
try:
return self._provider_manager.get_gradebook_hierarchy_session()
except Unimplemented:
return None
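    # Note (illustrative): this try/except Unimplemented pattern, also used by
    # the session getters below, degrades gracefully -- when the underlying
    # provider does not implement an optional session, None is passed through
    # instead of propagating the error.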
def supports_grade_system_lookup(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_grade_system_lookup()
def supports_grade_system_query(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_grade_system_query()
def supports_grade_system_admin(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_grade_system_admin()
def supports_grade_system_gradebook(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_grade_system_gradebook()
def supports_grade_system_gradebook_assignment(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_grade_system_gradebook_assignment()
def supports_grade_entry_lookup(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_grade_entry_lookup()
def supports_grade_entry_query(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_grade_entry_query()
def supports_grade_entry_admin(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_grade_entry_admin()
def supports_gradebook_column_lookup(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_gradebook_column_lookup()
def supports_gradebook_column_query(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_gradebook_column_query()
def supports_gradebook_column_admin(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_gradebook_column_admin()
def supports_gradebook_column_gradebook(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_gradebook_column_gradebook()
def supports_gradebook_column_gradebook_assignment(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_gradebook_column_gradebook_assignment()
def supports_gradebook_lookup(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_gradebook_lookup()
def supports_gradebook_admin(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_gradebook_admin()
def supports_gradebook_hierarchy(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_gradebook_hierarchy()
def supports_gradebook_hierarchy_design(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_gradebook_hierarchy_design()
def get_grade_record_types(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_grade_record_types()
grade_record_types = property(fget=get_grade_record_types)
def get_grade_system_record_types(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_grade_system_record_types()
grade_system_record_types = property(fget=get_grade_system_record_types)
def get_grade_system_search_record_types(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_grade_system_search_record_types()
grade_system_search_record_types = property(fget=get_grade_system_search_record_types)
def get_grade_entry_record_types(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_grade_entry_record_types()
grade_entry_record_types = property(fget=get_grade_entry_record_types)
def get_grade_entry_search_record_types(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_grade_entry_search_record_types()
grade_entry_search_record_types = property(fget=get_grade_entry_search_record_types)
def get_gradebook_column_record_types(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_gradebook_column_record_types()
gradebook_column_record_types = property(fget=get_gradebook_column_record_types)
def get_gradebook_column_search_record_types(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_gradebook_column_search_record_types()
gradebook_column_search_record_types = property(fget=get_gradebook_column_search_record_types)
def get_gradebook_column_summary_record_types(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_gradebook_column_summary_record_types()
gradebook_column_summary_record_types = property(fget=get_gradebook_column_summary_record_types)
def get_gradebook_record_types(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_gradebook_record_types()
gradebook_record_types = property(fget=get_gradebook_record_types)
def get_gradebook_search_record_types(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_gradebook_search_record_types()
gradebook_search_record_types = property(fget=get_gradebook_search_record_types)
class GradingManager(osid_managers.OsidManager, GradingProfile, grading_managers.GradingManager):
"""Adapts underlying GradingManager methodswith authorization checks."""
def __init__(self):
GradingProfile.__init__(self)
def initialize(self, runtime):
osid_managers.OsidManager.initialize(self, runtime)
config = self._my_runtime.get_configuration()
parameter_id = Id('parameter:gradingProviderImpl@authz_adapter')
provider_impl = config.get_value_by_parameter(parameter_id).get_string_value()
self._provider_manager = runtime.get_manager('GRADING', provider_impl)
# need to add version argument
def get_grade_system_lookup_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_grade_system_query_session()
query_session.use_federated_gradebook_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'GradeSystemLookupSession')(
provider_session=self._provider_manager.get_grade_system_lookup_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
grade_system_lookup_session = property(fget=get_grade_system_lookup_session)
@raise_null_argument
def get_grade_system_lookup_session_for_gradebook(self, gradebook_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_grade_system_query_session_for_gradebook(gradebook_id)
query_session.use_federated_gradebook_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'GradeSystemLookupSession')(
provider_session=self._provider_manager.get_grade_system_lookup_session_for_gradebook(gradebook_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
def get_grade_system_query_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_grade_system_query_session()
query_session.use_federated_gradebook_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'GradeSystemQuerySession')(
provider_session=self._provider_manager.get_grade_system_query_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
grade_system_query_session = property(fget=get_grade_system_query_session)
@raise_null_argument
def get_grade_system_query_session_for_gradebook(self, gradebook_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_grade_system_query_session_for_gradebook(gradebook_id)
query_session.use_federated_gradebook_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'GradeSystemQuerySession')(
provider_session=self._provider_manager.get_grade_system_query_session_for_gradebook(gradebook_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
def get_grade_system_admin_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'GradeSystemAdminSession')(
provider_session=self._provider_manager.get_grade_system_admin_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
grade_system_admin_session = property(fget=get_grade_system_admin_session)
@raise_null_argument
def get_grade_system_admin_session_for_gradebook(self, gradebook_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'GradeSystemAdminSession')(
provider_session=self._provider_manager.get_grade_system_admin_session_for_gradebook(gradebook_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
def get_grade_system_gradebook_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'GradeSystemGradebookSession')(
provider_session=self._provider_manager.get_grade_system_gradebook_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
grade_system_gradebook_session = property(fget=get_grade_system_gradebook_session)
def get_grade_system_gradebook_assignment_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'GradeSystemGradebookAssignmentSession')(
provider_session=self._provider_manager.get_grade_system_gradebook_assignment_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
grade_system_gradebook_assignment_session = property(fget=get_grade_system_gradebook_assignment_session)
def get_grade_entry_lookup_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_grade_entry_query_session()
query_session.use_federated_gradebook_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'GradeEntryLookupSession')(
provider_session=self._provider_manager.get_grade_entry_lookup_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
grade_entry_lookup_session = property(fget=get_grade_entry_lookup_session)
@raise_null_argument
def get_grade_entry_lookup_session_for_gradebook(self, gradebook_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_grade_entry_query_session_for_gradebook(gradebook_id)
query_session.use_federated_gradebook_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'GradeEntryLookupSession')(
provider_session=self._provider_manager.get_grade_entry_lookup_session_for_gradebook(gradebook_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
def get_grade_entry_query_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_grade_entry_query_session()
query_session.use_federated_gradebook_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'GradeEntryQuerySession')(
provider_session=self._provider_manager.get_grade_entry_query_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
grade_entry_query_session = property(fget=get_grade_entry_query_session)
@raise_null_argument
def get_grade_entry_query_session_for_gradebook(self, gradebook_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_grade_entry_query_session_for_gradebook(gradebook_id)
query_session.use_federated_gradebook_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'GradeEntryQuerySession')(
provider_session=self._provider_manager.get_grade_entry_query_session_for_gradebook(gradebook_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
def get_grade_entry_admin_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'GradeEntryAdminSession')(
provider_session=self._provider_manager.get_grade_entry_admin_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
grade_entry_admin_session = property(fget=get_grade_entry_admin_session)
@raise_null_argument
def get_grade_entry_admin_session_for_gradebook(self, gradebook_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'GradeEntryAdminSession')(
provider_session=self._provider_manager.get_grade_entry_admin_session_for_gradebook(gradebook_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
def get_gradebook_column_lookup_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_gradebook_column_query_session()
query_session.use_federated_gradebook_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'GradebookColumnLookupSession')(
provider_session=self._provider_manager.get_gradebook_column_lookup_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
gradebook_column_lookup_session = property(fget=get_gradebook_column_lookup_session)
@raise_null_argument
def get_gradebook_column_lookup_session_for_gradebook(self, gradebook_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_gradebook_column_query_session_for_gradebook(gradebook_id)
query_session.use_federated_gradebook_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'GradebookColumnLookupSession')(
provider_session=self._provider_manager.get_gradebook_column_lookup_session_for_gradebook(gradebook_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
def get_gradebook_column_query_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_gradebook_column_query_session()
query_session.use_federated_gradebook_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'GradebookColumnQuerySession')(
provider_session=self._provider_manager.get_gradebook_column_query_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
gradebook_column_query_session = property(fget=get_gradebook_column_query_session)
@raise_null_argument
def get_gradebook_column_query_session_for_gradebook(self, gradebook_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_gradebook_column_query_session_for_gradebook(gradebook_id)
query_session.use_federated_gradebook_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'GradebookColumnQuerySession')(
provider_session=self._provider_manager.get_gradebook_column_query_session_for_gradebook(gradebook_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
def get_gradebook_column_admin_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'GradebookColumnAdminSession')(
provider_session=self._provider_manager.get_gradebook_column_admin_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
gradebook_column_admin_session = property(fget=get_gradebook_column_admin_session)
@raise_null_argument
def get_gradebook_column_admin_session_for_gradebook(self, gradebook_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'GradebookColumnAdminSession')(
provider_session=self._provider_manager.get_gradebook_column_admin_session_for_gradebook(gradebook_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
def get_gradebook_column_gradebook_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'GradebookColumnGradebookSession')(
provider_session=self._provider_manager.get_gradebook_column_gradebook_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
gradebook_column_gradebook_session = property(fget=get_gradebook_column_gradebook_session)
def get_gradebook_column_gradebook_assignment_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'GradebookColumnGradebookAssignmentSession')(
provider_session=self._provider_manager.get_gradebook_column_gradebook_assignment_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
gradebook_column_gradebook_assignment_session = property(fget=get_gradebook_column_gradebook_assignment_session)
def get_gradebook_lookup_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'GradebookLookupSession')(
provider_session=self._provider_manager.get_gradebook_lookup_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
gradebook_lookup_session = property(fget=get_gradebook_lookup_session)
def get_gradebook_admin_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'GradebookAdminSession')(
provider_session=self._provider_manager.get_gradebook_admin_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
gradebook_admin_session = property(fget=get_gradebook_admin_session)
def get_gradebook_hierarchy_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'GradebookHierarchySession')(
provider_session=self._provider_manager.get_gradebook_hierarchy_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
gradebook_hierarchy_session = property(fget=get_gradebook_hierarchy_session)
def get_gradebook_hierarchy_design_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'GradebookHierarchyDesignSession')(
provider_session=self._provider_manager.get_gradebook_hierarchy_design_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
gradebook_hierarchy_design_session = property(fget=get_gradebook_hierarchy_design_session)
def get_grading_batch_manager(self):
raise Unimplemented()
grading_batch_manager = property(fget=get_grading_batch_manager)
def get_grading_calculation_manager(self):
raise Unimplemented()
grading_calculation_manager = property(fget=get_grading_calculation_manager)
def get_grading_transform_manager(self):
raise Unimplemented()
grading_transform_manager = property(fget=get_grading_transform_manager)
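# Hypothetical usage sketch (not part of the dlkit specification; `runtime` is
# assumed to be provided by the hosting application):
#
#     manager = GradingManager()
#     manager.initialize(runtime)
#     lookup_session = manager.get_gradebook_lookup_session()
#
# Each session obtained this way wraps the provider session together with the
# authz, override-lookup and hierarchy helpers wired in by the templates above.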
class GradingProxyManager(osid_managers.OsidProxyManager, GradingProfile, grading_managers.GradingProxyManager):
"""Adapts underlying GradingProxyManager methodswith authorization checks."""
def __init__(self):
GradingProfile.__init__(self)
def initialize(self, runtime):
osid_managers.OsidProxyManager.initialize(self, runtime)
config = self._my_runtime.get_configuration()
parameter_id = Id('parameter:gradingProviderImpl@authz_adapter')
provider_impl = config.get_value_by_parameter(parameter_id).get_string_value()
self._provider_manager = runtime.get_proxy_manager('GRADING', provider_impl)
# need to add version argument
@raise_null_argument
def get_grade_system_lookup_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_grade_system_query_session(proxy)
query_session.use_federated_gradebook_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'GradeSystemLookupSession')(
provider_session=self._provider_manager.get_grade_system_lookup_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_grade_system_lookup_session_for_gradebook(self, gradebook_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_grade_system_query_session_for_gradebook(gradebook_id, proxy)
query_session.use_federated_gradebook_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'GradeSystemLookupSession')(
provider_session=self._provider_manager.get_grade_system_lookup_session_for_gradebook(gradebook_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_grade_system_query_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_grade_system_query_session(proxy)
query_session.use_federated_gradebook_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'GradeSystemQuerySession')(
provider_session=self._provider_manager.get_grade_system_query_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_grade_system_query_session_for_gradebook(self, gradebook_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_grade_system_query_session_for_gradebook(gradebook_id, proxy)
query_session.use_federated_gradebook_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'GradeSystemQuerySession')(
provider_session=self._provider_manager.get_grade_system_query_session_for_gradebook(gradebook_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_grade_system_admin_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'GradeSystemAdminSession')(
provider_session=self._provider_manager.get_grade_system_admin_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_grade_system_admin_session_for_gradebook(self, gradebook_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'GradeSystemAdminSession')(
provider_session=self._provider_manager.get_grade_system_admin_session_for_gradebook(gradebook_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_grade_system_gradebook_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'GradeSystemGradebookSession')(
provider_session=self._provider_manager.get_grade_system_gradebook_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_grade_system_gradebook_assignment_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'GradeSystemGradebookAssignmentSession')(
provider_session=self._provider_manager.get_grade_system_gradebook_assignment_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_grade_entry_lookup_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_grade_entry_query_session(proxy)
query_session.use_federated_gradebook_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'GradeEntryLookupSession')(
provider_session=self._provider_manager.get_grade_entry_lookup_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_grade_entry_lookup_session_for_gradebook(self, gradebook_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_grade_entry_query_session_for_gradebook(gradebook_id, proxy)
query_session.use_federated_gradebook_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'GradeEntryLookupSession')(
provider_session=self._provider_manager.get_grade_entry_lookup_session_for_gradebook(gradebook_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_grade_entry_query_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_grade_entry_query_session(proxy)
query_session.use_federated_gradebook_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'GradeEntryQuerySession')(
provider_session=self._provider_manager.get_grade_entry_query_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_grade_entry_query_session_for_gradebook(self, gradebook_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_grade_entry_query_session_for_gradebook(gradebook_id, proxy)
query_session.use_federated_gradebook_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'GradeEntryQuerySession')(
provider_session=self._provider_manager.get_grade_entry_query_session_for_gradebook(gradebook_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_grade_entry_admin_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'GradeEntryAdminSession')(
provider_session=self._provider_manager.get_grade_entry_admin_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_grade_entry_admin_session_for_gradebook(self, gradebook_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'GradeEntryAdminSession')(
provider_session=self._provider_manager.get_grade_entry_admin_session_for_gradebook(gradebook_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_gradebook_column_lookup_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_gradebook_column_query_session(proxy)
query_session.use_federated_gradebook_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'GradebookColumnLookupSession')(
provider_session=self._provider_manager.get_gradebook_column_lookup_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_gradebook_column_lookup_session_for_gradebook(self, gradebook_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_gradebook_column_query_session_for_gradebook(gradebook_id, proxy)
query_session.use_federated_gradebook_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'GradebookColumnLookupSession')(
provider_session=self._provider_manager.get_gradebook_column_lookup_session_for_gradebook(gradebook_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_gradebook_column_query_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_gradebook_column_query_session(proxy)
query_session.use_federated_gradebook_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'GradebookColumnQuerySession')(
provider_session=self._provider_manager.get_gradebook_column_query_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_gradebook_column_query_session_for_gradebook(self, gradebook_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_gradebook_column_query_session_for_gradebook(gradebook_id, proxy)
query_session.use_federated_gradebook_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'GradebookColumnQuerySession')(
provider_session=self._provider_manager.get_gradebook_column_query_session_for_gradebook(gradebook_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_gradebook_column_admin_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'GradebookColumnAdminSession')(
provider_session=self._provider_manager.get_gradebook_column_admin_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_gradebook_column_admin_session_for_gradebook(self, gradebook_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'GradebookColumnAdminSession')(
provider_session=self._provider_manager.get_gradebook_column_admin_session_for_gradebook(gradebook_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_gradebook_column_gradebook_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'GradebookColumnGradebookSession')(
provider_session=self._provider_manager.get_gradebook_column_gradebook_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_gradebook_column_gradebook_assignment_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'GradebookColumnGradebookAssignmentSession')(
provider_session=self._provider_manager.get_gradebook_column_gradebook_assignment_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_gradebook_lookup_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'GradebookLookupSession')(
provider_session=self._provider_manager.get_gradebook_lookup_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_gradebook_admin_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'GradebookAdminSession')(
provider_session=self._provider_manager.get_gradebook_admin_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_gradebook_hierarchy_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'GradebookHierarchySession')(
provider_session=self._provider_manager.get_gradebook_hierarchy_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_gradebook_hierarchy_design_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'GradebookHierarchyDesignSession')(
provider_session=self._provider_manager.get_gradebook_hierarchy_design_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
def get_grading_batch_proxy_manager(self):
raise Unimplemented()
grading_batch_proxy_manager = property(fget=get_grading_batch_proxy_manager)
def get_grading_calculation_proxy_manager(self):
raise Unimplemented()
grading_calculation_proxy_manager = property(fget=get_grading_calculation_proxy_manager)
def get_grading_transform_proxy_manager(self):
raise Unimplemented()
grading_transform_proxy_manager = property(fget=get_grading_transform_proxy_manager)
|
|
from collections import namedtuple
import heapq as hq
import logging
import numpy
from operator import attrgetter
import pandas as pd
from py_entitymatching.utils.validation_helper import validate_object_type
import py_entitymatching as em
import py_entitymatching.catalog.catalog_manager as cm
logger = logging.getLogger(__name__)
def backup_debug_blocker(candset, ltable, rtable, output_size=200,
attr_corres=None, verbose=False):
"""
    This is the old version of the blocker debugger. It is not recommended
    to use this version unless the new blocker debugger is not working properly.
This function debugs the blocker output and reports a list of potential
matches that are discarded by a blocker (or a blocker sequence).
Specifically, this function takes in the two input tables for
matching and the candidate set returned by a blocker (or a blocker
sequence), and produces a list of tuple pairs which are rejected by the
blocker but with high potential of being true matches.
Args:
candset (DataFrame): The candidate set generated by
applying the blocker on the ltable and rtable.
        ltable, rtable (DataFrame): The input DataFrames that are used to
generate the blocker output.
output_size (int): The number of tuple pairs that will be
returned (defaults to 200).
attr_corres (list): A list of attribute correspondence tuples.
When ltable and rtable have different schemas, or the same
schema but different words describing the attributes, the
user needs to manually specify the attribute correspondence.
Each element in this list should be a tuple of strings
which are the corresponding attributes in ltable and rtable.
The default value is None, and if the user doesn't specify
this list, a built-in function for finding the
            attribute correspondence list will be called. But we highly
            recommend that users manually specify the attribute
correspondences, unless the schemas of ltable and rtable are
identical (defaults to None).
verbose (boolean): A flag to indicate whether the debug information
should be logged (defaults to False).
Returns:
A pandas DataFrame with 'output_size' number of rows. Each row in the
        DataFrame is a tuple pair which has the potential of being a true
match, but is rejected by the blocker (meaning that the tuple
pair is in the Cartesian product of ltable and rtable subtracted
by the candidate set). The fields in the returned DataFrame are
from ltable and rtable, which are useful for determining similar
tuple pairs.
Raises:
AssertionError: If `ltable`, `rtable` or `candset` is not of type
pandas DataFrame.
AssertionError: If `ltable` or `rtable` is empty (size of 0).
        AssertionError: If the `output_size` parameter is less than or equal
to 0.
AssertionError: If the attribute correspondence (`attr_corres`) list is
not in the correct format (a list of tuples).
AssertionError: If the attribute correspondence (`attr_corres`)
cannot be built correctly.
Examples:
>>> import py_entitymatching as em
>>> ob = em.OverlapBlocker()
>>> C = ob.block_tables(A, B, l_overlap_attr='title', r_overlap_attr='title', overlap_size=3)
>>> corres = [('ID','ssn'), ('name', 'ename'), ('address', 'location'),('zipcode', 'zipcode')]
>>> D = em.backup_debug_blocker(C, A, B, attr_corres=corres)
>>> import py_entitymatching as em
>>> ob = em.OverlapBlocker()
>>> C = ob.block_tables(A, B, l_overlap_attr='name', r_overlap_attr='name', overlap_size=3)
>>> D = em.backup_debug_blocker(C, A, B, output_size=150)
"""
# Check input types.
_validate_types(ltable, rtable, candset, output_size,
attr_corres, verbose)
# Check table size.
if len(ltable) == 0:
raise AssertionError('Error: ltable is empty!')
if len(rtable) == 0:
raise AssertionError('Error: rtable is empty!')
# Check the value of output size.
if output_size <= 0:
raise AssertionError('The input parameter: \'output_size\''
' is less than or equal to 0. Nothing needs'
' to be done!')
# Get table metadata.
l_key, r_key = cm.get_keys_for_ltable_rtable(ltable, rtable, logger, verbose)
# Validate metadata
cm._validate_metadata_for_table(ltable, l_key, 'ltable', logger, verbose)
cm._validate_metadata_for_table(rtable, r_key, 'rtable', logger, verbose)
    # Check the user input field correspondence list (if it exists) and get
    # the raw version of our internal correspondence list.
_check_input_field_correspondence_list(ltable, rtable, attr_corres)
corres_list = _get_field_correspondence_list(ltable, rtable,
l_key, r_key, attr_corres)
# Build the (col_name: col_index) dict to speed up locating a field in
# the schema.
ltable_col_dict = _build_col_name_index_dict(ltable)
rtable_col_dict = _build_col_name_index_dict(rtable)
# Filter correspondence list to remove numeric types. We only consider
# string types for document concatenation.
_filter_corres_list(ltable, rtable, l_key, r_key,
ltable_col_dict, rtable_col_dict, corres_list)
# Get field filtered new table.
ltable_filtered, rtable_filtered = _get_filtered_table(
ltable, rtable, l_key, r_key, corres_list)
# Select a subset of fields with high scores.
feature_list = _select_features(ltable_filtered, rtable_filtered, l_key)
# Map the record key value to its index in the table.
lrecord_id_to_index_map = _get_record_id_to_index_map(ltable_filtered, l_key)
rrecord_id_to_index_map = _get_record_id_to_index_map(rtable_filtered, r_key)
# Build the tokenized record list delimited by a white space on the
# selected fields.
lrecord_list = _get_tokenized_table(ltable_filtered, l_key, feature_list)
rrecord_list = _get_tokenized_table(rtable_filtered, r_key, feature_list)
# Reformat the candidate set from a dataframe to a list of record index
# tuple pair.
new_formatted_candidate_set = _index_candidate_set(
candset, lrecord_id_to_index_map, rrecord_id_to_index_map, verbose)
# Build the token order according to token's frequency. To run a
# prefix filtering based similarity join algorithm, we first need
# the global token order.
order_dict = {}
_build_global_token_order(lrecord_list, order_dict)
_build_global_token_order(rrecord_list, order_dict)
# Sort the tokens in each record by the global order.
_sort_record_tokens_by_global_order(lrecord_list, order_dict)
_sort_record_tokens_by_global_order(rrecord_list, order_dict)
# Run the topk similarity join.
topk_heap = _topk_sim_join(
lrecord_list, rrecord_list, new_formatted_candidate_set, output_size)
# Assemble the topk record list to a dataframe.
ret_dataframe = _assemble_topk_table(topk_heap, ltable_filtered, rtable_filtered)
return ret_dataframe
# Validate the types of input parameters.
def _validate_types(ltable, rtable, candidate_set, output_size,
attr_corres, verbose):
validate_object_type(ltable, pd.DataFrame, 'Input left table')
validate_object_type(rtable, pd.DataFrame, 'Input right table')
validate_object_type(candidate_set, pd.DataFrame, 'Input candidate set')
validate_object_type(output_size, int, 'Output size')
if attr_corres is not None:
if not isinstance(attr_corres, list):
logging.error('Input attribute correspondence is not of'
' type list')
raise AssertionError('Input attribute correspondence is'
' not of type list')
for pair in attr_corres:
if not isinstance(pair, tuple):
logging.error('Pair in attribute correspondence list is not'
' of type tuple')
raise AssertionError('Pair in attribute correspondence list'
' is not of type tuple')
if not isinstance(verbose, bool):
logger.error('Parameter verbose is not of type bool')
raise AssertionError('Parameter verbose is not of type bool')
# Assemble the topk heap to a dataframe.
def _assemble_topk_table(topk_heap, ltable, rtable, ret_key='_id',
l_output_prefix='ltable_', r_output_prefix='rtable_'):
topk_heap.sort(key=lambda tup: tup[0], reverse=True)
ret_data_col_name_list = ['_id', 'similarity']
ltable_col_names = list(ltable.columns)
rtable_col_names = list(rtable.columns)
lkey = em.get_key(ltable)
rkey = em.get_key(rtable)
lkey_index = 0
rkey_index = 0
for i in range(len(ltable_col_names)):
if ltable_col_names[i] == lkey:
lkey_index = i
for i in range(len(rtable_col_names)):
if rtable_col_names[i] == rkey:
rkey_index = i
ret_data_col_name_list.append(l_output_prefix + lkey)
ret_data_col_name_list.append(r_output_prefix + rkey)
ltable_col_names.remove(lkey)
rtable_col_names.remove(rkey)
for i in range(len(ltable_col_names)):
ret_data_col_name_list.append(l_output_prefix + ltable_col_names[i])
for i in range(len(rtable_col_names)):
ret_data_col_name_list.append(r_output_prefix + rtable_col_names[i])
ret_tuple_list = []
for i in range(len(topk_heap)):
tup = topk_heap[i]
lrecord = list(ltable.loc[tup[1]])
rrecord = list(rtable.loc[tup[2]])
ret_tuple = [i, tup[0]]
ret_tuple.append(lrecord[lkey_index])
ret_tuple.append(rrecord[rkey_index])
for j in range(len(lrecord)):
if j != lkey_index:
ret_tuple.append(lrecord[j])
for j in range(len(rrecord)):
if j != rkey_index:
ret_tuple.append(rrecord[j])
ret_tuple_list.append(ret_tuple)
data_frame = pd.DataFrame(ret_tuple_list)
# When the ret data frame is empty, we cannot assign column names.
if len(data_frame) == 0:
return data_frame
data_frame.columns = ret_data_col_name_list
lkey = em.get_key(ltable)
rkey = em.get_key(rtable)
cm.set_candset_properties(data_frame, ret_key, l_output_prefix + lkey,
r_output_prefix + rkey, ltable, rtable)
return data_frame
# Topk similarity join wrapper.
def _topk_sim_join(lrecord_list, rrecord_list, cand_set, output_size):
# Build prefix events.
prefix_events = _generate_prefix_events(lrecord_list, rrecord_list)
topk_heap = _topk_sim_join_impl(lrecord_list, rrecord_list,
prefix_events, cand_set, output_size)
return topk_heap
# Implement topk similarity join. Refer to "top-k set similarity join"
# by Xiao et al. for details.
def _topk_sim_join_impl(lrecord_list, rrecord_list, prefix_events,
cand_set, output_size):
total_compared_pairs = 0
compared_set = set()
l_inverted_index = {}
r_inverted_index = {}
topk_heap = []
while len(prefix_events) > 0:
if len(topk_heap) == output_size and\
topk_heap[0][0] >= prefix_events[0][0] * -1:
break
event = hq.heappop(prefix_events)
table_indicator = event[1]
rec_idx = event[2]
tok_idx = event[3]
if table_indicator == 0:
token = lrecord_list[rec_idx][tok_idx]
if token in r_inverted_index:
r_records = r_inverted_index[token]
for r_rec_idx in r_records:
pair = (rec_idx, r_rec_idx)
# Skip if the pair is in the candidate set.
if pair in cand_set:
continue
# Skip if the pair has been compared.
if pair in compared_set:
continue
sim = _jaccard_sim(
set(lrecord_list[rec_idx]), set(rrecord_list[r_rec_idx]))
if len(topk_heap) == output_size:
hq.heappushpop(topk_heap, (sim, rec_idx, r_rec_idx))
else:
hq.heappush(topk_heap, (sim, rec_idx, r_rec_idx))
total_compared_pairs += 1
compared_set.add(pair)
# Update the inverted index.
if token not in l_inverted_index:
l_inverted_index[token] = set()
l_inverted_index[token].add(rec_idx)
else:
token = rrecord_list[rec_idx][tok_idx]
if token in l_inverted_index:
l_records = l_inverted_index[token]
for l_rec_idx in l_records:
pair = (l_rec_idx, rec_idx)
# Skip if the pair is in the candidate set.
if pair in cand_set:
continue
# Skip if the pair has been compared.
if pair in compared_set:
continue
sim = _jaccard_sim(
set(lrecord_list[l_rec_idx]), set(rrecord_list[rec_idx]))
if len(topk_heap) == output_size:
hq.heappushpop(topk_heap, (sim, l_rec_idx, rec_idx))
else:
hq.heappush(topk_heap, (sim, l_rec_idx, rec_idx))
total_compared_pairs += 1
compared_set.add(pair)
# Update the inverted index.
if token not in r_inverted_index:
r_inverted_index[token] = set()
r_inverted_index[token].add(rec_idx)
return topk_heap
# Calculate the token-based Jaccard similarity of two string sets.
def _jaccard_sim(l_token_set, r_token_set):
l_len = len(l_token_set)
r_len = len(r_token_set)
intersect_size = len(l_token_set & r_token_set)
if l_len + r_len == 0:
return 0.0
return intersect_size * 1.0 / (l_len + r_len - intersect_size)
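# Illustrative sketch (not part of the original module), added to make the
# Jaccard formula above concrete: |A & B| / |A | B| on two hypothetical token
# sets. The helper name is ours, not py_entitymatching's.
def _example_jaccard_sim():
    left = {'iphone', '6s', 'silver'}
    right = {'iphone', '6s', 'gold'}
    # Intersection size 2, union size 4, so the similarity is 0.5.
    return _jaccard_sim(left, right)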
# Check the input field correspondence list.
def _check_input_field_correspondence_list(ltable, rtable, field_corres_list):
if field_corres_list is None:
return
true_ltable_fields = list(ltable.columns)
true_rtable_fields = list(rtable.columns)
for pair in field_corres_list:
        # Raise an error if the pair is not a tuple or the length is not two.
if type(pair) != tuple or len(pair) != 2:
raise AssertionError('Error in checking user input field'
' correspondence: the input field pairs'
                                 ' are not in the required tuple format!')
given_ltable_fields = [field[0] for field in field_corres_list]
given_rtable_fields = [field[1] for field in field_corres_list]
# Raise an error if a field is in the correspondence list but not in
# the table schema.
for given_field in given_ltable_fields:
if given_field not in true_ltable_fields:
raise AssertionError('Error in checking user input field'
' correspondence: the field \'%s\' is'
' not in the ltable!' % given_field)
for given_field in given_rtable_fields:
if given_field not in true_rtable_fields:
raise AssertionError('Error in checking user input field'
' correspondence:'
' the field \'%s\' is not in the'
' rtable!' % given_field)
return
# Get the field correspondence list. If the input list is empty, call
# the system built-in function to get the correspondence; otherwise use the
# user input as the correspondence.
def _get_field_correspondence_list(ltable, rtable, lkey, rkey, attr_corres):
corres_list = []
if attr_corres is None or len(attr_corres) == 0:
corres_list = em.get_attr_corres(ltable, rtable)['corres']
if len(corres_list) == 0:
raise AssertionError('Error: the field correspondence list'
' is empty. Please specify the field'
' correspondence!')
else:
for tu in attr_corres:
corres_list.append(tu)
# If the key correspondence is not in the list, add it in.
key_pair = (lkey, rkey)
if key_pair not in corres_list:
corres_list.append(key_pair)
return corres_list
# Filter the correspondence list. Remove the fields in numeric types.
def _filter_corres_list(ltable, rtable, ltable_key, rtable_key,
ltable_col_dict, rtable_col_dict, corres_list):
ltable_dtypes = list(ltable.dtypes)
rtable_dtypes = list(rtable.dtypes)
for i in reversed(range(len(corres_list))):
lcol_name = corres_list[i][0]
rcol_name = corres_list[i][1]
# Filter the pair where both fields are numeric types.
if ltable_dtypes[ltable_col_dict[lcol_name]] != numpy.dtype('O')\
and rtable_dtypes[rtable_col_dict[rcol_name]] != numpy.dtype('O'):
if lcol_name != ltable_key and rcol_name != rtable_key:
corres_list.pop(i)
if len(corres_list) == 1 and corres_list[0][0] == ltable_key\
and corres_list[0][1] == rtable_key:
raise AssertionError('The field correspondence list is empty after'
' filtering: please verify your correspondence'
' list, or check if each field is of numeric'
' type!')
# Filter the original input tables according to the correspondence list.
# The filtered tables will only contain the fields in the correspondence list.
def _get_filtered_table(ltable, rtable, lkey, rkey, corres_list):
ltable_cols = [col_pair[0] for col_pair in corres_list]
rtable_cols = [col_pair[1] for col_pair in corres_list]
lfiltered_table = ltable[ltable_cols]
rfiltered_table = rtable[rtable_cols]
em.set_key(lfiltered_table, lkey)
em.set_key(rfiltered_table, rkey)
return lfiltered_table, rfiltered_table
# Build the mapping between field name and its index in the schema.
def _build_col_name_index_dict(table):
col_dict = {}
col_names = list(table.columns)
for i in range(len(col_names)):
col_dict[col_names[i]] = i
return col_dict
# Select the most important fields for the similarity join. The importance
# of a field is measured by the combination of field value uniqueness
# and non-emptiness.
def _select_features(ltable, rtable, lkey):
lcolumns = list(ltable.columns)
rcolumns = list(rtable.columns)
lkey_index = -1
if len(lcolumns) != len(rcolumns):
raise AssertionError('Error: FILTERED ltable and FILTERED rtable'
' have different number of fields!')
for i in range(len(lcolumns)):
if lkey == lcolumns[i]:
lkey_index = i
lweight = _get_feature_weight(ltable)
rweight = _get_feature_weight(rtable)
Rank = namedtuple('Rank', ['index', 'weight'])
rank_list = []
for i in range(len(lweight)):
rank_list.append(Rank(i, lweight[i] * rweight[i]))
rank_list.pop(lkey_index)
rank_list = sorted(rank_list, key=attrgetter('weight'), reverse=True)
rank_index_list = []
num_selected_fields = 0
if len(rank_list) <= 3:
num_selected_fields = len(rank_list)
elif len(rank_list) <= 5:
num_selected_fields = 3
else:
num_selected_fields = int(len(rank_list) / 2)
for i in range(num_selected_fields):
rank_index_list.append(rank_list[i].index)
return sorted(rank_index_list)
# Calculate the importance (weight) for each field in a table.
def _get_feature_weight(table):
num_records = len(table)
if num_records == 0:
raise AssertionError('Error: empty table!')
weight = []
for col in table.columns:
value_set = set()
non_empty_count = 0
col_values = table[col]
for value in col_values:
if not pd.isnull(value) and value != '':
value_set.add(value)
non_empty_count += 1
selectivity = 0.0
if non_empty_count != 0:
selectivity = len(value_set) * 1.0 / non_empty_count
non_empty_ratio = non_empty_count * 1.0 / num_records
        # The field weight is the combination of non-emptiness
        # and uniqueness.
weight.append(non_empty_ratio + selectivity)
return weight
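# Illustrative sketch (not part of the original module): the weight above is
# the non-empty ratio plus uniqueness. On the hypothetical 4-row table below,
# the 'name' column scores 1.0 + 1.0 = 2.0 and the 'city' column (one repeated
# value, one missing) scores 0.75 + 1/3.
def _example_get_feature_weight():
    table = pd.DataFrame([['a', 'x'], ['b', 'x'], ['c', 'x'], ['d', None]],
                         columns=['name', 'city'])
    return _get_feature_weight(table)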
# Build the mapping of record key value and its index in the table.
def _get_record_id_to_index_map(table, table_key):
record_id_to_index = {}
id_col = list(table[table_key])
for i in range(len(id_col)):
if id_col[i] in record_id_to_index:
raise AssertionError('Duplicate keys found:', id_col[i])
record_id_to_index[id_col[i]] = i
return record_id_to_index
# Tokenize a table. First tokenize each table column by white space,
# then concatenate the columns of each record. The reason for tokenizing
# columns first is that it's more efficient than iterating over each
# dataframe tuple.
def _get_tokenized_table(table, table_key, feature_list):
record_list = []
columns = table.columns[feature_list]
tmp_table = []
for col in columns:
column_token_list = _get_tokenized_column(table[col])
tmp_table.append(column_token_list)
num_records = len(table[table_key])
for i in range(num_records):
token_list = []
index_map = {}
for j in range(len(columns)):
tmp_col_tokens = tmp_table[j][i]
for token in tmp_col_tokens:
if token != '':
if token in index_map:
token_list.append(token + '_' + str(index_map[token]))
index_map[token] += 1
else:
token_list.append(token)
index_map[token] = 1
record_list.append(token_list)
return record_list
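# Illustrative sketch (not part of the original module): duplicate tokens in a
# record are disambiguated with an '_<n>' suffix so that set semantics do not
# collapse them. The tiny table below is hypothetical.
def _example_get_tokenized_table():
    table = pd.DataFrame([[0, 'New new York']], columns=['id', 'title'])
    # Tokenizes the 'title' field (column index 1) of the single record to
    # ['new', 'new_1', 'york'].
    return _get_tokenized_table(table, 'id', [1])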
# Tokenize each table column by white spaces.
def _get_tokenized_column(column):
column_token_list = []
for value in list(column):
tmp_value = _replace_nan_to_empty(value)
if tmp_value != '':
tmp_list = list(tmp_value.lower().split(' '))
column_token_list.append(tmp_list)
else:
column_token_list.append([''])
return column_token_list
# Check the value of each field. Replace nan with empty string.
# Cast floats into integers.
def _replace_nan_to_empty(field):
if pd.isnull(field):
return ''
elif type(field) in [float, numpy.float64, int, numpy.int64]:
return str('{0:.0f}'.format(field))
else:
return field
# Reformat the input candidate set. Since the input format is DataFrame,
# it's difficult for us to know if a tuple pair is in the candidate
# set or not. We will use the reformatted candidate set in the topk
# similarity join.
def _index_candidate_set(candidate_set, lrecord_id_to_index_map, rrecord_id_to_index_map, verbose):
new_formatted_candidate_set = set()
if len(candidate_set) == 0:
return new_formatted_candidate_set
# Get metadata
key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key =\
cm.get_metadata_for_candset(candidate_set, logger, verbose)
# validate metadata
cm._validate_metadata_for_candset(candidate_set, key, fk_ltable, fk_rtable,
ltable, rtable, l_key, r_key,
logger, verbose)
ltable_key_data = list(candidate_set[fk_ltable])
rtable_key_data = list(candidate_set[fk_rtable])
for i in range(len(ltable_key_data)):
new_formatted_candidate_set.add((lrecord_id_to_index_map[ltable_key_data[i]],
rrecord_id_to_index_map[rtable_key_data[i]]))
return new_formatted_candidate_set
# Build the global order of tokens in the table by frequency.
def _build_global_token_order(record_list, order_dict):
for record in record_list:
for token in record:
if token in order_dict:
order_dict[token] += 1
else:
order_dict[token] = 1
# Sort each tokenized record by the global token order.
def _sort_record_tokens_by_global_order(record_list, order_dict):
for i in range(len(record_list)):
tmp_record = []
for token in record_list[i]:
if token in order_dict:
tmp_record.append(token)
record_list[i] = sorted(tmp_record, key=lambda x: (order_dict[x], x))
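# Illustrative sketch (not part of the original module): records are reordered
# by ascending global token frequency (ties broken lexically), so rare tokens
# come first, which is what the prefix filtering above relies on.
def _example_sort_record_tokens():
    records = [['red', 'iphone'], ['iphone', 'gold']]
    order = {}
    _build_global_token_order(records, order)
    _sort_record_tokens_by_global_order(records, order)
    # records is now [['red', 'iphone'], ['gold', 'iphone']].
    return records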
# Generate the prefix events of two tables for topk similarity joins.
# Refer to "top-k set similarity join" by Xiao et al. for details.
def _generate_prefix_events(lrecord_list, rrecord_list):
prefix_events = []
_generate_prefix_events_impl(lrecord_list, prefix_events, 0)
_generate_prefix_events_impl(rrecord_list, prefix_events, 1)
return prefix_events
# Prefix event generation for a table.
def _generate_prefix_events_impl(record_list, prefix_events, table_indicator):
for i in range(len(record_list)):
length = len(record_list[i])
for j in range(length):
threshold = _calc_threshold(j, length)
hq.heappush(prefix_events,
(-1.0 * threshold, table_indicator, i, j, record_list[i][j]))
# Calculate the prefix-filtering threshold of a token at a given position
# in a record.
# Refer to "top-k set similarity join" by Xiao et al. for details.
def _calc_threshold(token_index, record_length):
return 1 - token_index * 1.0 / record_length
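# Illustrative sketch (not part of the original module): for a record of
# length 4 the thresholds decay as 1.0, 0.75, 0.5, 0.25, and their negated
# values are what orders the prefix events in the heap above.
def _example_calc_threshold():
    return [_calc_threshold(j, 4) for j in range(4)]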
|
|
# Copyright (c) 2012 NetApp, Inc.
# Copyright (c) 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import time
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_concurrency import processutils as putils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
import six
from cinder import coordination
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder import interface
from cinder import utils
from cinder.volume import configuration
from cinder.volume.drivers import remotefs
from cinder.volume import utils as vutils
VERSION = '1.4.0'
LOG = logging.getLogger(__name__)
nfs_opts = [
cfg.StrOpt('nfs_shares_config',
default='/etc/cinder/nfs_shares',
help='File with the list of available NFS shares.'),
cfg.BoolOpt('nfs_sparsed_volumes',
default=True,
help='Create volumes as sparsed files which take no space. '
                     'If set to False, volumes are created as regular files. '
                     'In that case, volume creation takes a lot of time.'),
cfg.BoolOpt('nfs_qcow2_volumes',
default=False,
help='Create volumes as QCOW2 files rather than raw files.'),
cfg.StrOpt('nfs_mount_point_base',
default='$state_path/mnt',
help='Base dir containing mount points for NFS shares.'),
cfg.StrOpt('nfs_mount_options',
               help='Mount options passed to the NFS client. See the NFS '
                    'man page for details.'),
cfg.IntOpt('nfs_mount_attempts',
default=3,
help='The number of attempts to mount NFS shares before '
'raising an error. At least one attempt will be '
'made to mount an NFS share, regardless of the '
'value specified.'),
cfg.BoolOpt('nfs_snapshot_support',
default=False,
help='Enable support for snapshots on the NFS driver. '
'Platforms using libvirt <1.2.7 will encounter issues '
'with this feature.'),
]
CONF = cfg.CONF
CONF.register_opts(nfs_opts, group=configuration.SHARED_CONF_GROUP)
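# A minimal, hypothetical cinder.conf backend section exercising the options
# registered above (the backend name and values are illustrative only):
#
#   [nfs_backend]
#   volume_driver = cinder.volume.drivers.nfs.NfsDriver
#   nfs_shares_config = /etc/cinder/nfs_shares
#   nfs_mount_point_base = $state_path/mnt
#   nfs_sparsed_volumes = True
#   nfs_mount_attempts = 3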
@interface.volumedriver
class NfsDriver(remotefs.RemoteFSSnapDriverDistributed):
"""NFS based cinder driver.
    Creates a file on an NFS share and uses it as a block device on the
    hypervisor.
"""
driver_volume_type = 'nfs'
driver_prefix = 'nfs'
volume_backend_name = 'Generic_NFS'
VERSION = VERSION
# ThirdPartySystems wiki page
CI_WIKI_NAME = "Cinder_Jenkins"
def __init__(self, execute=putils.execute, *args, **kwargs):
self._remotefsclient = None
super(NfsDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(nfs_opts)
root_helper = utils.get_root_helper()
# base bound to instance is used in RemoteFsConnector.
self.base = getattr(self.configuration,
'nfs_mount_point_base')
self.base = os.path.realpath(self.base)
opts = getattr(self.configuration,
'nfs_mount_options')
nas_mount_options = getattr(self.configuration,
'nas_mount_options',
None)
if nas_mount_options is not None:
LOG.debug('overriding nfs_mount_options with nas_mount_options')
opts = nas_mount_options
self._remotefsclient = remotefs_brick.RemoteFsClient(
'nfs', root_helper, execute=execute,
nfs_mount_point_base=self.base,
nfs_mount_options=opts)
supports_auto_mosr = kwargs.get('supports_auto_mosr', False)
self._sparse_copy_volume_data = True
self.reserved_percentage = self.configuration.reserved_percentage
self.max_over_subscription_ratio = (
vutils.get_max_over_subscription_ratio(
self.configuration.max_over_subscription_ratio,
supports_auto=supports_auto_mosr))
def initialize_connection(self, volume, connector):
LOG.debug('Initializing connection to volume %(vol)s. '
'Connector: %(con)s', {'vol': volume.id, 'con': connector})
active_vol = self.get_active_image_from_info(volume)
volume_dir = self._local_volume_dir(volume)
path_to_vol = os.path.join(volume_dir, active_vol)
info = self._qemu_img_info(path_to_vol,
volume['name'])
data = {'export': volume.provider_location,
'name': active_vol}
if volume.provider_location in self.shares:
data['options'] = self.shares[volume.provider_location]
conn_info = {
'driver_volume_type': self.driver_volume_type,
'data': data,
'mount_point_base': self._get_mount_point_base()
}
# Test file for raw vs. qcow2 format
if info.file_format not in ['raw', 'qcow2']:
msg = _('nfs volume must be a valid raw or qcow2 image.')
raise exception.InvalidVolume(reason=msg)
conn_info['data']['format'] = info.file_format
LOG.debug('NfsDriver: conn_info: %s', conn_info)
return conn_info
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
super(NfsDriver, self).do_setup(context)
nas_host = getattr(self.configuration,
'nas_host',
None)
nas_share_path = getattr(self.configuration,
'nas_share_path',
None)
# If both nas_host and nas_share_path are set we are not
# going to use the nfs_shares_config file. So, only check
# for its existence if it is going to be used.
if((not nas_host) or (not nas_share_path)):
config = self.configuration.nfs_shares_config
if not config:
msg = (_("There's no NFS config file configured (%s)") %
'nfs_shares_config')
LOG.warning(msg)
raise exception.NfsException(msg)
if not os.path.exists(config):
msg = (_("NFS config file at %(config)s doesn't exist") %
{'config': config})
LOG.warning(msg)
raise exception.NfsException(msg)
self.shares = {} # address : options
        # Check if mount.nfs is installed on this system; note that we
        # need to be root to also find mount.nfs on distributions where
        # it is not located in an unprivileged user's PATH (e.g. /sbin).
package = 'mount.nfs'
try:
self._execute(package, check_exit_code=False,
run_as_root=True)
except OSError as exc:
if exc.errno == errno.ENOENT:
msg = _('%s is not installed') % package
raise exception.NfsException(msg)
else:
raise
# Now that all configuration data has been loaded (shares),
# we can "set" our final NAS file security options.
self.set_nas_security_options(self._is_voldb_empty_at_startup)
self._check_snapshot_support(setup_checking=True)
def _ensure_share_mounted(self, nfs_share):
mnt_flags = []
if self.shares.get(nfs_share) is not None:
mnt_flags = self.shares[nfs_share].split()
num_attempts = max(1, self.configuration.nfs_mount_attempts)
for attempt in range(num_attempts):
try:
self._remotefsclient.mount(nfs_share, mnt_flags)
return
except Exception as e:
if attempt == (num_attempts - 1):
LOG.error('Mount failure for %(share)s after '
'%(count)d attempts.',
{'share': nfs_share,
'count': num_attempts})
raise exception.NfsException(six.text_type(e))
LOG.debug('Mount attempt %(attempt)d failed: %(exc)s.\n'
'Retrying mount ...',
{'attempt': attempt, 'exc': e})
time.sleep(1)
def _find_share(self, volume):
"""Choose NFS share among available ones for given volume size.
For instances with more than one share that meets the criteria, the
share with the least "allocated" space will be selected.
:param volume: the volume to be created.
"""
if not self._mounted_shares:
raise exception.NfsNoSharesMounted()
target_share = None
target_share_reserved = 0
for nfs_share in self._mounted_shares:
total_size, total_available, total_allocated = (
self._get_capacity_info(nfs_share))
share_info = {'total_size': total_size,
'total_available': total_available,
'total_allocated': total_allocated,
}
if not self._is_share_eligible(nfs_share,
volume.size,
share_info):
continue
if target_share is not None:
if target_share_reserved > total_allocated:
target_share = nfs_share
target_share_reserved = total_allocated
else:
target_share = nfs_share
target_share_reserved = total_allocated
if target_share is None:
raise exception.NfsNoSuitableShareFound(
volume_size=volume.size)
LOG.debug('Selected %s as target NFS share.', target_share)
return target_share
def _is_share_eligible(self, nfs_share, volume_size_in_gib,
share_info=None):
"""Verifies NFS share is eligible to host volume with given size.
        First validation step: the ratio of actually used space
        (used_space / total_space) must be below used_ratio, which is derived
        from the reserved percentage. Second validation step: the apparent
        available space (total_size * oversubscription ratio minus the
        apparent allocated space, which differs from actual usage for sparse
        files) must be large enough to hold the new volume.
:param nfs_share: NFS share
        :param volume_size_in_gib: int size in GiB
"""
# Because the generic NFS driver aggregates over all shares
# when reporting capacity and usage stats to the scheduler,
# we still have to perform some scheduler-like capacity
# checks here, and these have to take into account
# configuration for reserved space and oversubscription.
# It would be better to do all this in the scheduler, but
# this requires either pool support for the generic NFS
# driver or limiting each NFS backend driver to a single share.
# derive used_ratio from reserved percentage
if share_info is None:
total_size, total_available, total_allocated = (
self._get_capacity_info(nfs_share))
share_info = {'total_size': total_size,
'total_available': total_available,
'total_allocated': total_allocated,
}
used_percentage = 100 - self.reserved_percentage
used_ratio = used_percentage / 100.0
requested_volume_size = volume_size_in_gib * units.Gi
apparent_size = max(0, share_info['total_size'] *
self.max_over_subscription_ratio)
apparent_available = max(0, apparent_size -
share_info['total_allocated'])
actual_used_ratio = ((share_info['total_size'] -
share_info['total_available']) /
float(share_info['total_size']))
if actual_used_ratio > used_ratio:
# NOTE(morganfainberg): We check the used_ratio first since
# with oversubscription it is possible to not have the actual
# available space but be within our oversubscription limit
# therefore allowing this share to still be selected as a valid
# target.
LOG.debug('%s is not eligible - used ratio exceeded.',
nfs_share)
return False
if apparent_available <= requested_volume_size:
LOG.debug('%s is not eligible - insufficient (apparent) available '
'space.',
nfs_share)
return False
if share_info['total_allocated'] / share_info['total_size'] >= (
self.max_over_subscription_ratio):
LOG.debug('%s is not eligible - utilization exceeds max '
'over subscription ratio.',
nfs_share)
return False
return True
def _get_mount_point_for_share(self, nfs_share):
"""Needed by parent class."""
return self._remotefsclient.get_mount_point(nfs_share)
def _get_capacity_info(self, nfs_share):
"""Calculate available space on the NFS share.
:param nfs_share: example 172.18.194.100:/var/nfs
"""
mount_point = self._get_mount_point_for_share(nfs_share)
df, _ = self._execute('stat', '-f', '-c', '%S %b %a', mount_point,
run_as_root=self._execute_as_root)
block_size, blocks_total, blocks_avail = map(float, df.split())
total_available = block_size * blocks_avail
total_size = block_size * blocks_total
du, _ = self._execute('du', '-sb', '--apparent-size', '--exclude',
'*snapshot*', mount_point,
run_as_root=self._execute_as_root)
total_allocated = float(du.split()[0])
return total_size, total_available, total_allocated
def _get_mount_point_base(self):
return self.base
def extend_volume(self, volume, new_size):
"""Extend an existing volume to the new size."""
LOG.info('Extending volume %s.', volume.id)
extend_by = int(new_size) - volume.size
if not self._is_share_eligible(volume.provider_location,
extend_by):
raise exception.ExtendVolumeError(reason='Insufficient space to'
' extend volume %s to %sG'
% (volume.id, new_size))
path = self.local_path(volume)
LOG.info('Resizing file to %sG...', new_size)
image_utils.resize_image(path, new_size,
run_as_root=self._execute_as_root)
if not self._is_file_size_equal(path, new_size):
raise exception.ExtendVolumeError(
reason='Resizing image file failed.')
def _is_file_size_equal(self, path, size):
"""Checks if file size at path is equal to size."""
data = image_utils.qemu_img_info(path,
run_as_root=self._execute_as_root)
virt_size = int(data.virtual_size / units.Gi)
return virt_size == size
def set_nas_security_options(self, is_new_cinder_install):
"""Determine the setting to use for Secure NAS options.
Value of each NAS Security option is checked and updated. If the
option is currently 'auto', then it is set to either true or false
based upon if this is a new Cinder installation. The RemoteFS variable
'_execute_as_root' will be updated for this driver.
:param is_new_cinder_install: bool indication of new Cinder install
"""
doc_html = "https://docs.openstack.org/cinder/latest" \
"/admin/blockstorage-nfs-backend.html"
self._ensure_shares_mounted()
if not self._mounted_shares:
raise exception.NfsNoSharesMounted()
nfs_mount = self._get_mount_point_for_share(self._mounted_shares[0])
self.configuration.nas_secure_file_permissions = \
self._determine_nas_security_option_setting(
self.configuration.nas_secure_file_permissions,
nfs_mount, is_new_cinder_install)
LOG.debug('NAS variable secure_file_permissions setting is: %s',
self.configuration.nas_secure_file_permissions)
if self.configuration.nas_secure_file_permissions == 'false':
LOG.warning("The NAS file permissions mode will be 666 "
"(allowing other/world read & write access). "
"This is considered an insecure NAS environment. "
"Please see %s for information on a secure "
"NFS configuration.",
doc_html)
self.configuration.nas_secure_file_operations = \
self._determine_nas_security_option_setting(
self.configuration.nas_secure_file_operations,
nfs_mount, is_new_cinder_install)
# If secure NAS, update the '_execute_as_root' flag to not
# run as the root user; run as process' user ID.
# TODO(eharney): need to separate secure NAS vs. execute as root.
# There are requirements to run some commands as root even
# when running in secure NAS mode. (i.e. read volume file
# attached to an instance and owned by qemu:qemu)
if self.configuration.nas_secure_file_operations == 'true':
self._execute_as_root = False
LOG.debug('NAS secure file operations setting is: %s',
self.configuration.nas_secure_file_operations)
if self.configuration.nas_secure_file_operations == 'false':
LOG.warning("The NAS file operations will be run as "
"root: allowing root level access at the storage "
"backend. This is considered an insecure NAS "
"environment. Please see %s "
"for information on a secure NAS configuration.",
doc_html)
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
"""Return the keys and values updated from NFS for migrated volume.
This method should rename the back-end volume name(id) on the
destination host back to its original name(id) on the source host.
:param ctxt: The context used to run the method update_migrated_volume
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:param original_volume_status: The status of the original volume
:returns: model_update to update DB with any needed changes
"""
name_id = None
if original_volume_status == 'available':
current_name = CONF.volume_name_template % new_volume.id
original_volume_name = CONF.volume_name_template % volume.id
current_path = self.local_path(new_volume)
# Replace the volume name with the original volume name
original_path = current_path.replace(current_name,
original_volume_name)
try:
os.rename(current_path, original_path)
except OSError:
LOG.error('Unable to rename the logical volume '
'for volume: %s', volume.id)
# If the rename fails, _name_id should be set to the new
# volume id and provider_location should be set to the
# one from the new volume as well.
name_id = new_volume._name_id or new_volume.id
else:
# The back-end will not be renamed.
name_id = new_volume._name_id or new_volume.id
return {'_name_id': name_id,
'provider_location': new_volume.provider_location}
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
super(NfsDriver, self)._update_volume_stats()
self._stats['sparse_copy_volume'] = True
data = self._stats
global_capacity = data['total_capacity_gb']
global_free = data['free_capacity_gb']
thin_enabled = self.configuration.nfs_sparsed_volumes
if thin_enabled:
provisioned_capacity = self._get_provisioned_capacity()
else:
provisioned_capacity = round(global_capacity - global_free, 2)
data['provisioned_capacity_gb'] = provisioned_capacity
data['max_over_subscription_ratio'] = self.max_over_subscription_ratio
data['reserved_percentage'] = self.reserved_percentage
data['thin_provisioning_support'] = thin_enabled
data['thick_provisioning_support'] = not thin_enabled
self._stats = data
@coordination.synchronized('{self.driver_prefix}-{volume[id]}')
def create_volume(self, volume):
"""Apply locking to the create volume operation."""
return super(NfsDriver, self).create_volume(volume)
@coordination.synchronized('{self.driver_prefix}-{volume[id]}')
def delete_volume(self, volume):
"""Deletes a logical volume."""
LOG.debug('Deleting volume %(vol)s, provider_location: %(loc)s',
{'vol': volume.id, 'loc': volume.provider_location})
if not volume.provider_location:
LOG.warning('Volume %s does not have provider_location '
'specified, skipping', volume.name)
return
info_path = self._local_path_volume_info(volume)
info = self._read_info_file(info_path, empty_if_missing=True)
if info:
base_volume_path = os.path.join(self._local_volume_dir(volume),
info['active'])
self._delete(info_path)
else:
base_volume_path = self._local_path_volume(volume)
self._delete(base_volume_path)
def _qemu_img_info(self, path, volume_name):
return super(NfsDriver, self)._qemu_img_info_base(
path,
volume_name,
self.configuration.nfs_mount_point_base,
force_share=True,
run_as_root=True)
def _check_snapshot_support(self, setup_checking=False):
"""Ensure snapshot support is enabled in config."""
if (not self.configuration.nfs_snapshot_support and
not setup_checking):
msg = _("NFS driver snapshot support is disabled in cinder.conf.")
raise exception.VolumeDriverException(message=msg)
if (self.configuration.nas_secure_file_operations == 'true' and
self.configuration.nfs_snapshot_support):
msg = _("Snapshots are not supported with "
"nas_secure_file_operations enabled ('true' or 'auto'). "
"Please set it to 'false' if you intend to have "
"it enabled.")
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
@coordination.synchronized('{self.driver_prefix}-{snapshot.volume.id}')
def create_snapshot(self, snapshot):
"""Apply locking to the create snapshot operation."""
self._check_snapshot_support()
return self._create_snapshot(snapshot)
@coordination.synchronized('{self.driver_prefix}-{snapshot.volume.id}')
def delete_snapshot(self, snapshot):
"""Apply locking to the delete snapshot operation."""
self._check_snapshot_support()
return self._delete_snapshot(snapshot)
def _copy_volume_from_snapshot(self, snapshot, volume, volume_size):
"""Copy data from snapshot to destination volume.
This is done with a qemu-img convert to raw/qcow2 from the snapshot
qcow2.
"""
LOG.debug("Copying snapshot: %(snap)s -> volume: %(vol)s, "
"volume_size: %(size)s GB",
{'snap': snapshot.id,
'vol': volume.id,
'size': volume_size})
info_path = self._local_path_volume_info(snapshot.volume)
snap_info = self._read_info_file(info_path)
vol_path = self._local_volume_dir(snapshot.volume)
forward_file = snap_info[snapshot.id]
forward_path = os.path.join(vol_path, forward_file)
# Find the file which backs this file, which represents the point
# when this snapshot was created.
img_info = self._qemu_img_info(forward_path, snapshot.volume.name)
path_to_snap_img = os.path.join(vol_path, img_info.backing_file)
path_to_new_vol = self._local_path_volume(volume)
LOG.debug("will copy from snapshot at %s", path_to_snap_img)
if self.configuration.nfs_qcow2_volumes:
out_format = 'qcow2'
else:
out_format = 'raw'
image_utils.convert_image(path_to_snap_img,
path_to_new_vol,
out_format,
run_as_root=self._execute_as_root)
self._set_rw_permissions_for_all(path_to_new_vol)
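# Illustrative sketch (not part of the upstream driver): the capacity math used
# by _is_share_eligible above, working in GiB for readability and with every
# value hypothetical. For example, a 100 GiB share with 20 GiB free, 150 GiB
# apparently allocated, reserved_percentage=5 and max_over_subscription_ratio=20
# gives used_ratio 0.95, actual used ratio 0.8 and an apparent available space
# of 1850 GiB.
def _example_share_eligibility_math(total_size, total_available,
                                    total_allocated, reserved_percentage,
                                    max_over_subscription_ratio,
                                    requested_volume_size):
    used_ratio = (100 - reserved_percentage) / 100.0
    apparent_size = max(0, total_size * max_over_subscription_ratio)
    apparent_available = max(0, apparent_size - total_allocated)
    actual_used_ratio = (total_size - total_available) / float(total_size)
    return (actual_used_ratio <= used_ratio and
            apparent_available > requested_volume_size and
            total_allocated / float(total_size) < max_over_subscription_ratio)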
|
|
from __future__ import absolute_import, division, print_function
import unittest
import os
import tempfile
import json
from dynd import nd
import datashape
from collections import Iterator
from datashape.discovery import discover
from blaze.data import JSON, JSON_Streaming
from blaze.utils import filetext, raises
from blaze.data.utils import tuplify
class TestBigJSON(unittest.TestCase):
maxDiff = None
data = {
"type": "ImageCollection",
"images": [{
"Width": 800,
"Height": 600,
"Title": "View from 15th Floor",
"Thumbnail": {
"Url": "http://www.example.com/image/481989943",
"Height": 125,
"Width": "100"
},
"IDs": [116, 943, 234, 38793]
}]
}
ordered = (u'ImageCollection',
((800, 600, u'View from 15th Floor',
(u'http://www.example.com/image/481989943', 125, 100),
(116, 943, 234, 38793)),))
dshape = """{
type: string,
images: var * {
Width: int16,
Height: int16,
Title: string,
Thumbnail: {
Url: string,
Height: int16,
Width: int16,
},
IDs: var * int32,
}
}
"""
def setUp(self):
        self.filename = tempfile.mktemp(".json")
with open(self.filename, "w") as f:
json.dump(self.data, f)
def tearDown(self):
os.remove(self.filename)
def test_basic(self):
dd = JSON(self.filename, 'r', dshape=self.dshape)
self.assertRaises(Exception, lambda: tuple(dd))
def test_as_py(self):
dd = JSON(self.filename, 'r', dshape=self.dshape)
self.assertEqual(tuplify(dd.as_py()), self.ordered)
def test_discovery(self):
dd = JSON(self.filename, 'r')
s = str(dd.dshape)
for word in ['Thumbnail', 'string', 'int', 'images', 'type']:
assert word in s
json_buf = u"[1, 2, 3, 4, 5]"
json_dshape = "var * int8"
class TestJSON(unittest.TestCase):
def setUp(self):
handle, self.json_file = tempfile.mkstemp(".json")
with os.fdopen(handle, "w") as f:
f.write(json_buf)
def tearDown(self):
os.remove(self.json_file)
def test_raise_error_on_non_existent_file(self):
self.assertRaises(ValueError,
lambda: JSON('does-not-exist23424.josn', 'r'))
def test_basic_object_type(self):
dd = JSON(self.json_file, dshape=json_dshape)
self.assertEqual(list(dd), [1, 2, 3, 4, 5])
def test_iter(self):
dd = JSON(self.json_file, dshape=json_dshape)
# This equality does not work yet
# self.assertEqual(dd.dshape, datashape.dshape(
# 'Var, %s' % json_schema))
self.assertEqual(list(dd), [1, 2, 3, 4, 5])
class AccountTestData(unittest.TestCase):
def setUp(self):
self.fn = tempfile.mktemp(".json")
with open(self.fn, 'w') as f:
for d in self.dicts:
f.write(json.dumps(d))
f.write('\n')
self.dd = JSON_Streaming(self.fn, schema=self.schema)
def tearDown(self):
if os.path.exists(self.fn):
os.remove(self.fn)
dicts = [{'name': 'Alice', 'amount': 100},
{'name': 'Alice', 'amount': 50},
{'name': 'Bob', 'amount': 10},
{'name': 'Charlie', 'amount': 200},
{'name': 'Bob', 'amount': 100}]
tuples = (('Alice', 100),
('Alice', 50),
('Bob', 10),
('Charlie', 200),
('Bob', 100))
text = '\n'.join(map(json.dumps, dicts))
schema = '{name: string, amount: int32}'
class TestDiscovery(AccountTestData):
def test_discovery(self):
dd = JSON_Streaming(self.fn)
assert set(dd.schema[0].names) == set(['name', 'amount'])
assert 'string' in str(dd.schema[0]['name'])
class Test_Indexing(AccountTestData):
def test_indexing_basic(self):
assert tuplify(self.dd[0]) == self.tuples[0]
assert tuplify(self.dd[0:3]) == self.tuples[0:3]
assert tuplify(self.dd[0::2]) == self.tuples[0::2]
self.assertEqual(tuplify(self.dd[[3, 1, 3]]),
tuple(self.tuples[i] for i in [3, 1, 3]))
def test_indexing_nested(self):
assert tuplify(self.dd[0, 'name']) == self.tuples[0][0]
assert tuplify(self.dd[0, 0]) == self.tuples[0][0]
self.assertEqual(tuplify(self.dd[[2, 0], 'name']), ('Bob', 'Alice'))
self.assertEqual(tuplify(self.dd[[2, 0], 0]), ('Bob', 'Alice'))
self.assertEqual(tuplify(self.dd[[2, 0], [1, 0]]), ((10, 'Bob'),
(100, 'Alice')))
def test_laziness(self):
assert isinstance(self.dd[:, 'name'], Iterator)
class Test_StreamingTransfer(AccountTestData):
def test_init(self):
with filetext(self.text) as fn:
dd = JSON_Streaming(fn, schema=self.schema)
self.assertEquals(tuple(dd), self.tuples)
assert dd.dshape in set((
datashape.dshape('var * {name: string, amount: int32}'),
datashape.dshape('5 * {name: string, amount: int32}')))
def test_chunks(self):
with filetext(self.text) as fn:
dd = JSON_Streaming(fn, schema=self.schema)
chunks = list(dd.chunks(blen=2))
assert isinstance(chunks[0], nd.array)
self.assertEquals(len(chunks), 3)
self.assertEquals(nd.as_py(chunks[0]), self.dicts[:2])
def test_append(self):
with filetext('') as fn:
dd = JSON_Streaming(fn, mode='w', schema=self.schema)
dd.extend([self.tuples[0]])
with open(fn) as f:
self.assertEquals(json.loads(f.read().strip()), self.dicts[0])
self.assertRaises(ValueError, lambda : dd.extend([5.5]))
self.assertRaises(ValueError,
lambda : dd.extend([{'name': 5, 'amount': 1.3}]))
def test_extend_dicts(self):
with filetext('') as fn:
dd = JSON_Streaming(fn, mode='r+', schema=self.schema)
dd.extend(self.dicts)
self.assertEquals(tuplify(tuple(dd)), self.tuples)
def test_extend_tuples(self):
with filetext('') as fn:
dd = JSON_Streaming(fn, mode='r+', schema=self.schema)
dd.extend(self.tuples)
self.assertEquals(tuplify(tuple(dd)), self.tuples)
def test_getitem(self):
with filetext(self.text) as fn:
dd = JSON_Streaming(fn, mode='r', schema=self.schema)
self.assertEqual(tuplify(dd[0]), self.tuples[0])
self.assertEqual(tuplify(dd[2:4]), self.tuples[2:4])
def test_as_dynd(self):
with filetext(self.text) as fn:
dd = JSON_Streaming(fn, mode='r', schema=self.schema)
assert nd.as_py(dd.as_dynd()) == self.dicts
def test_as_py(self):
with filetext(self.text) as fn:
dd = JSON_Streaming(fn, mode='r', schema=self.schema)
self.assertEqual(dd.as_py(), self.tuples)
if __name__ == '__main__':
unittest.main()
|
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import sys
import gevent
import uuid
import time
sys.path.append("../config/common/tests")
from testtools.matchers import Equals, Contains, Not
from test_utils import *
import test_common
import test_case
from vnc_api.vnc_api import *
from vnc_api.gen.resource_xsd import *
from cfgm_common.exceptions import *
from gevent import sleep
import discoveryclient.client as client
EP_DELIM=','
PUBSUB_DELIM=' '
def parse_pubsub_ep(pubsub_str):
r = pubsub_str.split(EP_DELIM)
if len(r) < 4:
for i in range(4-len(r)):
r.append('')
return r
# '1.1.1.1/24' or '1.1.1.1'
def prefix_str_to_obj(prefix_str):
if '/' not in prefix_str:
prefix_str += '/32'
x = prefix_str.split('/')
if len(x) != 2:
return None
return SubnetType(x[0], int(x[1]))
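# Illustrative sketch (not part of the original test module): a bare address is
# padded to a /32 host prefix, so the calls below yield SubnetType objects
# for ('77.77.3.0', 24) and ('77.77.3.11', 32) respectively.
def _example_prefix_str_to_obj():
    return prefix_str_to_obj('77.77.3.0/24'), prefix_str_to_obj('77.77.3.11')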
def build_dsa_rule_entry(rule_str):
    r = rule_str.split(PUBSUB_DELIM) if rule_str else []
if len(r) < 2:
return None
# [0] is publisher-spec, [1] is subscriber-spec
pubspec = parse_pubsub_ep(r[0])
subspec = parse_pubsub_ep(r[1])
pfx_pub = prefix_str_to_obj(pubspec[0])
pfx_sub = prefix_str_to_obj(subspec[0])
    if pfx_pub is None or pfx_sub is None:
return None
publisher = DiscoveryPubSubEndPointType(ep_prefix = pfx_pub,
ep_type = pubspec[1], ep_id = pubspec[2],
ep_version = pubspec[3])
subscriber = [DiscoveryPubSubEndPointType(ep_prefix = pfx_sub,
ep_type = subspec[1], ep_id = subspec[2],
ep_version = subspec[3])]
dsa_rule_entry = DiscoveryServiceAssignmentType(publisher, subscriber)
return dsa_rule_entry
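# Illustrative note (not part of the original test module): a DSA rule string is
# '<pub-prefix>,<pub-type>[,<pub-id>,<pub-version>] <sub-prefix>,<sub-type>[,...]',
# e.g. the rule below assigns publishers of type 'xmpp-server' in 77.77.3.0/24
# to 'contrail-vrouter-agent:0' subscribers anywhere in 77.77.0.0/16.
_example_dsa_rule_str = '77.77.3.0/24,xmpp-server 77.77.0.0/16,contrail-vrouter-agent:0'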
server_list = {}
def info_callback(info, client_id):
print 'subscribe[%s]=%s' % (client_id, info)
global server_list
server_list[client_id] = [entry['@publisher-id'] for entry in info]
def validate_in_use_count(response, expected_counts, context):
services = response['services']
in_use_counts = {entry['ep_id']:entry['in_use'] for entry in services}
print '%s %s' % (context, in_use_counts)
return in_use_counts == expected_counts
class TestDsa(test_case.DsTestCase):
def setUp(self):
extra_config_knobs = [
('pulkit-pub', 'policy', 'load-balance'),
('test_bug_1548638', 'policy', 'fixed'),
]
super(TestDsa, self).setUp(extra_disc_server_config_knobs=extra_config_knobs)
def tearDown(self):
global server_list
server_list = {}
super(TestDsa, self).tearDown()
def test_bug_1549243(self):
puburl = '/publish'
suburl = "/subscribe"
service_type = 'pulkit-pub'
subscriber_type = "pulkit-sub"
dsa = DiscoveryServiceAssignment()
rule_entry = build_dsa_rule_entry('77.77.2.0/24,%s 77.77.2.0/24,%s' % (service_type, subscriber_type))
rule_uuid = uuid.uuid4()
dsa_rule1 = DsaRule(name = str(rule_uuid), parent_obj = dsa, dsa_rule_entry = rule_entry)
dsa_rule1.set_uuid(str(rule_uuid))
self._vnc_lib.dsa_rule_create(dsa_rule1)
# publish 3 instances
pub_tasks = []
client_type = 'test-discovery'
for ipaddr in ["77.77.1.10", "77.77.2.10", "77.77.3.10"]:
pub_id = 'test_discovery-%s' % ipaddr
pub_data = {service_type : '%s-%s' % (service_type, ipaddr)}
disc = client.DiscoveryClient(
self._disc_server_ip, self._disc_server_port,
client_type, pub_id)
disc.remote_addr = ipaddr
task = disc.publish(service_type, pub_data)
pub_tasks.append(task)
time.sleep(1)
# Verify all services are published.
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), 3)
service_count = 2
sub_tasks = []
for remote, count in [("77.77.3.11", 6), ("77.77.2.11", 4)]:
for i in range(count):
subscriber_id = "client-%s-%d" % (remote, i)
disc = client.DiscoveryClient(
self._disc_server_ip, self._disc_server_port,
subscriber_type, pub_id=subscriber_id)
disc.remote_addr = remote
obj = disc.subscribe(
service_type, service_count, info_callback, subscriber_id)
sub_tasks.append(obj.task)
time.sleep(1)
print 'Started tasks to subscribe service %s, count %d' \
% (service_type, service_count)
# validate all clients have subscribed
time.sleep(1)
(code, msg) = self._http_get('/clients.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), 6*2+4)
# verify service assignment is 4,4,8
expected_in_use_counts = {
'test_discovery-77.77.1.10':4,
'test_discovery-77.77.2.10':8,
'test_discovery-77.77.3.10':4,
}
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), 3)
success = validate_in_use_count(response, expected_in_use_counts, 'In-use count after initial subscribe')
self.assertEqual(success, True)
# validate assignment remains same after resubscribe
time.sleep(2*60)
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), 3)
        success = validate_in_use_count(response, expected_in_use_counts, 'In-use count after resubscribe')
self.assertEqual(success, True)
def test_bug_1548638(self):
puburl = '/publish'
suburl = "/subscribe"
service_type = 'test_bug_1548638'
# publish 3 dns servers
for ipaddr in ["77.77.1.10", "77.77.2.10", "77.77.3.10"]:
payload = {
service_type: { "ip-addr" : ipaddr, "port" : "1111" },
'service-type' : '%s' % service_type,
'service-id' : '%s-%s' % (service_type, ipaddr),
'remote-addr': ipaddr,
}
(code, msg) = self._http_post(puburl, json.dumps(payload))
self.assertEqual(code, 200)
# Verify all services are published.
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), 3)
# verify all agents see only 2 publishers due to fixed policy
expectedpub_set = set(["test_bug_1548638-77.77.1.10", "test_bug_1548638-77.77.2.10"])
for ipaddr in ["77.77.1.11", "77.77.2.11", "77.77.3.11"]:
payload = {
'service' : service_type,
'client' : ipaddr,
'instances' : 2,
'client-type' : 'contrail-vrouter-agent:0',
'remote-addr' : ipaddr,
}
(code, msg) = self._http_post(suburl, json.dumps(payload))
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response[service_type]), payload['instances'])
receivedpub_set = set([svc['@publisher-id'] for svc in response[service_type]])
self.assertEqual(expectedpub_set == receivedpub_set, True)
dsa = DiscoveryServiceAssignment()
rule_entry = build_dsa_rule_entry('77.77.3.0/24,%s 77.77.3.11/32,contrail-vrouter-agent:0' % service_type)
rule_uuid = uuid.uuid4()
dsa_rule1 = DsaRule(name = str(rule_uuid), parent_obj = dsa, dsa_rule_entry = rule_entry)
dsa_rule1.set_uuid(str(rule_uuid))
self._vnc_lib.dsa_rule_create(dsa_rule1)
expectedpub_set = set(["test_bug_1548638-77.77.1.10", "test_bug_1548638-77.77.2.10"])
for ipaddr in ["77.77.1.11", "77.77.2.11"]:
payload = {
'service' : service_type,
'client' : ipaddr,
'instances' : 2,
'client-type' : 'contrail-vrouter-agent:0',
'remote-addr' : ipaddr,
}
(code, msg) = self._http_post(suburl, json.dumps(payload))
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response[service_type]), payload['instances'])
receivedpub_set = set([svc['@publisher-id'] for svc in response[service_type]])
self.assertEqual(expectedpub_set == receivedpub_set, True)
expectedpub_set = set(["test_bug_1548638-77.77.3.10"])
for ipaddr in ["77.77.3.11"]:
payload = {
'service' : service_type,
'client' : ipaddr,
'instances' : 2,
'client-type' : 'contrail-vrouter-agent:0',
'remote-addr' : ipaddr,
}
(code, msg) = self._http_post(suburl, json.dumps(payload))
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response[service_type]), 1)
receivedpub_set = set([svc['@publisher-id'] for svc in response[service_type]])
self.assertEqual(expectedpub_set == receivedpub_set, True)
def test_bug_1548771(self):
dsa = DiscoveryServiceAssignment()
rule_entry = build_dsa_rule_entry('77.77.3.0/24,xmpp-server 77.77.0.0/16,contrail-vrouter-agent:0')
rule_uuid = uuid.uuid4()
dsa_rule1 = DsaRule(name = str(rule_uuid), parent_obj = dsa, dsa_rule_entry = rule_entry)
dsa_rule1.set_uuid(str(rule_uuid))
self._vnc_lib.dsa_rule_create(dsa_rule1)
rule_entry = build_dsa_rule_entry('77.77.3.0/24,dns-server 77.77.3.11/32,contrail-vrouter-agent:0')
rule_uuid = uuid.uuid4()
dsa_rule2 = DsaRule(name = str(rule_uuid), parent_obj = dsa, dsa_rule_entry = rule_entry)
dsa_rule2.set_uuid(str(rule_uuid))
self._vnc_lib.dsa_rule_create(dsa_rule2)
puburl = '/publish'
suburl = "/subscribe"
# publish 3 control nodes and dns servers
for service_type in ['xmpp-server', 'dns-server']:
for ipaddr in ["77.77.1.10", "77.77.2.10", "77.77.3.10"]:
payload = {
service_type: { "ip-addr" : ipaddr, "port" : "1111" },
'service-type' : '%s' % service_type,
'service-id' : '%s-%s' % (service_type, ipaddr),
'remote-addr': ipaddr,
}
(code, msg) = self._http_post(puburl, json.dumps(payload))
self.assertEqual(code, 200)
# Verify all services are published.
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), 6)
# verify all agents see only 1 xmpp-server (rule #1)
service_type = 'xmpp-server'
expectedpub_set = set(["xmpp-server-77.77.3.10"])
for ipaddr in ["77.77.1.11", "77.77.2.11", "77.77.3.11"]:
payload = {
'service' : '%s' % service_type,
'client' : '%s-%s' % (service_type, ipaddr),
'instances' : 2,
'client-type' : 'contrail-vrouter-agent:0',
'remote-addr' : ipaddr,
}
(code, msg) = self._http_post(suburl, json.dumps(payload))
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response[service_type]), 1)
receivedpub_set = set([svc['@publisher-id'] for svc in response[service_type]])
self.assertEqual(expectedpub_set == receivedpub_set, True)
self._vnc_lib.dsa_rule_delete(id = dsa_rule1.get_uuid())
self._vnc_lib.dsa_rule_delete(id = dsa_rule2.get_uuid())
def test_bug_1540777(self):
dsa = DiscoveryServiceAssignment()
rule_entry = build_dsa_rule_entry('77.77.3.10/32,pulkit-pub 77.77.3.11/32,pulkit-sub')
rule_uuid = uuid.uuid4()
dsa_rule1 = DsaRule(name = str(rule_uuid), parent_obj = dsa, dsa_rule_entry = rule_entry)
dsa_rule1.set_uuid(str(rule_uuid))
self._vnc_lib.dsa_rule_create(dsa_rule1)
rule_entry = build_dsa_rule_entry('77.77.2.10/32,pulkit-pub 77.77.3.11/32,pulkit-sub')
rule_uuid = uuid.uuid4()
dsa_rule2 = DsaRule(name = str(rule_uuid), parent_obj = dsa, dsa_rule_entry = rule_entry)
dsa_rule2.set_uuid(str(rule_uuid))
self._vnc_lib.dsa_rule_create(dsa_rule2)
puburl = '/publish'
suburl = "/subscribe"
service_type = 'pulkit-pub'
# publish 3 control nodes - 2 subject to rules above
for ipaddr in ["77.77.1.10", "77.77.2.10", "77.77.3.10"]:
payload = {
service_type: { "ip-addr" : ipaddr, "port" : "1111" },
'service-type' : '%s' % service_type,
'service-id' : 'pulkit-pub-%s' % ipaddr,
'remote-addr': ipaddr,
}
(code, msg) = self._http_post(puburl, json.dumps(payload))
self.assertEqual(code, 200)
payload = {
'service' : '%s' % service_type,
'client' : 'discovery-ut',
'instances' : 3,
'client-type' : 'pulkit-sub',
'remote-addr' : '77.77.3.11',
}
# should see 2 publishers due to two rules
(code, msg) = self._http_post(suburl, json.dumps(payload))
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response[service_type]), 2)
expectedpub_set = set(["pulkit-pub-77.77.2.10", "pulkit-pub-77.77.3.10"])
receivedpub_set = set([svc['@publisher-id'] for svc in response[service_type]])
self.assertEqual(expectedpub_set == receivedpub_set, True)
self._vnc_lib.dsa_rule_delete(id = dsa_rule1.get_uuid())
self._vnc_lib.dsa_rule_delete(id = dsa_rule2.get_uuid())
def test_dsa_config(self):
# Assign DC1 control nodes to DC1 agents
rule_entry = build_dsa_rule_entry('1.1.1.0/24,Control-Node 1.1.1.0/24,Vrouter-Agent')
dsa = DiscoveryServiceAssignment()
rule_uuid = uuid.uuid4()
dsa_rule1 = DsaRule(name = str(rule_uuid), parent_obj = dsa, dsa_rule_entry = rule_entry)
dsa_rule1.set_uuid(str(rule_uuid))
self._vnc_lib.dsa_rule_create(dsa_rule1)
# Assign DC2 control nodes to DC1 agents
rule_entry = build_dsa_rule_entry('2.2.2.0/24,Control-Node 2.2.2.0/24,Vrouter-Agent')
rule_uuid = uuid.uuid4()
dsa_rule2 = DsaRule(name = str(rule_uuid), parent_obj = dsa, dsa_rule_entry = rule_entry)
dsa_rule2.set_uuid(str(rule_uuid))
self._vnc_lib.dsa_rule_create(dsa_rule2)
puburl = '/publish'
service_type = 'Control-Node'
# publish 4 control nodes - 2 in two data centers each
payload = {
'%s' % service_type: { "ip-addr" : "1.1.1.1", "port" : "1111" },
'service-type' : '%s' % service_type,
'service-id' : 'DC1-CN1',
'remote-addr': '1.1.1.1',
}
(code, msg) = self._http_post(puburl, json.dumps(payload))
self.assertEqual(code, 200)
payload = {
'%s' % service_type: { "ip-addr" : "1.1.1.2", "port" : "1112" },
'service-type' : '%s' % service_type,
'service-id' : 'DC1-CN2',
'remote-addr': '1.1.1.2',
}
(code, msg) = self._http_post(puburl, json.dumps(payload))
self.assertEqual(code, 200)
payload = {
'%s' % service_type: { "ip-addr" : "2.2.2.1", "port" : "2221" },
'service-type' : '%s' % service_type,
'service-id' : 'DC2-CN1',
'remote-addr': '2.2.2.1',
}
(code, msg) = self._http_post(puburl, json.dumps(payload))
self.assertEqual(code, 200)
payload = {
'%s' % service_type: { "ip-addr" : "2.2.2.2", "port" : "2222" },
'service-type' : '%s' % service_type,
'service-id' : 'DC2-CN2',
'remote-addr': '2.2.2.2',
}
(code, msg) = self._http_post(puburl, json.dumps(payload))
self.assertEqual(code, 200)
# Verify all services are published.
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), 4)
# json subscribe request
suburl = "/subscribe"
payload = {
'service' : '%s' % service_type,
'client' : 'DC1-VA1',
'instances' : 0,
'client-type' : 'Vrouter-Agent',
'remote-addr' : '3.3.3.3',
'version' : '2.2',
}
# should see all 4 publishers for sub that is not in DC1 or DC2
(code, msg) = self._http_post(suburl, json.dumps(payload))
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response[service_type]), 4)
# Sub in DC1 - should see only DC1 services
payload['remote-addr'] = '1.1.1.3'
(code, msg) = self._http_post(suburl, json.dumps(payload))
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response[service_type]), 2)
for svc in response[service_type]:
self.assertEqual("DC1-CN" in svc['@publisher-id'], True)
# Sub in DC2 - should see only DC2 services
payload['remote-addr'] = '2.2.2.3'
(code, msg) = self._http_post(suburl, json.dumps(payload))
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response[service_type]), 2)
for svc in response[service_type]:
self.assertEqual("DC2-CN" in svc['@publisher-id'], True)
# Subscribe to IfmapServer from DC1, DC2 and DC3. There are no
# assignment rules applicable to IfmapServer. Thus clients from
# all DCs should be able to subscribe to the singleton IfmapServer
service_type = 'IfmapServer'
payload = {
service_type: { "ip-addr" : "4.4.4.4", "port" : "4444" },
'service-type' : service_type,
'service-id' : 'Controller',
'remote-addr': '4.4.4.4',
}
(code, msg) = self._http_post(puburl, json.dumps(payload))
self.assertEqual(code, 200)
payload = {
'service' : '%s' % service_type,
'client' : 'DC1-VA1',
'instances' : 0,
'client-type' : 'Vrouter-Agent',
}
for remote in ['1.1.1.1', '2.2.2.2', '3.3.3.3']:
payload['remote-addr'] = remote
(code, msg) = self._http_post(suburl, json.dumps(payload))
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response[service_type]), 1)
# Delete service assignment rule.
# Subs from any DC should see all DC1+DC2 services
self._vnc_lib.dsa_rule_delete(id = dsa_rule1.uuid)
self._vnc_lib.dsa_rule_delete(id = dsa_rule2.uuid)
service_type = 'Control-Node'
payload = {
'service' : '%s' % service_type,
'client' : 'Dont Care',
'instances' : 0,
'client-type' : 'Vrouter-Agent',
}
# Sub in DC1 or DC2 should see DC1+DC2 services
expectedpub_set = set(["DC1-CN1", "DC1-CN2", "DC2-CN1", "DC2-CN2"])
for sub_ip in ['1.1.1.3', '2.2.2.3']:
payload['remote-addr'] = sub_ip
(code, msg) = self._http_post(suburl, json.dumps(payload))
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response[service_type]), 4)
receivedpub_set = set([svc['@publisher-id'] for svc in response[service_type]])
self.assertEqual(expectedpub_set == receivedpub_set, True)
#end class TestDsa
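# Illustrative note (added for clarity, not part of the original test suite): the
# rule strings passed to build_dsa_rule_entry() follow the pattern
#   '<publisher-prefix>,<publisher-type> <subscriber-prefix>,<subscriber-type>'
# For example, '1.1.1.0/24,Control-Node 1.1.1.0/24,Vrouter-Agent' restricts
# Control-Node publishers in 1.1.1.0/24 to Vrouter-Agent subscribers in
# 1.1.1.0/24, which is why the DC1 agents in test_dsa_config only see the
# DC1-CN* services while that rule exists.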
|
|
# Copyright (C) 2014 Narf Industries <info@narfindustries.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import ctypes
import random
import re
import struct
from binascii import hexlify
class Matrix(object):
def __init__(self, width, height, data=None, valid=True):
self.width = width
self.height = height
self.data = data
# Disallow singular matrices to avoid vulnerability
while not self.data or self.det() == 0.0:
self.data = []
for i in xrange(height):
self.data.append([])
for j in xrange(width):
self.data[i].append(random.uniform(-Support.FLOAT_LIMIT, Support.FLOAT_LIMIT))
if not valid:
self.data[random.randint(0, self.height - 1)][random.randint(0, self.width - 1)] = float('inf')
def __str__(self):
return str(self.data)
def __add__(self, other):
data = []
if isinstance(other, (int, long, float)):
for i in xrange(self.height):
data.append([])
for j in xrange(self.width):
data[i].append(self.data[i][j] + float(other))
return Matrix(self.width, self.height, data)
elif isinstance(other, Matrix):
if self.width != other.width or self.height != other.height:
raise ValueError()
for i in xrange(self.height):
data.append([])
for j in xrange(self.width):
data[i].append(self.data[i][j] + other.data[i][j])
return Matrix(self.width, self.height, data)
else:
return NotImplemented
def __radd__(self, other):
return self + other
def __sub__(self, other):
if isinstance(other, (int, long, float)):
return self + -other
elif isinstance(other, Matrix):
neg = Matrix(other.width, other.height, [[-x for x in c] for c in other.data])
return self + neg
else:
return NotImplemented
def __rsub__(self, other):
neg = Matrix(self.width, self.height, [[-x for x in c] for c in self.data])
return neg + other
def __mul__(self, other):
data = []
if isinstance(other, (int, long, float)):
for i in xrange(self.height):
data.append([])
for j in xrange(self.width):
data[i].append(self.data[i][j] * float(other))
return Matrix(self.width, self.height, data)
elif isinstance(other, Matrix):
if self.width != other.height:
raise ValueError()
for i in xrange(self.height):
data.append([])
for j in xrange(other.width):
data[i].append(0.0)
for k in xrange(self.width):
data[i][j] += self.data[i][k] * other.data[k][j]
# result is self.height x other.width; Matrix() takes (width, height, data)
return Matrix(other.width, self.height, data)
else:
return NotImplemented
def __rmul__(self, other):
return self * other
def __div__(self, other):
data = []
if isinstance(other, Matrix):
return NotImplemented
elif isinstance(other, (int, long, float)):
for i in xrange(self.height):
data.append([])
for j in xrange(self.width):
data[i].append(self.data[i][j] / other)
return Matrix(self.width, self.height, data)
else:
return NotImplemented
def __rdiv__(self, other):
return Matrix(self.width, self.height, [[other / x for x in c] for c in self.data])
def det(self):
if self.width != self.height:
return None
if self.width == 1:
return self.data[0][0]
elif self.width == 2:
return (self.data[0][0] * self.data[1][1]) - (self.data[0][1] * self.data[1][0])
else:
return None
def inv(self):
det = self.det()
if det is None or det == 0.0:
raise ValueError()
data = []
for i in xrange(self.height):
data.append([])
for j in xrange(self.width):
data[i].append(0.0)
if self.width == 1:
data[0][0] = 1.0 / det
elif self.width == 2:
data[0][0] = self.data[1][1] / det
data[0][1] = (-self.data[0][1]) / det
data[1][0] = (-self.data[1][0]) / det
data[1][1] = self.data[0][0] / det
else:
raise ValueError()
return Matrix(self.width, self.height, data)
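# Worked example (illustrative, not part of the original poller): for a 2x2 matrix
# [[a, b], [c, d]] the methods above compute det = a*d - b*c and
# inv = [[d/det, -b/det], [-c/det, a/det]]; e.g. [[4, 7], [2, 6]] has det = 10 and
# inverse [[0.6, -0.7], [-0.2, 0.4]].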
class Support(object):
FLOAT_LIMIT = 100000
INT_LIMIT = 0x7fffffff
def __init__(self):
self.stack = []
def push(self, operand):
self.stack.append(operand)
def pop(self):
if not self.stack:
return None
return self.stack.pop()
def clear(self):
self.stack = []
def binary_op(self, op):
b = self.pop()
if b is None:
return -1
a = self.pop()
if a is None:
self.push(b)
return -1
try:
res = op(a, b)
if isinstance(res, (int, long)):
# Simulate integer overflow
res = ctypes.c_int(res & 0xffffffff).value
self.push(res)
return 0
except:
self.push(a)
self.push(b)
return -1
def add(self):
return self.binary_op(lambda a, b: a + b)
def sub(self):
return self.binary_op(lambda a, b: a - b)
def mul(self):
return self.binary_op(lambda a, b: a * b)
def div(self):
# Python integer division floors, but C truncates toward zero, so emulate C here
def c_div(a, b):
if isinstance(a, (int, long)) and isinstance(b, (int, long)):
return int(float(a) / b)
else:
return a / b
return self.binary_op(c_div)
def inv(self):
a = self.pop()
try:
if a.det() == 0.0:
raise ValueError()
res = a.inv()
self.push(res)
return 0
except:
self.push(a)
return -1
def pack_value(self, value, fuzzy=False):
if isinstance(value, (int, long)):
ret = struct.pack('<II', 0, value & 0xffffffff)
return ret, len(ret)
elif isinstance(value, Matrix):
ret = struct.pack('<HBB', 1, value.width, value.height)
length = len(ret)
if fuzzy:
ret = ''.join(['\\x' + hexlify(c) for c in ret])
for i in xrange(value.height):
for j in xrange(value.width):
packed = struct.pack('<d', value.data[i][j])
length += len(packed)
# Deal with differences in float precision
if fuzzy:
packed = '...' + ''.join(['\\x' + hexlify(c) for c in packed[3:]])
ret += packed
return ret, length
elif isinstance(value, float):
ret = struct.pack('<Id', 2, value)
return ret, len(ret)
def make_push(self, value):
return struct.pack('<I', 0) + self.pack_value(value)[0]
def make_pop(self):
return struct.pack('<I', 1)
def make_clear(self):
return struct.pack('<I', 2)
def make_add(self):
return struct.pack('<I', 3)
def make_sub(self):
return struct.pack('<I', 4)
def make_mul(self):
return struct.pack('<I', 5)
def make_div(self):
return struct.pack('<I', 6)
def make_inv(self):
return struct.pack('<I', 7)
def make_quit(self):
return '\xff' * 4
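# Minimal self-check sketch (an addition, not part of the original poller): it
# pushes two integers, applies add(), and shows the little-endian opcode encoding
# produced by the make_* helpers above. Guarded so it never runs on import.
if __name__ == '__main__':
    _support = Support()
    _support.push(2)
    _support.push(3)
    _support.add()
    print _support.pop()             # 5
    print repr(_support.make_add())  # '\x03\x00\x00\x00' (opcode 3, little-endian)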
|
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import mock
from rally.common import logging as log
from tests.unit import test
class LogTestCase(test.TestCase):
@mock.patch("rally.common.logging.CONF")
@mock.patch("rally.common.logging.handlers")
@mock.patch("rally.common.logging.oslogging")
def test_setup(self, mock_oslogging, mock_handlers, mock_conf):
proj = "fakep"
version = "fakev"
mock_handlers.ColorHandler.LEVEL_COLORS = {
logging.DEBUG: "debug_color"}
mock_conf.rally_debug = True
log.setup(proj, version)
self.assertIn(logging.RDEBUG, mock_handlers.ColorHandler.LEVEL_COLORS)
self.assertEqual(
mock_handlers.ColorHandler.LEVEL_COLORS[logging.DEBUG],
mock_handlers.ColorHandler.LEVEL_COLORS[logging.RDEBUG])
mock_oslogging.setup.assert_called_once_with(mock_conf, proj, version)
mock_oslogging.getLogger(None).logger.setLevel.assert_called_once_with(
logging.RDEBUG)
@mock.patch("rally.common.logging.log")
@mock.patch("rally.common.logging.RallyContextAdapter")
@mock.patch("rally.common.logging.oslogging")
def test_getLogger(self, mock_oslogging, mock_rally_context_adapter,
mock_log):
name = "fake"
vers = "fake"
mock_oslogging._loggers = {}
returned_logger = log.getLogger(name, vers)
self.assertIn(name, mock_oslogging._loggers)
mock_rally_context_adapter.assert_called_once_with(
mock_log.getLogger(name),
{"project": "rally", "version": vers})
self.assertEqual(mock_oslogging._loggers[name], returned_logger)
class LogRallyContextAdapter(test.TestCase):
@mock.patch("rally.common.logging.log")
@mock.patch("rally.common.logging.oslogging.KeywordArgumentAdapter")
def test_debug(self, mock_keyword_argument_adapter, mock_log):
mock_log.RDEBUG = 123
fake_msg = "fake message"
radapter = log.RallyContextAdapter(mock.MagicMock(), "fakep")
radapter.log = mock.MagicMock()
radapter.debug(fake_msg)
radapter.log.assert_called_once_with(mock_log.RDEBUG,
fake_msg)
class ExceptionLoggerTestCase(test.TestCase):
@mock.patch("rally.common.logging.is_debug")
def test_context(self, mock_is_debug):
# Prepare
mock_is_debug.return_value = True
logger = mock.MagicMock()
exception = Exception()
# Run
with log.ExceptionLogger(logger, "foo") as e:
raise exception
# Assertions
logger.warning.assert_called_once_with("foo")
logger.exception.assert_called_once_with(exception)
logger.debug.assert_called_once_with(exception)
self.assertEqual(e.exception, exception)
class LogCatcherTestCase(test.TestCase):
# FIXME(pboldin): These are really functional tests and should be moved
# there when the infrastructure is ready
def test_logcatcher(self):
LOG = log.getLogger("testlogger")
LOG.logger.setLevel(log.INFO)
with log.LogCatcher(LOG) as catcher:
LOG.warning("Warning")
LOG.info("Info")
LOG.debug("Debug")
catcher.assertInLogs("Warning")
self.assertRaises(AssertionError, catcher.assertInLogs, "Error")
self.assertEqual(["Warning", "Info"], catcher.fetchLogs())
self.assertEqual(2, len(catcher.fetchLogRecords()))
class CatcherHandlerTestCase(test.TestCase):
@mock.patch("logging.handlers.BufferingHandler.__init__")
def test_init(self, mock_buffering_handler___init__):
catcher_handler = log.CatcherHandler()
mock_buffering_handler___init__.assert_called_once_with(
catcher_handler, 0)
def test_shouldFlush(self):
catcher_handler = log.CatcherHandler()
self.assertFalse(catcher_handler.shouldFlush())
def test_emit(self):
catcher_handler = log.CatcherHandler()
catcher_handler.buffer = mock.Mock()
catcher_handler.emit("foobar")
catcher_handler.buffer.append.assert_called_once_with("foobar")
class LogCatcherUnitTestCase(test.TestCase):
def setUp(self):
super(LogCatcherUnitTestCase, self).setUp()
patcher = mock.patch("rally.common.logging.CatcherHandler")
self.catcher_handler = patcher.start()
self.catcher_handler.return_value.buffer = [
mock.Mock(msg="foo"), mock.Mock(msg="bar")]
self.addCleanup(patcher.stop)
self.logger = mock.Mock()
def test_init(self):
catcher = log.LogCatcher(self.logger)
self.assertEqual(self.logger.logger, catcher.logger)
self.assertEqual(self.catcher_handler.return_value, catcher.handler)
self.catcher_handler.assert_called_once_with()
def test_enter(self):
catcher = log.LogCatcher(self.logger)
self.assertEqual(catcher, catcher.__enter__())
self.logger.logger.addHandler.assert_called_once_with(
self.catcher_handler.return_value)
def test_exit(self):
catcher = log.LogCatcher(self.logger)
catcher.__exit__(None, None, None)
self.logger.logger.removeHandler.assert_called_once_with(
self.catcher_handler.return_value)
def test_assertInLogs(self):
catcher = log.LogCatcher(self.logger)
self.assertEqual(["foo"], catcher.assertInLogs("foo"))
self.assertEqual(["bar"], catcher.assertInLogs("bar"))
self.assertRaises(AssertionError, catcher.assertInLogs, "foobar")
def test_assertInLogs_contains(self):
catcher = log.LogCatcher(self.logger)
record_mock = mock.MagicMock()
self.catcher_handler.return_value.buffer = [record_mock]
record_mock.msg.__contains__.return_value = True
self.assertEqual([record_mock.msg], catcher.assertInLogs("foo"))
record_mock.msg.__contains__.assert_called_once_with("foo")
def test_fetchLogRecords(self):
catcher = log.LogCatcher(self.logger)
self.assertEqual(self.catcher_handler.return_value.buffer,
catcher.fetchLogRecords())
def test_fetchLogs(self):
catcher = log.LogCatcher(self.logger)
self.assertEqual(
[r.msg for r in self.catcher_handler.return_value.buffer],
catcher.fetchLogs())
|
|
#! /usr/bin/env python
import dbus
import dbus.service
import dbus.mainloop.glib
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
import yaml
import os
import turk
from turk import get_config
import urllib, urllib2
import logging
from twisted.internet import glib2reactor
glib2reactor.install()
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from twisted.words.protocols.jabber import client, jstrports
from twisted.words.protocols.jabber.jid import JID
from twisted.words.protocols.jabber import xmlstream
from twisted.words.xish.domish import Element
from twisted.words.xish import xpath
from twisted.application.internet import TCPClient
from twisted.internet import reactor
from wokkel.xmppim import PresenceClientProtocol, RosterClientProtocol
server = ('skynet.local', 5222)
jid = JID("platform@skynet.local")
password = 'password'
log = turk.init_logging('bridge')
class Bridge(dbus.service.Object):
"""
Manages the interface between the Turk Cloud and this platform using a
combination of HTTP (for driver->app updates) and XMPP (for app->driver messages).
Listens for relevant D-BUS signals and logs them to the XMPP server.
"""
def __init__(self, server, port, jid, password, bus):
# Setup D-BUS service and signal callbacks
bus_name = dbus.service.BusName(turk.TURK_BRIDGE_SERVICE, bus)
self.bus = bus
dbus.service.Object.__init__(self, bus_name, '/Bridge')
# Setup XMPP client
factory = client.XMPPClientFactory(jid, password)
self.manager = xmlstream.StreamManager(factory)
self.handler = BridgeXMPPHandler(self, self.manager)
self.manager.addHandler(self.handler)
client_svc = TCPClient(server, port, factory)
client_svc.startService()
# Setup driver registry
self.drivers = {}
# Init driver subscriptions
self.subscriptions = {}
log.debug('started')
@dbus.service.method(dbus_interface=turk.TURK_BRIDGE_INTERFACE,
in_signature='t', out_signature='')
def RegisterDriver(self, driver_id):
"""
Registers a driver ID with the bridge, so that it can receive updates.
"""
if driver_id not in self.drivers:
log.debug('registering driver %d' % driver_id)
self.drivers[driver_id] = Driver(self.bus, driver_id)
else:
log.debug('driver %d already registered' % driver_id)
@dbus.service.method(dbus_interface=turk.TURK_BRIDGE_INTERFACE,
in_signature='sss', out_signature='')
def PublishUpdate(self, type, update, driver):
"""
Publishes a new update via HTTP to all apps that have registered to
this data driver
"""
log.debug('publishing update from driver %s' % driver)
driver = int(driver)
log.debug('subscriptions: %s'% self.subscriptions)
if driver in self.subscriptions:
for app in self.subscriptions[driver]:
try:
# build app URL
url = self.subscriptions[driver][app]
log.debug('POSTing to url %s' % url)
# encode params
request = urllib2.Request(url, update)
# POST request
response = urllib2.urlopen(request, timeout=1)
page = response.read(100)
log.debug('successfully updated app %d' % (app) )
log.debug(page)
except urllib2.HTTPError, e:
log.debug('PublishUpdate: HTTP error %d' % e.getcode())
except Exception, e:
log.debug('PublishUpdate: %s'% e)
def SignalUpdate(self, driver, app, update):
""" Sends a signal to indicate an update for a driver has been received. """
if driver not in self.drivers:
self.drivers[driver] = Driver(self.bus, driver)
self.drivers[driver].Update(driver, app, update)
def registerObserver(self, driver, app, url):
"""
Registers app to be notified of events coming from driver.
All updates will be POSTed to url with HTTP
"""
log.debug('registerObserver: driver:%s app:%s url:%s' % (driver, app, url))
if driver in self.subscriptions:
if app not in self.subscriptions[driver]:
self.subscriptions[driver][app] = url
log.debug('Added subscription to driver %s for app %s' % (driver, app))
else:
log.debug('App %d is already subscribed to driver %d' % (app, driver))
else:
self.subscriptions[driver] = {app : url}
log.debug('Added subscription to driver %s for app %s' % (driver, app))
def requireService(self, driver, app):
"""
Notifies Spawner that driver needs to be started or already running.
Forwards any error notifications to the server through XMPP
"""
log.debug('requireService: driver:%s app:%s' % (driver, app))
try:
spawner = self.bus.get_object(turk.TURK_SPAWNER_SERVICE, '/Spawner')
spawner.requireService(type, driver, app,
reply_handler=lambda:None, error_handler=self.driverFail)
except dbus.DBusException, e:
log.debug(e)
def driverFail(self, exception):
log.debug('failed to start require driver: %s' % exception)
@dbus.service.signal(dbus_interface=turk.TURK_BRIDGE_INTERFACE, signature='')
def BridgeStarted(self):
"""
Called to indicate that the Bridge has successfully started up and
authenticated with the XMPP server
"""
class BridgeXMPPHandler(PresenceClientProtocol, RosterClientProtocol):
REQUIRE = "/message/require[@xmlns='http://turkinnovations.com/protocol']"
REGISTER = "/message/register[@xmlns='http://turkinnovations.com/protocol']"
UPDATE = "/message/update[@xmlns='http://turkinnovations.com/protocol']"
def __init__(self, bridge, stream):
self.bridge = bridge
self.stream = stream
def connectionInitialized(self):
"""
Called right after connecting to the XMPP server. Sets up handlers
and subscriptions and sends out presence notifications
"""
log.debug('connectionInitialized')
PresenceClientProtocol.connectionInitialized(self)
RosterClientProtocol.connectionInitialized(self)
# Debug callback for all stanza types
#self.xmlstream.addObserver('/*', self.dataReceived)
# Callback for chat messages
self.xmlstream.addObserver('/message/body', self.onMessage)
# Callback for subscribed presence
self.xmlstream.addObserver("/presence[@type='subscribe']", self.subscribeReceived)
# Callbacks for require, register, update
self.xmlstream.addObserver(self.REQUIRE, self.onRequire)
self.xmlstream.addObserver(self.REGISTER, self.onRegister)
self.xmlstream.addObserver(self.UPDATE, self.onUpdate)
self.bridge.BridgeStarted()
def rosterReceived(roster):
""" Subscribe to all contacts in roster """
for jid in roster:
self.subscribe(JID(jid))
# Get roster
self.getRoster().addCallback(rosterReceived)
# Set status to available
self.available(show="chat", statuses={'':'Turk Platform Ready'})
def dataReceived(self, element):
"""
Called when any data is received
"""
log.debug(element.toXml())
def send(self, element):
"""
Sends a message over the XML stream
"""
self.stream.send(element)
def sendMessage(self, to, message, type='normal'):
"""
Sends a message to another XMPP client
"""
msg = Element(("jabber:client", "message"))
msg.attributes = { 'to': to.full(), 'type':type }
body = msg.addElement("body", content=message)
self.send(msg)
def onMessage(self, message):
"""
Called when a message stanza was received.
"""
print
text = str(message.body)
type = message.getAttribute('type')
log.debug("BridgeXMPPHandler: received a '%s' message: '%s'" % (type, text))
def onRequire(self, message):
""" Called when Turk require element(s) are received """
log.debug('require stanza received')
for driver in xpath.queryForNodes(self.REQUIRE + "/driver", message):
require = driver.parent
id = int(str(driver))
app = int(require['app'])
log.debug('driver %s required for app %s' % (id, app))
# Check for and/or start the driver
self.bridge.requireService(id, app)
def onRegister(self, message):
"""
Called when Turk register element(s) are received
"""
log.debug('register stanza received')
for driver in xpath.queryForNodes(self.REGISTER + "/driver", message):
register = driver.parent
id = int(driver['id'])
app = int(register['app'])
url = register['url']
log.debug('app %s registering to driver %s' % (app, id))
self.bridge.registerObserver(id, app, url)
def onUpdate(self, message):
"""
Called when Turk update element(s) are received
"""
log.debug('update stanza received')
for update in xpath.queryForNodes(self.UPDATE, message):
try:
driver = int(update['to'])
app = int(update['from'])
log.debug('got an update for driver#%s from app#%s' % (driver, app))
# Send the update to the driver
self.bridge.SignalUpdate(driver, app, update.toXml())
except Exception, e:
log.debug('Error parsing update XML: %s' % e)
def subscribeReceived(self, entity):
"""
Subscription request was received.
Approve the request automatically by sending a 'subscribed' presence back
"""
self.subscribed(entity)
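# Illustrative stanza sketches (attribute values are invented) matching the xpath
# observers registered above:
#   <message><register xmlns="http://turkinnovations.com/protocol"
#                      app="7" url="http://example.com/app/7">
#     <driver id="42"/>
#   </register></message>
#   <message><update xmlns="http://turkinnovations.com/protocol"
#                    to="42" from="7">...</update></message>
# onRegister() reads driver['id'], register['app'] and register['url'];
# onUpdate() reads update['to'] (the driver) and update['from'] (the app).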
class Driver(dbus.service.Object):
def __init__(self, bus, id):
self.path = '/Bridge/Drivers/%d' % (id)
bus_name = dbus.service.BusName(turk.TURK_BRIDGE_SERVICE, bus)
dbus.service.Object.__init__(self, bus_name=bus_name, object_path=self.path)
self.type = type
self.id = id
self.last_update = ''
@dbus.service.signal(dbus_interface=turk.TURK_BRIDGE_INTERFACE, signature='tts')
def Update(self, driver, app, update):
self.last_update = update
log.debug('%s received an update: %s' % (self.path, update.replace('\n','')))
@dbus.service.method(dbus_interface=turk.TURK_DRIVER_INTERFACE, in_signature='', out_signature='s')
def GetLastUpdate(self):
return self.last_update
def run(conf='/etc/turk/turk.yml', daemon=False):
if isinstance(conf, basestring):
try:
conf = yaml.load(open(conf, 'rU'))
except Exception:
print 'failed opening configuration file "%s"' % (conf)
exit(1)
log = turk.init_logging('bridge', conf, debug=get_config('bridge.debug'))
jid = JID(get_config('bridge.username', conf))
bus = getattr(dbus, get_config('global.bus', conf))()
server, port = get_config('bridge.server', conf), get_config('bridge.port', conf)
password = get_config('bridge.password', conf)
bridge = Bridge(server, port, jid, password, bus)
reactor.run()
if __name__ == '__main__':
run()
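# Hedged configuration sketch (keys inferred from the get_config() calls in run();
# values mirror the module-level defaults above and are placeholders only):
#
#   global:
#     bus: SessionBus
#   bridge:
#     username: platform@skynet.local
#     password: password
#     server: skynet.local
#     port: 5222
#     debug: false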
|
|
import argparse, json, os, time
from parcellearning import jkgat
from parcellearning.utilities import gnnio
from parcellearning.utilities.early_stop import EarlyStopping
from parcellearning.utilities.batch import partition_graphs
from parcellearning.utilities.load import load_schema, load_model
from shutil import copyfile
from pathlib import Path
import numpy as np
import pandas as pd
import dgl
from dgl.data import register_data_args
import dgl.function as fn
import torch
import torch.nn.functional as F
def main(args):
schema = load_schema(args.schema_file)
out_dir = schema['data']['out']
Path(out_dir).mkdir(parents=True, exist_ok=True)
# copy schema file to output directory
copy_schema = ''.join([out_dir, args.schema_file.split('/')[-1]])
if not os.path.exists(copy_schema):
copyfile(args.schema_file, copy_schema)
##### GET PARAMETERS FROM SCHEMA FILE #####
# - - - - - - - - - - - - - - - - - - - - #
# - - - - - - - - - - - - - - - - - - - - #
MODEL_PARAMS = schema['model_parameters']
OPT_PARAMS = schema['optimizer_parameters']
TRAIN_PARAMS = schema['training_parameters']
STOP_PARAMS = schema['stopping_parameters']
DATA_PARAMS = schema['variable_parameters']
# - - - - - - - - - - - - - - - - - - - - #
# - - - - - - - - - - - - - - - - - - - - #
features = DATA_PARAMS['features']
features.sort()
# load training and validation data
training = gnnio.dataset(features=features,
dSet=schema['data']['training'],
atlas=DATA_PARAMS['response'],
norm=True,
clean=True)
validation = gnnio.dataset(features=features,
dSet=schema['data']['validation'],
atlas=DATA_PARAMS['response'],
norm=True,
clean=True)
validation = dgl.batch(validation)
val_X = validation.ndata['features']
val_Y = validation.ndata['label']
##### MODEL INITIALIZATION #####
# - - - - - - - - - - - - #
# - - - - - - - - - - - - #
# instantiate model using schema parameters
if args.existing:
print('Loading existing model.')
model_parameters = '%s%s.earlystop.Loss.pt' % (schema['data']['out'], schema['model'])
model = load_model(schema, model_parameters)
model_progress = '%sperformance.%s.json' % (schema['data']['out'], schema['model'])
try:
with open(model_progress, 'r') as f:
progress = json.load(f)
except:
print('Progress file does not exist.')
print('Creating a new progress log.')
progress = {k: [] for k in ['Epoch',
                            'Duration',
                            'Train Loss',
                            'Train Acc',
                            'Val Loss',
                            'Val Acc']}
else:
print('Training new model')
model = jkgat.JKGAT(**MODEL_PARAMS)
progress = {k: [] for k in ['Epoch',
'Duration',
'Train Loss',
'Train Acc',
'Val Loss',
'Val Acc']}
print(model)
# instantiate Adam optimizer using schema parameters
optimizer = torch.optim.Adam(model.parameters(), **OPT_PARAMS)
# initialize early stopper
stopped_model_output='%s%s.earlystop.Loss.pt' % (out_dir, schema['model'])
stopper = EarlyStopping(filename=stopped_model_output, **STOP_PARAMS)
cross_entropy = torch.nn.CrossEntropyLoss()
dur = []
##### MODEL TRAINING #####
# - - - - - - - - - - - - #
# - - - - - - - - - - - - #
starting_epoch = np.max([len(progress['Epoch']), args.starting_epoch])
print('Starting at epoch %i' % (starting_epoch))
print('\nTraining model\n')
for epoch in range(starting_epoch, TRAIN_PARAMS['epochs']):
# learn model on training data
batches = partition_graphs(training, TRAIN_PARAMS['n_batch'])
model.train()
t0 = time.time()
# zero the gradients for this epoch
optimizer.zero_grad()
# aggregate training batch losses
train_loss = 0
# aggregate training batch accuracies
train_acc = 0
for iteration, batch in enumerate(batches):
# get training features for this batch
batch_X = batch.ndata['features']
batch_Y = batch.ndata['label']
# push batch through network and compute loss
batch_logits = model(batch, batch_X)
batch_loss = cross_entropy(batch_logits, batch_Y)
# accuracy
batch_softmax = F.softmax(batch_logits, dim=1)
_, batch_indices = torch.max(batch_softmax, dim=1)
batch_acc = (batch_indices == batch_Y).sum() / batch_Y.shape[0]
# apply backward parameter update pass
batch_loss.backward()
print('Batch: %i | Batch Acc: %.3f | Batch Loss: %.3f ' % (iteration+1, batch_acc.item(), batch_loss.item()))
# update training performance
train_loss += batch_loss
train_acc += batch_acc
# accumulate the gradients from each batch
if (iteration+1) % TRAIN_PARAMS['n_batch'] == 0:
optimizer.step()
optimizer.zero_grad()
dur.append(time.time() - t0)
# switch model into evaluation mode
# so we don't update the gradients using the validation data
model.eval()
with torch.no_grad():
# push validation through network
val_logits = model(validation, val_X)
val_loss = cross_entropy(val_logits, val_Y)
# accuracy
val_softmax = F.softmax(val_logits, dim=1)
_, val_indices = torch.max(val_softmax, dim=1)
val_acc = (val_indices == val_Y).sum() / val_Y.shape[0]
train_loss /= TRAIN_PARAMS['n_batch']
train_acc /= TRAIN_PARAMS['n_batch']
# Show current performance
print("Epoch {:05d} | Time(s) {:.4f} | Train Loss {:.4f} | Train Acc {:.4f} | Val Loss {:.4f} | Val Acc {:.4f}".format(
epoch, np.mean(dur),
train_loss.item(), train_acc.item(),
val_loss.item(), val_acc.item()))
progress['Epoch'].append(epoch)
if epoch > 3:
progress['Duration'].append(time.time() - t0)
else:
progress['Duration'].append(0)
# update training performance
progress['Train Loss'].append(train_loss.item())
progress['Train Acc'].append(train_acc.item())
# update validation performance
progress['Val Loss'].append(val_loss.item())
progress['Val Acc'].append(val_acc.item())
performance_output = '%sperformance.%s.json' % (out_dir, schema['model'])
with open(performance_output, "w") as outparams:
json.dump(progress, outparams, ensure_ascii=True, indent=4, sort_keys=True)
# set up early stopping criteria on validation loss
early_stop = stopper.step(val_loss.detach().data, model)
if early_stop:
break
##### MODEL SAVING #####
# - - - - - - - - - - - - #
# - - - - - - - - - - - - #
model_output = '%s%s.pt' % (out_dir, schema['model'])
model.save(filename=model_output)
# save performance to json
performance_output = '%sperformance.%s.json' % (out_dir, schema['model'])
with open(performance_output, "w") as outparams:
json.dump(progress, outparams, ensure_ascii=True, indent=4, sort_keys=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='JKGAT')
parser.add_argument('--schema-file',
type=str,
help='JSON file with parameters for model, training, and output.')
parser.add_argument('--existing',
help='Load pre-existing model to continue training.',
action='store_true',
required=False)
parser.add_argument('--starting-epoch',
help="Which epoch to start at (for example, if you've already partially trained the model).",
default=0,
type=int,
required=False)
args = parser.parse_args()
main(args)
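# Hedged sketch of the schema layout this script expects (keys taken from the
# accesses in main(); the values shown are placeholders, not a tested setup):
#
# {
#   "model": "jkgat",
#   "data": {"out": "./out/", "training": "train.dset", "validation": "val.dset"},
#   "model_parameters": {...},
#   "optimizer_parameters": {"lr": 0.001},
#   "training_parameters": {"epochs": 100, "n_batch": 10},
#   "stopping_parameters": {...},
#   "variable_parameters": {"features": ["..."], "response": "..."}
# }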
|
|
# coding: utf-8
'''
------------------------------------------------------------------------------
Copyright 2016-2017 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
------------------------------------------------------------------------------
==================================================
RadialLineOfSightAndRange.py
--------------------------------------------------
requirements: ArcGIS 10.3+, Python 2.7+
author: ArcGIS Solutions
contact: support@esri.com
company: Esri
==================================================
description:
Creates a viewshed within a range fan based on input parameters
==================================================
'''
import math
import os
import arcpy
try:
from . import VisibilityUtilities
except ImportError:
import VisibilityUtilities
DEBUG = False
def drawWedge(cx, cy, r1, r2, startBearing, endBearing):
# Convert to radians and from north bearing to XY angle
start = math.radians(90.0 - startBearing)
# Adjust end if it crosses 360
if startBearing > endBearing:
endBearing = endBearing + 360.0
end = math.radians(90.0 - endBearing)
point = arcpy.Point()
array = arcpy.Array()
# Calculate the end x,y for the wedge
x_end = cx + r2*math.cos(start)
y_end = cy + r2*math.sin(start)
# Use intervalInDegrees as the angle step value for each circle point
intervalInDegrees = 5
intervalInRadians = math.radians(intervalInDegrees)
# Calculate the outer edge of the wedge
a = start
# If r1 == 0 then create a wedge from the center point
if r1 == 0:
#Add the start point to the array
point.X = cx
point.Y = cy
array.add(point)
#Calculate the rest of the wedge
while a >= end:
point.X = cx + r2*math.cos(a)
point.Y = cy + r2*math.sin(a)
array.add(point)
a -= intervalInRadians
#Close the polygon
point.X = cx
point.Y = cy
array.add(point)
else:
# Calculate the outer edge of the wedge (clockwise)
while a >= end:
point.X = cx + r2*math.cos(a)
point.Y = cy + r2*math.sin(a)
a -= intervalInRadians
array.add(point)
# Step back one interval - so angle matches last point added above
a += intervalInRadians
# Calculate the inner edge of the wedge (counter-clockwise)
while a <= start:
point.X = cx + r1*math.cos(a)
point.Y = cy + r1*math.sin(a)
a += intervalInRadians
array.add(point)
# Close the polygon by adding the end point
point.X = x_end
point.Y = y_end
array.add(point)
#Create the polygon
polygon = arcpy.Polygon(array)
return polygon
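# Illustrative call (not from the original toolbox): drawWedge(0.0, 0.0, 100.0,
# 1000.0, 45.0, 135.0) returns an arcpy.Polygon covering the ring sector between
# 100 and 1000 map units of the origin, swept clockwise from bearing 45 to 135;
# passing r1=0 instead yields the full pie slice from the center point.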
# Solution reused from:
# http://joshwerts.com/blog/2015/09/10/arcpy-dot-project-in-memory-featureclass/
# create destination feature class using the source as a template to establish schema
# and set destination spatial reference
def copyFeaturesAndProject(source_fc, out_projected_fc, spatial_reference):
""" projects source_fc to out_projected_fc using cursors (and supports in_memory workspace) """
path, name = os.path.split(out_projected_fc)
arcpy.management.CreateFeatureclass(path, name, \
arcpy.Describe(source_fc).shapeType, \
template=source_fc, \
spatial_reference=spatial_reference)
# specify copy of all fields from source to destination
fields = ["Shape@"] + [f.name for f in arcpy.ListFields(source_fc) if not f.required]
# project source geometries on the fly while inserting to destination featureclass
with arcpy.da.SearchCursor(source_fc, fields, spatial_reference=spatial_reference) as source_curs, \
arcpy.da.InsertCursor(out_projected_fc, fields) as ins_curs:
for row in source_curs:
ins_curs.insertRow(row)
def addViewshedFields(observerPointsFC, innerRadiusInput, outerRadiusInput, \
leftAzimuthInput, rightAzimuthInput, observerOffsetInput, targetOffsetInput):
desc = arcpy.Describe(observerPointsFC)
fieldNames = [x.name for x in desc.Fields]
# arcpy.AddMessage('Current Fields: ' + str(fieldNames))
if "RADIUS1" not in fieldNames :
arcpy.AddField_management(observerPointsFC, "RADIUS1", "SHORT")
arcpy.CalculateField_management(observerPointsFC, "RADIUS1", innerRadiusInput, "PYTHON_9.3", "")
if "RADIUS2" not in fieldNames :
arcpy.AddField_management(observerPointsFC, "RADIUS2", "SHORT")
arcpy.CalculateField_management(observerPointsFC, "RADIUS2", outerRadiusInput, "PYTHON_9.3", "")
if "AZIMUTH1" not in fieldNames :
arcpy.AddField_management(observerPointsFC, "AZIMUTH1", "SHORT")
arcpy.CalculateField_management(observerPointsFC, "AZIMUTH1", leftAzimuthInput, "PYTHON_9.3", "")
if "AZIMUTH2" not in fieldNames :
arcpy.AddField_management(observerPointsFC, "AZIMUTH2", "SHORT")
arcpy.CalculateField_management(observerPointsFC, "AZIMUTH2", rightAzimuthInput, "PYTHON_9.3", "")
if "OFFSETA" not in fieldNames :
arcpy.AddField_management(observerPointsFC, "OFFSETA", "SHORT")
arcpy.CalculateField_management(observerPointsFC, "OFFSETA", observerOffsetInput, "PYTHON_9.3", "")
if "OFFSETB" not in fieldNames :
arcpy.AddField_management(observerPointsFC, "OFFSETB", "SHORT")
arcpy.CalculateField_management(observerPointsFC, "OFFSETB", targetOffsetInput, "PYTHON_9.3", "")
def createViewshed(inputObserverPoints, elevationRaster, outerRadiusInput, \
leftAzimuthInput, rightAzimuthInput, observerOffsetInput, \
innerRadiusInput, viewshed, sectorWedge, fullWedge):
# Error Checking:
if arcpy.CheckExtension("3D") != "Available":
arcpy.AddError("3D license is not available.")
return
if not arcpy.Exists(inputObserverPoints) :
arcpy.AddError('Dataset does not exist: ' + str(inputObserverPoints))
return
if not arcpy.Exists(elevationRaster) :
arcpy.AddError('Dataset does not exist: ' + str(elevationRaster))
return
inputPointsCount = int(arcpy.GetCount_management(inputObserverPoints).getOutput(0))
if inputPointsCount == 0 :
arcpy.AddError('No features in input feature set: ' + str(inputObserverPoints))
return
elevDesc = arcpy.Describe(elevationRaster)
elevationSR = elevDesc.spatialReference
if not elevationSR.type == "Projected":
msgErrorNonProjectedSurface = \
"Error: Input elevation raster must be in a projected coordinate system. Existing elevation raster is in {0}.".format(elevationSR.name)
arcpy.AddError(msgErrorNonProjectedSurface)
return
# Done error checking, do processing:
arcpy.env.outputCoordinateSystem = elevationSR
donutWedges = []
pieWedges = []
tempObserverPoints = r"in_memory\tempPoints"
copyFeaturesAndProject(inputObserverPoints, tempObserverPoints, elevationSR)
# Check if points falls within surface extent
isWithin = VisibilityUtilities.surfaceContainsPoints(tempObserverPoints, elevationRaster)
if not isWithin:
msgErrorPointNotInSurface = \
"Error: Input Observer(s) does not fall within the extent of the input surface: {0}!".format(os.path.basename(elevationRaster))
arcpy.AddError(msgErrorPointNotInSurface)
return
addViewshedFields(tempObserverPoints, innerRadiusInput, outerRadiusInput, \
leftAzimuthInput, rightAzimuthInput, observerOffsetInput, \
0) # Set Target Height to 0
arcpy.AddMessage("Buffering observers...")
arcpy.Buffer_analysis(tempObserverPoints, \
r"in_memory\OuterBuffer", "RADIUS2", "FULL", "ROUND", "NONE", "", "GEODESIC")
desc = arcpy.Describe(r"in_memory\OuterBuffer")
xMin = desc.Extent.XMin
yMin = desc.Extent.YMin
xMax = desc.Extent.XMax
yMax = desc.Extent.YMax
Extent = str(xMin) + " " + str(yMin) + " " + str(xMax) + " " + str(yMax)
arcpy.env.extent = desc.Extent
# Set Raster Output Mask (to improve performance)
arcpy.env.mask = r"in_memory\OuterBuffer"
arcpy.AddMessage("Clipping image to observer buffer...")
arcpy.Clip_management(elevationRaster, Extent, r"in_memory\clip")
arcpy.AddMessage("Calculating viewshed...")
arcpy.Viewshed_3d("in_memory\clip", tempObserverPoints, r"in_memory\intervis", "1", "FLAT_EARTH", "0.13")
arcpy.AddMessage("Creating features from raster...")
arcpy.RasterToPolygon_conversion(in_raster=r"in_memory\intervis", out_polygon_features=r"in_memory\unclipped",simplify="NO_SIMPLIFY")
fields = ["SHAPE@XY","RADIUS1","RADIUS2","AZIMUTH1","AZIMUTH2"]
## get the attributes from the input point
with arcpy.da.SearchCursor(tempObserverPoints,fields) as cursor:
for row in cursor:
centerX = row[0][0]
centerY = row[0][1]
radiusInner = row[1]
radiusOuter = row[2]
startBearing = row[3]
endBearing = row[4]
# TODO/IMPORTANT: radius must be in map units
donutWedge = drawWedge(centerX, centerY, radiusInner, radiusOuter, startBearing, endBearing)
donutWedges.append(donutWedge)
pieWedge = drawWedge(centerX, centerY, 0, radiusOuter, startBearing, endBearing)
pieWedges.append(pieWedge)
arcpy.CopyFeatures_management(donutWedges, sectorWedge)
arcpy.CopyFeatures_management(pieWedges, fullWedge)
arcpy.AddMessage("Finishing output features...")
arcpy.Clip_analysis(r"in_memory\unclipped", sectorWedge, r"in_memory\dissolve")
arcpy.Dissolve_management(r"in_memory\dissolve", viewshed, "gridcode", "", "MULTI_PART", "DISSOLVE_LINES")
# Output Symbol layer requires the field to be "VISIBILITY"
arcpy.AddField_management(viewshed, "VISIBILITY", "LONG")
arcpy.CalculateField_management(viewshed, "VISIBILITY", '!gridcode!', "PYTHON_9.3")
def main():
########Script Parameters########
inputObserverPoints = arcpy.GetParameterAsText(0)
elevationRaster = arcpy.GetParameterAsText(1)
outerRadiusInput = arcpy.GetParameterAsText(2)
leftAzimuthInput = arcpy.GetParameterAsText(3)
rightAzimuthInput = arcpy.GetParameterAsText(4)
observerOffsetInput = arcpy.GetParameterAsText(5)
innerRadiusInput = arcpy.GetParameterAsText(6)
viewshed = arcpy.GetParameterAsText(7)
sectorWedge = arcpy.GetParameterAsText(8)
fullWedge = arcpy.GetParameterAsText(9)
createViewshed(inputObserverPoints, elevationRaster, \
outerRadiusInput, leftAzimuthInput, rightAzimuthInput, observerOffsetInput, \
innerRadiusInput, viewshed, sectorWedge, fullWedge)
# MAIN =============================================
if __name__ == "__main__":
main()
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class csvserver_tmtrafficpolicy_binding(base_resource) :
""" Binding class showing the tmtrafficpolicy that can be bound to csvserver.
"""
def __init__(self) :
self._policyname = ""
self._priority = 0
self._name = ""
self._targetlbvserver = ""
self._gotopriorityexpression = ""
self._bindpoint = ""
self._invoke = False
self._labeltype = ""
self._labelname = ""
self.___count = 0
@property
def priority(self) :
"""Priority for the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
"""Priority for the policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def bindpoint(self) :
"""For a rewrite policy, the bind point to which to bind the policy. Note: This parameter applies only to rewrite policies, because content switching policies are evaluated only at request time.
"""
try :
return self._bindpoint
except Exception as e:
raise e
@bindpoint.setter
def bindpoint(self, bindpoint) :
"""For a rewrite policy, the bind point to which to bind the policy. Note: This parameter applies only to rewrite policies, because content switching policies are evaluated only at request time.
"""
try :
self._bindpoint = bindpoint
except Exception as e:
raise e
@property
def policyname(self) :
"""Policies bound to this vserver.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
"""Policies bound to this vserver.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def labelname(self) :
"""Name of the label to be invoked.
"""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
"""Name of the label to be invoked.
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def name(self) :
"""Name of the content switching virtual server to which the content switching policy applies.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name of the content switching virtual server to which the content switching policy applies.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
"""Expression or other value specifying the next policy to be evaluated if the current policy evaluates to TRUE. Specify one of the following values:
* NEXT - Evaluate the policy with the next higher priority number.
* END - End policy evaluation.
* USE_INVOCATION_RESULT - Applicable if this policy invokes another policy label. If the final goto in the invoked policy label has a value of END, the evaluation stops. If the final goto is anything other than END, the current policy label performs a NEXT.
* A default syntax expression that evaluates to a number.
If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
* If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
* If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
* If the expression evaluates to a priority number that is numerically higher than the highest numbered priority, policy evaluation ends.
An UNDEF event is triggered if:
* The expression is invalid.
* The expression evaluates to a priority number that is numerically lower than the current policy's priority.
* The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
"""Expression or other value specifying the next policy to be evaluated if the current policy evaluates to TRUE. Specify one of the following values:
* NEXT - Evaluate the policy with the next higher priority number.
* END - End policy evaluation.
* USE_INVOCATION_RESULT - Applicable if this policy invokes another policy label. If the final goto in the invoked policy label has a value of END, the evaluation stops. If the final goto is anything other than END, the current policy label performs a NEXT.
* A default syntax expression that evaluates to a number.
If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
* If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
* If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
* If the expression evaluates to a priority number that is numerically higher than the highest numbered priority, policy evaluation ends.
An UNDEF event is triggered if:
* The expression is invalid.
* The expression evaluates to a priority number that is numerically lower than the current policy's priority.
* The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def targetlbvserver(self) :
"""Name of the Load Balancing virtual server to which the content is switched, if policy rule is evaluated to be TRUE.
Example: bind cs vs cs1 -policyname pol1 -priority 101 -targetLBVserver lb1
Note: Use this parameter only in case of Content Switching policy bind operations to a CS vserver.
"""
try :
return self._targetlbvserver
except Exception as e:
raise e
@targetlbvserver.setter
def targetlbvserver(self, targetlbvserver) :
"""Name of the Load Balancing virtual server to which the content is switched, if policy rule is evaluated to be TRUE.
Example: bind cs vs cs1 -policyname pol1 -priority 101 -targetLBVserver lb1
Note: Use this parameter only in case of Content Switching policy bind operations to a CS vserver
"""
try :
self._targetlbvserver = targetlbvserver
except Exception as e:
raise e
@property
def invoke(self) :
"""Invoke a policy label if this policy's rule evaluates to TRUE (valid only for default-syntax policies such as application firewall, transform, integrated cache, rewrite, responder, and content switching).
"""
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
"""Invoke a policy label if this policy's rule evaluates to TRUE (valid only for default-syntax policies such as application firewall, transform, integrated cache, rewrite, responder, and content switching).
"""
try :
self._invoke = invoke
except Exception as e:
raise e
@property
def labeltype(self) :
"""Type of label to be invoked.
"""
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
"""Type of label to be invoked.
"""
try :
self._labeltype = labeltype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(csvserver_tmtrafficpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.csvserver_tmtrafficpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = csvserver_tmtrafficpolicy_binding()
updateresource.name = resource.name
updateresource.policyname = resource.policyname
updateresource.targetlbvserver = resource.targetlbvserver
updateresource.gotopriorityexpression = resource.gotopriorityexpression
updateresource.bindpoint = resource.bindpoint
updateresource.invoke = resource.invoke
updateresource.labeltype = resource.labeltype
updateresource.labelname = resource.labelname
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [csvserver_tmtrafficpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].policyname = resource[i].policyname
updateresources[i].targetlbvserver = resource[i].targetlbvserver
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
updateresources[i].bindpoint = resource[i].bindpoint
updateresources[i].invoke = resource[i].invoke
updateresources[i].labeltype = resource[i].labeltype
updateresources[i].labelname = resource[i].labelname
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = csvserver_tmtrafficpolicy_binding()
deleteresource.name = resource.name
deleteresource.policyname = resource.policyname
deleteresource.bindpoint = resource.bindpoint
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [csvserver_tmtrafficpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].policyname = resource[i].policyname
deleteresources[i].bindpoint = resource[i].bindpoint
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
""" Use this API to fetch csvserver_tmtrafficpolicy_binding resources.
"""
try :
obj = csvserver_tmtrafficpolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
""" Use this API to fetch filtered set of csvserver_tmtrafficpolicy_binding resources.
        Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = csvserver_tmtrafficpolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
""" Use this API to count csvserver_tmtrafficpolicy_binding resources configued on NetScaler.
"""
try :
obj = csvserver_tmtrafficpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
""" Use this API to count the filtered set of csvserver_tmtrafficpolicy_binding resources.
        Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = csvserver_tmtrafficpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Bindpoint:
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class csvserver_tmtrafficpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.csvserver_tmtrafficpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.csvserver_tmtrafficpolicy_binding = [csvserver_tmtrafficpolicy_binding() for _ in range(length)]
|
|
import numpy as np
########################################################
# Common functions for phonon workflows #
# (should be placed in the same directory of workflow) #
# Used for workflows: gruneisen, qha #
########################################################
def get_phonon(structure, force_constants, phonopy_input):
from phonopy.structure.atoms import Atoms as PhonopyAtoms
from phonopy import Phonopy
# Generate phonopy phonon object
bulk = PhonopyAtoms(symbols=[site.kind_name for site in structure.sites],
positions=[site.position for site in structure.sites],
cell=structure.cell)
phonon = Phonopy(bulk,
phonopy_input['supercell'],
primitive_matrix=phonopy_input['primitive'],
distance=phonopy_input['distance'],
symprec=phonopy_input['symmetry_precision'])
phonon.set_force_constants(force_constants)
return phonon
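# Hedged usage sketch (illustrative only, never called): shows how a phonopy_input
# dictionary might be assembled for get_phonon(). The supercell, primitive matrix,
# displacement distance and symmetry precision below are assumed example values,
# not values taken from any workflow in this file.
def _example_get_phonon_usage(structure, force_constants):
    phonopy_input = {
        'supercell': [[2, 0, 0], [0, 2, 0], [0, 0, 2]],
        'primitive': [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
        'distance': 0.01,
        'symmetry_precision': 1e-5,
    }
    return get_phonon(structure, force_constants, phonopy_input)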
def thermal_expansion(volumes, electronic_energies, gruneisen, stresses=None, t_max=1000, t_step=10):
fit_ve = np.polyfit(volumes, electronic_energies, 2)
test_volumes = np.arange(volumes[0] * 0.8, volumes[0] * 1.2, volumes[0] * 0.01)
electronic_energies = np.array([np.polyval(fit_ve, i) for i in test_volumes])
gruneisen.set_thermal_properties(test_volumes, t_min=0, t_max=t_max, t_step=t_step)
tp = gruneisen.get_thermal_properties()
normalize = gruneisen.get_phonon().unitcell.get_number_of_atoms() / gruneisen.get_phonon().primitive.get_number_of_atoms()
free_energy_array = []
cv_array = []
entropy_array = []
total_free_energy_array = []
for energy, tpi in zip(electronic_energies, tp.get_thermal_properties()):
temperatures, free_energy, entropy, cv = tpi.get_thermal_properties()
free_energy_array.append(free_energy)
entropy_array.append(entropy)
cv_array.append(cv)
total_free_energy_array.append(free_energy/normalize + energy)
total_free_energy_array = np.array(total_free_energy_array)
fit = np.polyfit(test_volumes, total_free_energy_array, 2)
min_volume = []
e_min = []
for j, t in enumerate(temperatures):
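        # Vertex of the quadratic fit a*V**2 + b*V + c at this temperature: V_min = -b / (2a)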
min_v = -fit.T[j][1] / (2 * fit.T[j][0])
e_min.append(np.polyval(fit.T[j], min_v))
min_volume.append(min_v)
if stresses is not None:
from scipy.optimize import curve_fit, OptimizeWarning
try:
# Fit to an exponential equation
def fitting_function(x, a, b, c):
return np.exp(-b * (x + a)) + c
p_b = 0.1
p_c = -200
p_a = -np.log(-p_c) / p_b - volumes[0]
popt, pcov = curve_fit(fitting_function, volumes, stresses, p0=[p_a, p_b, p_c], maxfev=100000)
            min_stress = fitting_function(np.array(min_volume), *popt)
except OptimizeWarning:
# Fit to a quadratic equation
fit_vs = np.polyfit(volumes, stresses, 2)
min_stress = np.array([np.polyval(fit_vs, v) for v in min_volume])
else:
min_stress = None
return temperatures, min_volume, min_stress
def arrange_band_labels(band_structure):
substitutions = {'GAMMA': u'\u0393'
}
def replace_list(text_string, substitutions):
for item in substitutions.iteritems():
text_string = text_string.replace(item[0], item[1])
return text_string
labels_array = band_structure.get_array('labels')
labels = []
labels_positions = []
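    # If two consecutive path segments share an endpoint, keep that single label;
    # otherwise join them as 'previous_end/current_start' to mark the discontinuity.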
for i, freq in enumerate(band_structure.get_array('q_path')):
if labels_array[i][0] == labels_array[i-1][1]:
labels.append(replace_list(labels_array[i][0],substitutions))
else:
labels.append(replace_list(labels_array[i-1][1]+'/'+labels_array[i][0], substitutions))
labels_positions.append(band_structure.get_array('q_path')[i][0])
labels_positions.append(band_structure.get_array('q_path')[-1][-1])
labels.append(replace_list(labels_array[-1][1], substitutions))
labels[0] = replace_list(labels_array[0][0], substitutions)
return labels_positions, labels
def write_unicode_file(labels_positions, labels):
import StringIO
output = StringIO.StringIO()
for i, j in zip(labels_positions, labels):
output.write(u'{0:12.8f} {1}\n'.format(i, j).encode('utf-8'))
output.seek(0)
return output
def get_file_from_txt(text):
import StringIO
output = StringIO.StringIO()
output.write(text)
output.seek(0)
return output
def smearing_function_mesh(X, Y, frequencies, gruneisen, sigma=0.1):
frequencies = frequencies.reshape(-1)
gruneisen = gruneisen.reshape(-1)
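    # Broaden each (frequency, gruneisen) mode with an isotropic Gaussian of width
    # sigma on the (X, Y) grid and average the contributions over all modes.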
def gaussian(X, Y, sigma, freq, grune):
result = 1.0/np.sqrt(2*np.pi*sigma**2) * np.exp(-((X-freq)**2 + (Y-grune)**2)/(2*sigma**2))
return result
total = np.zeros_like(X)
for freq, grune in zip(frequencies, gruneisen):
total += gaussian(X,Y, sigma, freq, grune)
return total/len(frequencies)
# convert numpy string into web page ready text file
def get_file_from_numpy_array(data, text_list=None):
import StringIO
output = StringIO.StringIO()
if text_list is None:
output.write('# No caption\n')
else:
output.write(' '.join(text_list) + '\n')
for line in np.array(data).astype(str):
output.write(' '.join(line) + '\n')
output.seek(0)
return output
def get_data_info(structure):
pmg_structure = structure.get_pymatgen_structure()
formula = pmg_structure.formula
space_group = pmg_structure.get_space_group_info()
lattice_vectors = pmg_structure.lattice.matrix
positions = pmg_structure.frac_coords
species = pmg_structure.species
volume = pmg_structure.volume
info_data = ''
info_data += '<b>Formula:</b> {}\n'.format(formula)
info_data += '<br><b>Space group:</b> {} #{}\n'.format(*space_group)
info_data += '\n'
info_data += '<br><br><b>Lattice vectors (Angstroms)</b>\n'
info_data += ('<br>{0:10.8f} {1:10.8f} {2:10.8f}\n'.format(*lattice_vectors[0]) +
'<br>{0:10.8f} {1:10.8f} {2:10.8f}\n'.format(*lattice_vectors[1]) +
'<br>{0:10.8f} {1:10.8f} {2:10.8f}\n'.format(*lattice_vectors[2]))
info_data += '\n'
info_data += '<br><br><b>Positions (frac. coord)</b>\n'
for i, xyz in enumerate(positions):
info_data += ('<br>{} '.format(species[i]) + '{0:10.8f} {1:10.8f} {2:10.8f}\n'.format(*xyz))
info_data += '\n'
info_data += '<br><br><b>Volume:</b> {} Angstroms<sup>3</sup>\n'.format(volume)
return info_data
def get_helmholtz_volume_from_phonopy_qha(phonopy_qha, thin_number=10):
from numpy import max, min
self = phonopy_qha._qha
volume_points = np.linspace(min(self._volumes),
max(self._volumes),
201)
min_volumes = []
min_energies = []
volumes = self._volumes
selected_energies = []
energies_points = []
for i, t in enumerate(self._temperatures[:self._max_t_index]):
if i % thin_number == 0:
min_volumes.append(self._equiv_volumes[i])
min_energies.append(self._equiv_energies[i])
selected_energies.append(self._free_energies[i])
energies_points.append(self._eos(volume_points, *self._equiv_parameters[i]))
return {'fit': (volume_points, np.array(energies_points)),
'points': (volumes, np.array(selected_energies)),
'minimum': (min_volumes, min_energies)}
# Write to files
def get_FORCE_CONSTANTS_txt(force_constants_object):
force_constants = force_constants_object.get_array('force_constants')
# Write FORCE CONSTANTS
force_constants_txt = '{0}\n'.format(len(force_constants))
for i, fc in enumerate(force_constants):
for j, atomic_fc in enumerate(fc):
force_constants_txt += '{0} {1}\n'.format(i, j)
for line in atomic_fc:
force_constants_txt += '{0:20.16f} {1:20.16f} {2:20.16f}\n'.format(*line)
return force_constants_txt
def structure_to_poscar(structure):
types = [site.kind_name for site in structure.sites]
atom_type_unique = np.unique(types, return_index=True)
sort_index = np.argsort(atom_type_unique[1])
elements = np.array(atom_type_unique[0])[sort_index]
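    # Element counts are the differences between consecutive first-occurrence indices,
    # which assumes that sites of the same kind are stored contiguously.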
elements_count= np.diff(np.append(np.array(atom_type_unique[1])[sort_index], [len(types)]))
poscar = '# VASP POSCAR generated using aiida workflow '
poscar += '\n1.0\n'
cell = structure.cell
for row in cell:
poscar += '{0: 22.16f} {1: 22.16f} {2: 22.16f}\n'.format(*row)
poscar += ' '.join([str(e) for e in elements]) + '\n'
poscar += ' '.join([str(e) for e in elements_count]) + '\n'
poscar += 'Cartesian\n'
for site in structure.sites:
poscar += '{0: 22.16f} {1: 22.16f} {2: 22.16f}\n'.format(*site.position)
return poscar
if __name__ == '__main__':
def gaussian(X, Y, sigma, freq, grune):
result = 1.0/np.sqrt(2*np.pi*sigma**2) * np.exp(-((X-freq)**2 + (Y-grune)**2)/(2*sigma**2))
return result
x = np.arange(-2, 2, 0.1)
y = np.arange(-2, 2, 0.1)
X, Y = np.meshgrid(x,y)
frequencies = np.sin(np.linspace(-6.3, 6.3, 1000))
gruneisen = np.cos(np.linspace(-6.3, 6.3, 1000))
Z = smearing_function_mesh(X, Y, frequencies, gruneisen)
#Z = gaussian(X, Y, 0.1, 0, 0)
import matplotlib.pyplot as plt
plt.contour(X, Y, Z)
plt.show()
exit()
    plt.plot(np.arange(-10, 10, 0.1), [gaussian(x, 0, 0.5, 0, 0) for x in np.arange(-10, 10, 0.1)])
plt.show()
exit()
|
|
from unittest import TestCase
import unittest
import active_subspaces.subspaces as ss
import active_subspaces.domains as dom
import helper
import numpy as np
import pdb
class TestDomains(TestCase):
writeData = False
def test_unbounded_active_variable_domain(self):
data = helper.load_test_npz('test_spec_decomp_0.npz')
df0, e0, W0 = data['df'], data['e'], data['W']
sub = ss.Subspaces()
sub.compute(df0)
uavd = dom.UnboundedActiveVariableDomain(sub)
np.testing.assert_equal(uavd.vertY, None)
np.testing.assert_equal(uavd.vertX, None)
np.testing.assert_equal(uavd.convhull, None)
np.testing.assert_equal(uavd.constraints, None)
np.testing.assert_almost_equal(uavd.n, sub.W1.shape[1])
np.testing.assert_almost_equal(uavd.m, sub.W1.shape[0])
def test_bounded_active_variable_domain_0(self):
data = helper.load_test_npz('test_spec_decomp_0.npz')
df0, e0, W0 = data['df'], data['e'], data['W']
sub = ss.Subspaces()
sub.compute(df0)
data_bavd = helper.load_test_npz('bavd_0.npz')
bavd = dom.BoundedActiveVariableDomain(sub)
np.testing.assert_almost_equal(bavd.vertY, np.dot(bavd.vertX, sub.W1))
np.testing.assert_almost_equal(bavd.vertY, data_bavd['vertY'])
np.testing.assert_almost_equal(bavd.vertX, data_bavd['vertX'])
np.testing.assert_almost_equal(bavd.n, sub.W1.shape[1])
np.testing.assert_almost_equal(bavd.m, sub.W1.shape[0])
def test_bounded_active_variable_domain_1(self):
data = helper.load_test_npz('test_spec_decomp_1.npz')
df0, e0, W0 = data['df'], data['e'], data['W']
sub = ss.Subspaces()
sub.compute(df0)
data_bavd = helper.load_test_npz('bavd_1.npz')
np.random.seed(42)
bavd = dom.BoundedActiveVariableDomain(sub)
np.testing.assert_almost_equal(bavd.vertY, np.dot(bavd.vertX, sub.W1))
np.testing.assert_almost_equal(bavd.vertY, data_bavd['vertY'])
np.testing.assert_almost_equal(bavd.vertX, data_bavd['vertX'])
np.testing.assert_almost_equal(bavd.n, sub.W1.shape[1])
np.testing.assert_almost_equal(bavd.m, sub.W1.shape[0])
def test_unbounded_active_variable_map_0(self):
data = helper.load_test_npz('test_spec_decomp_0.npz')
df0, e0, W0 = data['df'], data['e'], data['W']
sub = ss.Subspaces()
sub.compute(df0)
m, n = sub.W1.shape
uavd = dom.UnboundedActiveVariableDomain(sub)
uavm = dom.UnboundedActiveVariableMap(uavd)
X = np.random.normal(size=(100,m))
Y,Z = uavm.forward(X)
X0 = np.dot(Y, sub.W1.T) + np.dot(Z, sub.W2.T)
np.testing.assert_almost_equal(X0, X)
def test_unbounded_active_variable_map_1(self):
data = helper.load_test_npz('test_spec_decomp_1.npz')
df0, e0, W0 = data['df'], data['e'], data['W']
sub = ss.Subspaces()
sub.compute(df0)
m, n = sub.W1.shape
uavd = dom.UnboundedActiveVariableDomain(sub)
uavm = dom.UnboundedActiveVariableMap(uavd)
X = np.random.normal(size=(100,m))
Y,Z = uavm.forward(X)
X0 = np.dot(Y, sub.W1.T) + np.dot(Z, sub.W2.T)
np.testing.assert_almost_equal(X0, X)
def test_unbounded_active_variable_map_2(self):
data = helper.load_test_npz('test_spec_decomp_0.npz')
df0, e0, W0 = data['df'], data['e'], data['W']
sub = ss.Subspaces()
sub.compute(df0)
m, n = sub.W1.shape
uavd = dom.UnboundedActiveVariableDomain(sub)
uavm = dom.UnboundedActiveVariableMap(uavd)
X = np.random.normal(size=(100,m))
Y,Z = uavm.forward(X)
X0 = uavm.inverse(Y, N=10)[0]
np.testing.assert_almost_equal(np.dot(X0, sub.W1), np.kron(Y, np.ones((10,1))) )
def test_unbounded_active_variable_map_3(self):
data = helper.load_test_npz('test_spec_decomp_1.npz')
df0, e0, W0 = data['df'], data['e'], data['W']
sub = ss.Subspaces()
sub.compute(df0)
m, n = sub.W1.shape
uavd = dom.UnboundedActiveVariableDomain(sub)
uavm = dom.UnboundedActiveVariableMap(uavd)
X = np.random.normal(size=(100,m))
Y,Z = uavm.forward(X)
X0 = uavm.inverse(Y, N=10)[0]
np.testing.assert_almost_equal(np.dot(X0, sub.W1), np.kron(Y, np.ones((10,1))) )
def test_bounded_active_variable_map_0(self):
data = helper.load_test_npz('test_spec_decomp_0.npz')
df0, e0, W0 = data['df'], data['e'], data['W']
sub = ss.Subspaces()
sub.compute(df0)
m, n = sub.W1.shape
bavd = dom.BoundedActiveVariableDomain(sub)
bavm = dom.BoundedActiveVariableMap(bavd)
X = np.random.uniform(-1.0,1.0,size=(100,m))
Y,Z = bavm.forward(X)
X0 = np.dot(Y, sub.W1.T) + np.dot(Z, sub.W2.T)
np.testing.assert_almost_equal(X0, X)
def test_bounded_active_variable_map_1(self):
data = helper.load_test_npz('test_spec_decomp_1.npz')
df0, e0, W0 = data['df'], data['e'], data['W']
sub = ss.Subspaces()
sub.compute(df0)
m, n = sub.W1.shape
bavd = dom.BoundedActiveVariableDomain(sub)
bavm = dom.BoundedActiveVariableMap(bavd)
X = np.random.uniform(-1.0,1.0,size=(100,m))
Y,Z = bavm.forward(X)
X0 = np.dot(Y, sub.W1.T) + np.dot(Z, sub.W2.T)
np.testing.assert_almost_equal(X0, X)
def test_bounded_active_variable_map_2(self):
data = helper.load_test_npz('test_spec_decomp_0.npz')
df0, e0, W0 = data['df'], data['e'], data['W']
sub = ss.Subspaces()
sub.compute(df0)
m, n = sub.W1.shape
bavd = dom.BoundedActiveVariableDomain(sub)
bavm = dom.BoundedActiveVariableMap(bavd)
X = np.random.uniform(-1.0,1.0,size=(10,m))
Y,Z = bavm.forward(X)
X0 = bavm.inverse(Y, N=10)[0]
np.testing.assert_almost_equal(np.dot(X0, sub.W1), np.kron(Y, np.ones((10,1))) )
def test_bounded_active_variable_map_3(self):
data = helper.load_test_npz('test_spec_decomp_1.npz')
df0, e0, W0 = data['df'], data['e'], data['W']
sub = ss.Subspaces()
sub.compute(df0)
m, n = sub.W1.shape
bavd = dom.BoundedActiveVariableDomain(sub)
bavm = dom.BoundedActiveVariableMap(bavd)
X = np.random.uniform(-1.0,1.0,size=(10,m))
Y,Z = bavm.forward(X)
X0 = bavm.inverse(Y, N=10)[0]
np.testing.assert_almost_equal(np.dot(X0, sub.W1), np.kron(Y, np.ones((10,1))) )
def test_rejection_sample_z(self):
data = helper.load_test_npz('test_spec_decomp_1.npz')
df0, e0, W0 = data['df'], data['e'], data['W']
sub = ss.Subspaces()
sub.compute(df0)
W1, W2 = sub.W1, sub.W2
m, n = W1.shape
np.random.seed(43)
x = np.random.uniform(-1.0,1.0,size=(1,m))
y = np.dot(x, W1).reshape((n, ))
N = 10
np.random.seed(42)
Z = dom.rejection_sampling_z(N, y, W1, W2)
if self.writeData:
np.savez('data/test_sampling_z_0_0',Z=Z)
data_test = helper.load_test_npz('test_sampling_z_0_0.npz')
np.testing.assert_almost_equal(Z, data_test['Z'])
def test_hit_and_run_z(self):
data = helper.load_test_npz('test_spec_decomp_1.npz')
df0, e0, W0 = data['df'], data['e'], data['W']
sub = ss.Subspaces()
sub.compute(df0)
W1, W2 = sub.W1, sub.W2
m, n = W1.shape
np.random.seed(43)
x = np.random.uniform(-1.0,1.0,size=(1,m))
y = np.dot(x, W1).reshape((n, ))
N = 10
np.random.seed(42)
Z = dom.hit_and_run_z(N, y, W1, W2)
if self.writeData:
np.savez('data/test_sampling_z_0_1',Z=Z)
data_test = helper.load_test_npz('test_sampling_z_0_1.npz')
np.testing.assert_almost_equal(Z, data_test['Z'])
def test_random_walk_z(self):
data = helper.load_test_npz('test_spec_decomp_1.npz')
df0, e0, W0 = data['df'], data['e'], data['W']
sub = ss.Subspaces()
sub.compute(df0)
W1, W2 = sub.W1, sub.W2
m, n = W1.shape
np.random.seed(43)
x = np.random.uniform(-1.0,1.0,size=(1,m))
y = np.dot(x, W1).reshape((n, ))
N = 10
np.random.seed(42)
Z = dom.random_walk_z(N, y, W1, W2)
if self.writeData:
np.savez('data/test_sampling_z_0_2',Z=Z)
data_test = helper.load_test_npz('test_sampling_z_0_2.npz')
np.testing.assert_almost_equal(Z, data_test['Z'])
def test_sample_z(self):
data = helper.load_test_npz('test_spec_decomp_1.npz')
df0, e0, W0 = data['df'], data['e'], data['W']
sub = ss.Subspaces()
sub.compute(df0)
W1, W2 = sub.W1, sub.W2
m, n = W1.shape
np.random.seed(43)
x = np.random.uniform(-1.0,1.0,size=(1,m))
y = np.dot(x, W1).reshape((n, ))
N = 10
np.random.seed(42)
Z = dom.sample_z(N, y, W1, W2)
data_test = helper.load_test_npz('test_sampling_z_0_0.npz')
np.testing.assert_almost_equal(Z, data_test['Z'])
if __name__ == '__main__':
unittest.main()
|
|
"""
TODO:
- Feature: Clicking on a point in the parameter space plots the integral curve with that initial condition
so that the parameter space can be explored interactively.
- Feature: Link the x axes for all the plots in 1D embedding domain.
-
"""
import glob
import heisenberg.library.util
import matplotlib.pyplot as plt
import numpy as np
import os
import pyqtgraph as pg
import pyqtgraph.Qt
import scipy.interpolate
import sys
import vorpy.pickle
subprogram_description = 'Provides visualization of the data generated by the heisenberg.sample subprogram. In particular, this gives a colormapped scatterplot of the objective function on the fully reduced, 2-parameter initial condition space.'
def read_sample_pickles (samples_dir, range_v):
glob_pattern = os.path.join(samples_dir, 'sample_v.*.pickle')
print('glob_pattern = "{0}"'.format(glob_pattern))
pickle_filename_v = glob.glob(glob_pattern)
print(pickle_filename_v)
data_v = []
dimension_d = {1:0, 2:0}
for pickle_filename in pickle_filename_v:
pickle_data = vorpy.pickle.unpickle(pickle_filename=pickle_filename, log_out=sys.stdout)
# TEMP legacy compatibility
if type(pickle_data) == list:
sample_v = pickle_data
elif type(pickle_data) == dict:
sample_v = pickle_data['sample_v']
else:
assert False, 'unknown data type {0} found in pickle'.format(type(pickle_data))
for sample in sample_v:
initial = sample[0]
objective = sample[4]
t_min = sample[5]
max_abs_H = sample[6]
max_abs_J_minus_J_0 = sample[7]
if range_v[0] <= objective < range_v[1]:
# TEMP HACK -- probably just use a different function altogether, or use a different data structure
if initial.shape == (1,):
dimension_d[1] += 1
data_v.append(np.array((objective, t_min, max_abs_H, max_abs_J_minus_J_0, initial[0])))
else:
dimension_d[2] += 1
data_v.append(np.array((objective, t_min, max_abs_H, max_abs_J_minus_J_0, initial[0], initial[1])))
assert dimension_d[1] == 0 or dimension_d[2] == 0, 'inhomogeneous data (mixed dimensions)'
dimension = 1 if dimension_d[1] > 0 else 2
if len(data_v) == 0:
print('No data found in "{0}" files.'.format(glob_pattern))
return None, dimension
else:
return np.array(data_v), dimension
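# Column layout of the array returned by read_sample_pickles:
# [objective, t_min, max_abs_H, max_abs_J_minus_J_0, initial[0]] for 1-d initial
# conditions, with an additional trailing initial[1] column in the 2-d case.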
def plot_samples (dynamics_context, options, *, rng):
data_v,dimension = read_sample_pickles(options.samples_dir, (1.0e-16, np.inf))
if data_v is None:
return
if options.use_white_background:
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
print('number of points: {0}'.format(data_v.shape[0]))
app = pyqtgraph.Qt.QtGui.QApplication([])
mw = pyqtgraph.Qt.QtGui.QMainWindow()
mw.resize(1200,1200)
view = pg.GraphicsLayoutWidget() ## GraphicsView with GraphicsLayout inserted by default
mw.setCentralWidget(view)
mw.show()
#mw.setWindowTitle('(p_x,p_y) initial condition scatterplot')
## create areas to add plots
w1 = view.addPlot(name='w1', title='objective')
view.nextRow()
w3 = view.addPlot(name='w3', title='t_min')
#w3 = None
#view.nextRow()
#w2 = view.addPlot(name='w2', title='max(abs(H))')
w2 = None
#view.nextRow()
#w4 = view.addPlot(name='w4', title='max(abs(J-J(0)))')
w4 = None
## Make all plots clickable
lastClicked = []
def clicked(plot, points):
        nonlocal lastClicked  # lastClicked lives in the enclosing plot_samples scope, not at module level
for p in lastClicked:
p.resetPen()
print('clicked points', points)
for p in points:
p.setPen('b', width=2)
lastClicked = points
if dimension == 1:
def scatterplot (plot, point_v, value_v, *, use_log=False):
assert np.all(np.isfinite(point_v))
filter_v = np.isfinite(value_v)
filtered_point_v = point_v[filter_v]
filtered_value_v = value_v[filter_v]
brush = pg.mkBrush(255, 255, 255, 255)
s = pg.ScatterPlotItem(size=2, brush=brush)
plot.addItem(s)
s.addPoints(x=filtered_point_v, y=filtered_value_v)
s.sigClicked.connect(clicked)
plot.setLogMode(x=False, y=use_log)
return s
def lineplot (plot, point_v, value_v, *, use_log=False):
assert np.all(np.isfinite(point_v))
filter_v = np.isfinite(value_v)
filtered_point_v = point_v[filter_v]
filtered_value_v = value_v[filter_v]
plot.plot(filtered_point_v, filtered_value_v)
plot.setLogMode(x=False, y=use_log)
if w1 is not None:
w1.setLabel('bottom', 'p_theta')
lineplot(w1, data_v[:,4], data_v[:,0], use_log=False) # objective
if w2 is not None:
w2.setLabel('bottom', 'p_theta')
lineplot(w2, data_v[:,4], data_v[:,2], use_log=False) # max_abs_H
if w3 is not None:
w3.setLabel('bottom', 'p_theta')
scatterplot(w3, data_v[:,4], data_v[:,1], use_log=False) # t_min
if w4 is not None:
w4.setLabel('bottom', 'p_theta')
lineplot(w4, data_v[:,4], data_v[:,3], use_log=False) # max_abs_J_minus_J_0
# Make some static plots
if True:
row_height = 5
col_width = 10
row_count = 2
col_count = 1
fig,axis_vv = plt.subplots(row_count, col_count, squeeze=False, figsize=(col_width*col_count,row_height*row_count))
axis = axis_vv[0][0]
axis.set_title('objective function value of orbit with initial p_theta')
axis.set_xlabel('p_theta')
axis.set_ylabel('objective')
axis.plot(data_v[:,4], data_v[:,0], color='black')
axis = axis_vv[1][0]
axis.set_title('objective-minimizing time (t_min) of orbit with initial p_theta')
axis.set_xlabel('p_theta')
axis.set_ylabel('t_min')
axis.scatter(data_v[:,4], data_v[:,1], s=1, color='black')
fig.tight_layout()
filename = os.path.join(options.samples_dir, 'objective-and-t_min.pdf')
plt.savefig(filename, bbox_inches='tight')
print('wrote to "{0}"'.format(filename))
plt.close('all') # Does this work? Who knows.
if True:
row_height = 5
col_width = 10
row_count = 1
col_count = 1
fig,axis_vv = plt.subplots(row_count, col_count, squeeze=False, figsize=(col_width*col_count,row_height*row_count))
axis = axis_vv[0][0]
axis.set_title('objective function value of orbit with initial p_theta')
axis.set_xlabel('p_theta')
axis.set_ylabel('objective')
axis.plot(data_v[:,4], data_v[:,0], color='black')
fig.tight_layout()
filename = os.path.join(options.samples_dir, 'objective.pdf')
plt.savefig(filename, bbox_inches='tight')
print('wrote to "{0}"'.format(filename))
plt.close('all') # Does this work? Who knows.
if True:
row_height = 5
col_width = 10
row_count = 1
col_count = 1
fig,axis_vv = plt.subplots(row_count, col_count, squeeze=False, figsize=(col_width*col_count,row_height*row_count))
axis = axis_vv[0][0]
axis.set_title('objective-minimizing time (t_min) of orbit with initial p_theta')
axis.set_xlabel('p_theta')
axis.set_ylabel('t_min')
axis.scatter(data_v[:,4], data_v[:,1], s=1, color='black')
fig.tight_layout()
filename = os.path.join(options.samples_dir, 't_min.pdf')
plt.savefig(filename, bbox_inches='tight')
print('wrote to "{0}"'.format(filename))
plt.close('all') # Does this work? Who knows.
# Link all plots' x axes together
if w1 is not None:
if w2 is not None:
w2.setXLink('w1')
if w3 is not None:
w3.setXLink('w1')
if w4 is not None:
w4.setXLink('w1')
# Create a vertical line on each plot that follows the mouse cursor
if False:
if w1 is not None:
vline1 = pg.InfiniteLine(angle=90, movable=False)
w1.addItem(vline1, ignoreBounds=True)
if w2 is not None:
vline2 = pg.InfiniteLine(angle=90, movable=False)
w2.addItem(vline2, ignoreBounds=True)
if w3 is not None:
vline3 = pg.InfiniteLine(angle=90, movable=False)
w3.addItem(vline3, ignoreBounds=True)
if w4 is not None:
vline4 = pg.InfiniteLine(angle=90, movable=False)
w4.addItem(vline4, ignoreBounds=True)
def mouse_moved (plot, event):
pos = event[0] ## using signal proxy turns original arguments into a tuple
if plot.sceneBoundingRect().contains(pos):
mouse_point = plot.vb.mapSceneToView(pos)
#index = int(mouse_point.x())
#if index >= 0 and index < len(p_y_v):
#label1.setText('<span style="font-size: 12pt">x={0}, <span style="color: red">p_y={1}</span>, <span style="color: green">objective={2}</span>'.format(mouse_point.x(), p_y_v[index], objective_v[index]))
if w1 is not None:
vline1.setPos(mouse_point.x())
if w2 is not None:
vline2.setPos(mouse_point.x())
if w3 is not None:
vline3.setPos(mouse_point.x())
if w4 is not None:
vline4.setPos(mouse_point.x())
#hLine.setPos(mouse_point.y())
if w1 is not None:
proxy1 = pg.SignalProxy(w1.scene().sigMouseMoved, rateLimit=60, slot=lambda event:mouse_moved(w1,event))
if w2 is not None:
proxy2 = pg.SignalProxy(w2.scene().sigMouseMoved, rateLimit=60, slot=lambda event:mouse_moved(w2,event))
if w3 is not None:
proxy3 = pg.SignalProxy(w3.scene().sigMouseMoved, rateLimit=60, slot=lambda event:mouse_moved(w3,event))
if w4 is not None:
proxy4 = pg.SignalProxy(w4.scene().sigMouseMoved, rateLimit=60, slot=lambda event:mouse_moved(w4,event))
elif dimension == 2:
# NOTE
# NOTE: In this whole section, the p_x (which is the same as J) and p_y (which is the same as
# NOTE: p_theta) initial condition coordinates are switched to be plotted (p_y,p_x) (i.e. (p_theta,J)).
# NOTE: Furthermore, note that p_x == J and p_y == p_theta because of the constraints made in choosing
# NOTE: the 2-dimensional initial conditions embedding.
# NOTE
def color_scatterplot_2d (plot, point_v, value_v, *, use_log=False):
if use_log:
func = np.log
else:
func = lambda x:x
assert np.all(np.isfinite(point_v))
filter_v = np.isfinite(value_v)
filtered_point_v = point_v[filter_v]
filtered_value_v = value_v[filter_v]
low = np.nanmin(func(filtered_value_v))
high = np.nanmax(func(filtered_value_v))
divisor = high - low
print('low = {0}, high = {1}, divisor = {2}'.format(low, high, divisor))
def brush_from_objective (objective):
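                # Map the normalized value onto a green-to-red gradient:
                # parameter 0 (lowest value) gives (0, 255, 0), parameter 1 gives (255, 0, 0).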
parameter = (func(objective) - low) / divisor
return pg.mkBrush(int(round(255*parameter)), int(round(255*(1.0-parameter))), 0, 255)
s = pg.ScatterPlotItem(size=10, pen=pg.mkPen(None))#, brush=pg.mkBrush(255, 255, 255, 128))
plot.addItem(s)
s.addPoints(x=filtered_point_v[:,1], y=filtered_point_v[:,0], brush=[brush_from_objective(objective) for objective in filtered_value_v])
s.sigClicked.connect(clicked)
return s
if w1 is not None:
color_scatterplot_2d(w1, data_v[:,4:6], data_v[:,0], use_log=True) # objective
if w2 is not None:
color_scatterplot_2d(w2, data_v[:,4:6], data_v[:,1], use_log=False) # t_min
if w3 is not None:
color_scatterplot_2d(w3, data_v[:,4:6], data_v[:,2], use_log=True) # max_abs_H
if w4 is not None:
color_scatterplot_2d(w4, data_v[:,4:6], data_v[:,3], use_log=True) # max_abs_J_minus_J_0
# Make some static plot(s)
if True:
row_height = 10
col_width = 10
row_count = 1
col_count = 1
fig,axis_vv = plt.subplots(row_count, col_count, squeeze=False, figsize=(col_width*col_count,row_height*row_count))
initial_v = data_v[:,-2:]
initial_min_v = np.min(initial_v, axis=0)
initial_max_v = np.max(initial_v, axis=0)
value_v = data_v[:,0]
assert np.all(np.isfinite(initial_v))
filter_v = np.isfinite(value_v)
filtered_initial_v = initial_v[filter_v]
filtered_value_v = value_v[filter_v]
use_log = True
if use_log:
func = np.log
else:
func = lambda x:x
low = np.nanmin(func(filtered_value_v))
high = np.nanmax(func(filtered_value_v))
divisor = high - low
print('low = {0}, high = {1}, divisor = {2}'.format(low, high, divisor))
def unstretch (objective):
return (func(objective) - low) / divisor
unstretched_filtered_value_v = np.apply_along_axis(unstretch, 0, filtered_value_v)
# Define the grid, ensuring that the x grid point count is odd, so that it covers the central axis.
x_v = np.linspace(initial_min_v[1], initial_max_v[1], 401)
y_v = np.linspace(initial_min_v[0], initial_max_v[0], 401)
z_v = scipy.interpolate.griddata((filtered_initial_v[:,1], filtered_initial_v[:,0]), unstretched_filtered_value_v, (x_v[None,:], y_v[:,None]), method='cubic')
print('x_v.shape = {0}'.format(x_v.shape))
print('y_v.shape = {0}'.format(y_v.shape))
print('z_v.shape = {0}'.format(z_v.shape))
#contour_level_v = [10.0**p for p in range(-11,3)]
contour_level_v = np.linspace(0.0, 1.0, 11)
axis = axis_vv[0][0]
axis.set_title('objective function value of orbit with initial (p_theta,J)')
axis.contour(x_v, y_v, z_v, contour_level_v, linewidths=0.5, colors='k')
axis.contourf(x_v, y_v, z_v, contour_level_v, cmap=plt.cm.jet)
axis.set_aspect('equal')
#axis.set_aspect(0.5)
axis.scatter(filtered_initial_v[:,1], filtered_initial_v[:,0], color='black', alpha=0.1, s=1)
#axis.colorbar()
axis.set_xlim(initial_min_v[1], initial_max_v[1])
axis.set_ylim(initial_min_v[0], initial_max_v[0])
fig.tight_layout()
filename = os.path.join(options.samples_dir, 'objective.pdf')
plt.savefig(filename, bbox_inches='tight')
print('wrote to "{0}"'.format(filename))
plt.close('all') # Does this work? Who knows.
else:
assert False, 'dimension = {0}, which should never happen'.format(dimension)
### Start Qt event loop unless running in interactive mode.
#if (sys.flags.interactive != 1) or not hasattr(pyqtgraph.Qt.QtCore, 'PYQT_VERSION'):
#pyqtgraph.Qt.QtGui.QApplication.instance().exec_()
|
|
# -*- coding: utf-8 -*-
import logging
import os
import os.path
import shutil
import stat
import core.provider.property as property
import core.utils as utils
PRECONDITION_FAILED = 17
NOT_FOUND = 2
class StorageProvider():
path = None
CHUNK_SIZE = 16384 # 16KB buffer
propertyProvider = None # Properties Provider
def __init__(self, path, propertyProvider=None):
if propertyProvider is None:
propertyProvider = property.Base()
self.path = path
self.propertyProvider = propertyProvider
def get(self, uri, **kwargs):
return open(self._translateUri(uri, **kwargs))
def exists(self, uri, **kwargs):
"""
        Checks whether the storage node for a given resource exists
"""
return os.path.exists(self._translateUri(uri, **kwargs))
def isCollection(self, uri, **kwargs):
return os.path.isdir(self._translateUri(uri, **kwargs))
def getMeta(self, uri, depth=0, list=None, **kwargs):
"""
Gets the live + dead properties of an object
"""
uris = [uri]
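        # For depth > 0 on a collection, the immediate children of uri are included as well.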
if depth > 0 and self.isCollection(uri, **kwargs):
resources = os.listdir(self._translateUri(uri, **kwargs))
prefix = uri
if uri != '/':
prefix += '/'
for resource in resources:
uris.append(prefix + resource)
meta = {}
for href in uris:
path = self._translateUri(href, **kwargs)
meta[href] = self.propertyProvider.get(path)
if meta[href].has_key('{DAV:}resourcetype'):
if meta[href]['{DAV:}resourcetype'] == '':
meta[href]['{DAV:}resourcetype'] = 0
else:
meta[href]['{DAV:}resourcetype'] = int(meta[href]['{DAV:}resourcetype'])
props = {}
if not meta[href].has_key('{DAV:}getcontenttype'):
mime_type, encoding = utils.getMime(path)
props.update({'{DAV:}getcontenttype': mime_type})
if encoding is not None:
props.update({'{DAV:}encoding': encoding})
if not meta[href].has_key('{DAV:}getcontentlength'):
stats = os.stat(path)
dir = stat.S_ISDIR(stats.st_mode)
if dir:
dir = 1
mime_type = 'application/x-directory'
else:
dir = 0
props.update({
'{DAV:}getlastmodified': stats.st_mtime,
'{DAV:}getcontentlength': stats.st_size,
'{DAV:}creationdate': stats.st_ctime,
'{DAV:}resourcetype': dir
})
meta[href].update(props)
for field, value in props.items():
self.propertyProvider.set(path, field, value)
return meta
def setMeta(self, uri, hashes, depth=0, **kwargs):
if hashes is not None and not isinstance(hashes,dict):
raise ValueError("Second parameter must be dictionary")
logging.getLogger().debug("setMeta(%s): %s" % (uri, hashes))
path = self._translateUri(uri, **kwargs)
for key, value in hashes.items():
self.propertyProvider.set(path,key,value)
def delMeta(self, uri, keys=None,**kwargs):
if keys is not None and not isinstance(keys,list):
raise ValueError("Second parameter must be list")
logging.getLogger().debug("delMeta(%s): %s" % (uri, keys))
path = self._translateUri(uri, **kwargs)
if keys is None:
self.propertyProvider.delete(path)
else:
for key in keys:
self.propertyProvider.delete(path, key)
def create(self, uri, data, env=None, expectedSize=None, **kwargs):
localPath = self._translateUri(uri, **kwargs)
self._saveToFile(data,localPath, expectedSize)
mime_type, encoding = utils.getMime(localPath)
stats = os.stat(localPath)
return {
'{DAV:}getcontenttype': mime_type,
'{DAV:}encoding': encoding,
'{DAV:}getcontentlength': stats.st_size,
'{DAV:}getlastmodified' : stats.st_mtime,
}
def createCollection(self, uri, **kwargs):
os.mkdir(self._translateUri(uri, **kwargs))
def copy(self, sourceUri, targetUri, depth=0, **kwargs):
"""
Copy the source data to the destination
"""
logging.getLogger().warn("@todo: Handle the depth value")
if(self.isCollection(sourceUri, **kwargs)):
shutil.copytree(self._translateUri(sourceUri,**kwargs), self._translateUri(targetUri, **kwargs))
else:
shutil.copy2(self._translateUri(sourceUri, **kwargs), self._translateUri(targetUri, **kwargs))
def move(self, sourceUri, targetUri, depth=0, **kwargs):
"""
Move the source data to the destination
"""
logging.getLogger().warn("@todo: Handle the depth value")
shutil.move(self._translateUri(sourceUri, **kwargs), self._translateUri(targetUri, **kwargs))
def delete(self, uri, **kwargs):
try:
if self.isCollection(uri, **kwargs):
shutil.rmtree(self._translateUri(uri, **kwargs))
else:
os.unlink(self._translateUri(uri, **kwargs))
except OSError as ex:
if ex.errno == 2: # Not found
return None
raise ex
# remove also the views directory
try:
            shutil.rmtree(self._translateUri('/.views/' + uri, **kwargs))
except:
# @todo: log errors when removing views
pass
def getSize(self, uri, depth, **kwargs):
"""
Gets the total size of the resources
"""
size = 0
meta = self.getMeta(uri, -1, None, **kwargs)
for path, data in meta.items():
if data['{DAV:}resourcetype']:
continue
size += int(data['{DAV:}getcontentlength'])
return size
def _translateUri(self, uri, **kwargs):
"""
Translates URI to local path
"""
return self.path + uri
def getView(self, uri, request, **kwargs):
"""
Returns the local path where the views/thumbnails have
to be stored and the remote url to access it.
"""
viewURL = '/.views'+uri.replace(request.path,request.uri)
viewPath = self._translateUri('/.views'+uri, **kwargs)
return [viewPath, viewURL]
def _saveToFile(self, data, localPath, expectedSize=None):
if hasattr(data, 'save_as'):
data.save_as(localPath)
else:
from multipart import copy_file
if not hasattr(data,'read'):
import cStringIO as StringIO
data = StringIO.StringIO(data)
            with open(localPath, "wb") as f:
                copy_file(data, f)
if expectedSize is not None:
actualSize = os.path.getsize(localPath)
if actualSize != expectedSize:
raise ValueError('_saveToFile: Size error. Expected: %s, Actual: %s' % (expectedSize, actualSize))
class UserStorageProvider(StorageProvider):
def __init__(self, path, propertyProvider=None, nestedLevel=0, createIfNonExistent=False):
if propertyProvider is None:
propertyProvider = property.Base()
self.nestedLevel = nestedLevel
self.createIfNonExistent = createIfNonExistent
StorageProvider.__init__(self,path,propertyProvider)
def getNestedName(self, name, nestedLevel, step=2):
if not len(name):
raise Exception('Invalid value for parameter identity')
folder = ''
max = len(name)
for i in range(0,nestedLevel):
i=i*step
if i> max-step:
i = max-step
folder +='/'+name[i:i+step]
folder +='/'+name
return folder
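    # Example: getNestedName('abcdef', 2) returns '/ab/cd/abcdef', i.e. the name is
    # sharded into nestedLevel intermediate directories of `step` characters each.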
def _translateUri(self, uri, **kwargs):
"""
Translates URI to local path
"""
user = kwargs['user']
if user.has_key('folder'):
folder = user['folder']
else:
folder = self.path + '/' + \
self.getNestedName(user.getIdentity(),self.nestedLevel)
if self.createIfNonExistent and not os.path.exists(folder):
os.makedirs(folder)
return folder + uri
import core.pool.Redis as Redis
import ujson as json
from core.pattern import Decorator
import re
class CacheDecorator(Decorator):
def __init__(self, obj, expiration=86400, prefix='storage_meta_'):
Decorator.__init__(self, obj)
object.__setattr__(self, "redis", Redis.ConnectionPool().get())
object.__setattr__(self, "expiration", expiration)
object.__setattr__(self, "prefix", prefix)
def getMeta(self, uri, depth=0, list=None, **kwargs):
meta = {}
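        # depth == 0: the metadata of uri itself is a single field of its parent's hash;
        # otherwise the whole hash stored under uri + '/' caches the collection listing.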
if depth == 0:
(key, field) = self.getCacheKey(uri, **kwargs)
data = self.redis.hget(key, field)
if data:
meta[uri] = data
else:
(key, field) = self.getCacheKey(uri+'/', **kwargs)
data = self.redis.hgetall(key)
if data:
for (name, value) in data.items():
if name != '':
name = '/'+name
meta[uri+name] = value
if not kwargs.get('generateCache') and len(meta):
theMeta = {}
if meta.has_key(uri): # sanity check
for name, data in meta.items():
theMeta[name] = json.decode(data)
return theMeta
meta = self._obj.getMeta(uri, depth, list, **kwargs)
# if the depth == 0 and the main uri is a collection - do not cache
if kwargs.get('generateCache') or \
not (depth == 0 and int(meta[uri]['{DAV:}resourcetype'])):
logging.getLogger().debug("Saving: Keys %s" % meta.keys())
for (path,data) in meta.items():
if depth ==0 and uri == path:
href = uri
else:
href = path.replace(uri,'')
logging.getLogger().debug("Saving Key %s %s" % (key, os.path.basename(href)) )
self.redis.hset(key, os.path.basename(href), json.encode(data))
self.redis.expire(key, self.expiration)
return meta
def delMeta(self, uri, list=None, **kwargs):
result = self._obj.delMeta(uri, list, **kwargs)
self.delCache(uri, **kwargs)
return result
def setMeta(self, uri, hashes, depth=0, **kwargs):
result = self._obj.setMeta(uri, hashes, depth, **kwargs)
self.addCache(uri, **kwargs)
return result
def create(self, uri, data, env=None, expectedSize=None, **kwargs):
result = self._obj.create(uri, data, env, expectedSize, **kwargs)
self.addCache(uri, **kwargs)
return result
def createCollection(self, uri, **kwargs):
result = self._obj.createCollection(uri, **kwargs)
self.addCache(uri, **kwargs)
return result
def move(self, sourceUri, targetUri, depth=0, **kwargs):
result = self._obj.move(sourceUri, targetUri, depth, **kwargs)
self.delCache(sourceUri, **kwargs)
self.addCache(targetUri, **kwargs)
return result
def addCache(self, uri, **kwargs):
(key, field) = self.getCacheKey(uri, **kwargs)
if self.redis.exists(key):
"""
            The resource is added to the cache only if the
            parent entry has already been cached. Otherwise
            the cache will be generated on demand.
"""
kwargs['generateCache'] = 1
self.getMeta(uri, 0, None, **kwargs)
def delCache(self, uri, **kwargs):
(key, field) = self.getCacheKey(uri, **kwargs)
self.redis.hdel(key, field)
(key, field) = self.getCacheKey(uri+'/', **kwargs)
self.redis.delete(key)
def getCacheKey(self, uri, **kwargs):
"""
@param string uri
@returns [key, field]
"""
uri = re.sub(r'/{2,}', '/', uri)
[dir, name] = os.path.split(uri)
key = '%s%s%s' % (self.prefix, kwargs.get('user').getIdentity(), dir)
return [key, name]
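    # Example (assumed identity 'alice', default prefix): uri '/docs/file.txt' yields
    # key 'storage_meta_alice/docs' and field 'file.txt'.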
def __del__(self):
# delete object references manually
object.__delattr__(self, 'redis')
Decorator.__del__(self)
class FileStorageProvider(StorageProvider):
"""
Single File Storage Provider.
"""
def _translateUri(self, uri, **kwargs):
"""
Translates URI to local path
"""
return self.path
|
|
import re
import os
import six
class Compiler(object):
RE_INTERPOLATE = re.compile(r'(\\)?([#!]){(.*?)}')
RE_ASSIGNMENT = re.compile(r'^(\s*var\s+)(\w+) *= *([^;]+)')
doctypes = {
'5': '<!DOCTYPE html>'
, 'xml': '<?xml version="1.0" encoding="utf-8" ?>'
, 'default': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
, 'transitional': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
, 'strict': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">'
, 'frameset': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">'
, '1.1': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">'
, 'basic': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML Basic 1.1//EN" "http://www.w3.org/TR/xhtml-basic/xhtml-basic11.dtd">'
, 'mobile': '<!DOCTYPE html PUBLIC "-//WAPFORUM//DTD XHTML Mobile 1.2//EN" "http://www.openmobilealliance.org/tech/DTD/xhtml-mobile12.dtd">'
}
inlineTags = [
'a'
, 'abbr'
, 'acronym'
, 'b'
, 'br'
, 'code'
, 'em'
, 'font'
, 'i'
, 'img'
, 'ins'
, 'kbd'
, 'map'
, 'samp'
, 'small'
, 'span'
, 'strong'
, 'sub'
, 'sup'
, 'textarea'
]
selfClosing = [
'meta'
, 'img'
, 'link'
, 'input'
, 'area'
, 'base'
, 'col'
, 'br'
, 'hr'
]
autocloseCode = 'if,for,block,filter,autoescape,with,trans,spaceless,comment,cache,macro,localize,compress,raw,call,set'.split(',')
filters = {}
def __init__(self, node, **options):
self.options = options
self.node = node
self.hasCompiledDoctype = False
self.hasCompiledTag = False
self.pp = options.get('pretty', True)
self.debug = options.get('compileDebug', False) is not False
self.filters.update(options.get('filters', {}))
self.doctypes.update(options.get('doctypes', {}))
# self.var_processor = options.get('var_processor', lambda x: x)
self.selfClosing.extend(options.get('selfClosing', []))
self.autocloseCode.extend(options.get('autocloseCode', []))
self.inlineTags.extend(options.get('inlineTags', []))
self.useRuntime = options.get('useRuntime', True)
self.extension = options.get('extension', None) or '.jade'
self.indents = 0
self.doctype = None
self.terse = False
self.xml = False
self.mixing = 0
self.variable_start_string = options.get("variable_start_string", "{{")
self.variable_end_string = options.get("variable_end_string", "}}")
if 'doctype' in self.options: self.setDoctype(options['doctype'])
self.instring = False
def var_processor(self, var):
if isinstance(var,six.string_types) and var.startswith('_ '):
var = '_("%s")'%var[2:]
return var
def compile_top(self):
return ''
def compile(self):
self.buf = [self.compile_top()]
self.lastBufferedIdx = -1
self.visit(self.node)
compiled = u''.join(self.buf)
if isinstance(compiled, six.binary_type):
compiled = six.text_type(compiled, 'utf8')
return compiled
def setDoctype(self, name):
self.doctype = self.doctypes.get(name or 'default',
'<!DOCTYPE %s>' % name)
self.terse = name in ['5','html']
self.xml = self.doctype.startswith('<?xml')
def buffer(self, str):
if self.lastBufferedIdx == len(self.buf):
self.lastBuffered += str
self.buf[self.lastBufferedIdx - 1] = self.lastBuffered
else:
self.buf.append(str)
self.lastBuffered = str;
self.lastBufferedIdx = len(self.buf)
def visit(self, node, *args, **kwargs):
# debug = self.debug
# if debug:
# self.buf.append('__jade.unshift({ lineno: %d, filename: %s });' % (node.line,('"%s"'%node.filename) if node.filename else '__jade[0].filename'));
# if node.debug==False and self.debug:
# self.buf.pop()
# self.buf.pop()
self.visitNode(node, *args, **kwargs)
# if debug: self.buf.append('__jade.shift();')
def visitNode (self, node, *args, **kwargs):
name = node.__class__.__name__
if self.instring and name != 'Tag':
self.buffer('\n')
self.instring = False
return getattr(self, 'visit%s' % name)(node, *args, **kwargs)
def visitLiteral(self, node):
self.buffer(node.str)
def visitBlock(self, block):
for node in block.nodes:
self.visit(node)
def visitCodeBlock(self, block):
self.buffer('{%% block %s %%}' % block.name)
if block.mode=='prepend':
self.buffer('%ssuper()%s' % (self.variable_start_string,
self.variable_end_string))
self.visitBlock(block)
if block.mode == 'append':
self.buffer('%ssuper()%s' % (self.variable_start_string,
self.variable_end_string))
self.buffer('{% endblock %}')
def visitDoctype(self,doctype=None):
if doctype and (doctype.val or not self.doctype):
self.setDoctype(doctype.val or 'default')
if self.doctype:
self.buffer(self.doctype)
self.hasCompiledDoctype = True
def visitMixin(self,mixin):
if mixin.block:
self.buffer('{%% macro %s(%s) %%}' % (mixin.name, mixin.args))
self.visitBlock(mixin.block)
self.buffer('{% endmacro %}')
else:
self.buffer('%s%s(%s)%s' % (self.variable_start_string, mixin.name,
mixin.args, self.variable_end_string))
def visitTag(self,tag):
self.indents += 1
name = tag.name
if not self.hasCompiledTag:
if not self.hasCompiledDoctype and 'html' == name:
self.visitDoctype()
self.hasCompiledTag = True
if self.pp and name not in self.inlineTags and not tag.inline:
self.buffer('\n' + ' ' * (self.indents - 1))
if name in self.inlineTags or tag.inline:
self.instring = False
closed = name in self.selfClosing and not self.xml
if tag.text:
t = tag.text.nodes[0]
if t.startswith(u'/'):
if len(t) > 1:
raise Exception('%s is self closing and should not have content.' % name)
closed = True
self.buffer('<%s' % name)
self.visitAttributes(tag.attrs)
self.buffer('/>' if not self.terse and closed else '>')
if not closed:
if tag.code: self.visitCode(tag.code)
if tag.text: self.buffer(self.interpolate(tag.text.nodes[0].lstrip()))
self.escape = 'pre' == tag.name
# empirically check if we only contain text
textOnly = tag.textOnly or not bool(len(tag.block.nodes))
self.instring = False
self.visit(tag.block)
if self.pp and not name in self.inlineTags and not textOnly:
self.buffer('\n' + ' ' * (self.indents-1))
self.buffer('</%s>' % name)
self.indents -= 1
def visitFilter(self,filter):
if filter.name not in self.filters:
if filter.isASTFilter:
raise Exception('unknown ast filter "%s"' % filter.name)
else:
raise Exception('unknown filter "%s"' % filter.name)
fn = self.filters.get(filter.name)
if filter.isASTFilter:
self.buf.append(fn(filter.block, self, filter.attrs))
else:
text = ''.join(filter.block.nodes)
text = self.interpolate(text)
filter.attrs = filter.attrs or {}
filter.attrs['filename'] = self.options.get('filename', None)
self.buffer(fn(text, filter.attrs))
def _interpolate(self, attr, repl):
return self.RE_INTERPOLATE.sub(lambda matchobj:repl(matchobj.group(3)),
attr)
def interpolate(self, text, escape=None):
def repl(matchobj):
filter_ = (escape is None and matchobj.group(2) != '!') or escape
return '%s%s%s%s%s' % (
self.variable_start_string,
'(' if filter_ else '',
matchobj.group(3),
')|escape' if filter_ else '',
self.variable_end_string)
return self.RE_INTERPOLATE.sub(repl, text)
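    # With the default delimiters, '#{user.name}' becomes '{{(user.name)|escape}}',
    # while '!{raw_html}' becomes '{{raw_html}}', since '!' suppresses the escape filter.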
def visitText(self,text):
text = ''.join(text.nodes)
text = self.interpolate(text)
self.buffer(text)
if self.pp:
self.buffer('\n')
def visitString(self,text):
instring = not text.inline
text = ''.join(text.nodes)
text = self.interpolate(text)
self.buffer(text)
self.instring = instring
def visitComment(self,comment):
if not comment.buffer: return
if self.pp:
self.buffer('\n' + ' ' * (self.indents))
self.buffer('<!--%s-->' % comment.val)
def visitAssignment(self,assignment):
self.buffer('{%% set %s = %s %%}' % (assignment.name, assignment.val))
def format_path(self,path):
has_extension = '.' in os.path.basename(path)
if not has_extension:
path += self.extension
return path
def visitExtends(self,node):
path = self.format_path(node.path)
self.buffer('{%% extends "%s" %%}' % (path))
def visitInclude(self,node):
path = self.format_path(node.path)
self.buffer('{%% include "%s" %%}' % (path))
def visitBlockComment(self, comment):
if not comment.buffer:
return
isConditional = comment.val.strip().startswith('if')
self.buffer('<!--[%s]>' % comment.val.strip() if isConditional else '<!--%s' % comment.val)
self.visit(comment.block)
self.buffer('<![endif]-->' if isConditional else '-->')
def visitConditional(self, conditional):
TYPE_CODE = {
'if': lambda x: 'if %s'%x,
'unless': lambda x: 'if not %s'%x,
'elif': lambda x: 'elif %s'%x,
'else': lambda x: 'else'
}
self.buf.append('{%% %s %%}' % TYPE_CODE[conditional.type](conditional.sentence))
if conditional.block:
self.visit(conditional.block)
for next in conditional.next:
self.visitConditional(next)
if conditional.type in ['if','unless']:
self.buf.append('{% endif %}')
def visitVar(self, var, escape=False):
var = self.var_processor(var)
return ('%s%s%s%s%s' % (
self.variable_start_string,
'(' if escape else '',
var,
')|escape' if escape else '',
self.variable_end_string))
def visitCode(self,code):
if code.buffer:
val = code.val.lstrip()
self.buf.append(self.visitVar(val, code.escape))
else:
self.buf.append('{%% %s %%}' % code.val)
if code.block:
# if not code.buffer: self.buf.append('{')
self.visit(code.block)
# if not code.buffer: self.buf.append('}')
if not code.buffer:
codeTag = code.val.strip().split(' ', 1)[0]
if codeTag in self.autocloseCode:
self.buf.append('{%% end%s %%}' % codeTag)
def visitBlockCode(self,code):
for instruction in ''.join(code.block.nodes).split(';'):
if not instruction.strip():
continue
matches = self.RE_ASSIGNMENT.match(instruction)
if matches:
self.buffer('{%% set %s = %s %%}' % (
matches.groups()[1], matches.groups()[2]))
else:
self.buf.append('{%% %s %%}' % instruction)
def visitEach(self,each):
self.buf.append('{%% for %s in %s|__pyjade_iter:%d %%}' % (','.join(each.keys), each.obj, len(each.keys)))
self.visit(each.block)
self.buf.append('{% endfor %}')
def attributes(self,attrs):
return "%s__pyjade_attrs(%s)%s" % (self.variable_start_string, attrs, self.variable_end_string)
def visitDynamicAttributes(self, attrs):
buf, classes, params = [], [], {}
terse='terse=True' if self.terse else ''
for attr in attrs:
if attr['name'] == 'class':
classes.append('(%s)' % attr['val'])
else:
pair = "('%s',(%s))" % (attr['name'], attr['val'])
buf.append(pair)
if classes:
classes = " , ".join(classes)
buf.append("('class', (%s))" % classes)
buf = ', '.join(buf)
if self.terse: params['terse'] = 'True'
if buf: params['attrs'] = '[%s]' % buf
param_string = ', '.join(['%s=%s' % (n, v) for n, v in six.iteritems(params)])
if buf or terse:
self.buf.append(self.attributes(param_string))
def visitAttributes(self, attrs):
temp_attrs = []
for attr in attrs:
if (not self.useRuntime and not attr['name']=='class') or attr['static']: #
if temp_attrs:
self.visitDynamicAttributes(temp_attrs)
temp_attrs = []
n, v = attr['name'], attr['val']
if isinstance(v, six.string_types):
if self.useRuntime or attr['static']:
self.buf.append(' %s=%s' % (n, v))
else:
self.buf.append(' %s="%s"' % (n, self.visitVar(v)))
elif v is True:
if self.terse:
self.buf.append(' %s' % (n,))
else:
self.buf.append(' %s="%s"' % (n, n))
else:
temp_attrs.append(attr)
if temp_attrs: self.visitDynamicAttributes(temp_attrs)
@classmethod
def register_filter(cls, name, f):
cls.filters[name] = f
@classmethod
def register_autoclosecode(cls, name):
cls.autocloseCode.append(name)
#1-
|
|
import os
import json
import pyodbc
import psycopg2
import psycopg2.extras
from psycopg2.pool import ThreadedConnectionPool
import datetime
from concurrent.futures import ThreadPoolExecutor, wait
import multiprocessing
import sys
import hashlib
from utils import *
import logging
THREADNUM = 16
class IntWriter:
def __init__(self, target):
self.inttype = target['type'] #database, file, etc.
self.intconnstr = target['connstr'] #connection string: Server,Port,db/filename
self.mdmconnstr = 'Driver={ODBC Driver 13 for SQL Server}; Server=localhost; Database=MDM_PROD; UID=int_etl; PWD=ugpassword;'
self.mdmquery = 'SELECT [ID],[UID] FROM [MDM_PROD].[MODEL].[OBJECTS] where SystemID = ? and deletiondate is null'
self.goldenquery = 'SELECT [XID] as [ID],[UniqueObjectID] as [GoldenID] FROM [MDM_PROD].[MODEL].[mv_xref] where SystemID = ? and [UniqueObjectID] is not null'
self.mdmssys = target['ssys'] #source system code for UID lookup in MDM
self.intencoding = target['encoding'] #append method description (api, rest, query, etc.)
self.intname = target['name'] #name of table or procedure or whatever else
self.lookupcolumns = target['lookupcolumns']
self.pool = None
self.conn = None
self.curr = None
self.wcounter = 0
self.stream = []
self.intheader = target['header']
self.lookup_table = dict()
self.golden_table = dict()
self.ods_to_dwh_table = set()
self.cache_dict = dict()
self.convtime = datetime.timedelta()
self.connect()
self.active = True
self.executor = ThreadPoolExecutor(max_workers=THREADNUM)
self.futures = []
def golden_tracker(self):
cursor = pyodbc.connect(self.mdmconnstr).execute(self.goldenquery, (self.mdmssys,))
for row in cursor:
self.golden_table[row[0]] = row[1]
        logging.info('%d golden IDs are mapped to datasource. Memory used: %d', len(self.golden_table), sys.getsizeof(self.golden_table))
def ods_to_dwh_tracker(self):
cursor = pyodbc.connect(self.intconnstr).execute('select odsid from ' + self.intname)
self.ods_to_dwh_table.update([row[0] for row in cursor])
        logging.info('%d records already in Staging area. Memory used: %d', len(self.ods_to_dwh_table), sys.getsizeof(self.ods_to_dwh_table))
def change_tracker(self, dtype):
query = "select ddochash, dcontenthash from public.v_fact where dtype = %s"
db = psycopg2.connect(self.intconnstr)
cursor = db.cursor(cursor_factory=psycopg2.extras.DictCursor)
cursor.execute(query, (dtype,))
for row in cursor.fetchall():
self.cache_dict[row['ddochash'].tobytes()] = row['dcontenthash']
def connect(self):
t = datetime.datetime.today()
# TODO: move to a separate function to make the program independent of the MDM system
cursor = pyodbc.connect(self.mdmconnstr).execute(self.mdmquery, (self.mdmssys,))
columns = [column[0] for column in cursor.description]
for row in cursor.fetchall():
self.lookup_table[row[1]] = row[0]
# print(self.lookup_table)
self.golden_tracker()
if self.inttype == 'odsf1':
self.pool = ThreadedConnectionPool(1, THREADNUM + 1, self.intconnstr)
if self.inttype == 'staf1':
self.ods_to_dwh_tracker()
if self.intname == 'KS2': # TODO: add proper lookup of possible systems or some other logic when to look for changes (may be target system)
self.change_tracker(self.intname)
logging.info('Cache initialization took ' + str(datetime.datetime.today() - t))
return
def clear(self):
self.stream.clear()
return
def written(self):
print(self.wcounter)
return self.wcounter
def __len__(self):
return len(self.stream)
def append(self, data):
st = datetime.datetime.now()
BATCH_SIZE = 1
if self.inttype == 'apij1':
BATCH_SIZE = 1000
objectkeys = ['ExtractionDate','Migration','ActionID','SystemID','EntityID','UID','ParentUID','Verified','UXID','ValidFrom','ValidTo']
obj = {}
if 'PeriodObjects' in data:
obj['ExtractionDate'] = data['ExtractionDate']
obj['Migration'] = data['Migration']
obj['ActionID'] = data['ActionID']
obj['SystemID'] = data['SystemID']
obj['EntityID'] = data['EntityID']
obj['UID'] = data['UID']
obj['ParentUID'] = data['ParentUID']
obj['Verified'] = data['Verified']
obj['UXID'] = data['UXID']
obj['PeriodObjects'] = data['PeriodObjects']
else:
obj['PeriodObjects'] = []
obj['PeriodObjects'].append({'Attributes': []})
if 'ValidFrom' in data:
obj['PeriodObjects'][0]['ValidFrom'] = data['ValidFrom']
if 'ValidTo' in data:
obj['PeriodObjects'][0]['ValidTo'] = data['ValidTo']
for key in data.keys():
if key not in objectkeys:
if data[key] in self.lookup_table:
data[key] = self.lookup_table[data[key]]
obj['PeriodObjects'][0]['Attributes'].append({'Name': key, 'Value': str(data[key]).replace('00000000-0000-0000-0000-000000000000', '#NULL')})
else:
obj[key] = str(data[key]).replace('00000000-0000-0000-0000-000000000000', '#NULL')
obj['ActionID'] = 3 # Force-set action as "integration"
elif self.inttype == 'odsf1':
objectkeys = ['DataType','SystemID','ActionID','ExtractionDate','DocumentUID','Ln','inttimestamp']
obj = dict()
obj['dtimestamp'] = data['inttimestamp']
obj['dextractiondate'] = data['ExtractionDate']
obj['dtype'] = data['DataType']
obj['dsystem'] = data['SystemID']
obj['ddocuid'] = data['DocumentUID']
obj['ddocln'] = data['Ln']
obj['ddochash'] = hashlib.md5((str(obj['ddocuid']) + str(obj['ddocln'])).encode('utf-8')).digest()
# filter elements where GUID lookup failed --- NO IMPORT before GUIDs are in MDM
errs = [(k,v) for (k,v) in data.items() if k in self.lookupcolumns and v not in self.lookup_table and v != '00000000-0000-0000-0000-000000000000']
if len(errs) > 0:
logging.warning('Failed to convert GUID for %s', str(errs))
self.convtime += datetime.datetime.now() - st
return 0
obj['dcontent'] = json.dumps({k:self.lookup_table[v] if v in self.lookup_table else v.replace('00000000-0000-0000-0000-000000000000', '#NULL')
for (k,v) in data.items() if k not in objectkeys}, sort_keys=True)
obj['dcontenthash'] = hashlib.md5(obj['dcontent'].encode('utf-8')).digest()
obj['delta'] = False
if obj['ddochash'] in self.cache_dict:
# This line has already been posted, so check whether the last available record is up to date
# flag line as delta
obj['delta'] = True
if self.cache_dict[obj['ddochash']].tobytes() == obj['dcontenthash']:
# Can update some field here with a timestamp to guarantee that data is current
self.convtime += datetime.datetime.now() - st
return 0
# Earlier version exists so we have to create a new record for this version
elif self.inttype == 'staf1':
obj = data.copy()
if obj['odsid'] in self.ods_to_dwh_table:
self.convtime += datetime.datetime.now() - st
return 0
# TODO: this list of fields should be another field in sources table
golden_entities = ['ProjectUID', 'ConstrObjectUID']
for key in golden_entities:
if obj[key] not in self.golden_table:
logging.warning('Failed to find golden ID for record %s %s', str(obj[key]), str(key))
self.convtime += datetime.datetime.now() - st
return 0
obj[key] = self.golden_table[obj[key]]
# treat records which don't need to have golden values - pass nulls to fit the SQL requirements
for key in obj:
if obj[key] == '#NULL':
obj[key] = None
self.convtime += datetime.datetime.now() - st
self.stream.append(obj)
if len(self.stream) == BATCH_SIZE:
self.futures.append(self.executor.submit(self.commitp, {'ContextRef': '', 'Objects': self.stream.copy()}))
self.clear()
return 1
def close(self):
if len(self.stream) > 0:
self.futures.append(self.executor.submit(self.commitp, {'ContextRef': '', 'Objects': self.stream.copy()}))
self.clear()
wait(self.futures)
self.wcounter = sum([f.result() for f in self.futures])
self.executor.shutdown(wait=True)
if self.inttype == 'odsf1':
safeexecute_pgsql(self.pool, 'refresh materialized view mv_fact_lastv', None, self.intconnstr)
self.pool.closeall()
print(self.convtime)
self.active = False
def commitp(self, params=None):
t = datetime.datetime.today()
count = 0
if self.inttype == 'apij1':
if params:
w = params
db = pyodbc.connect(self.intconnstr)
cursor = db.cursor()
cursor.execute('SET TRANSACTION ISOLATION LEVEL SNAPSHOT')
cursor.commit()
query = 'DECLARE @ret int' \
' EXEC @ret = ' + self.intname + ' ?, NULL' \
' SELECT @ret'
try:
count = cursor.execute(query, [str(json.dumps(w)),]).fetchone()[0]
cursor.commit()
except Exception:
logging.error("Unexpected SQL server error, rolling back: %s", sys.exc_info())
logging.error("With object: %s", w)
cursor.rollback()
elif self.inttype == 'odsf1':
if params and 'Objects' in params:
w = params['Objects']
conn = self.pool.getconn()
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
for obj in w:
query = 'INSERT INTO public.fact(dtype, dsystem, ddocuid, ddocln, ddochash, dcontenthash, dcontent, dtimestamp, dextractiondate, delta)' \
' VALUES (%(dtype)s, %(dsystem)s, %(ddocuid)s, %(ddocln)s, %(ddochash)s, %(dcontenthash)s, %(dcontent)s, %(dtimestamp)s, ' \
'%(dextractiondate)s, %(delta)s)'
try:
cur.execute(query, dict(obj))
conn.commit()
count += 1
except Exception:
logging.error("Unexpected PostgreSQL server error, rolling back: %s", sys.exc_info())
logging.error("With object: %s", obj)
conn.rollback()
self.pool.putconn(conn)
elif self.inttype == 'staf1':
# TODO: CHECK
if params:
w = params['Objects']
db = pyodbc.connect(self.intconnstr)
cursor = db.cursor()
query = 'INSERT INTO ' + self.intname + '(' + ','.join(self.intheader) + ') VALUES(' + ','.join(['?' for _ in self.intheader]) + ')'
for obj in w:
try:
cursor.execute(query, tuple([obj[key] for key in self.intheader]))
cursor.commit()
count += 1
except Exception:
logging.error("Unexpected SQL server error, rolling back: %s", sys.exc_info())
logging.error("With query: %s", query)
logging.error("With object: %s", obj)
cursor.rollback()
print('Commit took ' + str(datetime.datetime.today() - t))
return count
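# --- Usage sketch (illustrative only) ---------------------------------------
# The block below shows how a target definition might be wired into IntWriter.
# Every value is a hypothetical placeholder (connection string, table name,
# header columns), not a real configuration; constructing IntWriter requires
# reachable MDM/ODS databases, so the actual calls are left commented out.
if __name__ == '__main__':
    example_target = {
        'type': 'staf1',                              # one of: apij1, odsf1, staf1
        'connstr': '<odbc-or-pg-connection-string>',  # placeholder
        'ssys': 1,                                    # source system code in MDM
        'encoding': 'query',
        'name': 'stg_example_table',                  # hypothetical target table
        'lookupcolumns': ['DocumentUID'],
        'header': ['odsid', 'ProjectUID', 'ConstrObjectUID'],
    }
    # writer = IntWriter(example_target)
    # for record in source_records:   # record: dict keyed by column name
    #     writer.append(record)
    # writer.close()                  # flush pending batches, print timings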
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import six
from girder.exceptions import ValidationException
from girder.models.folder import Folder
from girder.models.setting import Setting
from girder.models.user import User
from tests import base
from server.constants import PluginSettings
def setUpModule():
base.enabledPlugins.append('item_licenses')
base.startServer()
def tearDownModule():
base.stopServer()
class ItemLicensesTestCase(base.TestCase):
def setUp(self):
base.TestCase.setUp(self)
# Create a user
user = {
'email': 'user1@email.com',
'login': 'user1login',
'firstName': 'First',
'lastName': 'Last',
'password': 'user1password',
'admin': False
}
self.user = User().createUser(**user)
# Get user's private folder
folders = Folder().childFolders(self.user, 'user', user=self.user)
for folder in folders:
if folder['name'] == 'Private':
self.folder = folder
break
def testItemCreateInvalid(self):
"""
Test creating items with invalid licenses.
"""
# Create item with a null license
params = {
'name': ' my item name',
'description': ' a description ',
'folderId': self.folder['_id'],
'license': None
}
resp = self.request(path='/item', method='POST', params=params,
user=self.user)
self.assertValidationError(resp, 'license')
# Create item with an invalid license name
params = {
'name': ' my item name',
'description': ' a description ',
'folderId': self.folder['_id'],
'license': 'Unsupported license'
}
resp = self.request(path='/item', method='POST', params=params,
user=self.user)
self.assertValidationError(resp, 'license')
# Create item with a valid license name with extra whitespace
params = {
'name': ' my item name',
'description': ' a description ',
'folderId': self.folder['_id'],
'license': ' The MIT License (MIT) '
}
resp = self.request(path='/item', method='POST', params=params,
user=self.user)
self.assertValidationError(resp, 'license')
def testItemCreateAndUpdate(self):
"""
Test creating, reading, and updating an item, especially with regard to
its license field.
"""
# Create item without specifying a license
params = {
'name': ' my item name',
'description': ' a description ',
'folderId': self.folder['_id']
}
resp = self.request(path='/item', method='POST', params=params,
user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], '')
# Create item with a blank license name
params = {
'name': ' my item name',
'description': ' a description ',
'folderId': self.folder['_id'],
'license': ''
}
resp = self.request(path='/item', method='POST', params=params,
user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], '')
# Fetch item
resp = self.request(path='/item/%s' % resp.json['_id'],
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], '')
# Update item license
params = {
'license': 'Apache License 2'
}
resp = self.request(path='/item/%s' % resp.json['_id'], method='PUT',
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'Apache License 2')
# Fetch item
resp = self.request(path='/item/%s' % resp.json['_id'],
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'Apache License 2')
# Update item license to be unspecified
params = {
'license': ''
}
resp = self.request(path='/item/%s' % resp.json['_id'], method='PUT',
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], '')
# Fetch item
resp = self.request(path='/item/%s' % resp.json['_id'],
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], '')
# Create item with a valid license name
params = {
'name': ' my item name',
'description': ' a description ',
'folderId': self.folder['_id'],
'license': 'The MIT License (MIT)'
}
resp = self.request(path='/item', method='POST', params=params,
user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'The MIT License (MIT)')
# Fetch item
resp = self.request(path='/item/%s' % resp.json['_id'],
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'The MIT License (MIT)')
# Update item
params = {
'name': 'changed name',
'description': 'new description',
'license': 'Apache License 2'
}
resp = self.request(path='/item/%s' % resp.json['_id'], method='PUT',
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'Apache License 2')
# Fetch item
resp = self.request(path='/item/%s' % resp.json['_id'],
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'Apache License 2')
# Update item with the same license name
params = {
'license': 'Apache License 2'
}
resp = self.request(path='/item/%s' % resp.json['_id'], method='PUT',
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'Apache License 2')
def testItemCopy(self):
"""
Test copying an item, especially with regard to its license field.
"""
params = {
'name': 'original item',
'description': 'original description',
'license': 'The MIT License (MIT)',
'folderId': self.folder['_id']
}
# Create item
resp = self.request(path='/item', method='POST', params=params,
user=self.user)
self.assertStatusOk(resp)
origItemId = resp.json['_id']
# Copy to a new item with different name and license.
params = {
'name': 'new item',
'license': 'Apache License 2'
}
resp = self.request(path='/item/%s/copy' % origItemId,
method='POST', user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'Apache License 2')
# Fetch item
resp = self.request(path='/item/%s' % resp.json['_id'],
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'Apache License 2')
def testGetLicenses(self):
"""
Test getting list of licenses.
"""
# Get default settings
resp = self.request(path='/item/licenses', user=self.user, params={
'default': True
})
self.assertStatusOk(resp)
self.assertGreater(len(resp.json), 1)
self.assertIn('category', resp.json[0])
self.assertIn('licenses', resp.json[0])
self.assertGreater(len(resp.json[0]['licenses']), 8)
self.assertIn('name', resp.json[0]['licenses'][0])
self.assertGreater(len(resp.json[0]['licenses'][0]['name']), 0)
self.assertIn('name', resp.json[0]['licenses'][1])
self.assertGreater(len(resp.json[0]['licenses'][1]['name']), 0)
# Get current settings
resp = self.request(path='/item/licenses', user=self.user)
self.assertStatusOk(resp)
self.assertGreater(len(resp.json), 1)
self.assertIn('category', resp.json[0])
self.assertIn('licenses', resp.json[0])
self.assertGreater(len(resp.json[0]['licenses']), 8)
self.assertIn('name', resp.json[0]['licenses'][0])
self.assertGreater(len(resp.json[0]['licenses'][0]['name']), 0)
self.assertIn('name', resp.json[0]['licenses'][1])
self.assertGreater(len(resp.json[0]['licenses'][1]['name']), 0)
# Change licenses
Setting().set(
PluginSettings.LICENSES,
[{'category': 'A', 'licenses': [{'name': '1'}]},
{'category': 'B', 'licenses': [{'name': '2'}, {'name': '3'}]}])
# Get default settings after changing licenses
resp = self.request(path='/item/licenses', user=self.user, params={
'default': True
})
self.assertStatusOk(resp)
self.assertGreater(len(resp.json), 1)
self.assertIn('category', resp.json[0])
self.assertIn('licenses', resp.json[0])
self.assertGreater(len(resp.json[0]['licenses']), 8)
self.assertIn('name', resp.json[0]['licenses'][0])
self.assertGreater(len(resp.json[0]['licenses'][0]['name']), 0)
self.assertIn('name', resp.json[0]['licenses'][1])
self.assertGreater(len(resp.json[0]['licenses'][1]['name']), 0)
# Get current settings after changing licenses
resp = self.request(path='/item/licenses', user=self.user)
self.assertStatusOk(resp)
six.assertCountEqual(
self, resp.json,
[{'category': 'A', 'licenses': [{'name': '1'}]},
{'category': 'B', 'licenses': [{'name': '2'}, {'name': '3'}]}])
def testLicensesSettingValidation(self):
"""
Test validation of licenses setting.
"""
# Test valid settings
Setting().set(
PluginSettings.LICENSES,
[])
Setting().set(
PluginSettings.LICENSES,
[{'category': 'A', 'licenses': []}])
Setting().set(
PluginSettings.LICENSES,
[{'category': 'A', 'licenses': [{'name': '1'}]}])
Setting().set(
PluginSettings.LICENSES,
[{'category': 'A', 'licenses': [{'name': '1'}, {'name': '2'}]}])
Setting().set(
PluginSettings.LICENSES,
[{'category': 'A', 'licenses': []},
{'category': 'B', 'licenses': [{'name': '1'}]}])
Setting().set(
PluginSettings.LICENSES,
[{'category': 'A', 'licenses': []},
{'category': 'B', 'licenses': [{'name': '1'}, {'name': '2'}]}])
# Test invalid top-level types
for val in (None, 1, '', {}, [{}]):
self.assertRaises(ValidationException, Setting().set, PluginSettings.LICENSES, val)
# Test invalid category types
for category, licenses in ((None, []), (1, []), ('', []), ({}, [])):
self.assertRaises(
ValidationException,
Setting().set,
PluginSettings.LICENSES,
[{'category': category, 'licenses': licenses}])
# Test invalid licenses types
for val in (None, {}, [1], ['']):
self.assertRaises(
ValidationException,
Setting().set,
PluginSettings.LICENSES,
[{'category': 'A', 'licenses': val}])
# Test invalid license names
for val in (None, 1, '', {}, []):
self.assertRaises(
ValidationException,
Setting().set,
PluginSettings.LICENSES,
[{'category': 'A', 'licenses': [{'name': val}]}])
|
|
"""Provides global signal dispatching services."""
__author__ = "Patrick K. O'Brien <pobrien@orbtech.com>"
__cvsid__ = "$Id: dispatcher.py 39667 2006-06-11 00:13:05Z RD $"
__revision__ = "$Revision: 39667 $"[11:-2]
import exceptions
import types
import weakref
class DispatcherError(exceptions.Exception):
def __init__(self, args=None):
self.args = args
class Parameter:
"""Used to represent default parameter values."""
def __repr__(self):
return self.__class__.__name__
class Any(Parameter): pass
Any = Any()
class Anonymous(Parameter): pass
Anonymous = Anonymous()
connections = {}
senders = {}
_boundMethods = weakref.WeakKeyDictionary()
def connect(receiver, signal=Any, sender=Any, weak=True):
"""
Connect receiver to sender for signal.
* If sender is Any, receiver will receive signal from any sender.
* If signal is Any, receiver will receive any signal from sender.
* If sender is None, receiver will receive signal from Anonymous.
* If signal is Any and sender is None, receiver will receive any
signal from Anonymous.
* If signal is Any and sender is Any, receiver will receive any
signal from any sender.
* If weak is true, weak references will be used.
"""
if signal is None:
raise DispatcherError, 'signal cannot be None'
if weak:
receiver = safeRef(receiver)
senderkey = id(sender)
signals = {}
if connections.has_key(senderkey):
signals = connections[senderkey]
else:
connections[senderkey] = signals
# Keep track of senders for cleanup.
if sender not in (None, Any):
def remove(object, senderkey=senderkey):
_removeSender(senderkey=senderkey)
# Skip objects that can not be weakly referenced, which means
# they won't be automatically cleaned up, but that's too bad.
try:
weakSender = weakref.ref(sender, remove)
senders[senderkey] = weakSender
except:
pass
receivers = []
if signals.has_key(signal):
receivers = signals[signal]
else:
signals[signal] = receivers
try:
receivers.remove(receiver)
except ValueError:
pass
receivers.append(receiver)
def disconnect(receiver, signal=Any, sender=Any, weak=True):
"""Disconnect receiver from sender for signal.
Disconnecting is not required. The use of disconnect is the same as for
connect, only in reverse. Think of it as undoing a previous connection."""
if signal is None:
raise DispatcherError, 'signal cannot be None'
if weak:
receiver = safeRef(receiver)
senderkey = id(sender)
try:
receivers = connections[senderkey][signal]
except KeyError:
raise DispatcherError, \
'No receivers for signal %r from sender %s' % (signal, sender)
try:
receivers.remove(receiver)
except ValueError:
raise DispatcherError, \
'No connection to receiver %s for signal %r from sender %s' % \
(receiver, signal, sender)
_cleanupConnections(senderkey, signal)
def send(signal, sender=Anonymous, **kwds):
"""Send signal from sender to all connected receivers.
Return a list of tuple pairs [(receiver, response), ... ].
If sender is not specified, signal is sent anonymously."""
senderkey = id(sender)
anykey = id(Any)
# Get receivers that receive *this* signal from *this* sender.
receivers = []
try:
receivers.extend(connections[senderkey][signal])
except KeyError:
pass
# Add receivers that receive *any* signal from *this* sender.
anyreceivers = []
try:
anyreceivers = connections[senderkey][Any]
except KeyError:
pass
for receiver in anyreceivers:
if receivers.count(receiver) == 0:
receivers.append(receiver)
# Add receivers that receive *this* signal from *any* sender.
anyreceivers = []
try:
anyreceivers = connections[anykey][signal]
except KeyError:
pass
for receiver in anyreceivers:
if receivers.count(receiver) == 0:
receivers.append(receiver)
# Add receivers that receive *any* signal from *any* sender.
anyreceivers = []
try:
anyreceivers = connections[anykey][Any]
except KeyError:
pass
for receiver in anyreceivers:
if receivers.count(receiver) == 0:
receivers.append(receiver)
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
responses = []
for receiver in receivers:
if type(receiver) is weakref.ReferenceType \
or isinstance(receiver, BoundMethodWeakref):
# Dereference the weak reference.
receiver = receiver()
if receiver is None:
# This receiver is dead, so skip it.
continue
response = _call(receiver, signal=signal, sender=sender, **kwds)
responses += [(receiver, response)]
return responses
def _call(receiver, **kwds):
"""Call receiver with only arguments it can accept."""
## if type(receiver) is types.InstanceType:
if hasattr(receiver, '__call__') and \
(hasattr(receiver.__call__, 'im_func') or hasattr(receiver.__call__, 'im_code')):
# receiver is a class instance; assume it is callable.
# Reassign receiver to the actual method that will be called.
receiver = receiver.__call__
if hasattr(receiver, 'im_func'):
# receiver is a method. Drop the first argument, usually 'self'.
fc = receiver.im_func.func_code
acceptable = fc.co_varnames[1:fc.co_argcount]
elif hasattr(receiver, 'func_code'):
# receiver is a function.
fc = receiver.func_code
acceptable = fc.co_varnames[0:fc.co_argcount]
else:
raise DispatcherError, 'Unknown receiver %s of type %s' % (receiver, type(receiver))
if not (fc.co_flags & 8):
# fc does not have a **kwds type parameter, therefore
# remove unacceptable arguments.
for arg in kwds.keys():
if arg not in acceptable:
del kwds[arg]
return receiver(**kwds)
def safeRef(object):
"""Return a *safe* weak reference to a callable object."""
if hasattr(object, 'im_self'):
if object.im_self is not None:
# Turn a bound method into a BoundMethodWeakref instance.
# Keep track of these instances for lookup by disconnect().
selfkey = object.im_self
funckey = object.im_func
if not _boundMethods.has_key(selfkey):
_boundMethods[selfkey] = weakref.WeakKeyDictionary()
if not _boundMethods[selfkey].has_key(funckey):
_boundMethods[selfkey][funckey] = \
BoundMethodWeakref(boundMethod=object)
return _boundMethods[selfkey][funckey]
return weakref.ref(object, _removeReceiver)
class BoundMethodWeakref:
"""BoundMethodWeakref class."""
def __init__(self, boundMethod):
"""Return a weak-reference-like instance for a bound method."""
self.isDead = 0
def remove(object, self=self):
"""Set self.isDead to true when method or instance is destroyed."""
self.isDead = 1
_removeReceiver(receiver=self)
self.weakSelf = weakref.ref(boundMethod.im_self, remove)
self.weakFunc = weakref.ref(boundMethod.im_func, remove)
def __repr__(self):
"""Return the closest representation."""
return '<bound method weakref for %s.%s>' % (self.weakSelf, self.weakFunc)
def __call__(self):
"""Return a strong reference to the bound method."""
if self.isDead:
return None
else:
object = self.weakSelf()
method = self.weakFunc().__name__
try: # wxPython hack to handle wxDead objects.
return getattr(object, method)
except AttributeError:
## _removeReceiver(receiver=self)
return None
def _removeReceiver(receiver):
"""Remove receiver from connections."""
for senderkey in connections.keys():
for signal in connections[senderkey].keys():
receivers = connections[senderkey][signal]
try:
receivers.remove(receiver)
except:
pass
_cleanupConnections(senderkey, signal)
def _cleanupConnections(senderkey, signal):
"""Delete any empty signals for senderkey. Delete senderkey if empty."""
receivers = connections[senderkey][signal]
if not receivers:
# No more connected receivers. Therefore, remove the signal.
signals = connections[senderkey]
del signals[signal]
if not signals:
# No more signal connections. Therefore, remove the sender.
_removeSender(senderkey)
def _removeSender(senderkey):
"""Remove senderkey from connections."""
del connections[senderkey]
# Senderkey will only be in senders dictionary if sender
# could be weakly referenced.
try:
del senders[senderkey]
except:
pass
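# --- Usage sketch (illustrative only) ---------------------------------------
# A minimal demonstration of connect/send/disconnect with a plain function
# receiver, assuming the Python 2 environment this module targets. The signal
# name and handler below are hypothetical examples, not part of the dispatcher
# API itself.
if __name__ == '__main__':
    def handle_greeting(signal, sender, message=None):
        print 'received %r from %r: %r' % (signal, sender, message)

    connect(handle_greeting, signal='greeting')
    send('greeting', message='hello')    # handler gets only the kwargs it accepts
    disconnect(handle_greeting, signal='greeting')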
|
|
from ..libmp.backend import xrange
from .calculus import defun
#----------------------------------------------------------------------------#
# Polynomials #
#----------------------------------------------------------------------------#
# XXX: extra precision
@defun
def polyval(ctx, coeffs, x, derivative=False):
r"""
Given coefficients `[c_n, \ldots, c_2, c_1, c_0]` and a number `x`,
:func:`~mpmath.polyval` evaluates the polynomial
.. math ::
P(x) = c_n x^n + \ldots + c_2 x^2 + c_1 x + c_0.
If *derivative=True* is set, :func:`~mpmath.polyval` simultaneously
evaluates `P(x)` with the derivative, `P'(x)`, and returns the
tuple `(P(x), P'(x))`.
>>> from mpmath import *
>>> mp.pretty = True
>>> polyval([3, 0, 2], 0.5)
2.75
>>> polyval([3, 0, 2], 0.5, derivative=True)
(2.75, 3.0)
The coefficients and the evaluation point may be any combination
of real or complex numbers.
"""
if not coeffs:
return ctx.zero
p = ctx.convert(coeffs[0])
q = ctx.zero
for c in coeffs[1:]:
if derivative:
q = p + x*q
p = c + x*p
if derivative:
return p, q
else:
return p
@defun
def polyroots(ctx, coeffs, maxsteps=50, cleanup=True, extraprec=10,
error=False, roots_init=None):
"""
Computes all roots (real or complex) of a given polynomial.
The roots are returned as a sorted list, where real roots appear first
followed by complex conjugate roots as adjacent elements. The polynomial
should be given as a list of coefficients, in the format used by
:func:`~mpmath.polyval`. The leading coefficient must be nonzero.
With *error=True*, :func:`~mpmath.polyroots` returns a tuple *(roots, err)*
where *err* is an estimate of the maximum error among the computed roots.
**Examples**
Finding the three real roots of `x^3 - x^2 - 14x + 24`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> nprint(polyroots([1,-1,-14,24]), 4)
[-4.0, 2.0, 3.0]
Finding the two complex conjugate roots of `4x^2 + 3x + 2`, with an
error estimate::
>>> roots, err = polyroots([4,3,2], error=True)
>>> for r in roots:
... print(r)
...
(-0.375 + 0.59947894041409j)
(-0.375 - 0.59947894041409j)
>>>
>>> err
2.22044604925031e-16
>>>
>>> polyval([4,3,2], roots[0])
(2.22044604925031e-16 + 0.0j)
>>> polyval([4,3,2], roots[1])
(2.22044604925031e-16 + 0.0j)
The following example computes all the 5th roots of unity; that is,
the roots of `x^5 - 1`::
>>> mp.dps = 20
>>> for r in polyroots([1, 0, 0, 0, 0, -1]):
... print(r)
...
1.0
(-0.8090169943749474241 + 0.58778525229247312917j)
(-0.8090169943749474241 - 0.58778525229247312917j)
(0.3090169943749474241 + 0.95105651629515357212j)
(0.3090169943749474241 - 0.95105651629515357212j)
**Precision and conditioning**
The roots are computed to the current working precision accuracy. If this
accuracy cannot be achieved in ``maxsteps`` steps, then a
``NoConvergence`` exception is raised. Internally, the algorithm uses
the current working precision extended by ``extraprec``. If
``NoConvergence`` was raised, that is caused either by not having enough
extra precision to achieve convergence (in which case increasing
``extraprec`` should fix the problem) or too low ``maxsteps`` (in which
case increasing ``maxsteps`` should fix the problem), or a combination of
both.
The user should always do a convergence study with regard to
``extraprec`` to ensure accurate results. It is possible to get
convergence to a wrong answer with too low ``extraprec``.
Provided there are no repeated roots, :func:`~mpmath.polyroots` can
typically compute all roots of an arbitrary polynomial to high precision::
>>> mp.dps = 60
>>> for r in polyroots([1, 0, -10, 0, 1]):
... print(r)
...
-3.14626436994197234232913506571557044551247712918732870123249
-0.317837245195782244725757617296174288373133378433432554879127
0.317837245195782244725757617296174288373133378433432554879127
3.14626436994197234232913506571557044551247712918732870123249
>>>
>>> sqrt(3) + sqrt(2)
3.14626436994197234232913506571557044551247712918732870123249
>>> sqrt(3) - sqrt(2)
0.317837245195782244725757617296174288373133378433432554879127
**Algorithm**
:func:`~mpmath.polyroots` implements the Durand-Kerner method [1], which
uses complex arithmetic to locate all roots simultaneously.
The Durand-Kerner method can be viewed as approximately performing
simultaneous Newton iteration for all the roots. In particular,
the convergence to simple roots is quadratic, just like Newton's
method.
Although all roots are internally calculated using complex arithmetic, any
root found to have an imaginary part smaller than the estimated numerical
error is truncated to a real number (small real parts are also chopped).
Real roots are placed first in the returned list, sorted by value. The
remaining complex roots are sorted by their real parts so that conjugate
roots end up next to each other.
**References**
1. http://en.wikipedia.org/wiki/Durand-Kerner_method
"""
if len(coeffs) <= 1:
if not coeffs or not coeffs[0]:
raise ValueError("Input to polyroots must not be the zero polynomial")
# Constant polynomial with no roots
return []
orig = ctx.prec
tol = +ctx.eps
with ctx.extraprec(extraprec):
deg = len(coeffs) - 1
# Must be monic
lead = ctx.convert(coeffs[0])
if lead == 1:
coeffs = [ctx.convert(c) for c in coeffs]
else:
coeffs = [c/lead for c in coeffs]
f = lambda x: ctx.polyval(coeffs, x)
if roots_init is None:
roots = [ctx.mpc((0.4+0.9j)**n) for n in xrange(deg)]
else:
roots = [None]*deg
deg_init = min(deg, len(roots_init))
roots[:deg_init] = list(roots_init[:deg_init])
roots[deg_init:] = [ctx.mpc((0.4+0.9j)**n) for n
in xrange(deg_init,deg)]
err = [ctx.one for n in xrange(deg)]
# Durand-Kerner iteration until convergence
for step in xrange(maxsteps):
if abs(max(err)) < tol:
break
for i in xrange(deg):
p = roots[i]
x = f(p)
for j in range(deg):
if i != j:
try:
x /= (p-roots[j])
except ZeroDivisionError:
continue
roots[i] = p - x
err[i] = abs(x)
if abs(max(err)) >= tol:
raise ctx.NoConvergence("Didn't converge in maxsteps=%d steps." \
% maxsteps)
# Remove small real or imaginary parts
if cleanup:
for i in xrange(deg):
if abs(roots[i]) < tol:
roots[i] = ctx.zero
elif abs(ctx._im(roots[i])) < tol:
roots[i] = roots[i].real
elif abs(ctx._re(roots[i])) < tol:
roots[i] = roots[i].imag * 1j
roots.sort(key=lambda x: (abs(ctx._im(x)), ctx._re(x)))
if error:
err = max(err)
err = max(err, ctx.ldexp(1, -orig+1))
return [+r for r in roots], +err
else:
return [+r for r in roots]
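# --- Usage note (illustrative sketch) ----------------------------------------
# The ``roots_init`` parameter above is not covered by the docstring: it seeds
# the Durand-Kerner iteration with approximate roots, e.g. from a previous
# lower-precision run, which can reduce the number of refinement steps.
# A minimal sketch, assuming the usual top-level mpmath API:
#
#     >>> from mpmath import mp, polyroots
#     >>> mp.dps = 15
#     >>> approx = polyroots([1, 0, -10, 0, 1])
#     >>> mp.dps = 60
#     >>> refined = polyroots([1, 0, -10, 0, 1], roots_init=approx)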
|
|
import json
import os.path
import random
import re
import uuid
import zipfile
from django import forms
from django.conf import settings
from django.core.cache import cache
from django.db.models import Q
from django.forms import ValidationError
from django.utils.translation import ugettext
import six
import waffle
from django_statsd.clients import statsd
from olympia import amo
from olympia.amo.utils import normalize_string, to_language
from olympia.constants.categories import CATEGORIES_BY_ID
from olympia.discovery.utils import call_recommendation_server
from olympia.lib.cache import memoize, memoize_key
from olympia.translations.fields import LocaleErrorMessage
def generate_addon_guid():
return '{%s}' % str(uuid.uuid4())
def clear_get_featured_ids_cache(*args, **kwargs):
cache_key = memoize_key('addons:featured', *args, **kwargs)
cache.delete(cache_key)
@memoize('addons:featured', timeout=60 * 10)
def get_featured_ids(app=None, lang=None, type=None, types=None):
from olympia.addons.models import Addon
ids = []
is_featured = Q(collections__featuredcollection__isnull=False)
if app:
is_featured &= Q(collections__featuredcollection__application=app.id)
qs = Addon.objects.valid()
if type:
qs = qs.filter(type=type)
elif types:
qs = qs.filter(type__in=types)
if lang:
has_locale = qs.filter(
is_featured &
Q(collections__featuredcollection__locale__iexact=lang))
if has_locale.exists():
ids += list(has_locale.distinct().values_list('id', flat=True))
none_qs = qs.filter(
is_featured &
Q(collections__featuredcollection__locale__isnull=True))
blank_qs = qs.filter(is_featured &
Q(collections__featuredcollection__locale=''))
qs = none_qs | blank_qs
else:
qs = qs.filter(is_featured)
other_ids = list(qs.distinct().values_list('id', flat=True))
random.shuffle(ids)
random.shuffle(other_ids)
ids += other_ids
return list(map(int, ids))
@memoize('addons:creatured', timeout=60 * 10)
def get_creatured_ids(category, lang=None):
from olympia.addons.models import Addon
from olympia.bandwagon.models import FeaturedCollection
if lang:
lang = lang.lower()
per_locale = set()
if isinstance(category, int):
category = CATEGORIES_BY_ID[category]
app_id = category.application
others = (Addon.objects.public()
.filter(
Q(collections__featuredcollection__locale__isnull=True) |
Q(collections__featuredcollection__locale=''),
collections__featuredcollection__isnull=False,
collections__featuredcollection__application=app_id,
category=category.id)
.distinct()
.values_list('id', flat=True))
if lang is not None and lang != '':
possible_lang_match = FeaturedCollection.objects.filter(
locale__icontains=lang,
application=app_id,
collection__addons__category=category.id).distinct()
for fc in possible_lang_match:
if lang in fc.locale.lower().split(','):
per_locale.update(
fc.collection.addons
.filter(category=category.id)
.values_list('id', flat=True))
others = list(others)
per_locale = list(per_locale)
random.shuffle(others)
random.shuffle(per_locale)
return list(map(int, filter(None, per_locale + others)))
def verify_mozilla_trademark(name, user, form=None):
skip_trademark_check = (
user and user.is_authenticated and user.email and
user.email.endswith(amo.ALLOWED_TRADEMARK_SUBMITTING_EMAILS))
def _check(name):
name = normalize_string(name, strip_punctuation=True).lower()
for symbol in amo.MOZILLA_TRADEMARK_SYMBOLS:
if waffle.switch_is_active('content-optimization'):
violates_trademark = symbol in name
else:
violates_trademark = (
name.count(symbol) > 1 or (
name.count(symbol) >= 1 and not
name.endswith(' for {}'.format(symbol))))
if violates_trademark:
raise forms.ValidationError(ugettext(
u'Add-on names cannot contain the Mozilla or '
u'Firefox trademarks.'))
if not skip_trademark_check:
if not isinstance(name, dict):
_check(name)
else:
for locale, localized_name in name.items():
try:
_check(localized_name)
except forms.ValidationError as exc:
if form is not None:
for message in exc.messages:
error_message = LocaleErrorMessage(
message=message, locale=locale)
form.add_error('name', error_message)
else:
raise
return name
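# Illustrative sketch of the check above (hypothetical add-on names, assuming
# amo.MOZILLA_TRADEMARK_SYMBOLS includes 'firefox'): with the
# 'content-optimization' switch off, a single trailing "for Firefox" is
# tolerated while other uses of the trademark are rejected.
#
#     verify_mozilla_trademark('Tab Helper for Firefox', user)  # passes
#     verify_mozilla_trademark('Firefox Tab Helper', user)      # ValidationError
#
# With the switch on, any occurrence of a trademark symbol raises.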
TAAR_LITE_FALLBACKS = [
'enhancerforyoutube@maximerf.addons.mozilla.org', # /enhancer-for-youtube/
'{2e5ff8c8-32fe-46d0-9fc8-6b8986621f3c}', # /search_by_image/
'uBlock0@raymondhill.net', # /ublock-origin/
'newtaboverride@agenedia.com'] # /new-tab-override/
TAAR_LITE_OUTCOME_REAL_SUCCESS = 'recommended'
TAAR_LITE_OUTCOME_REAL_FAIL = 'recommended_fallback'
TAAR_LITE_OUTCOME_CURATED = 'curated'
TAAR_LITE_FALLBACK_REASON_TIMEOUT = 'timeout'
TAAR_LITE_FALLBACK_REASON_EMPTY = 'no_results'
TAAR_LITE_FALLBACK_REASON_INVALID = 'invalid_results'
def get_addon_recommendations(guid_param, taar_enable):
guids = None
fail_reason = None
if taar_enable:
guids = call_recommendation_server(
settings.TAAR_LITE_RECOMMENDATION_ENGINE_URL, guid_param, {})
outcome = (TAAR_LITE_OUTCOME_REAL_SUCCESS if guids
else TAAR_LITE_OUTCOME_REAL_FAIL)
if not guids:
fail_reason = (TAAR_LITE_FALLBACK_REASON_EMPTY if guids == []
else TAAR_LITE_FALLBACK_REASON_TIMEOUT)
else:
outcome = TAAR_LITE_OUTCOME_CURATED
if not guids:
guids = TAAR_LITE_FALLBACKS
return guids, outcome, fail_reason
def is_outcome_recommended(outcome):
return outcome == TAAR_LITE_OUTCOME_REAL_SUCCESS
def get_addon_recommendations_invalid():
return (
TAAR_LITE_FALLBACKS, TAAR_LITE_OUTCOME_REAL_FAIL,
TAAR_LITE_FALLBACK_REASON_INVALID)
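# Illustrative sketch of how the triple returned above is typically consumed
# (hypothetical GUID; assumes the TAAR settings and recommendation server used
# by call_recommendation_server are configured):
#
#     guids, outcome, reason = get_addon_recommendations(
#         '{2e5ff8c8-32fe-46d0-9fc8-6b8986621f3c}', taar_enable=True)
#     if is_outcome_recommended(outcome):
#         pass  # personalized results from the recommendation server
#     else:
#         assert guids == TAAR_LITE_FALLBACKS  # curated/fallback path; see `reason`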
MULTIPLE_STOPS_REGEX = re.compile(r'\.{2,}')
@statsd.timer('addons.tasks.migrate_lwts_to_static_theme.build_xpi')
def build_static_theme_xpi_from_lwt(lwt, upload_zip):
# create manifest
accentcolor = (('#%s' % lwt.persona.accentcolor) if lwt.persona.accentcolor
else amo.THEME_FRAME_COLOR_DEFAULT)
textcolor = '#%s' % (lwt.persona.textcolor or '000')
lwt_header = MULTIPLE_STOPS_REGEX.sub(
u'.', six.text_type(lwt.persona.header))
manifest = {
"manifest_version": 2,
"name": six.text_type(lwt.name) or six.text_type(lwt.slug),
"version": '1.0',
"theme": {
"images": {
"theme_frame": lwt_header
},
"colors": {
"frame": accentcolor,
"tab_background_text": textcolor
}
}
}
if lwt.description:
manifest['description'] = six.text_type(lwt.description)
# build zip with manifest and background file
with zipfile.ZipFile(upload_zip, 'w', zipfile.ZIP_DEFLATED) as dest:
dest.writestr('manifest.json', json.dumps(manifest))
dest.write(lwt.persona.header_path, arcname=lwt_header)
def build_webext_dictionary_from_legacy(addon, destination):
"""Create a webext package of a legacy dictionary `addon`, and put it in
`destination` path."""
from olympia.files.utils import SafeZip # Avoid circular import.
old_path = addon.current_version.all_files[0].file_path
old_zip = SafeZip(old_path)
if not old_zip.is_valid:
raise ValidationError('Current dictionary xpi is not valid')
dictionary_path = ''
with zipfile.ZipFile(destination, 'w', zipfile.ZIP_DEFLATED) as new_zip:
for obj in old_zip.filelist:
splitted = obj.filename.split('/')
# Ignore useless directories and files.
if splitted[0] in ('META-INF', '__MACOSX', 'chrome',
'chrome.manifest', 'install.rdf'):
continue
# Also ignore javascript (regardless of where they are, not just at
# the root), since dictionaries should not contain any code.
if splitted[-1].endswith('.js'):
continue
# Store the path of the last .dic file we find. It can be inside a
# directory.
if (splitted[-1].endswith('.dic')):
dictionary_path = obj.filename
new_zip.writestr(obj.filename, old_zip.read(obj.filename))
# Now that all files we want from the old zip are copied, build and
# add manifest.json.
if not dictionary_path:
# This should not happen... It likely means it's an invalid
# dictionary to begin with, or one that has its .dic file in a
# chrome/ directory for some reason. Abort!
raise ValidationError('Current dictionary xpi has no .dic file')
if addon.target_locale:
target_language = addon.target_locale
else:
# Guess target_locale since we don't have one already. Note that
# for extra confusion, target_locale is a language, not a locale.
target_language = to_language(os.path.splitext(
os.path.basename(dictionary_path))[0])
if target_language not in settings.AMO_LANGUAGES:
# We couldn't find that language in the list we support. Let's
# try with just the prefix.
target_language = target_language.split('-')[0]
if target_language not in settings.AMO_LANGUAGES:
# We tried our best.
raise ValidationError(u'Addon has no target_locale and we'
u' could not guess one from the xpi')
# Dumb version number increment. This will be invalid in some cases,
# but some of the dictionaries we have currently already have wild
# version numbers anyway.
version_number = addon.current_version.version
if version_number.endswith('.1-typefix'):
version_number = version_number.replace('.1-typefix', '.2webext')
else:
version_number = '%s.1webext' % version_number
manifest = {
'manifest_version': 2,
'name': six.text_type(addon.name),
'browser_specific_settings': {
'gecko': {
'id': addon.guid,
},
},
'version': version_number,
'dictionaries': {target_language: dictionary_path},
}
# Write the manifest.json we just built.
new_zip.writestr('manifest.json', json.dumps(manifest))
|
|
'''
Builder
======
Class used for the registering and application of rules for specific widgets.
'''
import codecs
import sys
import types
from os import environ
from os.path import join
from copy import copy
from types import CodeType
from functools import partial
from kivy.factory import Factory
from kivy.lang.parser import Parser, ParserException, _handlers, global_idmap,\
ParserRuleProperty
from kivy.logger import Logger
from kivy.utils import QueryDict
from kivy.cache import Cache
from kivy import kivy_data_dir
from kivy.compat import PY2, iteritems, iterkeys
from kivy.context import register_context
from kivy.resources import resource_find
from kivy._event import Observable, EventDispatcher
__all__ = ('Observable', 'Builder', 'BuilderBase', 'BuilderException')
trace = Logger.trace
# class types to check with isinstance
if PY2:
_cls_type = (type, types.ClassType)
else:
_cls_type = (type, )
# late import
Instruction = None
# delayed calls are canvas expressions triggered during a loop. It is a one-
# directional linked list of args to call call_fn with. Each element is a list
# whose last element points to the next list of args to execute when
# Builder.sync is called.
_delayed_start = None
class BuilderException(ParserException):
'''Exception raised when the Builder failed to apply a rule on a widget.
'''
pass
def get_proxy(widget):
try:
return widget.proxy_ref
except AttributeError:
return widget
def custom_callback(__kvlang__, idmap, *largs, **kwargs):
idmap['args'] = largs
exec(__kvlang__.co_value, idmap)
def call_fn(args, instance, v):
element, key, value, rule, idmap = args
if __debug__ and not environ.get('KIVY_UNITTEST_NOBUILDERTRACE'):
trace('Lang: call_fn %s, key=%s, value=%r, %r' % (
element, key, value, rule.value))
rule.count += 1
e_value = eval(value, idmap)
if __debug__ and not environ.get('KIVY_UNITTEST_NOBUILDERTRACE'):
trace('Lang: call_fn => value=%r' % (e_value, ))
setattr(element, key, e_value)
def delayed_call_fn(args, instance, v):
# it's already on the list
if args[-1] is not None:
return
global _delayed_start
if _delayed_start is None:
_delayed_start = args
args[-1] = StopIteration
else:
args[-1] = _delayed_start
_delayed_start = args
def update_intermediates(base, keys, bound, s, fn, args, instance, value):
''' Function that is called when an intermediate property is updated
and `rebind` of that property is True. In that case, we unbind
all bound funcs that were bound to attrs of the old value of the
property and rebind to the new value of the property.
For example, if the rule is `self.a.b.c.d`, then when b is changed, we
unbind from `b`, `c` and `d`, if they were bound before (they were not
None and `rebind` of the respective properties was True) and we rebind
to the new values of the attrs `b`, `c`, `d` that are not None and
`rebind` is True.
:Parameters:
`base`
A (proxied) ref to the base widget, `self` in the example
above.
`keys`
A list of the names of the attrs of `base` being watched. In
the example above it'd be `['a', 'b', 'c', 'd']`.
`bound`
A list of 4-tuples, each tuple being (widget, attr, callback, uid)
representing callback functions bound to the attribute `attr`
of `widget`. `uid` is returned by `fbind` when binding.
The callback may be None, in which case the attr
was not bound, but is there to be able to walk the attr tree.
E.g. in the example above, if `b` was not an eventdispatcher,
`(_b_ref_, `c`, None)` would be added to the list so we can get
to `c` and `d`, which may be eventdispatchers and their attrs.
`s`
The index in `keys` of the attr that needs to be
updated. That is, all the keys from `s` onward will be
rebound, since the `s` key was changed. In bound, the
corresponding index is `s - 1`. If `s` is None, we start from
1 (first attr).
`fn`
The function to be called with `args` when the bound property changes.
'''
# first remove all the old bound functions from `s` and down.
for f, k, fun, uid in bound[s:]:
if fun is None:
continue
try:
f.unbind_uid(k, uid)
except ReferenceError:
pass
del bound[s:]
# find the first attr from which we need to start rebinding.
f = getattr(*bound[-1][:2])
if f is None:
fn(args, None, None)
return
s += 1
append = bound.append
# bind all attrs, except last to update_intermediates
for val in keys[s:-1]:
# if we need to dynamically rebind, bind; otherwise just
# add the attr to the list
if isinstance(f, (EventDispatcher, Observable)):
prop = f.property(val, True)
if prop is not None and getattr(prop, 'rebind', False):
# fbind should not dispatch, otherwise
# update_intermediates might be called in the middle
# here messing things up
uid = f.fbind(
val, update_intermediates, base, keys, bound, s, fn, args)
append([f.proxy_ref, val, update_intermediates, uid])
else:
append([f.proxy_ref, val, None, None])
else:
append([getattr(f, 'proxy_ref', f), val, None, None])
f = getattr(f, val, None)
if f is None:
break
s += 1
# for the last attr we bind directly to the setting function,
# because that attr sets the value of the rule.
if isinstance(f, (EventDispatcher, Observable)):
uid = f.fbind(keys[-1], fn, args)
if uid:
append([f.proxy_ref, keys[-1], fn, uid])
# when we rebind we have to update the
# rule with the most recent value, otherwise, the value might be wrong
# and wouldn't be updated since we might not have tracked it before.
# This only happens for a callback when rebind was True for the prop.
fn(args, None, None)
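# Illustrative kv sketch for the rebind walk above (hypothetical widget and
# property names): a rule such as
#
#     <MyWidget>:
#         text: self.a.b.c.d
#
# watches the chain ['a', 'b', 'c', 'd']; when `b` changes and its property
# was declared with rebind=True, the callbacks bound to the old `b`, `c` and
# `d` are unbound here and re-bound on the new objects before `fn`
# re-evaluates the rule.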
def create_handler(iself, element, key, value, rule, idmap, delayed=False):
idmap = copy(idmap)
idmap.update(global_idmap)
idmap['self'] = iself.proxy_ref
bound_list = _handlers[iself.uid][key]
handler_append = bound_list.append
# we need a hash for when delayed, so we don't execute duplicate canvas
# callbacks from the same handler during a sync op
if delayed:
fn = delayed_call_fn
args = [element, key, value, rule, idmap, None] # see _delayed_start
else:
fn = call_fn
args = (element, key, value, rule, idmap)
# bind every key.value
if rule.watched_keys is not None:
for keys in rule.watched_keys:
base = idmap.get(keys[0])
if base is None:
continue
f = base = getattr(base, 'proxy_ref', base)
bound = []
was_bound = False
append = bound.append
# bind all attrs, except last to update_intermediates
k = 1
for val in keys[1:-1]:
# if we need to dynamically rebind, bind; otherwise
# just add the attr to the list
if isinstance(f, (EventDispatcher, Observable)):
prop = f.property(val, True)
if prop is not None and getattr(prop, 'rebind', False):
# fbind should not dispatch, otherwise
# update_intermediates might be called in the middle
# here messing things up
uid = f.fbind(
val, update_intermediates, base, keys, bound, k,
fn, args)
append([f.proxy_ref, val, update_intermediates, uid])
was_bound = True
else:
append([f.proxy_ref, val, None, None])
elif not isinstance(f, _cls_type):
append([getattr(f, 'proxy_ref', f), val, None, None])
else:
append([f, val, None, None])
f = getattr(f, val, None)
if f is None:
break
k += 1
# for the last attr we bind directly to the setting
# function, because that attr sets the value of the rule.
if isinstance(f, (EventDispatcher, Observable)):
uid = f.fbind(keys[-1], fn, args) # f is not None
if uid:
append([f.proxy_ref, keys[-1], fn, uid])
was_bound = True
if was_bound:
handler_append(bound)
try:
return eval(value, idmap), bound_list
except Exception as e:
tb = sys.exc_info()[2]
raise BuilderException(rule.ctx, rule.line,
'{}: {}'.format(e.__class__.__name__, e),
cause=tb)
class BuilderBase(object):
'''The Builder is responsible for creating a :class:`Parser` for parsing a
kv file, merging the results into its internal rules, templates, etc.
By default, :class:`Builder` is a global Kivy instance used in widgets
that you can use to load other kv files in addition to the default ones.
'''
_match_cache = {}
_match_name_cache = {}
def __init__(self):
super(BuilderBase, self).__init__()
self.files = []
self.dynamic_classes = {}
self.templates = {}
self.rules = []
self.rulectx = {}
def load_file(self, filename, **kwargs):
'''Insert a file into the language builder and return the root widget
(if defined) of the kv file.
:parameters:
`rulesonly`: bool, defaults to False
If True, the Builder will raise an exception if you have a root
widget inside the definition.
'''
filename = resource_find(filename) or filename
if __debug__ and not environ.get('KIVY_UNITTEST_NOBUILDERTRACE'):
trace('Lang: load file %s' % filename)
with open(filename, 'r') as fd:
kwargs['filename'] = filename
data = fd.read()
# remove bom ?
if PY2:
if data.startswith((codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)):
raise ValueError('Unsupported UTF16 for kv files.')
if data.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)):
raise ValueError('Unsupported UTF32 for kv files.')
if data.startswith(codecs.BOM_UTF8):
data = data[len(codecs.BOM_UTF8):]
return self.load_string(data, **kwargs)
def unload_file(self, filename):
'''Unload all rules associated with a previously imported file.
.. versionadded:: 1.0.8
.. warning::
This will not remove rules or templates already applied/used on
current widgets. It will only affect the next widget creation or
template invocation.
'''
# remove rules and templates
filename = resource_find(filename) or filename
self.rules = [x for x in self.rules if x[1].ctx.filename != filename]
self._clear_matchcache()
templates = {}
for x, y in self.templates.items():
if y[2] != filename:
templates[x] = y
self.templates = templates
if filename in self.files:
self.files.remove(filename)
# unregister all the dynamic classes
Factory.unregister_from_filename(filename)
def load_string(self, string, **kwargs):
'''Insert a string into the Language Builder and return the root widget
(if defined) of the kv string.
:Parameters:
`rulesonly`: bool, defaults to False
If True, the Builder will raise an exception if you have a root
widget inside the definition.
'''
kwargs.setdefault('rulesonly', False)
self._current_filename = fn = kwargs.get('filename', None)
# put a warning if a file is loaded multiple times
if fn in self.files:
Logger.warning(
'Lang: The file {} is loaded multiple times, '
'you might have unwanted behaviors.'.format(fn))
try:
# parse the string
parser = Parser(content=string, filename=fn)
# merge rules with our rules
self.rules.extend(parser.rules)
self._clear_matchcache()
# add the template found by the parser into ours
for name, cls, template in parser.templates:
self.templates[name] = (cls, template, fn)
Factory.register(name,
cls=partial(self.template, name),
is_template=True, warn=True)
# register all the dynamic classes
for name, baseclasses in iteritems(parser.dynamic_classes):
Factory.register(name, baseclasses=baseclasses, filename=fn,
warn=True)
# create root object if it exists
if kwargs['rulesonly'] and parser.root:
filename = kwargs.get('filename', '<string>')
raise Exception('The file <%s> also contains non-rule '
'directives' % filename)
# save the loaded files only if there is a root without
# template/dynamic classes
if fn and (parser.templates or
parser.dynamic_classes or parser.rules):
self.files.append(fn)
if parser.root:
widget = Factory.get(parser.root.name)()
self._apply_rule(widget, parser.root, parser.root)
return widget
finally:
self._current_filename = None
def template(self, *args, **ctx):
'''Create a specialized template using a specific context.
.. versionadded:: 1.0.5
With templates, you can construct custom widgets from a kv lang
definition by giving them a context. Check :ref:`Template usage
<template_usage>`.
'''
# Prevent naming clash with whatever the user might be putting into the
# ctx as key.
name = args[0]
if name not in self.templates:
raise Exception('Unknown <%s> template name' % name)
baseclasses, rule, fn = self.templates[name]
key = '%s|%s' % (name, baseclasses)
cls = Cache.get('kv.lang', key)
if cls is None:
rootwidgets = []
for basecls in baseclasses.split('+'):
rootwidgets.append(Factory.get(basecls))
cls = type(name, tuple(rootwidgets), {})
Cache.append('kv.lang', key, cls)
widget = cls()
# in previous versions, ``ctx`` is passed as is as ``template_ctx``
# preventing widgets in it from being collected by the GC. This was
# especially relevant to AccordionItem's title_template.
proxy_ctx = {k: get_proxy(v) for k, v in ctx.items()}
self._apply_rule(widget, rule, rule, template_ctx=proxy_ctx)
return widget
def apply_rules(self, widget, rule_name, ignored_consts=set()):
'''Search all the rules that match `rule_name` widget
and apply them to `widget`.
.. versionadded:: 1.10.0
`ignored_consts` is a set or list type whose elements are property
names for which constant KV rules (i.e. those that don't create
bindings) of that widget will not be applied. This allows e.g. skipping
constant rules that overwrite a value initialized in python.
'''
rules = self.match_rule_name(rule_name)
if __debug__ and not environ.get('KIVY_UNITTEST_NOBUILDERTRACE'):
trace('Lang: Found %d rules for %s' % (len(rules), rule_name))
if not rules:
return
for rule in rules:
self._apply_rule(widget, rule, rule, ignored_consts=ignored_consts)
def apply(self, widget, ignored_consts=set()):
'''Search all the rules that match the widget and apply them.
`ignored_consts` is a set or list type whose elements are property
names for which constant KV rules (i.e. those that don't create
bindings) of that widget will not be applied. This allows e.g. skipping
constant rules that overwrite a value initialized in python.
'''
rules = self.match(widget)
if __debug__ and not environ.get('KIVY_UNITTEST_NOBUILDERTRACE'):
trace('Lang: Found %d rules for %s' % (len(rules), widget))
if not rules:
return
for rule in rules:
self._apply_rule(widget, rule, rule, ignored_consts=ignored_consts)
def _clear_matchcache(self):
BuilderBase._match_cache = {}
BuilderBase._match_name_cache = {}
def _apply_rule(self, widget, rule, rootrule, template_ctx=None,
ignored_consts=set()):
# widget: the current instantiated widget
# rule: the current rule
# rootrule: the current root rule (for children of a rule)
# will collect reference to all the id in children
assert(rule not in self.rulectx)
self.rulectx[rule] = rctx = {
'ids': {'root': widget.proxy_ref},
'set': [], 'hdl': []}
# extract the context of the rootrule (not rule!)
assert(rootrule in self.rulectx)
rctx = self.rulectx[rootrule]
# if a template context is passed, put it as "ctx"
if template_ctx is not None:
rctx['ids']['ctx'] = QueryDict(template_ctx)
# if we got an id, put it in the root rule for a later global usage
if rule.id:
# use only the first word as `id`; discard the rest.
rule.id = rule.id.split('#', 1)[0].strip()
rctx['ids'][rule.id] = widget.proxy_ref
# set the id name as an attribute of the root widget so that python
# code can simply access root_widget.id_name
_ids = dict(rctx['ids'])
_root = _ids.pop('root')
_new_ids = _root.ids
for _key in iterkeys(_ids):
if _ids[_key] == _root:
# skip on self
continue
_new_ids[_key] = _ids[_key]
_root.ids = _new_ids
# first, ensure that the widget has all the properties used in
# the rule; if not, they will be created as ObjectProperty.
rule.create_missing(widget)
# build the widget canvas
if rule.canvas_before:
with widget.canvas.before:
self._build_canvas(widget.canvas.before, widget,
rule.canvas_before, rootrule)
if rule.canvas_root:
with widget.canvas:
self._build_canvas(widget.canvas, widget,
rule.canvas_root, rootrule)
if rule.canvas_after:
with widget.canvas.after:
self._build_canvas(widget.canvas.after, widget,
rule.canvas_after, rootrule)
# create children tree
Factory_get = Factory.get
Factory_is_template = Factory.is_template
for crule in rule.children:
cname = crule.name
if cname in ('canvas', 'canvas.before', 'canvas.after'):
raise ParserException(
crule.ctx, crule.line,
'Canvas instructions added in kv must '
'be declared before child widgets.')
# depending on whether the child rule is a template or not, we
# take a different approach
cls = Factory_get(cname)
if Factory_is_template(cname):
# we got a template, so extract all the properties and
# handlers, and push them in a "ctx" dictionary.
ctx = {}
idmap = copy(global_idmap)
idmap.update({'root': rctx['ids']['root']})
if 'ctx' in rctx['ids']:
idmap.update({'ctx': rctx['ids']['ctx']})
try:
for prule in crule.properties.values():
value = prule.co_value
if type(value) is CodeType:
value = eval(value, idmap)
ctx[prule.name] = value
for prule in crule.handlers:
value = eval(prule.value, idmap)
ctx[prule.name] = value
except Exception as e:
tb = sys.exc_info()[2]
raise BuilderException(
prule.ctx, prule.line,
'{}: {}'.format(e.__class__.__name__, e), cause=tb)
# create the template with an explicit ctx
child = cls(**ctx)
widget.add_widget(child)
# reference it on our root rule context
if crule.id:
rctx['ids'][crule.id] = child
else:
# we got a "normal" rule, construct it manually
# we can't construct it without __no_builder=True, because the
# previous implementation was doing the add_widget() before
# apply(), and so, we could use "self.parent".
child = cls(__no_builder=True)
widget.add_widget(child)
self.apply(child)
self._apply_rule(child, crule, rootrule)
# append the properties and handlers to our final resolution task
if rule.properties:
rctx['set'].append((widget.proxy_ref,
list(rule.properties.values())))
for key, crule in rule.properties.items():
# clear previously applied rules if asked
if crule.ignore_prev:
Builder.unbind_property(widget, key)
if rule.handlers:
rctx['hdl'].append((widget.proxy_ref, rule.handlers))
        # if we are applying another rule than the root one, then it's done
        # for us!
if rootrule is not rule:
del self.rulectx[rule]
return
# normally, we can apply a list of properties with a proper context
try:
rule = None
for widget_set, rules in reversed(rctx['set']):
for rule in rules:
assert(isinstance(rule, ParserRuleProperty))
key = rule.name
value = rule.co_value
if type(value) is CodeType:
value, bound = create_handler(
widget_set, widget_set, key, value, rule,
rctx['ids'])
                        # apply the value unless it's an ignored constant of
                        # this widget
if (widget_set != widget or bound or
key not in ignored_consts):
setattr(widget_set, key, value)
else:
if (widget_set != widget or
key not in ignored_consts):
setattr(widget_set, key, value)
except Exception as e:
if rule is not None:
tb = sys.exc_info()[2]
raise BuilderException(rule.ctx, rule.line,
'{}: {}'.format(e.__class__.__name__,
e), cause=tb)
raise e
# build handlers
try:
crule = None
for widget_set, rules in rctx['hdl']:
for crule in rules:
assert(isinstance(crule, ParserRuleProperty))
assert(crule.name.startswith('on_'))
key = crule.name
if not widget_set.is_event_type(key):
key = key[3:]
idmap = copy(global_idmap)
idmap.update(rctx['ids'])
idmap['self'] = widget_set.proxy_ref
if not widget_set.fbind(key, custom_callback, crule,
idmap):
raise AttributeError(key)
# hack for on_parent
if crule.name == 'on_parent':
Factory.Widget.parent.dispatch(widget_set.__self__)
except Exception as e:
if crule is not None:
tb = sys.exc_info()[2]
raise BuilderException(
crule.ctx, crule.line,
'{}: {}'.format(e.__class__.__name__, e), cause=tb)
raise e
# rule finished, forget it
del self.rulectx[rootrule]
def match(self, widget):
'''Return a list of :class:`ParserRule` objects matching the widget.
'''
cache = BuilderBase._match_cache
k = (widget.__class__, widget.id, tuple(widget.cls))
if k in cache:
return cache[k]
rules = []
for selector, rule in self.rules:
if selector.match(widget):
if rule.avoid_previous_rules:
del rules[:]
rules.append(rule)
cache[k] = rules
return rules
def match_rule_name(self, rule_name):
        '''Return a list of :class:`ParserRule` objects matching the given
        rule name.
'''
cache = BuilderBase._match_name_cache
rule_name = str(rule_name)
k = rule_name.lower()
if k in cache:
return cache[k]
rules = []
for selector, rule in self.rules:
if selector.match_rule_name(rule_name):
if rule.avoid_previous_rules:
del rules[:]
rules.append(rule)
cache[k] = rules
return rules
def sync(self):
'''Execute all the waiting operations, such as the execution of all the
expressions related to the canvas.
.. versionadded:: 1.7.0
'''
global _delayed_start
next_args = _delayed_start
if next_args is None:
return
while next_args is not StopIteration:
# is this try/except still needed? yes, in case widget died in this
# frame after the call was scheduled
try:
call_fn(next_args[:-1], None, None)
except ReferenceError:
pass
args = next_args
next_args = args[-1]
args[-1] = None
_delayed_start = None
def unbind_widget(self, uid):
'''Unbind all the handlers created by the KV rules of the
widget. The :attr:`kivy.uix.widget.Widget.uid` is passed here
instead of the widget itself, because Builder is using it in the
widget destructor.
This effectively clears all the KV rules associated with this widget.
For example:
.. code-block:: python
>>> w = Builder.load_string(\'''
... Widget:
... height: self.width / 2. if self.disabled else self.width
... x: self.y + 50
... \''')
>>> w.size
[100, 100]
>>> w.pos
[50, 0]
>>> w.width = 500
>>> w.size
[500, 500]
>>> Builder.unbind_widget(w.uid)
>>> w.width = 222
>>> w.y = 500
>>> w.size
[222, 500]
>>> w.pos
[50, 500]
.. versionadded:: 1.7.2
'''
if uid not in _handlers:
return
for prop_callbacks in _handlers[uid].values():
for callbacks in prop_callbacks:
for f, k, fn, bound_uid in callbacks:
if fn is None: # it's not a kivy prop.
continue
try:
f.unbind_uid(k, bound_uid)
except ReferenceError:
# proxy widget is already gone, that's cool :)
pass
del _handlers[uid]
def unbind_property(self, widget, name):
'''Unbind the handlers created by all the rules of the widget that set
the name.
        This effectively clears all the rules of the widget that take the
        form::
name: rule
For example:
.. code-block:: python
>>> w = Builder.load_string(\'''
... Widget:
... height: self.width / 2. if self.disabled else self.width
... x: self.y + 50
... \''')
>>> w.size
[100, 100]
>>> w.pos
[50, 0]
>>> w.width = 500
>>> w.size
[500, 500]
>>> Builder.unbind_property(w, 'height')
>>> w.width = 222
>>> w.size
[222, 500]
>>> w.y = 500
>>> w.pos
[550, 500]
.. versionadded:: 1.9.1
'''
uid = widget.uid
if uid not in _handlers:
return
prop_handlers = _handlers[uid]
if name not in prop_handlers:
return
for callbacks in prop_handlers[name]:
for f, k, fn, bound_uid in callbacks:
if fn is None: # it's not a kivy prop.
continue
try:
f.unbind_uid(k, bound_uid)
except ReferenceError:
# proxy widget is already gone, that's cool :)
pass
del prop_handlers[name]
if not prop_handlers:
del _handlers[uid]
def _build_canvas(self, canvas, widget, rule, rootrule):
global Instruction
if Instruction is None:
Instruction = Factory.get('Instruction')
idmap = copy(self.rulectx[rootrule]['ids'])
for crule in rule.children:
name = crule.name
if name == 'Clear':
canvas.clear()
continue
instr = Factory.get(name)()
if not isinstance(instr, Instruction):
raise BuilderException(
crule.ctx, crule.line,
'You can add only graphics Instruction in canvas.')
try:
for prule in crule.properties.values():
key = prule.name
value = prule.co_value
if type(value) is CodeType:
value, _ = create_handler(
widget, instr.proxy_ref,
key, value, prule, idmap, True)
setattr(instr, key, value)
except Exception as e:
tb = sys.exc_info()[2]
raise BuilderException(
prule.ctx, prule.line,
'{}: {}'.format(e.__class__.__name__, e), cause=tb)
#: Main instance of a :class:`BuilderBase`.
Builder = register_context('Builder', BuilderBase)
Builder.load_file(join(kivy_data_dir, 'style.kv'), rulesonly=True)
if 'KIVY_PROFILE_LANG' in environ:
import atexit
import cgi
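    # Usage sketch (assuming a typical entry point such as ``main.py``, which
    # is hypothetical here): run the app with ``KIVY_PROFILE_LANG=1`` set in
    # the environment; at interpreter exit, dump_builder_stats() below writes
    # ``builder_stats.html`` with a per-line hit count for each loaded KV file.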
def match_rule(fn, index, rule):
if rule.ctx.filename != fn:
return
for prop, prp in iteritems(rule.properties):
if prp.line != index:
continue
yield prp
for child in rule.children:
for r in match_rule(fn, index, child):
yield r
if rule.canvas_root:
for r in match_rule(fn, index, rule.canvas_root):
yield r
if rule.canvas_before:
for r in match_rule(fn, index, rule.canvas_before):
yield r
if rule.canvas_after:
for r in match_rule(fn, index, rule.canvas_after):
yield r
def dump_builder_stats():
html = [
'<!doctype html>'
'<html><body>',
'<style type="text/css">\n',
'pre { margin: 0; }\n',
'</style>']
files = set([x[1].ctx.filename for x in Builder.rules])
for fn in files:
try:
with open(fn) as f:
lines = f.readlines()
except (IOError, TypeError) as e:
continue
html += ['<h2>', fn, '</h2>', '<table>']
count = 0
for index, line in enumerate(lines):
line = line.rstrip()
line = cgi.escape(line)
matched_prp = []
for psn, rule in Builder.rules:
matched_prp += list(match_rule(fn, index, rule))
count = sum(set([x.count for x in matched_prp]))
color = (255, 155, 155) if count else (255, 255, 255)
html += ['<tr style="background-color: rgb{}">'.format(color),
'<td>', str(index + 1), '</td>',
'<td>', str(count), '</td>',
'<td><pre>', line, '</pre></td>',
'</tr>']
html += ['</table>']
html += ['</body></html>']
with open('builder_stats.html', 'w') as fd:
fd.write(''.join(html))
print('Profiling written at builder_stats.html')
atexit.register(dump_builder_stats)
|
|
import sys
import time
import copy as cp
import asyncio
import hashlib
from unittest import mock
import celery
import pytest
from waterbutler import tasks # noqa
from waterbutler.core import remote_logging
from waterbutler.core import utils as core_utils
from waterbutler.core.path import WaterButlerPath
import tests.utils as test_utils
# Hack to get the module, not the function
copy = sys.modules['waterbutler.tasks.copy']
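# (Illustrative note: importing ``copy`` from ``waterbutler.tasks`` directly
# could resolve to the ``copy`` task callable rather than the
# ``waterbutler.tasks.copy`` module whose attributes are monkeypatched below.)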
FAKE_TIME = 1454684930.0
@pytest.fixture(autouse=True)
def patch_backend(monkeypatch):
monkeypatch.setattr(copy.core.app, 'backend', None)
@pytest.fixture(autouse=True)
def callback(monkeypatch):
mock_request = test_utils.MockCoroutine(
return_value=mock.Mock(
status=200,
read=test_utils.MockCoroutine(
return_value=b'meowmeowmeow'
)
)
)
monkeypatch.setattr(copy.utils, 'send_signed_request', mock_request)
return mock_request
@pytest.fixture
def mock_time(monkeypatch):
mock_time = mock.Mock(return_value=FAKE_TIME)
monkeypatch.setattr(time, 'time', mock_time)
@pytest.fixture
def src_path():
return WaterButlerPath('/user/bin/python')
@pytest.fixture
def dest_path():
return WaterButlerPath('/usr/bin/golang')
@pytest.fixture(scope='function')
def src_provider():
p = test_utils.MockProvider()
p.copy.return_value = (test_utils.MockFileMetadata(), True)
p.auth['callback_url'] = 'src_callback'
return p
@pytest.fixture(scope='function')
def dest_provider():
p = test_utils.MockProvider()
p.copy.return_value = (test_utils.MockFileMetadata(), True)
p.auth['callback_url'] = 'dest_callback'
return p
@pytest.fixture(scope='function')
def providers(monkeypatch, src_provider, dest_provider):
def make_provider(name=None, **kwargs):
if name == 'src':
return src_provider
if name == 'dest':
return dest_provider
raise ValueError('Unexpected provider')
monkeypatch.setattr(copy.utils, 'make_provider', make_provider)
return src_provider, dest_provider
@pytest.fixture(autouse=True)
def log_to_keen(monkeypatch):
mock_log_to_keen = test_utils.MockCoroutine()
monkeypatch.setattr(remote_logging, 'log_to_keen', mock_log_to_keen)
return mock_log_to_keen
@pytest.fixture
def src_bundle(src_path):
return {
'nid': 'mst3k',
'path': src_path,
'provider': {
'name': 'src',
'auth': {
'callback_url': '',
},
'settings': {},
'credentials': {},
}
}
@pytest.fixture
def dest_bundle(dest_path):
return {
'nid': 'fbi4u',
'path': dest_path,
'provider': {
'name': 'dest',
'auth': {
'callback_url': '',
},
'settings': {},
'credentials': {},
}
}
@pytest.fixture
def bundles(src_bundle, dest_bundle):
return src_bundle, dest_bundle
class TestCopyTask:
def test_copy_calls_copy(self, event_loop, providers, bundles):
src, dest = providers
src_bundle, dest_bundle = bundles
copy.copy(cp.deepcopy(src_bundle), cp.deepcopy(dest_bundle))
assert src.copy.called
src.copy.assert_called_once_with(dest, src_bundle['path'], dest_bundle['path'])
def test_is_task(self):
assert callable(copy.copy)
assert isinstance(copy.copy, celery.Task)
assert not asyncio.iscoroutine(copy.copy)
assert asyncio.iscoroutinefunction(copy.copy.adelay)
def test_imputes_exceptions(self, event_loop, providers, bundles, callback):
src, dest = providers
src_bundle, dest_bundle = bundles
src.copy.side_effect = Exception('This is a string')
with pytest.raises(Exception):
copy.copy(cp.deepcopy(src_bundle), cp.deepcopy(dest_bundle))
(method, url, data), _ = callback.call_args_list[0]
assert src.copy.called
src.copy.assert_called_once_with(dest, src_bundle['path'], dest_bundle['path'])
assert url == 'dest_callback'
assert method == 'PUT'
assert data['errors'] == ["Exception('This is a string',)"]
def test_return_values(self, event_loop, providers, bundles, callback, src_path, dest_path, mock_time):
src, dest = providers
src_bundle, dest_bundle = bundles
metadata = test_utils.MockFileMetadata()
src.copy.return_value = (metadata, False)
ret1, ret2 = copy.copy(cp.deepcopy(src_bundle), cp.deepcopy(dest_bundle))
assert (ret1, ret2) == (metadata, False)
(method, url, data), _ = callback.call_args_list[0]
assert method == 'PUT'
assert url == 'dest_callback'
assert data['action'] == 'copy'
assert data['auth'] == {'callback_url': 'dest_callback'}
        assert data['email'] is False
assert data['errors'] == []
assert data['time'] == FAKE_TIME + 60
assert data['source'] == {
'nid': 'mst3k',
'resource': 'mst3k',
'path': '/' + src_path.raw_path,
'name': src_path.name,
'materialized': str(src_path),
'provider': src.NAME,
'kind': 'file',
'extra': {},
}
assert data['destination'] == {
'nid': 'fbi4u',
'resource': 'fbi4u',
'path': metadata.path,
'name': metadata.name,
'materialized': metadata.path,
'provider': dest.NAME,
'kind': 'file',
'contentType': metadata.content_type,
'etag': hashlib.sha256(
'{}::{}'.format(metadata.provider, metadata.etag)
.encode('utf-8')
).hexdigest(),
'extra': metadata.extra,
'modified': metadata.modified,
'modified_utc': metadata.modified_utc,
'created_utc': metadata.created_utc,
'size': metadata.size,
'sizeInt': metadata.size_as_int,
}
def test_starttime_override(self, event_loop, providers, bundles, callback, mock_time):
src, dest = providers
src_bundle, dest_bundle = bundles
stamp = FAKE_TIME
copy.copy(cp.deepcopy(src_bundle), cp.deepcopy(dest_bundle), start_time=stamp-100)
copy.copy(cp.deepcopy(src_bundle), cp.deepcopy(dest_bundle), start_time=stamp+100)
(_, _, data), _ = callback.call_args_list[0]
assert data['email'] is True
assert data['time'] == 60 + stamp
(_, _, data), _ = callback.call_args_list[1]
assert data['email'] is False
assert data['time'] == 60 + stamp
|
|
# -*- coding: utf-8 -*-
# Author: Tamas Marton
import sys
import time
import kivy
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.logger import Logger
from kivy.properties import ObjectProperty, ListProperty
from kivy.app import App
from kivy.uix.screenmanager import ScreenManager, Screen, SlideTransition
from kivy.adapters.dictadapter import DictAdapter
from kivy.adapters.simplelistadapter import SimpleListAdapter
from kivy.uix.listview import ListItemButton, ListItemLabel, CompositeListItem, ListView
from kivy.uix.popup import Popup
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.image import Image
'''Workaround for the OS X Mavericks locale bug'''
import platform
if platform.system() == "Darwin":
import locale
lc = locale.getlocale()
    if lc[0] is None and lc[1] is None:
locale.setlocale(locale.LC_ALL, 'en_US')
class TasksScreen(Screen):
pass
class ExecutingScreen(Screen):
pass
class NoTaskSelectedDialog(Popup):
pass
class MyButton(Button):
pass
class GUI(BoxLayout):
text_ipToConnect = ObjectProperty(None)
label_Connected = ObjectProperty(None)
tabbed_Panel = ObjectProperty(None)
panelHeader_Connect = ObjectProperty(None)
panelHeader_Tasks = ObjectProperty(None)
panelHeader_Results = ObjectProperty(None)
def __init__(self, **kwargs):
super(GUI,self).__init__(**kwargs)
Logger.info('GUI: Main GUI created')
self.screenManager.transition = SlideTransition(direction="left")
self.listOfTask = self.taskToBeExecute
self.controller = App.get_running_app()
def getAdapter(self):
task_args_converter = lambda row_index, rec: {
'orientation': 'vertical',
'text': rec['name'],
'size_hint_y': None,
'height': '150sp',
'spacing': 0,
            'cls_dicts': [{'cls': ListItemButton,
                           'kwargs': {'text': rec['name'],
                                      'is_representing_cls': True,
                                      'size_hint_y': 0.2,
                                      'markup': True,
                                      'deselected_color': [1., 1., 0., 1],
                                      'selected_color': [0., 1., 0., 1]}},
                          {'cls': ListItemLabel,
                           'kwargs': {'text': rec['desc'],
                                      'size_hint_y': 0.8,
                                      'markup': True}}]}
tdata = App.get_running_app().model.getTasksListOfDict()
item_strings = ["{0}".format(index) for index in range(len(tdata))]
tasks_dict_adapter = DictAdapter(
sorted_keys=item_strings,
data=tdata,
            args_converter=task_args_converter,
selection_mode='multiple',
cls=CompositeListItem)
return tasks_dict_adapter
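    # Note (illustrative, based on the adapter built above): DictAdapter calls
    # task_args_converter(row_index, record) once per key in sorted_keys and
    # uses the returned dict as constructor kwargs for a CompositeListItem, so
    # each task is rendered as a selectable button plus a description label.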
def close(self):
sys.exit(0)
def connect(self, ip):
        self.connectpopup = Popup(title='Connecting', size_hint=(None, None),
                                  height=60, width=350, auto_dismiss=True)
Logger.info('SOCKET: Connecting to '+ip)
self.controller.SERVERIP = ip
self.connectpopup.open()
self.controller.communication.connectToServer()
self.screenManager.current = 'tasks_selection'
def switchToTab(self, name):
if self.tabbed_Panel.current_tab.text != name:
if name == "Connect":
self.tabbed_Panel.switch_to(self.panelHeader_Connect)
elif name == "Tasks":
self.listOfTask.adapter = self.getAdapter()
self.tabbed_Panel.switch_to(self.panelHeader_Tasks)
elif name == "Results":
self.tabbed_Panel.switch_to(self.panelHeader_Results)
else:
Logger.error('switchToTab: Invalid PanelHeader name received: '+name)
def executeTasks(self):
if self.controller.STATE == "IDLE":
selected_task_list = []
for i in self.listOfTask.adapter.selection:
selected_task_list.append(i.text)
if len(selected_task_list) != 0:
self.controller.STATE = "RUNNING"
self.controller.sendTasksToServer(selected_task_list)
self.progressbar_ExecutingScreen.max = len(selected_task_list)
self.screenManager.current = self.screenManager.next()
else:
                p = NoTaskSelectedDialog()
p.open()
def getResults(self):
        temp_text = ""
for value in self.controller.getResults().itervalues():
temp_text+=(value['name']+
'''
-------------------------------------------
**Result:**
::
'''+value['result']+
'''
''')
            if value['image'] is not None:
temp_text+=(
'''
**System load during the task:**
.. image:: '''+value['image']+'''
''')
        self.rst_result.text = temp_text
self.screenManager.current = 'tasks_selection'
self.switchToTab("Results")
def setConnectionStatus(self,connected):
if connected:
self.label_Connected.text = "Status: [color=#00ff00][b]Connected[/b][/color]"
else:
self.label_Connected.text = "Status: [color=#ff0000][b]Disconnected[/b][/color]"
def updateExecutionStatus(self, task):
        if task is not None:
self.textinput_Log.text = ''
temptext = "[size=24][color=#36acd8][b]Current Task[b]: [/size][/color][size=18]"+str(task.NAME)+"[/size]\n"
temptext +="[size=24][color=#36acd8][b]Description[b]: [/size][/color][size=18]\n"+str(task.DESCRIPTION)+"[/size]"
self.label_RunningTask.text = temptext
def updateProgressBar(self, percent):
        self.progressbar_ExecutingScreen.value = percent
def goBackButtonHandler(self):
        self.controller.STATE = "IDLE"
self.switchToTab("Tasks")
def sendToLog(self, message):
self.textinput_Log.text += message+"\n"
|
|
"""
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import traceback
import inspect
import pickle
import pkgutil
import struct
import numpy as np
from scipy import sparse
from sklearn.externals.six import PY3
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import meta_estimators
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import check_skip_travis
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin)
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import (load_iris, load_boston, make_blobs,
make_classification)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.svm.base import BaseLibSVM
from sklearn.cross_validation import train_test_split
from sklearn.utils.validation import DataConversionWarning
dont_test = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer',
'TfidfTransformer', 'IsotonicRegression', 'OneHotEncoder',
'RandomTreesEmbedding', 'FeatureHasher', 'DummyClassifier',
'DummyRegressor', 'TruncatedSVD', 'PolynomialFeatures']
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D.
    # Convert y into a 2-D array for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
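# Illustration (hypothetical shapes): for name='MultiTaskLasso' and a y of
# shape (30,), the function above returns an array of shape (30, 1); for any
# other estimator name, y is returned unchanged.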
def test_all_estimators():
# Test that estimators are default-constructible, clonable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
if name in dont_test:
continue
yield check_parameters_default_constructible, name, Estimator
def check_parameters_default_constructible(name, Estimator):
classifier = LDA()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in meta_estimators:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(isinstance(estimator.set_params(), Estimator))
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
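        # For example (hypothetical, illustration only): an __init__ doing
        # ``self.alpha = alpha * 2`` would fail this check, since
        # get_params()['alpha'] would no longer equal the signature default.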
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
args, varargs, kws, defaults = inspect.getargspec(init)
except TypeError:
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in meta_estimators:
# they need a non-default argument
args = args[2:]
else:
args = args[1:]
if args:
# non-empty list
assert_equal(len(args), len(defaults))
else:
return
for arg, default in zip(args, defaults):
if arg not in params.keys():
# deprecated parameter, not in get_params
assert_true(default is None)
continue
if isinstance(params[arg], np.ndarray):
assert_array_equal(params[arg], default)
else:
assert_equal(params[arg], default)
def test_all_estimator_no_base_class():
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_estimators_sparse_data():
# All estimators should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
estimators = all_estimators()
estimators = [(name, Estimator) for name, Estimator in estimators
if issubclass(Estimator, (ClassifierMixin, RegressorMixin))]
for name, Estimator in estimators:
if name in dont_test:
continue
yield check_regressors_classifiers_sparse_data, name, Estimator, X, y
def check_regressors_classifiers_sparse_data(name, Estimator, X, y):
# catch deprecation warnings
with warnings.catch_warnings():
estimator = Estimator()
# fit and predict
try:
estimator.fit(X, y)
estimator.predict(X)
if hasattr(estimator, 'predict_proba'):
estimator.predict_proba(X)
except TypeError as e:
        if 'sparse' not in repr(e):
            print("Estimator %s doesn't seem to fail gracefully on "
                  "sparse data: if sparse input is not supported, the error "
                  "message should state so explicitly." % name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def test_transformers():
# test if transformers do something sensible on training set
# also test all shapes / shape errors
transformers = all_estimators(type_filter='transformer')
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
for name, Transformer in transformers:
if name in dont_test:
continue
# these don't actually fit the data:
if name in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer']:
continue
yield check_transformer, name, Transformer, X, y
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = X.shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
if hasattr(transformer, 'compute_importances'):
transformer.compute_importances = True
if name == 'SelectKBest':
        # SelectKBest has a default of k=10,
        # which is more features than we have.
transformer.k = 1
elif name in ['GaussianRandomProjection',
'SparseRandomProjection']:
# Due to the jl lemma and very few samples, the number
# of components of the random matrix projection will be greater
# than the number of features.
# So we impose a smaller number (avoid "auto" mode)
transformer.n_components = 1
elif name == "MiniBatchDictionaryLearning":
transformer.set_params(n_iter=5) # default = 1000
elif name == "KernelPCA":
transformer.remove_zero_eig = False
# fit
if name in ('PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD'):
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in ('PLSCanonical', 'PLSRegression', 'CCA',
'PLSSVD'):
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
# raises error on malformed input for transform
assert_raises(ValueError, transformer.transform, X.T)
def test_transformers_sparse_data():
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
estimators = all_estimators(type_filter='transformer')
for name, Transformer in estimators:
if name in dont_test:
continue
yield check_transformer_sparse_data, name, Transformer, X, y
def check_transformer_sparse_data(name, Transformer, X, y):
# catch deprecation warnings
with warnings.catch_warnings(record=True):
if name in ['Scaler', 'StandardScaler']:
transformer = Transformer(with_mean=False)
elif name in ['GaussianRandomProjection',
'SparseRandomProjection']:
# Due to the jl lemma and very few samples, the number
# of components of the random matrix projection will be greater
# than the number of features.
# So we impose a smaller number (avoid "auto" mode)
transformer = Transformer(n_components=np.int(X.shape[1] / 4))
else:
transformer = Transformer()
# fit
try:
transformer.fit(X, y)
except TypeError as e:
        if 'sparse' not in repr(e):
            print("Estimator %s doesn't seem to fail gracefully on "
                  "sparse data: if sparse input is not supported, the error "
                  "message should state so explicitly." % name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def test_estimators_nan_inf():
# Test that all estimators check their input for NaN's and infs
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
estimators = all_estimators()
estimators = [(name, E) for name, E in estimators
if (issubclass(E, ClassifierMixin) or
issubclass(E, RegressorMixin) or
issubclass(E, TransformerMixin) or
issubclass(E, ClusterMixin))]
for X_train in [X_train_nan, X_train_inf]:
for name, Estimator in estimators:
if name in dont_test:
continue
if name in ('PLSCanonical', 'PLSRegression', 'CCA',
'PLSSVD', 'Imputer'): # Imputer accepts nan
continue
yield (check_estimators_nan_inf, name, Estimator, X_train,
X_train_finite,
multioutput_estimator_convert_y_2d(name, y))
def check_estimators_nan_inf(name, Estimator, X_train, X_train_finite, y):
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
if name in ['GaussianRandomProjection',
'SparseRandomProjection']:
# Due to the jl lemma and very few samples, the number
# of components of the random matrix projection will be
# greater
# than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator = Estimator(n_components=1)
elif name == "SelectKBest":
estimator = Estimator(k=1)
set_random_state(estimator, 1)
# try to fit
try:
if issubclass(Estimator, ClusterMixin):
estimator.fit(X_train)
else:
estimator.fit(X_train, y)
except ValueError as e:
            if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
if issubclass(Estimator, ClusterMixin):
            # All estimators except clustering algorithms
            # support fitting with (optional) y
estimator.fit(X_train_finite)
else:
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
                if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
                if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def test_transformers_pickle():
# test if transformers do something sensible on training set
# also test all shapes / shape errors
transformers = all_estimators(type_filter='transformer')
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
n_samples, n_features = X.shape
X = StandardScaler().fit_transform(X)
X -= X.min()
for name, Transformer in transformers:
if name in dont_test:
continue
yield check_transformer_pickle, name, Transformer, X, y
def check_transformer_pickle(name, Transformer, X, y):
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
if not hasattr(transformer, 'transform'):
return
set_random_state(transformer)
if hasattr(transformer, 'compute_importances'):
transformer.compute_importances = True
if name == "SelectKBest":
        # SelectKBest has a default of k=10,
        # which is more features than we have.
transformer.k = 1
elif name in ['GaussianRandomProjection', 'SparseRandomProjection']:
# Due to the jl lemma and very few samples, the number
# of components of the random matrix projection will be greater
# than the number of features.
# So we impose a smaller number (avoid "auto" mode)
transformer.n_components = 1
# fit
if name in ('PLSCanonical', 'PLSRegression', 'CCA',
'PLSSVD'):
random_state = np.random.RandomState(seed=12345)
y_ = np.vstack([y, 2 * y + random_state.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit(X, y_).transform(X)
pickled_transformer = pickle.dumps(transformer)
unpickled_transformer = pickle.loads(pickled_transformer)
pickled_X_pred = unpickled_transformer.transform(X)
assert_array_almost_equal(pickled_X_pred, X_pred)
def test_classifiers_one_label():
# test classifiers trained on a single label always return this label
    # or raise a sensible error message
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
classifiers = all_estimators(type_filter='classifier')
for name, Classifier in classifiers:
if name in dont_test:
continue
yield check_classifiers_one_label, name, Classifier, X_train, X_test, y
def check_classifiers_one_label(name, Classifier, X_train, X_test, y):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
            if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def test_clustering():
# test if clustering algorithms do something sensible
# also test all shapes / shape errors
clustering = all_estimators(type_filter='cluster')
iris = load_iris()
X, y = iris.data, iris.target
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
for name, Alg in clustering:
if name in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
continue
yield check_clustering, name, Alg, X, y
def check_clustering(name, Alg, X, y):
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
# fit
alg.fit(X)
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def test_classifiers_train():
# test if classifiers do something sensible on training set
# also test all shapes / shape errors
classifiers = all_estimators(type_filter='classifier')
X_m, y_m = make_blobs(random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# do it once with binary, once with multiclass
for name, Classifier in classifiers:
if name in dont_test:
continue
if name in ['MultinomialNB', 'BernoulliNB']:
# TODO also test these!
continue
yield check_classifiers_train, name, Classifier, X, y
def check_classifiers_train(name, Classifier, X, y):
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
assert_greater(accuracy_score(y, y_pred), 0.85)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict:
decision = classifier.decision_function(X)
            if n_classes == 2:
assert_equal(decision.ravel().shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
            if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict:
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1), np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def test_classifiers_classes():
# test if classifiers can cope with non-consecutive classes
classifiers = all_estimators(type_filter='classifier')
iris = load_iris()
X, y = iris.data, iris.target
X, y = shuffle(X, y, random_state=1)
X = StandardScaler().fit_transform(X)
y_names = iris.target_names[y]
for y_names in [y_names, y_names.astype('O')]:
for name, Classifier in classifiers:
if name in dont_test:
continue
if name in ['MultinomialNB', 'BernoulliNB']:
# TODO also test these!
continue
yield check_classifiers_classes, name, Classifier, X, y, y_names
def check_classifiers_classes(name, Classifier, X, y, y_names):
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
# fit
try:
classifier.fit(X, y_)
except Exception as e:
print(e)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
accuracy = accuracy_score(y_, y_pred)
assert_greater(accuracy, 0.78,
"accuracy %f of %s not greater than 0.78"
% (accuracy, name))
#assert_array_equal(
#clf.classes_, classes,
#"Unexpected classes_ attribute for %r" % clf)
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def test_classifiers_input_shapes():
# test if classifiers can cope with y.shape = (n_samples, 1)
classifiers = all_estimators(type_filter='classifier')
iris = load_iris()
X, y = iris.data, iris.target
X, y = shuffle(X, y, random_state=1)
X = StandardScaler().fit_transform(X)
for name, Classifier in classifiers:
if name in dont_test:
continue
if name in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
continue
if name in ["DecisionTreeClassifier", "ExtraTreeClassifier"]:
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
continue
yield check_classifiers_input_shapes, name, Classifier, X, y
def check_classifiers_input_shapes(name, Classifier, X, y):
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_random_state(classifier)
# fit
classifier.fit(X, y)
y_pred = classifier.predict(X)
set_random_state(classifier)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
classifier.fit(X, y[:, np.newaxis])
assert_equal(len(w), 1)
assert_array_equal(y_pred, classifier.predict(X))
def test_classifiers_pickle():
# test if classifiers do something sensible on training set
# also test all shapes / shape errors
classifiers = all_estimators(type_filter='classifier')
X_m, y_m = make_blobs(random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# do it once with binary, once with multiclass
n_samples, n_features = X.shape
for name, Classifier in classifiers:
if name in dont_test:
continue
if name in ['MultinomialNB', 'BernoulliNB']:
# TODO also test these!
continue
yield check_classifiers_pickle, name, Classifier, X, y
def check_classifiers_pickle(name, Classifier, X, y):
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
y_pred = classifier.predict(X)
pickled_classifier = pickle.dumps(classifier)
unpickled_classifier = pickle.loads(pickled_classifier)
pickled_y_pred = unpickled_classifier.predict(X)
assert_array_almost_equal(pickled_y_pred, y_pred)
BOSTON = None
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def test_regressors_int():
# test if regressors can cope with integer labels (by converting them to
# float)
regressors = all_estimators(type_filter='regressor')
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
for name, Regressor in regressors:
        if name in dont_test or name in ('CCA',):
            continue
        elif name in ('OrthogonalMatchingPursuitCV',):
# FIXME: This test is unstable on Travis, see issue #3190.
check_skip_travis()
yield (check_regressors_int, name, Regressor, X,
multioutput_estimator_convert_y_2d(name, y))
def check_regressors_int(name, Regressor, X, y):
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in ('_PLS', 'PLSCanonical', 'PLSRegression'):
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def test_regressors_train():
regressors = all_estimators(type_filter='regressor')
# TODO: test with intercept
# TODO: test with multiple responses
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
for name, Regressor in regressors:
if name in dont_test:
continue
yield (check_regressors_train, name, Regressor, X,
multioutput_estimator_convert_y_2d(name, y))
def check_regressors_train(name, Regressor, X, y):
if name == 'OrthogonalMatchingPursuitCV':
# FIXME: This test is unstable on Travis, see issue #3190.
check_skip_travis()
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in ('PLSCanonical', 'PLSRegression', 'CCA'):
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.predict(X)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
assert_greater(regressor.score(X, y_), 0.5)
def test_regressor_pickle():
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
regressors = all_estimators(type_filter='regressor')
X, y = _boston_subset()
# TODO: test with intercept
# TODO: test with multiple responses
y = StandardScaler().fit_transform(y) # X is already scaled
for name, Regressor in regressors:
if name in dont_test:
continue
yield (check_regressors_pickle, name, Regressor, X,
multioutput_estimator_convert_y_2d(name, y))
def check_regressors_pickle(name, Regressor, X, y):
if name == 'OrthogonalMatchingPursuitCV':
# FIXME: This test is unstable on Travis, see issue #3190.
check_skip_travis()
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name in ('PLSCanonical', 'PLSRegression', 'CCA'):
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
regressor.fit(X, y_)
y_pred = regressor.predict(X)
# store old predictions
pickled_regressor = pickle.dumps(regressor)
unpickled_regressor = pickle.loads(pickled_regressor)
pickled_y_pred = unpickled_regressor.predict(X)
assert_array_almost_equal(pickled_y_pred, y_pred)
def test_configure():
# Smoke test the 'configure' step of setup, this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_classifiers():
# test that class_weight works and that the semantics are consistent
classifiers = all_estimators(type_filter='classifier')
with warnings.catch_warnings(record=True):
classifiers = [c for c in classifiers
if 'class_weight' in c[1]().get_params().keys()]
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
for name, Classifier in classifiers:
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
continue
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
continue
yield (check_class_weight_classifiers, name, Classifier, X_train,
y_train, X_test, y_test)
def check_class_weight_classifiers(name, Classifier, X_train, y_train, X_test,
y_test):
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.9)
def test_class_weight_auto_classifies():
"""Test that class_weight="auto" improves f1-score"""
# This test is broken; its success depends on:
# * a rare fortuitous RNG seed for make_classification; and
# * the use of binary F1 over a seemingly arbitrary positive class for two
# datasets, and weighted average F1 for the third.
# Its expectations need to be clarified and reimplemented.
raise SkipTest('This test requires redefinition')
classifiers = all_estimators(type_filter='classifier')
with warnings.catch_warnings(record=True):
classifiers = [c for c in classifiers
if 'class_weight' in c[1]().get_params().keys()]
for n_classes, weights in zip([2, 3], [[.8, .2], [.8, .1, .1]]):
# create unbalanced dataset
X, y = make_classification(n_classes=n_classes, n_samples=200,
n_features=10, weights=weights,
random_state=0, n_informative=n_classes)
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
for name, Classifier in classifiers:
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
continue
if name.startswith("RidgeClassifier"):
                # RidgeClassifier behaves unexpectedly
# FIXME!
continue
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
continue
yield (check_class_weight_auto_classifiers, name, Classifier,
X_train, y_train, X_test, y_test, weights)
def check_class_weight_auto_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='auto')
classifier.fit(X_train, y_train)
y_pred_auto = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_auto),
f1_score(y_test, y_pred))
def test_estimators_overwrite_params():
    # test whether any estimator overwrites its init parameters during fit
for est_type in ["classifier", "regressor", "transformer"]:
estimators = all_estimators(type_filter=est_type)
X, y = make_blobs(random_state=0, n_samples=9)
# some want non-negative input
X -= X.min()
for name, Estimator in estimators:
if (name in dont_test
or name in ['CCA', '_CCA', 'PLSCanonical',
'PLSRegression',
'PLSSVD', 'GaussianProcess']):
# FIXME!
# in particular GaussianProcess!
continue
yield (check_estimators_overwrite_params, name, Estimator, X,
multioutput_estimator_convert_y_2d(name, y))
def check_estimators_overwrite_params(name, Estimator, X, y):
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
if hasattr(estimator, 'batch_size'):
# FIXME
# for MiniBatchDictLearning
estimator.batch_size = 1
if name in ['GaussianRandomProjection',
'SparseRandomProjection']:
# Due to the jl lemma and very few samples, the number
# of components of the random matrix projection will be
# greater
# than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator = Estimator(n_components=1)
elif name == "SelectKBest":
estimator = Estimator(k=1)
set_random_state(estimator)
params = estimator.get_params()
estimator.fit(X, y)
new_params = estimator.get_params()
for k, v in params.items():
assert_false(np.any(new_params[k] != v),
"Estimator %s changes its parameter %s"
" from %s to %s during fit."
% (name, k, v, new_params[k]))
def test_cluster_overwrite_params():
    # test whether any clustering algorithm overwrites its init parameters
    # during fit
clusterers = all_estimators(type_filter="cluster")
X, y = make_blobs(random_state=0, n_samples=9)
# some want non-negative input
    X -= X.min()
for name, Clustering in clusterers:
yield check_cluster_overwrite_params, name, Clustering, X, y
def check_cluster_overwrite_params(name, Clustering, X, y):
with warnings.catch_warnings(record=True):
# catch deprecation warnings
clustering = Clustering()
params = clustering.get_params()
clustering.fit(X)
new_params = clustering.get_params()
for k, v in params.items():
assert_false(np.any(new_params[k] != v),
"Estimator %s changes its parameter %s"
" from %s to %s during fit."
% (name, k, v, new_params[k]))
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_sparsify_estimators():
"""Test if predict with sparsified estimators works.
Tests regression, binary classification, and multi-class classification.
"""
estimators = all_estimators()
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
# test regression and binary classification
for name, Estimator in estimators:
try:
Estimator.sparsify
        except AttributeError:
continue
yield check_sparsify_binary_classifier, name, Estimator, X, y
# test multiclass classification
classifiers = all_estimators(type_filter='classifier')
y[-1] = 3 # make multi-class
for name, Classifier in classifiers:
try:
Classifier.sparsify
        except AttributeError:
continue
yield check_sparsify_multiclass_classifier, name, Classifier, X, y
def check_sparsify_multiclass_classifier(name, Classifier, X, y):
est = Classifier()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_sparsify_binary_classifier(name, Estimator, X, y):
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_estimators_not_an_array(name, Estimator, X, y):
if name in ('CCA', '_PLS', 'PLSCanonical', 'PLSRegression'):
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Estimator()
regressor_2 = Estimator()
set_random_state(regressor_1)
set_random_state(regressor_2)
y_ = NotAnArray(np.asarray(y))
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y)
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def test_regressors_not_an_array():
regressors = all_estimators(type_filter='regressor')
X, y = _boston_subset(n_samples=50)
X = StandardScaler().fit_transform(X)
for name, Regressor in regressors:
if name in dont_test:
continue
yield (check_estimators_not_an_array, name, Regressor, X,
multioutput_estimator_convert_y_2d(name, y))
def test_classifiers_not_an_array():
classifiers = all_estimators(type_filter='classifier')
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
for name, Classifier in classifiers:
if name in dont_test:
continue
yield (check_estimators_not_an_array, name, Classifier, X,
multioutput_estimator_convert_y_2d(name, y))
|
|
#!/usr/bin/env python
"""
Modelica AST definitions
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import copy
import json
from enum import Enum
from typing import List, Union, Dict
from collections import OrderedDict
class ClassNotFoundError(Exception):
pass
class ConstantSymbolNotFoundError(Exception):
pass
class FoundElementaryClassError(Exception):
pass
class Visibility(Enum):
PRIVATE = 0, 'private'
PROTECTED = 1, 'protected'
PUBLIC = 2, 'public'
def __new__(cls, value, name):
member = object.__new__(cls)
member._value_ = value
member.fullname = name
return member
def __int__(self):
return self.value
def __str__(self):
return self.fullname
def __lt__(self, other):
return self.value < other.value
nan = float('nan')
"""
AST Node Type Hierarchy
Root Class
Class
Equation
ComponentRef
Expression
Primary
IfEquation
Expression
Equation
ForEquation
Expression
Equation
ConnectClause
ComponentRef
Symbol
"""
class Node:
def __init__(self, **kwargs):
self.set_args(**kwargs)
def set_args(self, **kwargs):
for key in kwargs.keys():
if key not in self.__dict__.keys():
raise KeyError('{:s} not valid arg'.format(key))
self.__dict__[key] = kwargs[key]
def __repr__(self):
d = self.to_json(self)
d['_type'] = self.__class__.__name__
return json.dumps(d, indent=2, sort_keys=True)
@classmethod
def to_json(cls, var):
if isinstance(var, list):
res = [cls.to_json(item) for item in var]
elif isinstance(var, dict):
res = {key: cls.to_json(var[key]) for key in var.keys()}
elif isinstance(var, Node):
# Avoid infinite recursion by not handling attributes that may go
# back up in the tree again.
res = {key: cls.to_json(var.__dict__[key]) for key in var.__dict__.keys()
if key not in ('parent', 'scope', '__deepcopy__')}
elif isinstance(var, Visibility):
res = str(var)
else:
res = var
return res
__str__ = __repr__
class Primary(Node):
def __init__(self, **kwargs):
self.value = None # type: Union[bool, float, int, str, type(None)]
super().__init__(**kwargs)
def __str__(self):
return '{} value {}'.format(type(self).__name__, self.value)
class Array(Node):
def __init__(self, **kwargs):
self.values = [] # type: List[Union[Expression, Primary, ComponentRef, Array]]
super().__init__(**kwargs)
def __str__(self):
return '{} {}'.format(type(self).__name__, self.values)
class Slice(Node):
def __init__(self, **kwargs):
self.start = Primary(value=None) # type: Union[Expression, Primary, ComponentRef]
self.stop = Primary(value=None) # type: Union[Expression, Primary, ComponentRef]
self.step = Primary(value=1) # type: Union[Expression, Primary, ComponentRef]
super().__init__(**kwargs)
def __str__(self):
return '{} start: {}, stop: {}, step: {}'.format(
type(self).__name__, self.start, self.stop, self.step)
class ComponentRef(Node):
def __init__(self, **kwargs):
self.name = '' # type: str
self.indices = [[None]] # type: List[List[Union[Expression, Slice, Primary, ComponentRef]]]
self.child = [] # type: List[ComponentRef]
super().__init__(**kwargs)
def __str__(self) -> str:
return ".".join(self.to_tuple())
def to_tuple(self) -> tuple:
"""
        Convert the nested component reference to a flat tuple of names, which
        is hashable and can therefore be used as a dictionary key. Note that
        this function ignores any array indices in the component reference.
        :return: flattened tuple of the component's names
"""
if self.child:
return (self.name, ) + self.child[0].to_tuple()
else:
return self.name,
@classmethod
def from_tuple(cls, components: tuple) -> 'ComponentRef':
"""
Convert the tuple pointing to a component to
a component reference.
        :param components: tuple of component names
:return: ComponentRef
"""
component_ref = ComponentRef(name=components[0], child=[])
c = component_ref
for component in components[1:]:
c.child.append(ComponentRef(name=component, child=[]))
c = c.child[0]
return component_ref
@classmethod
def from_string(cls, s: str) -> 'ComponentRef':
"""
Convert the string pointing to a component using dot notation to
a component reference.
:param s: string pointing to component using dot notation
:return: ComponentRef
"""
components = s.split('.')
return cls.from_tuple(components)
@classmethod
def concatenate(cls, *args: List['ComponentRef']) -> 'ComponentRef':
"""
        Helper function to append component references to each other, e.g.
        a "within" component ref and an "object type" component ref.
        :return: New component reference, with the later arguments appended to the first.
"""
a = copy.deepcopy(args[0])
n = a
for b in args[1:]:
while n.child:
n = n.child[0]
b = copy.deepcopy(b) # Not strictly necessary
n.child = [b]
return a
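    # Usage sketch (illustrative only): the conversions above are inverses of
    # each other for dotted names, e.g.
    #
    #   ref = ComponentRef.from_string('a.b.c')
    #   ref.to_tuple()   # ('a', 'b', 'c')
    #   str(ref)         # 'a.b.c'
    #   str(ComponentRef.concatenate(
    #       ComponentRef.from_string('pkg'), ref))   # 'pkg.a.b.c'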
class Expression(Node):
def __init__(self, **kwargs):
self.operator = None # type: Union[str, ComponentRef]
self.operands = [] # type: List[Union[Expression, Primary, ComponentRef, Array, IfExpression]]
super().__init__(**kwargs)
class IfExpression(Node):
def __init__(self, **kwargs):
self.conditions = [] # type: List[Union[Expression, Primary, ComponentRef, Array, IfExpression]]
self.expressions = [] # type: List[Union[Expression, Primary, ComponentRef, Array, IfExpression]]
super().__init__(**kwargs)
class Equation(Node):
def __init__(self, **kwargs):
self.left = None # type: Union[Expression, Primary, ComponentRef, List[Union[Expression, Primary, ComponentRef]]]
self.right = None # type: Union[Expression, Primary, ComponentRef, List[Union[Expression, Primary, ComponentRef]]]
self.comment = '' # type: str
super().__init__(**kwargs)
class IfEquation(Node):
def __init__(self, **kwargs):
self.conditions = [] # type: List[Union[Expression, Primary, ComponentRef]]
self.blocks = [] # type: List[List[Union[Expression, ForEquation, ConnectClause, IfEquation]]]
self.comment = '' # type: str
super().__init__(**kwargs)
class WhenEquation(Node):
def __init__(self, **kwargs):
self.conditions = [] # type: List[Union[Expression, Primary, ComponentRef]]
self.blocks = [] # type: List[List[Union[Expression, ForEquation, ConnectClause, IfEquation]]]
self.comment = '' # type: str
super().__init__(**kwargs)
class ForIndex(Node):
def __init__(self, **kwargs):
self.name = '' # type: str
self.expression = None # type: Union[Expression, Primary, Slice]
super().__init__(**kwargs)
class ForEquation(Node):
def __init__(self, **kwargs):
self.indices = [] # type: List[ForIndex]
self.equations = [] # type: List[Union[Equation, ForEquation, ConnectClause]]
self.comment = None # type: str
super().__init__(**kwargs)
class ConnectClause(Node):
def __init__(self, **kwargs):
self.left = ComponentRef() # type: ComponentRef
self.right = ComponentRef() # type: ComponentRef
self.comment = '' # type: str
super().__init__(**kwargs)
class AssignmentStatement(Node):
def __init__(self, **kwargs):
self.left = [] # type: List[ComponentRef]
self.right = None # type: Union[Expression, IfExpression, Primary, ComponentRef]
self.comment = '' # type: str
super().__init__(**kwargs)
class IfStatement(Node):
def __init__(self, **kwargs):
self.conditions = [] # type: List[Union[Expression, Primary, ComponentRef]]
self.blocks = [] # type: List[List[Union[AssignmentStatement, IfStatement, ForStatement]]]
self.comment = '' # type: str
super().__init__(**kwargs)
class WhenStatement(Node):
def __init__(self, **kwargs):
self.conditions = [] # type: List[Union[Expression, Primary, ComponentRef]]
self.blocks = [] # type: List[List[Union[AssignmentStatement, IfStatement, ForStatement]]]
self.comment = '' # type: str
super().__init__(**kwargs)
class ForStatement(Node):
def __init__(self, **kwargs):
self.indices = [] # type: List[ForIndex]
self.statements = [] # type: List[Union[AssignmentStatement, IfStatement, ForStatement]]
self.comment = '' # type: str
super().__init__(**kwargs)
class Function(Node):
def __init__(self, **kwargs):
self.name = '' # type: str
self.arguments = [] # type: List[Union[Expression, Primary, ComponentRef, Array]]
self.comment = '' # type: str
super().__init__(**kwargs)
class Symbol(Node):
"""
A mathematical variable or state of the model
"""
ATTRIBUTES = ['value', 'min', 'max', 'start', 'fixed', 'nominal', 'unit']
def __init__(self, **kwargs):
self.name = '' # type: str
self.type = ComponentRef() # type: Union[ComponentRef, InstanceClass]
self.prefixes = [] # type: List[str]
self.redeclare = False # type: bool
self.final = False # type: bool
self.inner = False # type: bool
self.outer = False # type: bool
self.dimensions = [[Primary(value=None)]] # type: List[List[Union[Expression, Primary, ComponentRef]]]
self.comment = '' # type: str
        # Parameters' start value is 0 by default, per the Modelica spec
self.start = Primary(value=0) # type: Union[Expression, Primary, ComponentRef, Array]
self.min = Primary(value=None) # type: Union[Expression, Primary, ComponentRef, Array]
self.max = Primary(value=None) # type: Union[Expression, Primary, ComponentRef, Array]
self.nominal = Primary(value=None) # type: Union[Expression, Primary, ComponentRef, Array]
self.value = Primary(value=None) # type: Union[Expression, Primary, ComponentRef, Array]
self.fixed = Primary(value=False) # type: Primary
self.unit = Primary(value="") # type: Primary
self.id = 0 # type: int
self.order = 0 # type: int
self.visibility = Visibility.PRIVATE # type: Visibility
self.class_modification = None # type: ClassModification
super().__init__(**kwargs)
def __str__(self):
return '{} {}, Type "{}"'.format(type(self).__name__, self.name, self.type)
class ComponentClause(Node):
def __init__(self, **kwargs):
self.prefixes = [] # type: List[str]
self.type = ComponentRef() # type: ComponentRef
self.dimensions = [[Primary(value=None)]] # type: List[List[Union[Expression, Primary, ComponentRef]]]
self.comment = [] # type: List[str]
self.symbol_list = [] # type: List[Symbol]
super().__init__(**kwargs)
class EquationSection(Node):
def __init__(self, **kwargs):
self.initial = False # type: bool
self.equations = [] # type: List[Union[Equation, IfEquation, ForEquation, ConnectClause]]
super().__init__(**kwargs)
class AlgorithmSection(Node):
def __init__(self, **kwargs):
self.initial = False # type: bool
self.statements = [] # type: List[Union[AssignmentStatement, IfStatement, ForStatement]]
super().__init__(**kwargs)
class ImportAsClause(Node):
def __init__(self, **kwargs):
self.component = ComponentRef() # type: ComponentRef
self.name = '' # type: str
super().__init__(**kwargs)
class ImportFromClause(Node):
def __init__(self, **kwargs):
self.component = ComponentRef() # type: ComponentRef
self.symbols = [] # type: List[str]
super().__init__(**kwargs)
class ElementModification(Node):
def __init__(self, **kwargs):
self.component = ComponentRef() # type: Union[ComponentRef]
self.modifications = [] # type: List[Union[Primary, Expression, ClassModification, Array, ComponentRef]]
super().__init__(**kwargs)
class ShortClassDefinition(Node):
def __init__(self, **kwargs):
self.name = '' # type: str
self.type = '' # type: str
self.component = ComponentRef() # type: ComponentRef
self.class_modification = ClassModification() # type: ClassModification
super().__init__(**kwargs)
class ElementReplaceable(Node):
def __init__(self, **kwargs):
        # TODO: add fields?
super().__init__(**kwargs)
class ClassModification(Node):
def __init__(self, **kwargs):
self.arguments = [] # type: List[ClassModificationArgument]
super().__init__(**kwargs)
class ClassModificationArgument(Node):
def __init__(self, **kwargs):
self.value = [] # type: Union[ElementModification, ComponentClause, ShortClassDefinition]
self.scope = None # type: InstanceClass
self.redeclare = False
super().__init__(**kwargs)
def __deepcopy__(self, memo):
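        # Temporarily detach `scope` (a reference to an InstanceClass elsewhere
        # in the tree) and this bound __deepcopy__ method, so copy.deepcopy()
        # neither copies the entire referenced scope nor recurses back into
        # this method; both are restored below.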
_scope, _deepcp = self.scope, self.__deepcopy__
self.scope, self.__deepcopy__ = None, None
new = copy.deepcopy(self, memo)
self.scope, self.__deepcopy__ = _scope, _deepcp
new.scope, new.__deepcopy__ = _scope, _deepcp
return new
class ExtendsClause(Node):
def __init__(self, **kwargs):
self.component = None # type: ComponentRef
self.class_modification = None # type: ClassModification
self.visibility = Visibility.PRIVATE # type: Visibility
super().__init__(**kwargs)
class Class(Node):
def __init__(self, **kwargs):
self.name = None # type: str
self.imports = [] # type: List[Union[ImportAsClause, ImportFromClause]]
self.extends = [] # type: List[ExtendsClause]
self.encapsulated = False # type: bool
self.partial = False # type: bool
self.final = False # type: bool
self.type = '' # type: str
self.comment = '' # type: str
self.classes = OrderedDict() # type: OrderedDict[str, Class]
self.symbols = OrderedDict() # type: OrderedDict[str, Symbol]
self.functions = OrderedDict() # type: OrderedDict[str, Class]
self.initial_equations = [] # type: List[Union[Equation, ForEquation]]
self.equations = [] # type: List[Union[Equation, ForEquation, ConnectClause]]
self.initial_statements = [] # type: List[Union[AssignmentStatement, IfStatement, ForStatement]]
self.statements = [] # type: List[Union[AssignmentStatement, IfStatement, ForStatement]]
self.annotation = [] # type: Union[NoneType, ClassModification]
self.parent = None # type: Class
super().__init__(**kwargs)
def _find_class(self, component_ref: ComponentRef, search_parent=True) -> 'Class':
try:
if not component_ref.child:
return self.classes[component_ref.name]
else:
# Avoid infinite recursion by passing search_parent = False
return self.classes[component_ref.name]._find_class(component_ref.child[0], False)
except (KeyError, ClassNotFoundError):
if search_parent and self.parent is not None:
return self.parent._find_class(component_ref)
else:
raise ClassNotFoundError("Could not find class '{}'".format(component_ref))
def find_class(self, component_ref: ComponentRef, copy=True, check_builtin_classes=False) -> 'Class':
# TODO: Remove workaround for Modelica / Modelica.SIUnits
if component_ref.name in ["Real", "Integer", "String", "Boolean", "Modelica", "SI"]:
if check_builtin_classes:
type_ = component_ref.name
if component_ref.name in ["Modelica", "SI"]:
type_ = "Real"
c = Class(name=type_)
c.type = "__builtin"
c.parent = self.root
cref = ComponentRef(name=type_)
s = Symbol(name="__value", type=cref)
c.symbols[s.name] = s
return c
else:
raise FoundElementaryClassError()
c = self._find_class(component_ref)
if copy:
c = c.copy_including_children()
return c
def _find_constant_symbol(self, component_ref: ComponentRef, search_parent=True) -> Symbol:
if component_ref.child:
# Try classes first, and constant symbols second
t = component_ref.to_tuple()
try:
node = self._find_class(ComponentRef(name=t[0]), search_parent)
return node._find_constant_symbol(ComponentRef.from_tuple(t[1:]), False)
except ClassNotFoundError:
try:
s = self.symbols[t[0]]
except KeyError:
raise ConstantSymbolNotFoundError()
if 'constant' not in s.prefixes:
raise ConstantSymbolNotFoundError()
# Found a symbol. Continue lookup on type of this symbol.
if isinstance(s.type, InstanceClass):
return s.type._find_constant_symbol(ComponentRef.from_tuple(t[1:]), False)
elif isinstance(s.type, ComponentRef):
                    node = self._find_class(s.type) # Parent lookup is OK here.
return node._find_constant_symbol(ComponentRef.from_tuple(t[1:]), False)
else:
raise Exception("Unknown object type of symbol type: {}".format(type(s.type)))
else:
try:
return self.symbols[component_ref.name]
except KeyError:
raise ConstantSymbolNotFoundError()
def find_constant_symbol(self, component_ref: ComponentRef) -> Symbol:
return self._find_constant_symbol(component_ref)
def full_reference(self):
names = []
c = self
while True:
names.append(c.name)
if c.parent is None:
break
else:
c = c.parent
# Exclude the root node's name
return ComponentRef.from_tuple(tuple(reversed(names[:-1])))
def _extend(self, other: 'Class') -> None:
for class_name in other.classes.keys():
if class_name in self.classes.keys():
self.classes[class_name]._extend(other.classes[class_name])
else:
self.classes[class_name] = other.classes[class_name]
@property
def root(self):
if self.parent is None:
return self
else:
return self.parent.root
def copy_including_children(self):
return copy.deepcopy(self)
def add_class(self, c: 'Class') -> None:
"""
Add a (sub)class to this class.
:param c: (Sub)class to add.
"""
self.classes[c.name] = c
c.parent = self
def remove_class(self, c: 'Class') -> None:
"""
Removes a (sub)class from this class.
:param c: (Sub)class to remove.
"""
del self.classes[c.name]
c.parent = None
def add_symbol(self, s: Symbol) -> None:
"""
Add a symbol to this class.
:param s: Symbol to add.
"""
self.symbols[s.name] = s
def remove_symbol(self, s: Symbol) -> None:
"""
Removes a symbol from this class.
:param s: Symbol to remove.
"""
del self.symbols[s.name]
def add_equation(self, e: Equation) -> None:
"""
Add an equation to this class.
:param e: Equation to add.
"""
self.equations.append(e)
def remove_equation(self, e: Equation) -> None:
"""
Removes an equation from this class.
:param e: Equation to remove.
"""
self.equations.remove(e)
def __deepcopy__(self, memo):
# Avoid copying the entire tree
if self.parent is not None and self.parent not in memo:
memo[id(self.parent)] = self.parent
_deepcp = self.__deepcopy__
self.__deepcopy__ = None
new = copy.deepcopy(self, memo)
self.__deepcopy__ = _deepcp
new.__deepcopy__ = _deepcp
return new
def __str__(self):
return '{} {}, Type "{}"'.format(type(self).__name__, self.name, self.type)
class InstanceClass(Class):
"""
    Class used during instantiation/expansion of the model. Modifications on
symbols and extends clauses are shifted to the modification environment of
this InstanceClass.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.modification_environment = ClassModification()
class Tree(Class):
"""
The root class.
"""
def extend(self, other: 'Tree') -> None:
self._extend(other)
self.update_parent_refs()
def _update_parent_refs(self, parent: Class) -> None:
for c in parent.classes.values():
c.parent = parent
self._update_parent_refs(c)
def update_parent_refs(self) -> None:
self._update_parent_refs(self)
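# Minimal usage sketch (illustrative, not an exhaustive API demo): build a
# tiny tree by hand and look a class up again by reference.
#
#   root = Tree(name='root')
#   model = Class(name='M')
#   model.add_symbol(Symbol(name='x', type=ComponentRef(name='Real')))
#   root.add_class(model)
#   found = root.find_class(ComponentRef.from_string('M'), copy=False)
#   assert found is model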
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class DeploymentsOperations(object):
"""DeploymentsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def delete(
self, resource_group_name, deployment_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a deployment from the deployment history.
A template deployment that is currently running cannot be deleted.
Deleting a template deployment removes the associated deployment
operations. Deleting a template deployment does not affect the state of
the resource group. This is an asynchronous operation that returns a
status of 202 until the template deployment is successfully deleted.
The Location response header contains the URI that is used to obtain
the status of the process. While the process is running, a call to the
URI in the Location header returns a status of 202. When the process
finishes, the URI in the Location header returns a status of 204 on
success. If the asynchronous request failed, the URI in the Location
header returns an error-level status code.
:param resource_group_name: The name of the resource group with the
deployment to delete. The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment to delete.
:type deployment_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern='^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.delete(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
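    # Usage sketch (hypothetical caller code; `client` stands for a service
    # client object that exposes this operations class, e.g. as
    # `client.deployments`):
    #
    #   poller = client.deployments.delete('my-rg', 'my-deployment')
    #   poller.wait()   # block until the 202 -> 204 polling loop finishes
    #
    # The returned AzureOperationPoller also exposes result() and done() for
    # callers that prefer those.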
def check_existence(
self, resource_group_name, deployment_name, custom_headers=None, raw=False, **operation_config):
"""Checks whether the deployment exists.
:param resource_group_name: The name of the resource group with the
deployment to check. The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment to check.
:type deployment_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: bool
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern='^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.head(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [204, 404]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = (response.status_code == 204)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, deployment_name, properties=None, custom_headers=None, raw=False, **operation_config):
"""Deploys resources to a resource group.
You can provide the template and parameters directly in the request or
link to JSON files.
:param resource_group_name: The name of the resource group to deploy
the resources to. The name is case insensitive. The resource group
must already exist.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param properties: The deployment properties.
:type properties: :class:`DeploymentProperties
<azure.mgmt.resource.resources.models.DeploymentProperties>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DeploymentExtended
<azure.mgmt.resource.resources.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.Deployment(properties=properties)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern='^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'Deployment')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExtended', response)
if response.status_code == 201:
deserialized = self._deserialize('DeploymentExtended', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get(
self, resource_group_name, deployment_name, custom_headers=None, raw=False, **operation_config):
"""Gets a deployment.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment to get.
:type deployment_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`DeploymentExtended
<azure.mgmt.resource.resources.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern='^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExtended', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def cancel(
self, resource_group_name, deployment_name, custom_headers=None, raw=False, **operation_config):
"""Cancels a currently running template deployment.
You can cancel a deployment only if the provisioningState is Accepted
or Running. After the deployment is canceled, the provisioningState is
set to Canceled. Canceling a template deployment stops the currently
running template deployment and leaves the resource group partially
deployed.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment to cancel.
:type deployment_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern='^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def validate(
self, resource_group_name, deployment_name, properties=None, custom_headers=None, raw=False, **operation_config):
"""Validates whether the specified template is syntactically correct and
        will be accepted by Azure Resource Manager.
:param resource_group_name: The name of the resource group the
template will be deployed to. The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param properties: The deployment properties.
:type properties: :class:`DeploymentProperties
<azure.mgmt.resource.resources.models.DeploymentProperties>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`DeploymentValidateResult
<azure.mgmt.resource.resources.models.DeploymentValidateResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.Deployment(properties=properties)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/validate'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern='^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'Deployment')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 400]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DeploymentValidateResult', response)
if response.status_code == 400:
deserialized = self._deserialize('DeploymentValidateResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def export_template(
self, resource_group_name, deployment_name, custom_headers=None, raw=False, **operation_config):
"""Exports the template used for specified deployment.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment from which to get
the template.
:type deployment_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`DeploymentExportResult
<azure.mgmt.resource.resources.models.DeploymentExportResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern='^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExportResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list(
self, resource_group_name, filter=None, top=None, custom_headers=None, raw=False, **operation_config):
"""Get all the deployments for a resource group.
:param resource_group_name: The name of the resource group with the
deployments to get. The name is case insensitive.
:type resource_group_name: str
:param filter: The filter to apply on the operation. For example, you
can use $filter=provisioningState eq '{state}'.
:type filter: str
:param top: The number of results to get. If null is passed, returns
all deployments.
:type top: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`DeploymentExtendedPaged
<azure.mgmt.resource.resources.models.DeploymentExtendedPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.DeploymentExtendedPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DeploymentExtendedPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
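    # Usage sketch (hypothetical caller code): the paged object returned by
    # list() fetches further pages lazily via `internal_paging` as you
    # iterate, so no explicit next-link handling is needed:
    #
    #   for deployment in client.deployments.list('my-rg', top=10):
    #       print(deployment.name)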
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of the bapsflib package, a Python toolkit for the
# BaPSF group at UCLA.
#
# http://plasma.physics.ucla.edu/
#
# Copyright 2017-2018 Erik T. Everson and contributors
#
# License: Standard 3-clause BSD; see "LICENSES/LICENSE.txt" for full
# license terms and contributor agreement.
#
import io
import os
import unittest as ut
from unittest import mock
from bapsflib._hdf.maps import HDFMap
from ..file import File
from ..hdfoverview import HDFOverview
from . import TestBase, with_bf
class TestHDFOverview(TestBase):
"""
Test case for
:class:`~bapsflib._hdf.utils.hdfoverview.HDFOverview`.
"""
def setUp(self):
super().setUp()
# setup HDF5 file
self.f.add_module("SIS 3301") # digitizer
self.f.add_module("Waveform") # control
self.f.add_module("Discharge") # MSI diagnostic
self.f.create_group("Raw data + config/Unknown")
def tearDown(self):
super().tearDown()
@staticmethod
def create_overview(file):
return HDFOverview(file)
def test_not_file_obj(self):
"""Raise error if input is not bapsflib._hdf.utils.file.File"""
with self.assertRaises(ValueError):
self.create_overview(None)
@with_bf
def test_overview_basics(self, _bf: File):
_overview = self.create_overview(_bf)
# -- attribute existence ----
self.assertTrue(hasattr(_overview, "control_discovery"))
self.assertTrue(hasattr(_overview, "digitizer_discovery"))
self.assertTrue(hasattr(_overview, "msi_discovery"))
self.assertTrue(hasattr(_overview, "print"))
self.assertTrue(hasattr(_overview, "report_control_configs"))
self.assertTrue(hasattr(_overview, "report_controls"))
self.assertTrue(hasattr(_overview, "report_details"))
self.assertTrue(hasattr(_overview, "report_digitizer_configs"))
self.assertTrue(hasattr(_overview, "report_digitizers"))
self.assertTrue(hasattr(_overview, "report_discovery"))
self.assertTrue(hasattr(_overview, "report_general"))
self.assertTrue(hasattr(_overview, "report_msi"))
self.assertTrue(hasattr(_overview, "report_msi_configs"))
self.assertTrue(hasattr(_overview, "save"))
self.assertTrue(hasattr(_overview, "unknowns_discovery"))
@with_bf
@mock.patch("sys.stdout", new_callable=io.StringIO)
def test_discoveries(self, _bf: File, mock_stdout):
self.f.add_module("SIS crate")
_bf._map_file() # re-map file
_overview = self.create_overview(_bf)
# HDFOverview.control_discovery()
with mock.patch.object(
HDFMap,
"controls",
new_callable=mock.PropertyMock,
return_value=_bf.file_map.controls,
) as mock_dmap:
_overview.control_discovery()
self.assertNotEqual(mock_stdout.getvalue(), "")
self.assertTrue(mock_dmap.called)
# "flush" StringIO
mock_stdout.truncate(0)
mock_stdout.seek(0)
self.assertEqual(mock_stdout.getvalue(), "")
# HDFOverview.digitizer_discovery()
with mock.patch.object(
HDFMap,
"digitizers",
new_callable=mock.PropertyMock,
return_value=_bf.file_map.digitizers,
) as mock_dmap:
_overview.digitizer_discovery()
self.assertNotEqual(mock_stdout.getvalue(), "")
self.assertTrue(mock_dmap.called)
# "flush" StringIO
mock_stdout.truncate(0)
mock_stdout.seek(0)
self.assertEqual(mock_stdout.getvalue(), "")
# HDFOverview.msi_discovery()
with mock.patch.object(
HDFMap, "msi", new_callable=mock.PropertyMock, return_value=_bf.file_map.msi
) as mock_dmap:
_overview.msi_discovery()
self.assertNotEqual(mock_stdout.getvalue(), "")
self.assertTrue(mock_dmap.called)
# "flush" StringIO
mock_stdout.truncate(0)
mock_stdout.seek(0)
self.assertEqual(mock_stdout.getvalue(), "")
# HDFOverview.unknowns_discovery()
with mock.patch.object(
HDFMap,
"unknowns",
new_callable=mock.PropertyMock,
return_value=_bf.file_map.unknowns,
) as mock_unknowns:
_overview.unknowns_discovery()
self.assertNotEqual(mock_stdout.getvalue(), "")
self.assertTrue(mock_unknowns.called)
# "flush" StringIO
mock_stdout.truncate(0)
mock_stdout.seek(0)
self.assertEqual(mock_stdout.getvalue(), "")
# HDFOverview.report_discovery()
with mock.patch.multiple(
HDFOverview,
control_discovery=mock.DEFAULT,
digitizer_discovery=mock.DEFAULT,
msi_discovery=mock.DEFAULT,
unknowns_discovery=mock.DEFAULT,
) as mock_values:
mock_values["control_discovery"].side_effect = _overview.control_discovery()
mock_values[
"digitizer_discovery"
].side_effect = _overview.digitizer_discovery()
mock_values["msi_discovery"].side_effect = _overview.msi_discovery()
mock_values["unknowns_discovery"].side_effect = _overview.unknowns_discovery()
_overview.report_discovery()
self.assertNotEqual(mock_stdout.getvalue(), "")
self.assertTrue(mock_values["control_discovery"].called)
self.assertTrue(mock_values["digitizer_discovery"].called)
self.assertTrue(mock_values["msi_discovery"].called)
self.assertTrue(mock_values["unknowns_discovery"].called)
# "flush" StringIO
mock_stdout.truncate(0)
mock_stdout.seek(0)
self.assertEqual(mock_stdout.getvalue(), "")
@with_bf
@mock.patch("sys.stdout", new_callable=io.StringIO)
def test_report_controls(self, _bf: File, mock_stdout):
_overview = self.create_overview(_bf)
# HDFOverview.report_control_configs ----
control = _bf.controls["Waveform"]
# control has no configurations
with mock.patch.object(
control.__class__, "configs", new_callable=mock.PropertyMock, return_value={}
) as mock_configs:
_overview.report_control_configs(control)
self.assertNotEqual(mock_stdout.getvalue(), "")
self.assertTrue(mock_configs.called)
# "flush" StringIO
mock_stdout.truncate(0)
mock_stdout.seek(0)
self.assertEqual(mock_stdout.getvalue(), "")
# control HAS configurations
configs = control.configs.copy()
mock_values = {}
with mock.patch.object(
control.__class__,
"configs",
new_callable=mock.PropertyMock,
return_value=configs,
) as mock_values["configs"]:
_overview.report_control_configs(control)
self.assertNotEqual(mock_stdout.getvalue(), "")
self.assertTrue(mock_values["configs"].called)
# "flush" StringIO
mock_stdout.truncate(0)
mock_stdout.seek(0)
self.assertEqual(mock_stdout.getvalue(), "")
# HDFOverview.report_controls ----
with mock.patch.object(
HDFMap,
"controls",
new_callable=mock.PropertyMock,
return_value=_bf.file_map.controls,
) as mock_dmap, mock.patch.object(
HDFOverview,
"report_control_configs",
side_effect=_overview.report_control_configs,
) as mock_rcc:
# specify an existing control
_overview.report_controls("Waveform")
self.assertNotEqual(mock_stdout.getvalue(), "")
self.assertTrue(mock_dmap.called)
self.assertTrue(mock_rcc.called)
mock_dmap.reset_mock()
mock_rcc.reset_mock()
# "flush" StringIO
mock_stdout.truncate(0)
mock_stdout.seek(0)
self.assertEqual(mock_stdout.getvalue(), "")
# report all (aka specified control not in map dict)
_overview.report_controls()
self.assertNotEqual(mock_stdout.getvalue(), "")
self.assertTrue(mock_dmap.called)
self.assertTrue(mock_rcc.called)
mock_dmap.reset_mock()
mock_rcc.reset_mock()
# "flush" StringIO
mock_stdout.truncate(0)
mock_stdout.seek(0)
self.assertEqual(mock_stdout.getvalue(), "")
@with_bf
@mock.patch("sys.stdout", new_callable=io.StringIO)
def test_report_details(self, _bf: File, mock_stdout):
_overview = self.create_overview(_bf)
with mock.patch.multiple(
_overview.__class__,
report_controls=mock.DEFAULT,
report_digitizers=mock.DEFAULT,
report_msi=mock.DEFAULT,
) as mock_values:
_overview.report_details()
self.assertNotEqual(mock_stdout.getvalue(), "")
self.assertTrue(mock_values["report_digitizers"].called)
self.assertTrue(mock_values["report_controls"].called)
self.assertTrue(mock_values["report_msi"].called)
@with_bf
@mock.patch("sys.stdout", new_callable=io.StringIO)
def test_report_digitizers(self, _bf: File, mock_stdout):
self.f.add_module("SIS crate")
_bf._map_file() # re-map file
_overview = self.create_overview(_bf)
# HDFOverview.report_digitizer_configs ----
digi = _bf.digitizers["SIS 3301"]
# digitizer has no configurations
with mock.patch.object(
digi.__class__, "configs", new_callable=mock.PropertyMock, return_value={}
) as mock_configs:
_overview.report_digitizer_configs(digi)
self.assertNotEqual(mock_stdout.getvalue(), "")
self.assertTrue(mock_configs.called)
# "flush" StringIO
mock_stdout.truncate(0)
mock_stdout.seek(0)
self.assertEqual(mock_stdout.getvalue(), "")
# digitizer HAS configurations
configs = digi.configs.copy()
active = digi.active_configs.copy()
mock_values = {}
with mock.patch.object(
digi.__class__,
"configs",
new_callable=mock.PropertyMock,
return_value=configs,
) as mock_values["configs"], mock.patch.object(
digi.__class__,
"active_configs",
new_callable=mock.PropertyMock,
return_value=active,
) as mock_values[
"active_configs"
]:
_overview.report_digitizer_configs(digi)
self.assertNotEqual(mock_stdout.getvalue(), "")
self.assertTrue(mock_values["configs"].called)
self.assertTrue(mock_values["active_configs"].called)
# "flush" StringIO
mock_stdout.truncate(0)
mock_stdout.seek(0)
self.assertEqual(mock_stdout.getvalue(), "")
# HDFOverview.report_digitizers ----
with mock.patch.object(
HDFMap,
"digitizers",
new_callable=mock.PropertyMock,
return_value=_bf.file_map.digitizers,
) as mock_dmap, mock.patch.object(
HDFOverview,
"report_digitizer_configs",
side_effect=_overview.report_digitizer_configs,
) as mock_rdc:
# specify an existing digitizer
_overview.report_digitizers("SIS 3301")
self.assertNotEqual(mock_stdout.getvalue(), "")
self.assertTrue(mock_dmap.called)
self.assertTrue(mock_rdc.called)
mock_dmap.reset_mock()
mock_rdc.reset_mock()
# "flush" StringIO
mock_stdout.truncate(0)
mock_stdout.seek(0)
self.assertEqual(mock_stdout.getvalue(), "")
# report all (aka specified digitizer not in map dict)
_overview.report_digitizers()
self.assertNotEqual(mock_stdout.getvalue(), "")
self.assertTrue(mock_dmap.called)
self.assertTrue(mock_rdc.called)
mock_dmap.reset_mock()
mock_rdc.reset_mock()
# "flush" StringIO
mock_stdout.truncate(0)
mock_stdout.seek(0)
self.assertEqual(mock_stdout.getvalue(), "")
@with_bf
@mock.patch("sys.stdout", new_callable=io.StringIO)
def test_report_general(self, _bf: File, mock_stdout):
_overview = self.create_overview(_bf)
with mock.patch.object(
_bf.__class__, "info", new_callable=mock.PropertyMock, return_value=_bf.info
) as mock_info:
_overview.report_general()
self.assertNotEqual(mock_stdout.getvalue(), "")
self.assertTrue(mock_info.called)
@with_bf
@mock.patch("sys.stdout", new_callable=io.StringIO)
def test_report_msi(self, _bf: File, mock_stdout):
_overview = self.create_overview(_bf)
# HDFOverview.report_msi_configs ----
msi = _bf.msi["Discharge"]
# MSI has no configurations
with mock.patch.object(
msi.__class__, "configs", new_callable=mock.PropertyMock, return_value={}
) as mock_msi:
_overview.report_msi_configs(msi)
self.assertNotEqual(mock_stdout.getvalue(), "")
self.assertTrue(mock_msi.called)
# "flush" StringIO
mock_stdout.truncate(0)
mock_stdout.seek(0)
self.assertEqual(mock_stdout.getvalue(), "")
# MSI HAS configurations
configs = msi.configs.copy()
mock_values = {}
with mock.patch.object(
msi.__class__, "configs", new_callable=mock.PropertyMock, return_value=configs
) as mock_values["configs"]:
_overview.report_msi_configs(msi)
self.assertNotEqual(mock_stdout.getvalue(), "")
self.assertTrue(mock_values["configs"].called)
# "flush" StringIO
mock_stdout.truncate(0)
mock_stdout.seek(0)
self.assertEqual(mock_stdout.getvalue(), "")
# HDFOverview.report_msi ----
with mock.patch.object(
HDFMap, "msi", new_callable=mock.PropertyMock, return_value=_bf.file_map.msi
) as mock_dmap, mock.patch.object(
HDFOverview,
"report_msi_configs",
            side_effect=_overview.report_msi_configs,
) as mock_rmc:
            # specify an existing MSI diagnostic
_overview.report_msi("Discharge")
self.assertNotEqual(mock_stdout.getvalue(), "")
self.assertTrue(mock_dmap.called)
self.assertTrue(mock_rmc.called)
mock_dmap.reset_mock()
mock_rmc.reset_mock()
# "flush" StringIO
mock_stdout.truncate(0)
mock_stdout.seek(0)
self.assertEqual(mock_stdout.getvalue(), "")
            # report all (aka specified MSI diagnostic not in map dict)
_overview.report_msi()
self.assertNotEqual(mock_stdout.getvalue(), "")
self.assertTrue(mock_dmap.called)
self.assertTrue(mock_rmc.called)
mock_dmap.reset_mock()
mock_rmc.reset_mock()
# "flush" StringIO
mock_stdout.truncate(0)
mock_stdout.seek(0)
self.assertEqual(mock_stdout.getvalue(), "")
@with_bf
@mock.patch("sys.stdout", new_callable=io.StringIO)
def test_print(self, _bf: File, mock_stdout):
_overview = self.create_overview(_bf)
with mock.patch.object(
_bf.__class__, "info", new_callable=mock.PropertyMock, return_value=_bf.info
) as mock_info, mock.patch.multiple(
_overview.__class__,
report_general=mock.DEFAULT,
report_discovery=mock.DEFAULT,
report_details=mock.DEFAULT,
) as mock_values:
_overview.print()
self.assertNotEqual(mock_stdout.getvalue(), "")
self.assertTrue(mock_info.called)
self.assertTrue(mock_values["report_general"].called)
self.assertTrue(mock_values["report_discovery"].called)
self.assertTrue(mock_values["report_details"].called)
@with_bf
@mock.patch("__main__.__builtins__.open", new_callable=mock.mock_open)
def test_save(self, _bf: File, mock_o):
_overview = self.create_overview(_bf)
with mock.patch.object(
_overview.__class__, "print", side_effect=_overview.print
) as mock_print:
# save overview file alongside HDF5 file
filename = os.path.splitext(_bf.filename)[0] + ".txt"
_overview.save()
self.assertTrue(mock_print.called)
self.assertEqual(mock_o.call_count, 1)
mock_o.assert_called_with(filename, "w")
# reset mocks
mock_o.reset_mock()
mock_print.reset_mock()
# specify `filename`
filename = "test.txt"
_overview.save(filename=filename)
self.assertTrue(mock_print.called)
self.assertEqual(mock_o.call_count, 1)
mock_o.assert_called_with(filename, "w")
if __name__ == "__main__":
ut.main()
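# A hedged sketch of the overview workflow these tests exercise. Constructing
# an HDFOverview directly from an open file mirrors what the create_overview()
# helper does and is an assumption here, not a documented constructor.
#
#     overview = HDFOverview(bf)
#     overview.report_general()         # file metadata printed to stdout
#     overview.report_msi("Discharge")  # a single MSI diagnostic
#     overview.print()                  # the full report
#     overview.save("overview.txt")     # same report written to disk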
|
|
import aiohttp
import asyncio
import socket
import unittest
from aiohttp import web
class TestWebSocketClientFunctional(unittest.TestCase):
    def setUp(self):
        # initialize so tearDown's check cannot raise AttributeError if a
        # test fails before create_server() runs
        self.handler = None
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(None)
def tearDown(self):
if self.handler:
self.loop.run_until_complete(self.handler.finish_connections())
self.loop.close()
def find_unused_port(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('127.0.0.1', 0))
port = s.getsockname()[1]
s.close()
return port
@asyncio.coroutine
def create_server(self, method, path, handler):
app = web.Application(loop=self.loop)
app.router.add_route(method, path, handler)
port = self.find_unused_port()
self.handler = app.make_handler()
srv = yield from self.loop.create_server(
self.handler, '127.0.0.1', port)
url = "http://127.0.0.1:{}".format(port) + path
self.addCleanup(srv.close)
return app, srv, url
def test_send_recv_text(self):
@asyncio.coroutine
def handler(request):
ws = web.WebSocketResponse()
ws.start(request)
msg = yield from ws.receive_str()
ws.send_str(msg+'/answer')
yield from ws.close()
return ws
@asyncio.coroutine
def go():
_, _, url = yield from self.create_server('GET', '/', handler)
resp = yield from aiohttp.ws_connect(url, loop=self.loop)
resp.send_str('ask')
msg = yield from resp.receive()
self.assertEqual(msg.data, 'ask/answer')
yield from resp.close()
self.loop.run_until_complete(go())
def test_send_recv_bytes(self):
@asyncio.coroutine
def handler(request):
ws = web.WebSocketResponse()
ws.start(request)
msg = yield from ws.receive_bytes()
ws.send_bytes(msg+b'/answer')
yield from ws.close()
return ws
@asyncio.coroutine
def go():
_, _, url = yield from self.create_server('GET', '/', handler)
resp = yield from aiohttp.ws_connect(url, loop=self.loop)
resp.send_bytes(b'ask')
msg = yield from resp.receive()
self.assertEqual(msg.data, b'ask/answer')
yield from resp.close()
self.loop.run_until_complete(go())
def test_ping_pong(self):
closed = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def handler(request):
ws = web.WebSocketResponse()
ws.start(request)
msg = yield from ws.receive_bytes()
ws.ping()
ws.send_bytes(msg+b'/answer')
try:
yield from ws.close()
finally:
closed.set_result(1)
return ws
@asyncio.coroutine
def go():
_, _, url = yield from self.create_server('GET', '/', handler)
resp = yield from aiohttp.ws_connect(url, loop=self.loop)
resp.ping()
resp.send_bytes(b'ask')
msg = yield from resp.receive()
self.assertEqual(msg.tp, aiohttp.MsgType.binary)
self.assertEqual(msg.data, b'ask/answer')
msg = yield from resp.receive()
self.assertEqual(msg.tp, aiohttp.MsgType.close)
yield from resp.close()
yield from closed
self.loop.run_until_complete(go())
def test_ping_pong_manual(self):
closed = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def handler(request):
ws = web.WebSocketResponse()
ws.start(request)
msg = yield from ws.receive_bytes()
ws.ping()
ws.send_bytes(msg+b'/answer')
try:
yield from ws.close()
finally:
closed.set_result(1)
return ws
@asyncio.coroutine
def go():
_, srv, url = yield from self.create_server('GET', '/', handler)
resp = yield from aiohttp.ws_connect(
url, autoping=False, loop=self.loop)
resp.ping()
resp.send_bytes(b'ask')
msg = yield from resp.receive()
self.assertEqual(msg.tp, aiohttp.MsgType.pong)
msg = yield from resp.receive()
self.assertEqual(msg.tp, aiohttp.MsgType.ping)
resp.pong()
msg = yield from resp.receive()
self.assertEqual(msg.data, b'ask/answer')
msg = yield from resp.receive()
self.assertEqual(msg.tp, aiohttp.MsgType.close)
yield from closed
self.loop.run_until_complete(go())
def test_close(self):
@asyncio.coroutine
def handler(request):
ws = web.WebSocketResponse()
ws.start(request)
yield from ws.receive_bytes()
ws.send_str('test')
yield from ws.receive()
return ws
@asyncio.coroutine
def go():
_, _, url = yield from self.create_server('GET', '/', handler)
resp = yield from aiohttp.ws_connect(url, loop=self.loop)
resp.send_bytes(b'ask')
closed = yield from resp.close()
self.assertTrue(closed)
self.assertTrue(resp.closed)
self.assertEqual(resp.close_code, 1000)
msg = yield from resp.receive()
self.assertEqual(msg.tp, aiohttp.MsgType.closed)
self.loop.run_until_complete(go())
def test_close_from_server(self):
closed = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def handler(request):
ws = web.WebSocketResponse()
ws.start(request)
try:
yield from ws.receive_bytes()
yield from ws.close()
finally:
closed.set_result(1)
return ws
@asyncio.coroutine
def go():
_, _, url = yield from self.create_server('GET', '/', handler)
resp = yield from aiohttp.ws_connect(url, loop=self.loop)
resp.send_bytes(b'ask')
msg = yield from resp.receive()
self.assertEqual(msg.tp, aiohttp.MsgType.close)
self.assertTrue(resp.closed)
msg = yield from resp.receive()
self.assertEqual(msg.tp, aiohttp.MsgType.closed)
yield from closed
self.loop.run_until_complete(go())
def test_close_manual(self):
closed = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def handler(request):
ws = web.WebSocketResponse()
ws.start(request)
yield from ws.receive_bytes()
ws.send_str('test')
try:
yield from ws.close()
finally:
closed.set_result(1)
return ws
@asyncio.coroutine
def go():
_, _, url = yield from self.create_server('GET', '/', handler)
resp = yield from aiohttp.ws_connect(
url, autoclose=False, loop=self.loop)
resp.send_bytes(b'ask')
msg = yield from resp.receive()
self.assertEqual(msg.data, 'test')
msg = yield from resp.receive()
self.assertEqual(msg.tp, aiohttp.MsgType.close)
self.assertEqual(msg.data, 1000)
self.assertEqual(msg.extra, '')
self.assertFalse(resp.closed)
yield from resp.close()
yield from closed
self.assertTrue(resp.closed)
self.loop.run_until_complete(go())
def test_close_timeout(self):
@asyncio.coroutine
def handler(request):
ws = web.WebSocketResponse()
ws.start(request)
yield from ws.receive_bytes()
ws.send_str('test')
yield from asyncio.sleep(10, loop=self.loop)
@asyncio.coroutine
def go():
_, _, url = yield from self.create_server('GET', '/', handler)
resp = yield from aiohttp.ws_connect(
url, timeout=0.2, autoclose=False, loop=self.loop)
resp.send_bytes(b'ask')
msg = yield from resp.receive()
self.assertEqual(msg.data, 'test')
self.assertEqual(msg.tp, aiohttp.MsgType.text)
msg = yield from resp.close()
self.assertTrue(resp.closed)
self.assertIsInstance(resp.exception(), asyncio.TimeoutError)
self.loop.run_until_complete(go())
def test_close_cancel(self):
@asyncio.coroutine
def handler(request):
ws = web.WebSocketResponse()
ws.start(request)
yield from ws.receive_bytes()
ws.send_str('test')
yield from asyncio.sleep(10, loop=self.loop)
@asyncio.coroutine
def go():
_, _, url = yield from self.create_server('GET', '/', handler)
resp = yield from aiohttp.ws_connect(
url, autoclose=False, loop=self.loop)
resp.send_bytes(b'ask')
text = yield from resp.receive()
self.assertEqual(text.data, 'test')
            t = asyncio.ensure_future(resp.close(), loop=self.loop)
yield from asyncio.sleep(0.1, loop=self.loop)
t.cancel()
yield from asyncio.sleep(0.1, loop=self.loop)
self.assertTrue(resp.closed)
self.assertIsNone(resp.exception())
self.loop.run_until_complete(go())
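# A minimal client-side sketch using the same legacy ws_connect API these
# tests exercise; ``url`` must point at a running websocket server and
# ``loop`` is the event loop driving the connection (both supplied by the
# caller).
@asyncio.coroutine
def example_ws_roundtrip(url, loop):
    resp = yield from aiohttp.ws_connect(url, loop=loop)
    resp.send_str('hello')
    msg = yield from resp.receive()
    if msg.tp == aiohttp.MsgType.text:
        print(msg.data)
    yield from resp.close()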
|
|
# Copyright 2019 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Server-side implementation of gRPC Asyncio Python."""
from concurrent.futures import Executor
from typing import Any, Optional, Sequence
import grpc
from grpc import _common
from grpc import _compression
from grpc._cython import cygrpc
from . import _base_server
from ._interceptor import ServerInterceptor
from ._typing import ChannelArgumentType
def _augment_channel_arguments(base_options: ChannelArgumentType,
compression: Optional[grpc.Compression]):
compression_option = _compression.create_channel_option(compression)
return tuple(base_options) + compression_option
class Server(_base_server.Server):
"""Serves RPCs."""
def __init__(self, thread_pool: Optional[Executor],
generic_handlers: Optional[Sequence[grpc.GenericRpcHandler]],
interceptors: Optional[Sequence[Any]],
options: ChannelArgumentType,
maximum_concurrent_rpcs: Optional[int],
compression: Optional[grpc.Compression]):
self._loop = cygrpc.get_working_loop()
if interceptors:
invalid_interceptors = [
interceptor for interceptor in interceptors
if not isinstance(interceptor, ServerInterceptor)
]
if invalid_interceptors:
raise ValueError(
'Interceptor must be ServerInterceptor, the '
f'following are invalid: {invalid_interceptors}')
self._server = cygrpc.AioServer(
self._loop, thread_pool, generic_handlers, interceptors,
_augment_channel_arguments(options, compression),
maximum_concurrent_rpcs)
def add_generic_rpc_handlers(
self,
generic_rpc_handlers: Sequence[grpc.GenericRpcHandler]) -> None:
"""Registers GenericRpcHandlers with this Server.
This method is only safe to call before the server is started.
Args:
generic_rpc_handlers: A sequence of GenericRpcHandlers that will be
used to service RPCs.
"""
self._server.add_generic_rpc_handlers(generic_rpc_handlers)
def add_insecure_port(self, address: str) -> int:
"""Opens an insecure port for accepting RPCs.
This method may only be called before starting the server.
Args:
address: The address for which to open a port. If the port is 0,
or not specified in the address, then the gRPC runtime will choose a port.
Returns:
An integer port on which the server will accept RPC requests.
"""
return _common.validate_port_binding_result(
address, self._server.add_insecure_port(_common.encode(address)))
def add_secure_port(self, address: str,
server_credentials: grpc.ServerCredentials) -> int:
"""Opens a secure port for accepting RPCs.
This method may only be called before starting the server.
Args:
address: The address for which to open a port.
            If the port is 0, or not specified in the address, then the gRPC
runtime will choose a port.
server_credentials: A ServerCredentials object.
Returns:
An integer port on which the server will accept RPC requests.
"""
return _common.validate_port_binding_result(
address,
self._server.add_secure_port(_common.encode(address),
server_credentials))
async def start(self) -> None:
"""Starts this Server.
This method may only be called once. (i.e. it is not idempotent).
"""
await self._server.start()
async def stop(self, grace: Optional[float]) -> None:
"""Stops this Server.
This method immediately stops the server from servicing new RPCs in
all cases.
If a grace period is specified, this method returns immediately and all
RPCs active at the end of the grace period are aborted. If a grace
period is not specified (by passing None for grace), all existing RPCs
are aborted immediately and this method blocks until the last RPC
handler terminates.
This method is idempotent and may be called at any time. Passing a
smaller grace value in a subsequent call will have the effect of
stopping the Server sooner (passing None will have the effect of
stopping the server immediately). Passing a larger grace value in a
subsequent call will not have the effect of stopping the server later
(i.e. the most restrictive grace value is used).
Args:
grace: A duration of time in seconds or None.
"""
await self._server.shutdown(grace)
async def wait_for_termination(self,
timeout: Optional[float] = None) -> bool:
"""Block current coroutine until the server stops.
This is an EXPERIMENTAL API.
The wait will not consume computational resources during blocking, and
        it will block until one of the two following conditions is met:
1) The server is stopped or terminated;
2) A timeout occurs if timeout is not `None`.
The timeout argument works in the same way as `threading.Event.wait()`.
https://docs.python.org/3/library/threading.html#threading.Event.wait
Args:
timeout: A floating point number specifying a timeout for the
operation in seconds.
Returns:
          A bool indicating whether the operation timed out.
"""
return await self._server.wait_for_termination(timeout)
def __del__(self):
"""Schedules a graceful shutdown in current event loop.
The Cython AioServer doesn't hold a ref-count to this class. It should
be safe to slightly extend the underlying Cython object's life span.
"""
if hasattr(self, '_server'):
if self._server.is_running():
cygrpc.schedule_coro_threadsafe(
self._server.shutdown(None),
self._loop,
)
def server(migration_thread_pool: Optional[Executor] = None,
handlers: Optional[Sequence[grpc.GenericRpcHandler]] = None,
interceptors: Optional[Sequence[Any]] = None,
options: Optional[ChannelArgumentType] = None,
maximum_concurrent_rpcs: Optional[int] = None,
compression: Optional[grpc.Compression] = None):
"""Creates a Server with which RPCs can be serviced.
Args:
migration_thread_pool: A futures.ThreadPoolExecutor to be used by the
        Server to execute non-AsyncIO RPC handlers for migration purposes.
handlers: An optional list of GenericRpcHandlers used for executing RPCs.
More handlers may be added by calling add_generic_rpc_handlers any time
before the server is started.
interceptors: An optional list of ServerInterceptor objects that observe
and optionally manipulate the incoming RPCs before handing them over to
handlers. The interceptors are given control in the order they are
specified. This is an EXPERIMENTAL API.
options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC runtime)
to configure the channel.
maximum_concurrent_rpcs: The maximum number of concurrent RPCs this server
will service before returning RESOURCE_EXHAUSTED status, or None to
indicate no limit.
compression: An element of grpc.compression, e.g.
grpc.compression.Gzip. This compression algorithm will be used for the
lifetime of the server unless overridden by set_compression. This is an
EXPERIMENTAL option.
Returns:
A Server object.
"""
return Server(migration_thread_pool, () if handlers is None else handlers,
() if interceptors is None else interceptors,
() if options is None else options, maximum_concurrent_rpcs,
compression)
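# A minimal usage sketch of the factory above (applications normally reach it
# through the public ``grpc.aio`` package; the port below is arbitrary and no
# handlers are registered, so every RPC would be rejected as unimplemented).
async def _example_serve() -> None:
    example_server = server()
    example_server.add_insecure_port('[::]:50051')  # port 0 lets gRPC pick one
    await example_server.start()
    await example_server.wait_for_termination()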
|
|
"""
Support for KNX/IP covers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/cover.knx/
"""
import voluptuous as vol
from homeassistant.components.cover import (
ATTR_POSITION, ATTR_TILT_POSITION, PLATFORM_SCHEMA, SUPPORT_CLOSE,
SUPPORT_OPEN, SUPPORT_SET_POSITION, SUPPORT_SET_TILT_POSITION,
SUPPORT_STOP, CoverDevice)
from homeassistant.components.knx import ATTR_DISCOVER_DEVICES, DATA_KNX
from homeassistant.const import CONF_NAME
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_utc_time_change
CONF_MOVE_LONG_ADDRESS = 'move_long_address'
CONF_MOVE_SHORT_ADDRESS = 'move_short_address'
CONF_POSITION_ADDRESS = 'position_address'
CONF_POSITION_STATE_ADDRESS = 'position_state_address'
CONF_ANGLE_ADDRESS = 'angle_address'
CONF_ANGLE_STATE_ADDRESS = 'angle_state_address'
CONF_TRAVELLING_TIME_DOWN = 'travelling_time_down'
CONF_TRAVELLING_TIME_UP = 'travelling_time_up'
CONF_INVERT_POSITION = 'invert_position'
CONF_INVERT_ANGLE = 'invert_angle'
DEFAULT_TRAVEL_TIME = 25
DEFAULT_NAME = 'KNX Cover'
DEPENDENCIES = ['knx']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MOVE_LONG_ADDRESS): cv.string,
vol.Optional(CONF_MOVE_SHORT_ADDRESS): cv.string,
vol.Optional(CONF_POSITION_ADDRESS): cv.string,
vol.Optional(CONF_POSITION_STATE_ADDRESS): cv.string,
vol.Optional(CONF_ANGLE_ADDRESS): cv.string,
vol.Optional(CONF_ANGLE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_TRAVELLING_TIME_DOWN, default=DEFAULT_TRAVEL_TIME):
cv.positive_int,
vol.Optional(CONF_TRAVELLING_TIME_UP, default=DEFAULT_TRAVEL_TIME):
cv.positive_int,
vol.Optional(CONF_INVERT_POSITION, default=False): cv.boolean,
vol.Optional(CONF_INVERT_ANGLE, default=False): cv.boolean,
})
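# A hedged configuration example that satisfies the schema above (the group
# addresses are placeholders; omit keys your actuator does not support):
#
# cover:
#   - platform: knx
#     name: "Living room blinds"
#     move_long_address: "1/0/0"
#     move_short_address: "1/0/1"
#     position_address: "1/0/2"
#     position_state_address: "1/0/3"
#     angle_address: "1/0/4"
#     angle_state_address: "1/0/5"
#     travelling_time_down: 25
#     travelling_time_up: 25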
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up cover(s) for KNX platform."""
if discovery_info is not None:
async_add_entities_discovery(hass, discovery_info, async_add_entities)
else:
async_add_entities_config(hass, config, async_add_entities)
@callback
def async_add_entities_discovery(hass, discovery_info, async_add_entities):
"""Set up covers for KNX platform configured via xknx.yaml."""
entities = []
for device_name in discovery_info[ATTR_DISCOVER_DEVICES]:
device = hass.data[DATA_KNX].xknx.devices[device_name]
entities.append(KNXCover(device))
async_add_entities(entities)
@callback
def async_add_entities_config(hass, config, async_add_entities):
"""Set up cover for KNX platform configured within platform."""
import xknx
cover = xknx.devices.Cover(
hass.data[DATA_KNX].xknx,
name=config.get(CONF_NAME),
group_address_long=config.get(CONF_MOVE_LONG_ADDRESS),
group_address_short=config.get(CONF_MOVE_SHORT_ADDRESS),
group_address_position_state=config.get(
CONF_POSITION_STATE_ADDRESS),
group_address_angle=config.get(CONF_ANGLE_ADDRESS),
group_address_angle_state=config.get(CONF_ANGLE_STATE_ADDRESS),
group_address_position=config.get(CONF_POSITION_ADDRESS),
travel_time_down=config.get(CONF_TRAVELLING_TIME_DOWN),
travel_time_up=config.get(CONF_TRAVELLING_TIME_UP),
invert_position=config.get(CONF_INVERT_POSITION),
invert_angle=config.get(CONF_INVERT_ANGLE))
hass.data[DATA_KNX].xknx.devices.add(cover)
async_add_entities([KNXCover(cover)])
class KNXCover(CoverDevice):
"""Representation of a KNX cover."""
def __init__(self, device):
"""Initialize the cover."""
self.device = device
self._unsubscribe_auto_updater = None
@callback
def async_register_callbacks(self):
"""Register callbacks to update hass after device was changed."""
async def after_update_callback(device):
"""Call after device was updated."""
await self.async_update_ha_state()
self.device.register_device_updated_cb(after_update_callback)
async def async_added_to_hass(self):
"""Store register state change callback."""
self.async_register_callbacks()
@property
def name(self):
"""Return the name of the KNX device."""
return self.device.name
@property
def available(self):
"""Return True if entity is available."""
return self.hass.data[DATA_KNX].connected
@property
def should_poll(self):
"""No polling needed within KNX."""
return False
@property
def supported_features(self):
"""Flag supported features."""
supported_features = SUPPORT_OPEN | SUPPORT_CLOSE | \
SUPPORT_SET_POSITION | SUPPORT_STOP
if self.device.supports_angle:
supported_features |= SUPPORT_SET_TILT_POSITION
return supported_features
@property
def current_cover_position(self):
"""Return the current position of the cover."""
return self.device.current_position()
@property
def is_closed(self):
"""Return if the cover is closed."""
return self.device.is_closed()
async def async_close_cover(self, **kwargs):
"""Close the cover."""
if not self.device.is_closed():
await self.device.set_down()
self.start_auto_updater()
async def async_open_cover(self, **kwargs):
"""Open the cover."""
if not self.device.is_open():
await self.device.set_up()
self.start_auto_updater()
async def async_set_cover_position(self, **kwargs):
"""Move the cover to a specific position."""
if ATTR_POSITION in kwargs:
position = kwargs[ATTR_POSITION]
await self.device.set_position(position)
self.start_auto_updater()
async def async_stop_cover(self, **kwargs):
"""Stop the cover."""
await self.device.stop()
self.stop_auto_updater()
@property
def current_cover_tilt_position(self):
"""Return current tilt position of cover."""
if not self.device.supports_angle:
return None
return self.device.current_angle()
async def async_set_cover_tilt_position(self, **kwargs):
"""Move the cover tilt to a specific position."""
if ATTR_TILT_POSITION in kwargs:
tilt_position = kwargs[ATTR_TILT_POSITION]
await self.device.set_angle(tilt_position)
def start_auto_updater(self):
"""Start the autoupdater to update HASS while cover is moving."""
if self._unsubscribe_auto_updater is None:
self._unsubscribe_auto_updater = async_track_utc_time_change(
self.hass, self.auto_updater_hook)
def stop_auto_updater(self):
"""Stop the autoupdater."""
if self._unsubscribe_auto_updater is not None:
self._unsubscribe_auto_updater()
self._unsubscribe_auto_updater = None
@callback
def auto_updater_hook(self, now):
"""Call for the autoupdater."""
self.async_schedule_update_ha_state()
if self.device.position_reached():
self.stop_auto_updater()
self.hass.add_job(self.device.auto_stop_if_necessary())
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_delete_request_initial(
subscription_id: str,
resource_group_name: str,
mobile_network_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2022-01-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/mobileNetworks/{mobileNetworkName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"mobileNetworkName": _SERIALIZER.url("mobile_network_name", mobile_network_name, 'str', max_length=64, min_length=0, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_-]*$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
subscription_id: str,
resource_group_name: str,
mobile_network_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2022-01-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/mobileNetworks/{mobileNetworkName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"mobileNetworkName": _SERIALIZER.url("mobile_network_name", mobile_network_name, 'str', max_length=64, min_length=0, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_-]*$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request_initial(
subscription_id: str,
resource_group_name: str,
mobile_network_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2022-01-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/mobileNetworks/{mobileNetworkName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"mobileNetworkName": _SERIALIZER.url("mobile_network_name", mobile_network_name, 'str', max_length=64, min_length=0, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_-]*$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_update_tags_request(
subscription_id: str,
resource_group_name: str,
mobile_network_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2022-01-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/mobileNetworks/{mobileNetworkName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"mobileNetworkName": _SERIALIZER.url("mobile_network_name", mobile_network_name, 'str', max_length=64, min_length=0, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_-]*$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_list_by_subscription_request(
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2022-01-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.MobileNetwork/mobileNetworks')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_resource_group_request(
subscription_id: str,
resource_group_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2022-01-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/mobileNetworks')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_sim_ids_request_initial(
subscription_id: str,
resource_group_name: str,
mobile_network_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2022-01-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/mobileNetworks/{mobileNetworkName}/listSimIds')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"mobileNetworkName": _SERIALIZER.url("mobile_network_name", mobile_network_name, 'str', max_length=64, min_length=0, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_-]*$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class MobileNetworksOperations(object):
"""MobileNetworksOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~mobile_network_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name: str,
mobile_network_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
mobile_network_name=mobile_network_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/mobileNetworks/{mobileNetworkName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
mobile_network_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Deletes the specified mobile network.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param mobile_network_name: The name of the mobile network.
:type mobile_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
mobile_network_name=mobile_network_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/mobileNetworks/{mobileNetworkName}'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
mobile_network_name: str,
**kwargs: Any
) -> "_models.MobileNetwork":
"""Gets information about the specified mobile network.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param mobile_network_name: The name of the mobile network.
:type mobile_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MobileNetwork, or the result of cls(response)
:rtype: ~mobile_network_management_client.models.MobileNetwork
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.MobileNetwork"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
mobile_network_name=mobile_network_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MobileNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/mobileNetworks/{mobileNetworkName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name: str,
mobile_network_name: str,
parameters: "_models.MobileNetwork",
**kwargs: Any
) -> "_models.MobileNetwork":
cls = kwargs.pop('cls', None) # type: ClsType["_models.MobileNetwork"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'MobileNetwork')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
mobile_network_name=mobile_network_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('MobileNetwork', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('MobileNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/mobileNetworks/{mobileNetworkName}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
mobile_network_name: str,
parameters: "_models.MobileNetwork",
**kwargs: Any
) -> LROPoller["_models.MobileNetwork"]:
"""Creates or updates a mobile network.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param mobile_network_name: The name of the mobile network.
:type mobile_network_name: str
:param parameters: Parameters supplied to the create or update mobile network operation.
:type parameters: ~mobile_network_management_client.models.MobileNetwork
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either MobileNetwork or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~mobile_network_management_client.models.MobileNetwork]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.MobileNetwork"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
mobile_network_name=mobile_network_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('MobileNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/mobileNetworks/{mobileNetworkName}'} # type: ignore
@distributed_trace
def update_tags(
self,
resource_group_name: str,
mobile_network_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.MobileNetwork":
"""Updates a mobile network update tags.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param mobile_network_name: The name of the mobile network.
:type mobile_network_name: str
:param parameters: Parameters supplied to update mobile network tags.
:type parameters: ~mobile_network_management_client.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MobileNetwork, or the result of cls(response)
:rtype: ~mobile_network_management_client.models.MobileNetwork
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.MobileNetwork"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'TagsObject')
request = build_update_tags_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
mobile_network_name=mobile_network_name,
content_type=content_type,
json=_json,
template_url=self.update_tags.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MobileNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/mobileNetworks/{mobileNetworkName}'} # type: ignore
@distributed_trace
def list_by_subscription(
self,
**kwargs: Any
) -> Iterable["_models.MobileNetworkListResult"]:
"""Lists all the mobile networks in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either MobileNetworkListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~mobile_network_management_client.models.MobileNetworkListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.MobileNetworkListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
template_url=self.list_by_subscription.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("MobileNetworkListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.MobileNetwork/mobileNetworks'} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> Iterable["_models.MobileNetworkListResult"]:
"""Lists all the mobile networks in a resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either MobileNetworkListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~mobile_network_management_client.models.MobileNetworkListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.MobileNetworkListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("MobileNetworkListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/mobileNetworks'} # type: ignore
def _list_sim_ids_initial(
self,
resource_group_name: str,
mobile_network_name: str,
**kwargs: Any
) -> "_models.SimIdListResult":
cls = kwargs.pop('cls', None) # type: ClsType["_models.SimIdListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_sim_ids_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
mobile_network_name=mobile_network_name,
template_url=self._list_sim_ids_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('SimIdListResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('SimIdListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_sim_ids_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/mobileNetworks/{mobileNetworkName}/listSimIds'} # type: ignore
@distributed_trace
def begin_list_sim_ids(
self,
resource_group_name: str,
mobile_network_name: str,
**kwargs: Any
) -> LROPoller["_models.SimIdListResult"]:
"""List sim ids under a mobile network.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param mobile_network_name: The name of the mobile network.
:type mobile_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either SimIdListResult or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~mobile_network_management_client.models.SimIdListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.SimIdListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._list_sim_ids_initial(
resource_group_name=resource_group_name,
mobile_network_name=mobile_network_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('SimIdListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_sim_ids.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/mobileNetworks/{mobileNetworkName}/listSimIds'} # type: ignore
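# A minimal usage sketch of the long-running-operation pattern above. The
# client object and its ``mobile_networks`` attribute name are assumptions
# based on this operation group's docstrings; only ``LROPoller.result()`` is
# taken from azure-core itself.
def _example_delete_mobile_network(client, resource_group_name, mobile_network_name):
    poller = client.mobile_networks.begin_delete(
        resource_group_name, mobile_network_name)
    poller.result()  # block until the delete finishes (or raise on failure)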
|
|
# -*- test-case-name: automat._test.test_methodical -*-
import collections
from functools import wraps
from itertools import count
try:
# Python 3
from inspect import getfullargspec as getArgsSpec
except ImportError:
# Python 2
from inspect import getargspec as getArgsSpec
import attr
import six
from ._core import Transitioner, Automaton
from ._introspection import preserveName
ArgSpec = collections.namedtuple('ArgSpec', ['args', 'varargs', 'varkw',
'defaults', 'kwonlyargs',
'kwonlydefaults', 'annotations'])
def _getArgSpec(func):
"""
Normalize inspect.ArgSpec across python versions
and convert mutable attributes to immutable types.
:param Callable func: A function.
:return: The function's ArgSpec.
:rtype: ArgSpec
"""
spec = getArgsSpec(func)
return ArgSpec(
args=tuple(spec.args),
varargs=spec.varargs,
varkw=spec.varkw if six.PY3 else spec.keywords,
defaults=spec.defaults if spec.defaults else (),
kwonlyargs=tuple(spec.kwonlyargs) if six.PY3 else (),
kwonlydefaults=(
tuple(spec.kwonlydefaults.items())
if spec.kwonlydefaults else ()
) if six.PY3 else (),
annotations=tuple(spec.annotations.items()) if six.PY3 else (),
)
def _getArgNames(spec):
"""
Get the name of all arguments defined in a function signature.
The name of * and ** arguments is normalized to "*args" and "**kwargs".
    :param ArgSpec spec: The normalized argument spec to interrogate.
    :return: The set of all argument names in ``spec``.
:rtype: Set[str]
"""
return set(
spec.args
+ spec.kwonlyargs
+ (('*args',) if spec.varargs else ())
+ (('**kwargs',) if spec.varkw else ())
+ spec.annotations
)
def _keywords_only(f):
"""
Decorate a function so all its arguments must be passed by keyword.
A useful utility for decorators that take arguments so that they don't
accidentally get passed the thing they're decorating as their first
argument.
Only works for methods right now.
"""
@wraps(f)
def g(self, **kw):
return f(self, **kw)
return g
@attr.s(frozen=True)
class MethodicalState(object):
"""
A state for a L{MethodicalMachine}.
"""
machine = attr.ib(repr=False)
method = attr.ib()
serialized = attr.ib(repr=False)
def upon(self, input, enter, outputs, collector=list):
"""
Declare a state transition within the :class:`automat.MethodicalMachine`
associated with this :class:`automat.MethodicalState`:
        upon the receipt of the `input`, enter the state `enter`,
emitting each output in `outputs`.
:param MethodicalInput input: The input triggering a state transition.
:param MethodicalState enter: The resulting state.
:param Iterable[MethodicalOutput] outputs: The outputs to be triggered
as a result of the declared state transition.
:param Callable collector: The function to be used when collecting
output return values.
:raises TypeError: if any of the `outputs` signatures do not match
the `inputs` signature.
:raises ValueError: if the state transition from `self` via `input`
has already been defined.
"""
inputArgs = _getArgNames(input.argSpec)
for output in outputs:
outputArgs = _getArgNames(output.argSpec)
if not outputArgs.issubset(inputArgs):
raise TypeError(
"method {input} signature {inputSignature} "
"does not match output {output} "
"signature {outputSignature}".format(
input=input.method.__name__,
output=output.method.__name__,
inputSignature=getArgsSpec(input.method),
outputSignature=getArgsSpec(output.method),
))
self.machine._oneTransition(self, input, enter, outputs, collector)
def _name(self):
return self.method.__name__
def _transitionerFromInstance(oself, symbol, automaton):
"""
Get a L{Transitioner}
"""
transitioner = getattr(oself, symbol, None)
if transitioner is None:
transitioner = Transitioner(
automaton,
automaton.initialState,
)
setattr(oself, symbol, transitioner)
return transitioner
def _empty():
pass
def _docstring():
"""docstring"""
def assertNoCode(inst, attribute, f):
# The function body must be empty, i.e. "pass" or "return None", which
# both yield the same bytecode: LOAD_CONST (None), RETURN_VALUE. We also
# accept functions with only a docstring, which yields slightly different
# bytecode, because the "None" is put in a different constant slot.
# Unfortunately, this does not catch function bodies that return a
# constant value, e.g. "return 1", because their code is identical to a
# "return None". They differ in the contents of their constant table, but
# checking that would require us to parse the bytecode, find the index
# being returned, then making sure the table has a None at that index.
if f.__code__.co_code not in (_empty.__code__.co_code,
_docstring.__code__.co_code):
raise ValueError("function body must be empty")
def _filterArgs(args, kwargs, inputSpec, outputSpec):
"""
Filter out arguments that were passed to input that output won't accept.
:param tuple args: The *args that input received.
:param dict kwargs: The **kwargs that input received.
:param ArgSpec inputSpec: The input's arg spec.
:param ArgSpec outputSpec: The output's arg spec.
:return: The args and kwargs that output will accept.
:rtype: Tuple[tuple, dict]
"""
named_args = tuple(zip(inputSpec.args[1:], args))
if outputSpec.varargs:
# Only return all args if the output accepts *args.
return_args = args
else:
# Filter out arguments that don't appear
# in the output's method signature.
return_args = [v for n, v in named_args if n in outputSpec.args]
# Get any of input's default arguments that were not passed.
passed_arg_names = tuple(kwargs)
    for name, _ in named_args:
        passed_arg_names += (name,)
defaults = zip(inputSpec.args[::-1], inputSpec.defaults[::-1])
full_kwargs = {n: v for n, v in defaults if n not in passed_arg_names}
full_kwargs.update(kwargs)
if outputSpec.varkw:
# Only pass all kwargs if the output method accepts **kwargs.
return_kwargs = full_kwargs
else:
# Filter out names that the output method does not accept.
all_accepted_names = outputSpec.args[1:] + outputSpec.kwonlyargs
return_kwargs = {n: v for n, v in full_kwargs.items()
if n in all_accepted_names}
return return_args, return_kwargs
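# A hypothetical worked example of _filterArgs (not part of the original
# module): with an input method ``def query(self, a, b=2)`` and an output
# method ``def show(self, a)``, calling the input as ``query(1, b=3)`` gives
# args=(1,) and kwargs={'b': 3}.  Since ``show`` accepts neither *args nor
# ``b``, _filterArgs returns ``[1]`` for the positional args and ``{}`` for
# the kwargs, so the output ends up being invoked as ``show(oself, 1)``.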
@attr.s(eq=False, hash=False)
class MethodicalInput(object):
"""
An input for a L{MethodicalMachine}.
"""
automaton = attr.ib(repr=False)
method = attr.ib(validator=assertNoCode)
symbol = attr.ib(repr=False)
collectors = attr.ib(default=attr.Factory(dict), repr=False)
argSpec = attr.ib(init=False, repr=False)
@argSpec.default
def _buildArgSpec(self):
return _getArgSpec(self.method)
def __get__(self, oself, type=None):
"""
Return a function that takes no arguments and returns values returned
by output functions produced by the given L{MethodicalInput} in
C{oself}'s current state.
"""
transitioner = _transitionerFromInstance(oself, self.symbol,
self.automaton)
@preserveName(self.method)
@wraps(self.method)
def doInput(*args, **kwargs):
self.method(oself, *args, **kwargs)
previousState = transitioner._state
(outputs, outTracer) = transitioner.transition(self)
collector = self.collectors[previousState]
values = []
for output in outputs:
if outTracer:
outTracer(output._name())
a, k = _filterArgs(args, kwargs, self.argSpec, output.argSpec)
value = output(oself, *a, **k)
values.append(value)
return collector(values)
return doInput
def _name(self):
return self.method.__name__
@attr.s(frozen=True)
class MethodicalOutput(object):
"""
An output for a L{MethodicalMachine}.
"""
machine = attr.ib(repr=False)
method = attr.ib()
argSpec = attr.ib(init=False, repr=False)
@argSpec.default
def _buildArgSpec(self):
return _getArgSpec(self.method)
def __get__(self, oself, type=None):
"""
Outputs are private, so raise an exception when we attempt to get one.
"""
raise AttributeError(
"{cls}.{method} is a state-machine output method; "
"to produce this output, call an input method instead.".format(
cls=type.__name__,
method=self.method.__name__
)
)
def __call__(self, oself, *args, **kwargs):
"""
Call the underlying method.
"""
return self.method(oself, *args, **kwargs)
def _name(self):
return self.method.__name__
@attr.s(eq=False, hash=False)
class MethodicalTracer(object):
automaton = attr.ib(repr=False)
symbol = attr.ib(repr=False)
def __get__(self, oself, type=None):
transitioner = _transitionerFromInstance(oself, self.symbol,
self.automaton)
def setTrace(tracer):
transitioner.setTrace(tracer)
return setTrace
counter = count()
def gensym():
"""
Create a unique Python identifier.
"""
return "_symbol_" + str(next(counter))
class MethodicalMachine(object):
"""
A :class:`MethodicalMachine` is an interface to an `Automaton`
that uses methods on a class.
"""
def __init__(self):
self._automaton = Automaton()
self._reducers = {}
self._symbol = gensym()
def __get__(self, oself, type=None):
"""
L{MethodicalMachine} is an implementation detail for setting up
class-level state; applications should never need to access it on an
instance.
"""
if oself is not None:
raise AttributeError(
"MethodicalMachine is an implementation detail.")
return self
@_keywords_only
def state(self, initial=False, terminal=False,
serialized=None):
"""
Declare a state, possibly an initial state or a terminal state.
This is a decorator for methods, but it will modify the method so as
not to be callable any more.
:param bool initial: is this state the initial state?
Only one state on this :class:`automat.MethodicalMachine`
may be an initial state; more than one is an error.
:param bool terminal: Is this state a terminal state?
i.e. a state that the machine can end up in?
(This is purely informational at this point.)
:param Hashable serialized: a serializable value
to be used to represent this state to external systems.
This value should be hashable;
:py:func:`unicode` is a good type to use.
"""
def decorator(stateMethod):
state = MethodicalState(machine=self,
method=stateMethod,
serialized=serialized)
if initial:
self._automaton.initialState = state
return state
return decorator
@_keywords_only
def input(self):
"""
Declare an input.
This is a decorator for methods.
"""
def decorator(inputMethod):
return MethodicalInput(automaton=self._automaton,
method=inputMethod,
symbol=self._symbol)
return decorator
@_keywords_only
def output(self):
"""
Declare an output.
This is a decorator for methods.
This method will be called when the state machine transitions to this
state as specified in the decorated `output` method.
"""
def decorator(outputMethod):
return MethodicalOutput(machine=self, method=outputMethod)
return decorator
def _oneTransition(self, startState, inputToken, endState, outputTokens,
collector):
"""
See L{MethodicalState.upon}.
"""
# FIXME: tests for all of this (some of it is wrong)
# if not isinstance(startState, MethodicalState):
# raise NotImplementedError("start state {} isn't a state"
# .format(startState))
# if not isinstance(inputToken, MethodicalInput):
# raise NotImplementedError("start state {} isn't an input"
# .format(inputToken))
# if not isinstance(endState, MethodicalState):
# raise NotImplementedError("end state {} isn't a state"
# .format(startState))
# for output in outputTokens:
# if not isinstance(endState, MethodicalState):
# raise NotImplementedError("output state {} isn't a state"
# .format(endState))
self._automaton.addTransition(startState, inputToken, endState,
tuple(outputTokens))
inputToken.collectors[startState] = collector
@_keywords_only
def serializer(self):
"""
"""
def decorator(decoratee):
@wraps(decoratee)
def serialize(oself):
transitioner = _transitionerFromInstance(oself, self._symbol,
self._automaton)
return decoratee(oself, transitioner._state.serialized)
return serialize
return decorator
@_keywords_only
def unserializer(self):
"""
"""
def decorator(decoratee):
@wraps(decoratee)
def unserialize(oself, *args, **kwargs):
state = decoratee(oself, *args, **kwargs)
mapping = {}
for eachState in self._automaton.states():
mapping[eachState.serialized] = eachState
transitioner = _transitionerFromInstance(
oself, self._symbol, self._automaton)
transitioner._state = mapping[state]
return None # it's on purpose
return unserialize
return decorator
@property
def _setTrace(self):
return MethodicalTracer(self._automaton, self._symbol)
def asDigraph(self):
"""
Generate a L{graphviz.Digraph} that represents this machine's
states and transitions.
@return: L{graphviz.Digraph} object; for more information, please
see the documentation for
U{graphviz<https://graphviz.readthedocs.io/>}
"""
from ._visualize import makeDigraph
return makeDigraph(
self._automaton,
stateAsString=lambda state: state.method.__name__,
inputAsString=lambda input: input.method.__name__,
outputAsString=lambda output: output.method.__name__,
)
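# A minimal usage sketch (not part of the original module): a two-state
# turnstile built on MethodicalMachine.  It is guarded by __main__ so that
# importing this module is unaffected.
if __name__ == "__main__":

    class Turnstile(object):
        _machine = MethodicalMachine()

        @_machine.state(initial=True)
        def locked(self):
            "The turnstile is locked."

        @_machine.state()
        def unlocked(self):
            "The turnstile is unlocked."

        @_machine.input()
        def coin(self):
            "A coin was inserted."

        @_machine.input()
        def push(self):
            "The arm was pushed."

        @_machine.output()
        def unlock(self):
            return "unlocked"

        @_machine.output()
        def lock(self):
            return "locked"

        # Transitions: a coin unlocks the turnstile, a push locks it again.
        locked.upon(coin, enter=unlocked, outputs=[unlock])
        unlocked.upon(push, enter=locked, outputs=[lock])

    turnstile = Turnstile()
    print(turnstile.coin())   # -> ['unlocked']
    print(turnstile.push())   # -> ['locked']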
|
|
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Multisize sliding window workload estimation functions.
"""
from contracts import contract
from neat.contracts_primitive import *
from neat.contracts_extra import *
from itertools import islice
from collections import deque
import logging
log = logging.getLogger(__name__)
@contract
def mean(data, window_size):
""" Get the data mean according to the window size.
:param data: A list of values.
:type data: list(number)
:param window_size: A window size.
:type window_size: int,>0
:return: The mean value.
:rtype: float
"""
return float(sum(data)) / window_size
@contract
def variance(data, window_size):
""" Get the data variance according to the window size.
:param data: A list of values.
:type data: list(number)
:param window_size: A window size.
:type window_size: int,>0
:return: The variance value.
:rtype: float
"""
m = mean(data, window_size)
return float(sum((x - m) ** 2 for x in data)) / (window_size - 1)
@contract
def acceptable_variance(probability, window_size):
""" Get the acceptable variance.
:param probability: The probability to use.
:type probability: number,>=0,<=1
:param window_size: A window size.
:type window_size: int,>0
:return: The acceptable variance.
:rtype: float
"""
return float(probability * (1 - probability)) / window_size
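# Worked example (illustrative, not part of the original module): for an
# estimated probability of 0.2 and a window size of 100, the acceptable
# variance is 0.2 * (1 - 0.2) / 100 = 0.0016.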
@contract
def estimate_probability(data, window_size, state):
""" Get the estimated probability.
:param data: A list of data values.
:type data: list(number)
:param window_size: The window size.
:type window_size: int,>0
:param state: The current state.
:type state: int,>=0
:return: The estimated probability.
:rtype: float,>=0
"""
return float(data.count(state)) / window_size
@contract
def update_request_windows(request_windows, previous_state, current_state):
""" Update and return the updated request windows.
:param request_windows: The previous request windows.
:type request_windows: list(deque)
:param previous_state: The previous state.
:type previous_state: int,>=0
:param current_state: The current state.
:type current_state: int,>=0
:return: The updated request windows.
:rtype: list(deque)
"""
request_windows[previous_state].append(current_state)
return request_windows
@contract
def update_estimate_windows(estimate_windows, request_windows,
previous_state):
""" Update and return the updated estimate windows.
:param estimate_windows: The previous estimate windows.
:type estimate_windows: list(list(dict))
:param request_windows: The current request windows.
:type request_windows: list(deque)
:param previous_state: The previous state.
:type previous_state: int,>=0
:return: The updated estimate windows.
:rtype: list(list(dict))
"""
request_window = request_windows[previous_state]
state_estimate_windows = estimate_windows[previous_state]
for state, estimate_window in enumerate(state_estimate_windows):
for window_size, estimates in estimate_window.items():
slice_from = len(request_window) - window_size
if slice_from < 0:
slice_from = 0
estimates.append(
estimate_probability(
list(islice(request_window, slice_from, None)),
window_size, state))
return estimate_windows
@contract
def update_variances(variances, estimate_windows, previous_state):
""" Updated and return the updated variances.
:param variances: The previous variances.
:type variances: list(list(dict))
:param estimate_windows: The current estimate windows.
:type estimate_windows: list(list(dict))
:param previous_state: The previous state.
:type previous_state: int,>=0
:return: The updated variances.
:rtype: list(list(dict))
"""
estimate_window = estimate_windows[previous_state]
for state, variance_map in enumerate(variances[previous_state]):
for window_size in variance_map:
estimates = estimate_window[state][window_size]
if len(estimates) < window_size:
variance_map[window_size] = 1.0
else:
variance_map[window_size] = variance(
list(estimates), window_size)
return variances
@contract
def update_acceptable_variances(acceptable_variances, estimate_windows, previous_state):
""" Update and return the updated acceptable variances.
:param acceptable_variances: The previous acceptable variances.
:type acceptable_variances: list(list(dict))
:param estimate_windows: The current estimate windows.
:type estimate_windows: list(list(dict))
:param previous_state: The previous state.
:type previous_state: int,>=0
:return: The updated acceptable variances.
:rtype: list(list(dict))
"""
estimate_window = estimate_windows[previous_state]
state_acc_variances = acceptable_variances[previous_state]
for state, acceptable_variance_map in enumerate(state_acc_variances):
for window_size in acceptable_variance_map:
estimates = estimate_window[state][window_size]
acceptable_variance_map[window_size] = acceptable_variance(
estimates[-1], window_size)
return acceptable_variances
@contract
def select_window(variances, acceptable_variances, window_sizes):
""" Select window sizes according to the acceptable variances.
:param variances: The variances.
:type variances: list(list(dict))
:param acceptable_variances: The acceptable variances.
:type acceptable_variances: list(list(dict))
:param window_sizes: The available window sizes.
:type window_sizes: list(int)
:return: The selected window sizes.
:rtype: list(list(int))
"""
n = len(variances)
selected_windows = []
for i in range(n):
selected_windows.append([])
for j in range(n):
selected_size = window_sizes[0]
for window_size in window_sizes:
if variances[i][j][window_size] > \
acceptable_variances[i][j][window_size]:
break
selected_size = window_size
selected_windows[i].append(selected_size)
return selected_windows
@contract
def select_best_estimates(estimate_windows, selected_windows):
""" Select the best estimates according to the selected windows.
:param estimate_windows: The estimate windows.
:type estimate_windows: list(list(dict))
:param selected_windows: The selected window sizes.
:type selected_windows: list(list(int))
:return: The selected best estimates.
:rtype: list(list(number))
"""
n = len(estimate_windows)
selected_estimates = []
for i in range(n):
selected_estimates.append([])
for j in range(n):
estimates = estimate_windows[i][j][selected_windows[i][j]]
if estimates:
selected_estimates[i].append(estimates[-1])
else:
selected_estimates[i].append(0.0)
return selected_estimates
@contract
def init_request_windows(number_of_states, max_window_size):
""" Initialize a request window data structure.
:param number_of_states: The number of states.
:type number_of_states: int,>0
:param max_window_size: The max size of the request windows.
:type max_window_size: int,>0
:return: The initialized request windows data structure.
:rtype: list(deque)
"""
return [deque([], max_window_size)
for _ in range(number_of_states)]
@contract
def init_variances(window_sizes, number_of_states):
""" Initialize a variances data structure.
:param window_sizes: The required window sizes.
:type window_sizes: list(int)
:param number_of_states: The number of states.
:type number_of_states: int,>0
:return: The initialized variances data structure.
:rtype: list(list(dict))
"""
variances = []
for i in range(number_of_states):
variances.append([])
for j in range(number_of_states):
variances[i].append(dict(zip(window_sizes,
len(window_sizes) * [1.0])))
return variances
@contract
def init_deque_structure(window_sizes, number_of_states):
""" Initialize a 3 level deque data structure.
:param window_sizes: The required window sizes.
:type window_sizes: list(int)
:param number_of_states: The number of states.
:type number_of_states: int,>0
:return: The initialized 3 level deque data structure.
:rtype: list(list(dict))
"""
structure = []
for i in range(number_of_states):
structure.append([])
for j in range(number_of_states):
structure[i].append(dict((size, deque([], size))
for size in window_sizes))
return structure
@contract
def init_selected_window_sizes(window_sizes, number_of_states):
""" Initialize a selected window sizes data structure.
:param window_sizes: The required window sizes.
:type window_sizes: list(int)
:param number_of_states: The number of states.
:type number_of_states: int,>0
:return: The initialized selected window sizes data structure.
:rtype: list(list(int))
"""
structure = []
for i in range(number_of_states):
structure.append([])
for j in range(number_of_states):
structure[i].append(window_sizes[0])
return structure
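# A minimal usage sketch (not part of the original module), run only when this
# file is executed directly: two states (0 and 1), window sizes 2 and 4, and a
# short synthetic state sequence fed through the multisize sliding windows.
if __name__ == "__main__":
    window_sizes = [2, 4]
    number_of_states = 2
    request_windows = init_request_windows(number_of_states, max(window_sizes))
    estimate_windows = init_deque_structure(window_sizes, number_of_states)
    variances = init_variances(window_sizes, number_of_states)
    acceptable = init_variances(window_sizes, number_of_states)
    previous_state = 0
    for current_state in [0, 1, 1, 0, 1, 1, 1, 0]:
        request_windows = update_request_windows(
            request_windows, previous_state, current_state)
        estimate_windows = update_estimate_windows(
            estimate_windows, request_windows, previous_state)
        variances = update_variances(
            variances, estimate_windows, previous_state)
        acceptable = update_acceptable_variances(
            acceptable, estimate_windows, previous_state)
        previous_state = current_state
    selected = select_window(variances, acceptable, window_sizes)
    # Transition probability estimates, one per (from_state, to_state) pair.
    print(select_best_estimates(estimate_windows, selected))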
|
|
"""Support for Template lights."""
import logging
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_WHITE_VALUE,
ENTITY_ID_FORMAT,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_WHITE_VALUE,
LightEntity,
)
from homeassistant.const import (
CONF_ENTITY_ID,
CONF_ENTITY_PICTURE_TEMPLATE,
CONF_FRIENDLY_NAME,
CONF_ICON_TEMPLATE,
CONF_LIGHTS,
CONF_UNIQUE_ID,
CONF_VALUE_TEMPLATE,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import callback
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.script import Script
from .const import CONF_AVAILABILITY_TEMPLATE
from .template_entity import TemplateEntity
_LOGGER = logging.getLogger(__name__)
_VALID_STATES = [STATE_ON, STATE_OFF, "true", "false"]
CONF_ON_ACTION = "turn_on"
CONF_OFF_ACTION = "turn_off"
CONF_LEVEL_ACTION = "set_level"
CONF_LEVEL_TEMPLATE = "level_template"
CONF_TEMPERATURE_TEMPLATE = "temperature_template"
CONF_TEMPERATURE_ACTION = "set_temperature"
CONF_COLOR_TEMPLATE = "color_template"
CONF_COLOR_ACTION = "set_color"
CONF_WHITE_VALUE_TEMPLATE = "white_value_template"
CONF_WHITE_VALUE_ACTION = "set_white_value"
LIGHT_SCHEMA = vol.All(
cv.deprecated(CONF_ENTITY_ID),
vol.Schema(
{
vol.Required(CONF_ON_ACTION): cv.SCRIPT_SCHEMA,
vol.Required(CONF_OFF_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_ICON_TEMPLATE): cv.template,
vol.Optional(CONF_ENTITY_PICTURE_TEMPLATE): cv.template,
vol.Optional(CONF_AVAILABILITY_TEMPLATE): cv.template,
vol.Optional(CONF_LEVEL_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(CONF_LEVEL_TEMPLATE): cv.template,
vol.Optional(CONF_FRIENDLY_NAME): cv.string,
vol.Optional(CONF_ENTITY_ID): cv.entity_ids,
vol.Optional(CONF_TEMPERATURE_TEMPLATE): cv.template,
vol.Optional(CONF_TEMPERATURE_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(CONF_COLOR_TEMPLATE): cv.template,
vol.Optional(CONF_COLOR_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(CONF_WHITE_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_WHITE_VALUE_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(CONF_UNIQUE_ID): cv.string,
}
),
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_LIGHTS): cv.schema_with_slug_keys(LIGHT_SCHEMA)}
)
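# An illustrative YAML configuration for this platform (a sketch only; the
# entity and script names are assumptions, not part of this module).  The
# ``brightness`` variable passed to ``set_level`` matches the variable this
# platform hands to the level script below.
#
#   light:
#     - platform: template
#       lights:
#         theater_lights:
#           friendly_name: "Theater Lights"
#           value_template: "{{ states('sensor.theater_brightness') | int > 0 }}"
#           level_template: "{{ states('sensor.theater_brightness') | int }}"
#           turn_on:
#             service: script.theater_lights_on
#           turn_off:
#             service: script.theater_lights_off
#           set_level:
#             service: script.theater_lights_level
#             data:
#               brightness: "{{ brightness }}"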
async def _async_create_entities(hass, config):
"""Create the Template Lights."""
lights = []
for device, device_config in config[CONF_LIGHTS].items():
friendly_name = device_config.get(CONF_FRIENDLY_NAME, device)
state_template = device_config.get(CONF_VALUE_TEMPLATE)
icon_template = device_config.get(CONF_ICON_TEMPLATE)
entity_picture_template = device_config.get(CONF_ENTITY_PICTURE_TEMPLATE)
availability_template = device_config.get(CONF_AVAILABILITY_TEMPLATE)
unique_id = device_config.get(CONF_UNIQUE_ID)
on_action = device_config[CONF_ON_ACTION]
off_action = device_config[CONF_OFF_ACTION]
level_action = device_config.get(CONF_LEVEL_ACTION)
level_template = device_config.get(CONF_LEVEL_TEMPLATE)
temperature_action = device_config.get(CONF_TEMPERATURE_ACTION)
temperature_template = device_config.get(CONF_TEMPERATURE_TEMPLATE)
color_action = device_config.get(CONF_COLOR_ACTION)
color_template = device_config.get(CONF_COLOR_TEMPLATE)
white_value_action = device_config.get(CONF_WHITE_VALUE_ACTION)
white_value_template = device_config.get(CONF_WHITE_VALUE_TEMPLATE)
lights.append(
LightTemplate(
hass,
device,
friendly_name,
state_template,
icon_template,
entity_picture_template,
availability_template,
on_action,
off_action,
level_action,
level_template,
temperature_action,
temperature_template,
color_action,
color_template,
white_value_action,
white_value_template,
unique_id,
)
)
return lights
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the template lights."""
async_add_entities(await _async_create_entities(hass, config))
class LightTemplate(TemplateEntity, LightEntity):
"""Representation of a templated Light, including dimmable."""
def __init__(
self,
hass,
device_id,
friendly_name,
state_template,
icon_template,
entity_picture_template,
availability_template,
on_action,
off_action,
level_action,
level_template,
temperature_action,
temperature_template,
color_action,
color_template,
white_value_action,
white_value_template,
unique_id,
):
"""Initialize the light."""
super().__init__(
availability_template=availability_template,
icon_template=icon_template,
entity_picture_template=entity_picture_template,
)
self.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, device_id, hass=hass
)
self._name = friendly_name
self._template = state_template
domain = __name__.split(".")[-2]
self._on_script = Script(hass, on_action, friendly_name, domain)
self._off_script = Script(hass, off_action, friendly_name, domain)
self._level_script = None
if level_action is not None:
self._level_script = Script(hass, level_action, friendly_name, domain)
self._level_template = level_template
self._temperature_script = None
if temperature_action is not None:
self._temperature_script = Script(
hass, temperature_action, friendly_name, domain
)
self._temperature_template = temperature_template
self._color_script = None
if color_action is not None:
self._color_script = Script(hass, color_action, friendly_name, domain)
self._color_template = color_template
self._white_value_script = None
if white_value_action is not None:
self._white_value_script = Script(
hass, white_value_action, friendly_name, domain
)
self._white_value_template = white_value_template
self._state = False
self._brightness = None
self._temperature = None
self._color = None
self._white_value = None
self._unique_id = unique_id
@property
def brightness(self):
"""Return the brightness of the light."""
return self._brightness
@property
def color_temp(self):
"""Return the CT color value in mireds."""
return self._temperature
@property
def white_value(self):
"""Return the white value."""
return self._white_value
@property
def hs_color(self):
"""Return the hue and saturation color value [float, float]."""
return self._color
@property
def name(self):
"""Return the display name of this light."""
return self._name
@property
def unique_id(self):
"""Return the unique id of this light."""
return self._unique_id
@property
def supported_features(self):
"""Flag supported features."""
supported_features = 0
if self._level_script is not None:
supported_features |= SUPPORT_BRIGHTNESS
if self._temperature_script is not None:
supported_features |= SUPPORT_COLOR_TEMP
if self._color_script is not None:
supported_features |= SUPPORT_COLOR
if self._white_value_script is not None:
supported_features |= SUPPORT_WHITE_VALUE
return supported_features
@property
def is_on(self):
"""Return true if device is on."""
return self._state
async def async_added_to_hass(self):
"""Register callbacks."""
if self._template:
self.add_template_attribute(
"_state", self._template, None, self._update_state
)
if self._level_template:
self.add_template_attribute(
"_brightness",
self._level_template,
None,
self._update_brightness,
none_on_template_error=True,
)
if self._temperature_template:
self.add_template_attribute(
"_temperature",
self._temperature_template,
None,
self._update_temperature,
none_on_template_error=True,
)
if self._color_template:
self.add_template_attribute(
"_color",
self._color_template,
None,
self._update_color,
none_on_template_error=True,
)
if self._white_value_template:
self.add_template_attribute(
"_white_value",
self._white_value_template,
None,
self._update_white_value,
none_on_template_error=True,
)
await super().async_added_to_hass()
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
optimistic_set = False
# set optimistic states
if self._template is None:
self._state = True
optimistic_set = True
if self._level_template is None and ATTR_BRIGHTNESS in kwargs:
_LOGGER.info(
"Optimistically setting brightness to %s", kwargs[ATTR_BRIGHTNESS]
)
self._brightness = kwargs[ATTR_BRIGHTNESS]
optimistic_set = True
if self._white_value_template is None and ATTR_WHITE_VALUE in kwargs:
_LOGGER.info(
"Optimistically setting white value to %s", kwargs[ATTR_WHITE_VALUE]
)
self._white_value = kwargs[ATTR_WHITE_VALUE]
optimistic_set = True
if self._temperature_template is None and ATTR_COLOR_TEMP in kwargs:
_LOGGER.info(
"Optimistically setting color temperature to %s",
kwargs[ATTR_COLOR_TEMP],
)
self._temperature = kwargs[ATTR_COLOR_TEMP]
optimistic_set = True
if ATTR_BRIGHTNESS in kwargs and self._level_script:
await self._level_script.async_run(
{"brightness": kwargs[ATTR_BRIGHTNESS]}, context=self._context
)
elif ATTR_COLOR_TEMP in kwargs and self._temperature_script:
await self._temperature_script.async_run(
{"color_temp": kwargs[ATTR_COLOR_TEMP]}, context=self._context
)
elif ATTR_WHITE_VALUE in kwargs and self._white_value_script:
await self._white_value_script.async_run(
{"white_value": kwargs[ATTR_WHITE_VALUE]}, context=self._context
)
elif ATTR_HS_COLOR in kwargs and self._color_script:
hs_value = kwargs[ATTR_HS_COLOR]
await self._color_script.async_run(
{"hs": hs_value, "h": int(hs_value[0]), "s": int(hs_value[1])},
context=self._context,
)
else:
await self._on_script.async_run(context=self._context)
if optimistic_set:
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the light off."""
await self._off_script.async_run(context=self._context)
if self._template is None:
self._state = False
self.async_write_ha_state()
@callback
def _update_brightness(self, brightness):
"""Update the brightness from the template."""
try:
if brightness in ("None", ""):
self._brightness = None
return
if 0 <= int(brightness) <= 255:
self._brightness = int(brightness)
else:
_LOGGER.error(
"Received invalid brightness : %s. Expected: 0-255", brightness
)
self._brightness = None
except ValueError:
_LOGGER.error(
"Template must supply an integer brightness from 0-255, or 'None'",
exc_info=True,
)
self._brightness = None
@callback
def _update_white_value(self, white_value):
"""Update the white value from the template."""
try:
if white_value in ("None", ""):
self._white_value = None
return
if 0 <= int(white_value) <= 255:
self._white_value = int(white_value)
else:
_LOGGER.error(
"Received invalid white value: %s. Expected: 0-255", white_value
)
self._white_value = None
except ValueError:
_LOGGER.error(
"Template must supply an integer white_value from 0-255, or 'None'",
exc_info=True,
)
self._white_value = None
@callback
def _update_state(self, result):
"""Update the state from the template."""
if isinstance(result, TemplateError):
# This behavior is legacy
self._state = False
if not self._availability_template:
self._available = True
return
if isinstance(result, bool):
self._state = result
return
state = str(result).lower()
if state in _VALID_STATES:
self._state = state in ("true", STATE_ON)
return
_LOGGER.error(
"Received invalid light is_on state: %s. Expected: %s",
state,
", ".join(_VALID_STATES),
)
self._state = None
@callback
def _update_temperature(self, render):
"""Update the temperature from the template."""
try:
if render in ("None", ""):
self._temperature = None
return
temperature = int(render)
if self.min_mireds <= temperature <= self.max_mireds:
self._temperature = temperature
else:
_LOGGER.error(
"Received invalid color temperature : %s. Expected: %s-%s",
temperature,
self.min_mireds,
self.max_mireds,
)
self._temperature = None
except ValueError:
_LOGGER.error(
"Template must supply an integer temperature within the range for this light, or 'None'",
exc_info=True,
)
self._temperature = None
@callback
def _update_color(self, render):
"""Update the hs_color from the template."""
h_str = s_str = None
if isinstance(render, str):
if render in ("None", ""):
self._color = None
return
h_str, s_str = map(
float, render.replace("(", "").replace(")", "").split(",", 1)
)
elif isinstance(render, (list, tuple)) and len(render) == 2:
h_str, s_str = render
if (
h_str is not None
and s_str is not None
and 0 <= h_str <= 360
and 0 <= s_str <= 100
):
self._color = (h_str, s_str)
elif h_str is not None and s_str is not None:
_LOGGER.error(
"Received invalid hs_color : (%s, %s). Expected: (0-360, 0-100)",
h_str,
s_str,
)
self._color = None
else:
_LOGGER.error("Received invalid hs_color : (%s)", render)
self._color = None
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import re
import select
import subprocess
import time
import uuid
from apiclient.discovery import build
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
from airflow.utils.log.logging_mixin import LoggingMixin
# This is the default location
# https://cloud.google.com/dataflow/pipelines/specifying-exec-params
DEFAULT_DATAFLOW_LOCATION = 'us-central1'
class _DataflowJob(LoggingMixin):
def __init__(self, dataflow, project_number, name, location, poll_sleep=10,
job_id=None):
self._dataflow = dataflow
self._project_number = project_number
self._job_name = name
self._job_location = location
self._job_id = job_id
self._job = self._get_job()
self._poll_sleep = poll_sleep
def _get_job_id_from_name(self):
jobs = self._dataflow.projects().locations().jobs().list(
projectId=self._project_number,
location=self._job_location
).execute(num_retries=5)
for job in jobs['jobs']:
if job['name'] == self._job_name:
self._job_id = job['id']
return job
return None
def _get_job(self):
if self._job_id:
job = self._dataflow.projects().locations().jobs().get(
projectId=self._project_number,
location=self._job_location,
jobId=self._job_id).execute(num_retries=5)
elif self._job_name:
job = self._get_job_id_from_name()
else:
raise Exception('Missing both dataflow job ID and name.')
if job and 'currentState' in job:
self.log.info(
'Google Cloud DataFlow job %s is %s',
job['name'], job['currentState']
)
elif job:
self.log.info(
'Google Cloud DataFlow with job_id %s has name %s',
self._job_id, job['name']
)
else:
self.log.info(
                'Google Cloud DataFlow job not available yet...'
)
return job
def wait_for_done(self):
while True:
if self._job and 'currentState' in self._job:
if 'JOB_STATE_DONE' == self._job['currentState']:
return True
elif 'JOB_STATE_RUNNING' == self._job['currentState'] and \
'JOB_TYPE_STREAMING' == self._job['type']:
return True
elif 'JOB_STATE_FAILED' == self._job['currentState']:
raise Exception("Google Cloud Dataflow job {} has failed.".format(
self._job['name']))
elif 'JOB_STATE_CANCELLED' == self._job['currentState']:
raise Exception("Google Cloud Dataflow job {} was cancelled.".format(
self._job['name']))
elif 'JOB_STATE_RUNNING' == self._job['currentState']:
time.sleep(self._poll_sleep)
elif 'JOB_STATE_PENDING' == self._job['currentState']:
time.sleep(15)
else:
self.log.debug(str(self._job))
raise Exception(
"Google Cloud Dataflow job {} was unknown state: {}".format(
self._job['name'], self._job['currentState']))
else:
time.sleep(15)
self._job = self._get_job()
def get(self):
return self._job
class _Dataflow(LoggingMixin):
def __init__(self, cmd):
self.log.info("Running command: %s", ' '.join(cmd))
self._proc = subprocess.Popen(
cmd,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True)
def _line(self, fd):
if fd == self._proc.stderr.fileno():
line = b''.join(self._proc.stderr.readlines())
if line:
self.log.warning(line[:-1])
return line
if fd == self._proc.stdout.fileno():
line = b''.join(self._proc.stdout.readlines())
if line:
self.log.info(line[:-1])
return line
@staticmethod
def _extract_job(line):
# Job id info: https://goo.gl/SE29y9.
job_id_pattern = re.compile(
b'.*console.cloud.google.com/dataflow.*/jobs/([a-z|0-9|A-Z|\-|\_]+).*')
        matched_job = job_id_pattern.search(line or b'')
if matched_job:
return matched_job.group(1).decode()
def wait_for_done(self):
reads = [self._proc.stderr.fileno(), self._proc.stdout.fileno()]
self.log.info("Start waiting for DataFlow process to complete.")
job_id = None
        # Make sure logs are processed regardless of whether the subprocess
        # is terminated.
process_ends = False
while True:
ret = select.select(reads, [], [], 5)
if ret is not None:
for fd in ret[0]:
line = self._line(fd)
if line:
job_id = job_id or self._extract_job(line)
else:
self.log.info("Waiting for DataFlow process to complete.")
if process_ends:
break
if self._proc.poll() is not None:
                # Mark process completion but allow its outputs to be consumed.
                process_ends = True
        if self._proc.returncode != 0:
raise Exception("DataFlow failed with return code {}".format(
self._proc.returncode))
return job_id
class DataFlowHook(GoogleCloudBaseHook):
def __init__(self,
gcp_conn_id='google_cloud_default',
delegate_to=None,
poll_sleep=10):
self.poll_sleep = poll_sleep
super(DataFlowHook, self).__init__(gcp_conn_id, delegate_to)
def get_conn(self):
"""
Returns a Google Cloud Dataflow service object.
"""
http_authorized = self._authorize()
return build(
'dataflow', 'v1b3', http=http_authorized, cache_discovery=False)
def _start_dataflow(self, variables, name, command_prefix, label_formatter):
variables = self._set_variables(variables)
cmd = command_prefix + self._build_cmd(variables, label_formatter)
job_id = _Dataflow(cmd).wait_for_done()
_DataflowJob(self.get_conn(), variables['project'], name,
variables['region'],
self.poll_sleep, job_id).wait_for_done()
@staticmethod
def _set_variables(variables):
if variables['project'] is None:
raise Exception('Project not specified')
if 'region' not in variables.keys():
variables['region'] = DEFAULT_DATAFLOW_LOCATION
return variables
def start_java_dataflow(self, job_name, variables, dataflow, job_class=None,
append_job_name=True):
name = self._build_dataflow_job_name(job_name, append_job_name)
variables['jobName'] = name
def label_formatter(labels_dict):
return ['--labels={}'.format(
json.dumps(labels_dict).replace(' ', ''))]
command_prefix = (["java", "-cp", dataflow, job_class] if job_class
else ["java", "-jar", dataflow])
self._start_dataflow(variables, name, command_prefix, label_formatter)
def start_template_dataflow(self, job_name, variables, parameters, dataflow_template,
append_job_name=True):
variables = self._set_variables(variables)
name = self._build_dataflow_job_name(job_name, append_job_name)
self._start_template_dataflow(
name, variables, parameters, dataflow_template)
def start_python_dataflow(self, job_name, variables, dataflow, py_options,
append_job_name=True):
name = self._build_dataflow_job_name(job_name, append_job_name)
variables['job_name'] = name
def label_formatter(labels_dict):
return ['--labels={}={}'.format(key, value)
for key, value in labels_dict.items()]
self._start_dataflow(variables, name, ["python2"] + py_options + [dataflow],
label_formatter)
@staticmethod
def _build_dataflow_job_name(job_name, append_job_name=True):
base_job_name = str(job_name).replace('_', '-')
if not re.match(r"^[a-z]([-a-z0-9]*[a-z0-9])?$", base_job_name):
raise ValueError(
                'Invalid job_name ({}); the name must consist of '
                'only the characters [-a-z0-9], starting with a '
                'letter and ending with a letter or number.'.format(base_job_name))
if append_job_name:
safe_job_name = base_job_name + "-" + str(uuid.uuid4())[:8]
else:
safe_job_name = base_job_name
return safe_job_name
@staticmethod
def _build_cmd(variables, label_formatter):
command = ["--runner=DataflowRunner"]
if variables is not None:
for attr, value in variables.items():
if attr == 'labels':
command += label_formatter(value)
elif value is None or value.__len__() < 1:
command.append("--" + attr)
else:
command.append("--" + attr + "=" + value)
return command
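    # Illustrative example (not part of the original hook): for
    # variables={'project': 'my-project', 'staging_location': 'gs://bkt/tmp',
    #            'labels': {'airflow-version': 'v1-10'}}
    # and the Java label_formatter defined in start_java_dataflow above,
    # _build_cmd returns
    #   ["--runner=DataflowRunner", "--project=my-project",
    #    "--staging_location=gs://bkt/tmp",
    #    '--labels={"airflow-version":"v1-10"}']
    # (dictionary ordering may vary).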
def _start_template_dataflow(self, name, variables, parameters,
dataflow_template):
# Builds RuntimeEnvironment from variables dictionary
# https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment
environment = {}
for key in ['maxWorkers', 'zone', 'serviceAccountEmail', 'tempLocation',
'bypassTempDirValidation', 'machineType']:
if key in variables:
environment.update({key: variables[key]})
body = {"jobName": name,
"parameters": parameters,
"environment": environment}
service = self.get_conn()
request = service.projects().locations().templates().launch(
projectId=variables['project'],
location=variables['region'],
gcsPath=dataflow_template,
body=body
)
response = request.execute()
variables = self._set_variables(variables)
_DataflowJob(self.get_conn(), variables['project'], name, variables['region'],
self.poll_sleep).wait_for_done()
return response
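# A minimal usage sketch (not part of the original module; the connection id,
# bucket and file paths are assumptions for illustration):
#
#     hook = DataFlowHook(gcp_conn_id='google_cloud_default', poll_sleep=10)
#     hook.start_python_dataflow(
#         job_name='example-wordcount',
#         variables={'project': 'my-project',
#                    'region': 'us-central1',
#                    'staging_location': 'gs://my-bucket/staging',
#                    'temp_location': 'gs://my-bucket/temp'},
#         dataflow='/path/to/wordcount.py',
#         py_options=[])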
|
|
import asyncio
from binascii import hexlify
from itertools import chain
import typing
import logging
from lbry.dht import constants
from lbry.dht.error import RemoteException, TransportNotConnected
from lbry.dht.protocol.distance import Distance
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from lbry.dht.protocol.routing_table import TreeRoutingTable
from lbry.dht.protocol.protocol import KademliaProtocol
from lbry.dht.peer import PeerManager, KademliaPeer
log = logging.getLogger(__name__)
class FindResponse:
@property
def found(self) -> bool:
raise NotImplementedError()
def get_close_triples(self) -> typing.List[typing.Tuple[bytes, str, int]]:
raise NotImplementedError()
class FindNodeResponse(FindResponse):
def __init__(self, key: bytes, close_triples: typing.List[typing.Tuple[bytes, str, int]]):
self.key = key
self.close_triples = close_triples
@property
def found(self) -> bool:
return self.key in [triple[0] for triple in self.close_triples]
def get_close_triples(self) -> typing.List[typing.Tuple[bytes, str, int]]:
return self.close_triples
class FindValueResponse(FindResponse):
def __init__(self, key: bytes, result_dict: typing.Dict):
self.key = key
self.token = result_dict[b'token']
self.close_triples: typing.List[typing.Tuple[bytes, bytes, int]] = result_dict.get(b'contacts', [])
self.found_compact_addresses = result_dict.get(key, [])
@property
def found(self) -> bool:
return len(self.found_compact_addresses) > 0
def get_close_triples(self) -> typing.List[typing.Tuple[bytes, str, int]]:
return [(node_id, address.decode(), port) for node_id, address, port in self.close_triples]
def get_shortlist(routing_table: 'TreeRoutingTable', key: bytes,
shortlist: typing.Optional[typing.List['KademliaPeer']]) -> typing.List['KademliaPeer']:
"""
If not provided, initialize the shortlist of peers to probe to the (up to) k closest peers in the routing table
:param routing_table: a TreeRoutingTable
:param key: a 48 byte hash
:param shortlist: optional manually provided shortlist, this is done during bootstrapping when there are no
peers in the routing table. During bootstrap the shortlist is set to be the seed nodes.
"""
if len(key) != constants.hash_length:
raise ValueError("invalid key length: %i" % len(key))
return shortlist or routing_table.find_close_peers(key)
class IterativeFinder:
def __init__(self, loop: asyncio.BaseEventLoop, peer_manager: 'PeerManager',
routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.k,
exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
if len(key) != constants.hash_length:
raise ValueError("invalid key length: %i" % len(key))
self.loop = loop
self.peer_manager = peer_manager
self.routing_table = routing_table
self.protocol = protocol
self.key = key
self.bottom_out_limit = bottom_out_limit
self.max_results = max_results
self.exclude = exclude or []
self.active: typing.Set['KademliaPeer'] = set()
self.contacted: typing.Set['KademliaPeer'] = set()
self.distance = Distance(key)
self.closest_peer: typing.Optional['KademliaPeer'] = None
self.prev_closest_peer: typing.Optional['KademliaPeer'] = None
self.iteration_queue = asyncio.Queue(loop=self.loop)
self.running_probes: typing.Set[asyncio.Task] = set()
self.iteration_count = 0
self.bottom_out_count = 0
self.running = False
self.tasks: typing.List[asyncio.Task] = []
self.delayed_calls: typing.List[asyncio.Handle] = []
for peer in get_shortlist(routing_table, key, shortlist):
if peer.node_id:
self._add_active(peer)
else:
# seed nodes
self._schedule_probe(peer)
async def send_probe(self, peer: 'KademliaPeer') -> FindResponse:
"""
Send the rpc request to the peer and return an object with the FindResponse interface
"""
raise NotImplementedError()
def search_exhausted(self):
"""
        This method ends the iterator because there are no more peers left to
        contact. Override to provide final results.
"""
self.iteration_queue.put_nowait(None)
def check_result_ready(self, response: FindResponse):
"""
Called after adding peers from an rpc result to the shortlist.
This method is responsible for putting a result for the generator into the Queue
"""
raise NotImplementedError()
def get_initial_result(self) -> typing.List['KademliaPeer']:
"""
Get an initial or cached result to be put into the Queue. Used for findValue requests where the blob
has peers in the local data store of blobs announced to us
"""
return []
def _is_closer(self, peer: 'KademliaPeer') -> bool:
return not self.closest_peer or self.distance.is_closer(peer.node_id, self.closest_peer.node_id)
def _add_active(self, peer):
if peer not in self.active and peer.node_id and peer.node_id != self.protocol.node_id:
self.active.add(peer)
if self._is_closer(peer):
self.prev_closest_peer = self.closest_peer
self.closest_peer = peer
async def _handle_probe_result(self, peer: 'KademliaPeer', response: FindResponse):
self._add_active(peer)
for contact_triple in response.get_close_triples():
node_id, address, udp_port = contact_triple
self._add_active(self.peer_manager.get_kademlia_peer(node_id, address, udp_port))
self.check_result_ready(response)
async def _send_probe(self, peer: 'KademliaPeer'):
try:
response = await self.send_probe(peer)
except asyncio.TimeoutError:
self.active.discard(peer)
return
except ValueError as err:
log.warning(str(err))
self.active.discard(peer)
return
except TransportNotConnected:
return self.aclose()
except RemoteException:
return
return await self._handle_probe_result(peer, response)
async def _search_round(self):
"""
Send up to constants.alpha (5) probes to closest active peers
"""
added = 0
to_probe = list(self.active - self.contacted)
        to_probe.sort(key=lambda peer: self.distance(peer.node_id))
for peer in to_probe:
if added >= constants.alpha:
break
origin_address = (peer.address, peer.udp_port)
if origin_address in self.exclude:
continue
if peer.node_id == self.protocol.node_id:
continue
if origin_address == (self.protocol.external_ip, self.protocol.udp_port):
continue
self._schedule_probe(peer)
added += 1
log.debug("running %d probes", len(self.running_probes))
if not added and not self.running_probes:
log.debug("search for %s exhausted", hexlify(self.key)[:8])
self.search_exhausted()
def _schedule_probe(self, peer: 'KademliaPeer'):
self.contacted.add(peer)
t = self.loop.create_task(self._send_probe(peer))
def callback(_):
self.running_probes.difference_update({
probe for probe in self.running_probes if probe.done() or probe == t
})
if not self.running_probes:
self.tasks.append(self.loop.create_task(self._search_task(0.0)))
t.add_done_callback(callback)
self.running_probes.add(t)
async def _search_task(self, delay: typing.Optional[float] = constants.iterative_lookup_delay):
try:
if self.running:
await self._search_round()
if self.running:
self.delayed_calls.append(self.loop.call_later(delay, self._search))
except (asyncio.CancelledError, StopAsyncIteration, TransportNotConnected):
if self.running:
self.loop.call_soon(self.aclose)
def _search(self):
self.tasks.append(self.loop.create_task(self._search_task()))
def __aiter__(self):
if self.running:
raise Exception("already running")
self.running = True
self._search()
return self
async def __anext__(self) -> typing.List['KademliaPeer']:
try:
if self.iteration_count == 0:
result = self.get_initial_result() or await self.iteration_queue.get()
else:
result = await self.iteration_queue.get()
if not result:
raise StopAsyncIteration
self.iteration_count += 1
return result
except (asyncio.CancelledError, StopAsyncIteration):
self.loop.call_soon(self.aclose)
raise
def aclose(self):
self.running = False
self.iteration_queue.put_nowait(None)
for task in chain(self.tasks, self.running_probes, self.delayed_calls):
task.cancel()
self.tasks.clear()
self.running_probes.clear()
self.delayed_calls.clear()
class IterativeNodeFinder(IterativeFinder):
def __init__(self, loop: asyncio.BaseEventLoop, peer_manager: 'PeerManager',
routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.k,
exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
super().__init__(loop, peer_manager, routing_table, protocol, key, bottom_out_limit, max_results, exclude,
shortlist)
self.yielded_peers: typing.Set['KademliaPeer'] = set()
async def send_probe(self, peer: 'KademliaPeer') -> FindNodeResponse:
log.debug("probing %s:%d %s", peer.address, peer.udp_port, hexlify(peer.node_id)[:8] if peer.node_id else '')
response = await self.protocol.get_rpc_peer(peer).find_node(self.key)
return FindNodeResponse(self.key, response)
def search_exhausted(self):
self.put_result(self.active, finish=True)
def put_result(self, from_iter: typing.Iterable['KademliaPeer'], finish=False):
not_yet_yielded = [
peer for peer in from_iter
if peer not in self.yielded_peers
and peer.node_id != self.protocol.node_id
and self.peer_manager.peer_is_good(peer) is not False
]
not_yet_yielded.sort(key=lambda peer: self.distance(peer.node_id))
to_yield = not_yet_yielded[:min(constants.k, len(not_yet_yielded))]
if to_yield:
self.yielded_peers.update(to_yield)
self.iteration_queue.put_nowait(to_yield)
if finish:
self.iteration_queue.put_nowait(None)
def check_result_ready(self, response: FindNodeResponse):
found = response.found and self.key != self.protocol.node_id
if found:
log.debug("found")
return self.put_result(self.active, finish=True)
if self.prev_closest_peer and self.closest_peer and not self._is_closer(self.prev_closest_peer):
# log.info("improving, %i %i %i %i %i", len(self.shortlist), len(self.active), len(self.contacted),
# self.bottom_out_count, self.iteration_count)
self.bottom_out_count = 0
elif self.prev_closest_peer and self.closest_peer:
self.bottom_out_count += 1
log.info("bottom out %i %i %i", len(self.active), len(self.contacted), self.bottom_out_count)
if self.bottom_out_count >= self.bottom_out_limit or self.iteration_count >= self.bottom_out_limit:
log.info("limit hit")
self.put_result(self.active, True)
class IterativeValueFinder(IterativeFinder):
def __init__(self, loop: asyncio.BaseEventLoop, peer_manager: 'PeerManager',
routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.k,
exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
super().__init__(loop, peer_manager, routing_table, protocol, key, bottom_out_limit, max_results, exclude,
shortlist)
self.blob_peers: typing.Set['KademliaPeer'] = set()
async def send_probe(self, peer: 'KademliaPeer') -> FindValueResponse:
response = await self.protocol.get_rpc_peer(peer).find_value(self.key)
return FindValueResponse(self.key, response)
def check_result_ready(self, response: FindValueResponse):
if response.found:
blob_peers = [self.peer_manager.decode_tcp_peer_from_compact_address(compact_addr)
for compact_addr in response.found_compact_addresses]
to_yield = []
self.bottom_out_count = 0
for blob_peer in blob_peers:
if blob_peer not in self.blob_peers:
self.blob_peers.add(blob_peer)
to_yield.append(blob_peer)
if to_yield:
# log.info("found %i new peers for blob", len(to_yield))
self.iteration_queue.put_nowait(to_yield)
# if self.max_results and len(self.blob_peers) >= self.max_results:
# log.info("enough blob peers found")
# if not self.finished.is_set():
# self.finished.set()
elif self.prev_closest_peer and self.closest_peer:
self.bottom_out_count += 1
if self.bottom_out_count >= self.bottom_out_limit:
log.info("blob peer search bottomed out")
self.iteration_queue.put_nowait(None)
def get_initial_result(self) -> typing.List['KademliaPeer']:
if self.protocol.data_store.has_peers_for_blob(self.key):
return self.protocol.data_store.get_peers_for_blob(self.key)
return []
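# A minimal usage sketch (not part of the original module; ``node_protocol``
# and its ``peer_manager``/``routing_table`` attributes are assumptions for
# illustration).  IterativeValueFinder is an async iterator: each iteration
# yields the newly discovered peers for the blob.
#
#     async def find_blob_peers(node_protocol: 'KademliaProtocol', blob_hash: bytes):
#         finder = IterativeValueFinder(
#             asyncio.get_event_loop(), node_protocol.peer_manager,
#             node_protocol.routing_table, node_protocol, blob_hash)
#         async for peers in finder:
#             log.info("found %d peers for %s", len(peers), hexlify(blob_hash)[:8])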
|
|
import os
import sys
import traceback
import warnings
from os.path import join
import boto
import nibabel
import numpy as np
import pandas as pd
from boto.s3.key import Key
from nilearn.datasets.utils import _fetch_file
from sklearn.datasets.base import Bunch
TASK_LIST = ['EMOTION', 'WM', 'MOTOR', 'RELATIONAL',
'GAMBLING', 'SOCIAL', 'LANGUAGE']
EVS = {'EMOTION': {'EMOTION_Stats.csv',
'Sync.txt',
'fear.txt',
'neut.txt'},
'GAMBLING': {
'GAMBLING_Stats.csv',
'Sync.txt',
'loss.txt',
'loss_event.txt',
'neut_event.txt',
'win.txt',
'win_event.txt',
},
'LANGUAGE': {
'LANGUAGE_Stats.csv',
'Sync.txt',
'cue.txt',
'math.txt',
'present_math.txt',
'present_story.txt',
'question_math.txt',
'question_story.txt',
'response_math.txt',
'response_story.txt',
'story.txt',
},
'MOTOR': {
'Sync.txt',
'cue.txt',
'lf.txt',
'lh.txt',
'rf.txt',
'rh.txt',
't.txt',
},
'RELATIONAL': {
'RELATIONAL_Stats.csv',
'Sync.txt',
'error.txt',
'match.txt',
'relation.txt',
},
'SOCIAL': {
'SOCIAL_Stats.csv',
'Sync.txt',
'mental.txt',
'mental_resp.txt',
'other_resp.txt',
'rnd.txt',
},
'WM': {
'0bk_body.txt',
'0bk_cor.txt',
'0bk_err.txt',
'0bk_faces.txt',
'0bk_nlr.txt',
'0bk_places.txt',
'0bk_tools.txt',
'2bk_body.txt',
'2bk_cor.txt',
'2bk_err.txt',
'2bk_faces.txt',
'2bk_nlr.txt',
'2bk_places.txt',
'2bk_tools.txt',
'Sync.txt',
'WM_Stats.csv',
'all_bk_cor.txt',
'all_bk_err.txt'}
}
CONTRASTS = [["WM", 1, "2BK_BODY"],
["WM", 2, "2BK_FACE"],
["WM", 3, "2BK_PLACE"],
["WM", 4, "2BK_TOOL"],
["WM", 5, "0BK_BODY"],
["WM", 6, "0BK_FACE"],
["WM", 7, "0BK_PLACE"],
["WM", 8, "0BK_TOOL"],
["WM", 9, "2BK"],
["WM", 10, "0BK"],
["WM", 11, "2BK-0BK"],
["WM", 12, "neg_2BK"],
["WM", 13, "neg_0BK"],
["WM", 14, "0BK-2BK"],
["WM", 15, "BODY"],
["WM", 16, "FACE"],
["WM", 17, "PLACE"],
["WM", 18, "TOOL"],
["WM", 19, "BODY-AVG"],
["WM", 20, "FACE-AVG"],
["WM", 21, "PLACE-AVG"],
["WM", 22, "TOOL-AVG"],
["WM", 23, "neg_BODY"],
["WM", 24, "neg_FACE"],
["WM", 25, "neg_PLACE"],
["WM", 26, "neg_TOOL"],
["WM", 27, "AVG-BODY"],
["WM", 28, "AVG-FACE"],
["WM", 29, "AVG-PLACE"],
["WM", 30, "AVG-TOOL"],
["GAMBLING", 1, "PUNISH"],
["GAMBLING", 2, "REWARD"],
["GAMBLING", 3, "PUNISH-REWARD"],
["GAMBLING", 4, "neg_PUNISH"],
["GAMBLING", 5, "neg_REWARD"],
["GAMBLING", 6, "REWARD-PUNISH"],
["MOTOR", 1, "CUE"],
["MOTOR", 2, "LF"],
["MOTOR", 3, "LH"],
["MOTOR", 4, "RF"],
["MOTOR", 5, "RH"],
["MOTOR", 6, "T"],
["MOTOR", 7, "AVG"],
["MOTOR", 8, "CUE-AVG"],
["MOTOR", 9, "LF-AVG"],
["MOTOR", 10, "LH-AVG"],
["MOTOR", 11, "RF-AVG"],
["MOTOR", 12, "RH-AVG"],
["MOTOR", 13, "T-AVG"],
["MOTOR", 14, "neg_CUE"],
["MOTOR", 15, "neg_LF"],
["MOTOR", 16, "neg_LH"],
["MOTOR", 17, "neg_RF"],
["MOTOR", 18, "neg_RH"],
["MOTOR", 19, "neg_T"],
["MOTOR", 20, "neg_AVG"],
["MOTOR", 21, "AVG-CUE"],
["MOTOR", 22, "AVG-LF"],
["MOTOR", 23, "AVG-LH"],
["MOTOR", 24, "AVG-RF"],
["MOTOR", 25, "AVG-RH"],
["MOTOR", 26, "AVG-T"],
["LANGUAGE", 1, "MATH"],
["LANGUAGE", 2, "STORY"],
["LANGUAGE", 3, "MATH-STORY"],
["LANGUAGE", 4, "STORY-MATH"],
["LANGUAGE", 5, "neg_MATH"],
["LANGUAGE", 6, "neg_STORY"],
["SOCIAL", 1, "RANDOM"],
["SOCIAL", 2, "TOM"],
["SOCIAL", 3, "RANDOM-TOM"],
["SOCIAL", 4, "neg_RANDOM"],
["SOCIAL", 5, "neg_TOM"],
["SOCIAL", 6, "TOM-RANDOM"],
["RELATIONAL", 1, "MATCH"],
["RELATIONAL", 2, "REL"],
["RELATIONAL", 3, "MATCH-REL"],
["RELATIONAL", 4, "REL-MATCH"],
["RELATIONAL", 5, "neg_MATCH"],
["RELATIONAL", 6, "neg_REL"],
["EMOTION", 1, "FACES"],
["EMOTION", 2, "SHAPES"],
["EMOTION", 3, "FACES-SHAPES"],
["EMOTION", 4, "neg_FACES"],
["EMOTION", 5, "neg_SHAPES"],
["EMOTION", 6, "SHAPES-FACES"]]
def _init_s3_connection(aws_key, aws_secret,
bucket_name,
host='s3.amazonaws.com'):
com = boto.connect_s3(aws_key, aws_secret, host=host)
bucket = com.get_bucket(bucket_name, validate=False)
return bucket
def _convert_to_s3_target(filename, data_dir=None):
data_dir = get_data_dirs(data_dir)[0]
if data_dir in filename:
filename = filename.replace(data_dir, '/HCP_900')
return filename
def fetch_hcp_timeseries(data_dir=None,
subjects=None,
n_subjects=None,
data_type='rest',
sessions=None,
on_disk=True,
tasks=None):
"""Utility to download from s3"""
data_dir = get_data_dirs(data_dir)[0]
if data_type not in ['task', 'rest']:
raise ValueError("Wrong data type. Expected 'rest' or 'task', got"
"%s" % data_type)
if subjects is None:
subjects = fetch_subject_list(data_dir=data_dir,
n_subjects=n_subjects)
elif not hasattr(subjects, '__iter__'):
subjects = [subjects]
if not set(fetch_subject_list(data_dir=
data_dir)).issuperset(set(subjects)):
raise ValueError('Wrong subjects.')
res = []
for subject in subjects:
subject_dir = join(data_dir, str(subject), 'MNINonLinear', 'Results')
        if data_type == 'task':
if tasks is None:
sessions = TASK_LIST
elif isinstance(tasks, str):
sessions = [tasks]
if not set(TASK_LIST).issuperset(set(sessions)):
raise ValueError('Wrong tasks.')
else:
if sessions is None:
sessions = [1, 2]
elif isinstance(sessions, int):
sessions = [sessions]
if not set([1, 2]).issuperset(set(sessions)):
raise ValueError('Wrong rest sessions.')
for session in sessions:
for direction in ['LR', 'RL']:
if data_type == 'task':
task = session
root_filename = 'tfMRI_%s_%s' % (task, direction)
else:
root_filename = 'rfMRI_REST%i_%s' % (session,
direction)
root_dir = join(subject_dir, root_filename)
filename = join(root_dir, root_filename + '.nii.gz')
mask = join(root_dir, root_filename + '_SBRef.nii.gz')
confounds = ['Movement_AbsoluteRMS_mean.txt',
'Movement_AbsoluteRMS.txt',
'Movement_Regressors_dt.txt',
'Movement_Regressors.txt',
'Movement_RelativeRMS_mean.txt',
'Movement_RelativeRMS.txt']
res_dict = {'filename': filename, 'mask': mask}
print(filename)
# for i, confound in enumerate(confounds):
# res_dict['confound_%i' % i] = join(root_dir, confound)
if data_type == 'task':
feat_file = join(root_dir,
"tfMRI_%s_%s_hp200_s4_level1.fsf"
% (task, direction))
res_dict['feat_file'] = feat_file
for i, ev in enumerate(EVS[task]):
res_dict['ev_%i' % i] = join(root_dir, 'EVs', ev)
requested_on_disk = os.path.exists(filename)
res_dict['subject'] = subject
res_dict['direction'] = direction
if data_type == 'rest':
res_dict['session'] = session
else:
res_dict['task'] = task
if not on_disk or requested_on_disk:
res.append(res_dict)
res = pd.DataFrame(res)
if not res.empty:
if data_type == 'rest':
res.set_index(['subject', 'session', 'direction'],
inplace=True)
else:
res.set_index(['subject', 'task', 'direction'],
inplace=True)
return res
def fetch_hcp_contrasts(data_dir=None,
output='nistats',
n_subjects=None,
subjects=None,
on_disk=True,
level=2):
"""Nilearn like fetcher"""
data_dir = get_data_dirs(data_dir)[0]
if subjects is None:
subjects = fetch_subject_list(data_dir=data_dir,
n_subjects=n_subjects)
elif not hasattr(subjects, '__iter__'):
subjects = [subjects]
if not set(fetch_subject_list(data_dir=
data_dir)).issuperset(set(subjects)):
raise ValueError('Wrong subjects.')
res = []
if output == 'fsl':
for subject in subjects:
subject_dir = join(data_dir, str(subject), 'MNINonLinear',
'Results')
for i, contrast in enumerate(CONTRASTS):
task_name = contrast[0]
contrast_idx = contrast[1]
contrast_name = contrast[2]
if level == 2:
z_map = join(subject_dir, "tfMRI_%s/tfMRI_%s_hp200_s4_"
"level2vol.feat/cope%i.feat/"
"stats/zstat1.nii.gz"
% (task_name, task_name, contrast_idx))
if os.path.exists(z_map) or not on_disk:
res.append({'z_map': z_map,
'subject': subject,
'task': task_name,
'contrast': contrast_name,
'direction': 'level2'
})
else:
break
else:
raise ValueError("Can only output level 2 images "
"with output='fsl'")
else:
source_dir = join(data_dir, 'glm')
if level == 2:
directions = ['level2']
elif level == 1:
directions = ['LR', 'RL']
else:
raise ValueError('Level should be 1 or 2, got %s' % level)
for subject in subjects:
subject_dir = join(source_dir, str(subject))
for contrast in CONTRASTS:
task_name = contrast[0]
contrast_name = contrast[2]
for direction in directions:
z_dir = join(subject_dir, task_name, direction,
'z_maps')
effect_dir = join(subject_dir, task_name, direction,
'effects_maps')
z_map = join(z_dir, 'z_' + contrast_name +
'.nii.gz')
effect_map = join(effect_dir, 'effects_' + contrast_name +
'.nii.gz')
if ((os.path.exists(z_map) and os.path.exists(effect_map))
or not on_disk):
res.append({'z_map': z_map,
'effect_map': effect_map,
'subject': subject,
'task': task_name,
'contrast': contrast_name,
'direction': direction
})
res = pd.DataFrame(res)
if not res.empty:
res.set_index(['subject', 'task', 'contrast', 'direction'],
inplace=True)
res.sort_index(ascending=True, inplace=True)
return res
def fetch_behavioral_data(data_dir=None,
restricted=False,
overwrite=False):
_, _, username, password = get_credentials(data_dir=data_dir)
data_dir = get_data_dirs(data_dir)[0]
behavioral_dir = join(data_dir, 'behavioral')
if not os.path.exists(behavioral_dir):
os.makedirs(behavioral_dir)
csv_unrestricted = join(behavioral_dir, 'hcp_unrestricted_data.csv')
if not os.path.exists(csv_unrestricted) or overwrite:
result = _fetch_file(data_dir=data_dir,
url='https://db.humanconnectome.org/REST/'
'search/dict/Subject%20Information/results?'
'format=csv&removeDelimitersFromFieldValues'
'=true'
'&restricted=0&project=HCP_900',
username=username, password=password)
os.rename(result, csv_unrestricted)
csv_restricted = join(behavioral_dir, 'hcp_restricted_data.csv')
df_unrestricted = pd.read_csv(csv_unrestricted)
df_unrestricted.set_index('Subject', inplace=True)
if restricted and not os.path.exists(csv_restricted):
warnings.warn("Cannot automatically retrieve restricted data. "
"Please create the file '%s' manually" %
csv_restricted)
restricted = False
if not restricted:
df = df_unrestricted
else:
df_restricted = pd.read_csv(csv_restricted)
df_restricted.set_index('Subject', inplace=True)
df = df_unrestricted.join(df_restricted, how='outer')
df.sort_index(ascending=True, inplace=True)
df.index.names = ['subject']
return df
def fetch_subject_list(data_dir=None, n_subjects=None, only_terminated=True):
df = fetch_behavioral_data(data_dir=data_dir)
if only_terminated:
indices = np.logical_and(df['3T_RS-fMRI_PctCompl'] == 100,
df['3T_tMRI_PctCompl'] == 100)
df = df.loc[indices]
return df.iloc[:n_subjects].index. \
get_level_values('subject').unique().tolist()
def download_experiment(subject,
data_dir=None,
data_type='rest',
tasks=None,
sessions=None,
overwrite=False,
mock=False,
verbose=0):
aws_key, aws_secret, _, _ = get_credentials(data_dir=data_dir)
bucket = _init_s3_connection(aws_key, aws_secret, 'hcp-openaccess')
targets = fetch_hcp_timeseries(data_dir=data_dir,
subjects=subject,
data_type=data_type,
tasks=tasks,
on_disk=False,
sessions=sessions).values.ravel().tolist()
keys = list(map(_convert_to_s3_target, targets))
try:
download_from_s3(bucket, keys[0], targets[0], mock=True,
verbose=0)
except FileNotFoundError:
return
if verbose > 0:
if data_type == 'task':
print('Downloading files for subject %s,'
' tasks %s' % (subject, tasks))
else:
print('Downloading files for subject %s,'
' session %s' % (subject, sessions))
for key, target in zip(keys, targets):
dirname = os.path.dirname(target)
if not os.path.exists(dirname):
os.makedirs(dirname)
try:
download_from_s3(bucket, key, target, mock=mock,
overwrite=overwrite, verbose=verbose - 1)
except FileNotFoundError:
pass
except ConnectionError:
os.unlink(target)
exc_type, exc_value, exc_traceback = sys.exc_info()
msg = '\n'.join(traceback.format_exception(
exc_type, exc_value, exc_traceback))
target += '-error'
with open(target, 'w+') as f:
f.write(msg)
def download_from_s3(bucket, key, target, mock=False,
overwrite=False, verbose=0):
"""Download file from bucket
"""
my_key = Key(bucket)
my_key.key = key
if my_key.exists():
s3fid = bucket.get_key(key)
if not mock:
if not os.path.exists(target) or overwrite:
if verbose:
print('Downloading %s from %s' % (target, key))
s3fid.get_contents_to_filename(target)
name, ext = os.path.splitext(target)
if ext == '.gz':
try:
_ = nibabel.load(target).shape
if verbose:
print('Nifti consistency checked.')
except Exception:
raise ConnectionError('Corrupted download')
else:
if verbose:
print('Skipping %s as it already exists' % target)
else:
if verbose:
print('Mock download %s from %s' % (target, key))
else:
raise FileNotFoundError('File does not exist on S3')
def get_data_dirs(data_dir=None):
""" Returns the directories in which the HCP data is looked for.
This is typically useful for the end-user to check where the data is
downloaded and stored.
Parameters
----------
data_dir: string, optional
Path of the data directory. Used to force data storage in a specified
location. Default: None
Returns
-------
paths: list of strings
Paths of the dataset directories.
Notes
-----
This function retrieves the dataset directories using the following
priority:
1. the keyword argument data_dir
2. the global environment variable HCP_SHARED_DATA
3. the user environment variable HCP_DATA
4. HCP900 in the user home folder
"""
paths = []
# Check data_dir which force storage in a specific location
if data_dir is not None:
paths.extend(data_dir.split(os.pathsep))
# If data_dir has not been specified, then we crawl default locations
if data_dir is None:
global_data = os.getenv('HCP_SHARED_DATA')
if global_data is not None:
paths.extend(global_data.split(os.pathsep))
local_data = os.getenv('HCP_DATA')
if local_data is not None:
paths.extend(local_data.split(os.pathsep))
paths.append(os.path.expanduser('~/HCP900'))
return paths
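# Illustrative sketch (not part of the original module): how the lookup
# priority documented in get_data_dirs plays out. The environment variable
# values and the _demo_data_dir_priority name are invented for the example.
def _demo_data_dir_priority():
    import os
    os.environ['HCP_SHARED_DATA'] = '/shared/hcp'
    os.environ['HCP_DATA'] = '/home/me/hcp'
    # No explicit data_dir: shared env var, then user env var, then ~/HCP900
    print(get_data_dirs())
    # An explicit data_dir short-circuits the environment lookup
    print(get_data_dirs('/scratch/hcp'))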
def get_credentials(filename=None, data_dir=None):
"""Retrieve credentials for ConnectomeDB and S3 bucket access.
Credentials are looked up first in a credentials file, then in the
environment variables HCP_AWS_KEY, HCP_AWS_SECRET_KEY, CDB_USERNAME
and CDB_PASSWORD.
Parameters
----------
filename: str, optional
Filename of the credentials file. Default: 'credentials.txt', looked up
in the working directory, then in the HCP data directory.
"""
try:
if filename is None:
filename = 'credentials.txt'
if not os.path.exists(filename):
data_dir = get_data_dirs(data_dir)[0]
filename = join(data_dir, filename)
if not os.path.exists(filename):
if ('HCP_AWS_KEY' in os.environ
and 'HCP_AWS_SECRET_KEY' in os.environ
and 'CDB_USERNAME' in os.environ
and 'CDB_PASSWORD' in os.environ):
aws_key = os.environ['HCP_AWS_KEY']
aws_secret = os.environ['HCP_AWS_SECRET_KEY']
cdb_username = os.environ['CDB_USERNAME']
cdb_password = os.environ['CDB_PASSWORD']
return aws_key, aws_secret, cdb_username, cdb_password
else:
raise KeyError('Could not find environment variables.')
with open(filename, 'r') as f:
return f.readline().strip().split(',')
except (KeyError, FileNotFoundError):
raise ValueError("Cannot find credentials. Provide them "
"in a file credentials.txt where the script is "
"executed, or in the HCP directory, or in "
"environment variables.")
def fetch_hcp_mask(data_dir=None, url=None, resume=True):
data_dir = get_data_dirs(data_dir)[0]
if not os.path.exists(data_dir):
os.makedirs(data_dir)
data_dir = join(data_dir, 'parietal')
if not os.path.exists(data_dir):
os.makedirs(data_dir)
if url is None:
url = 'http://amensch.fr/data/cogspaces/mask/mask_img.nii.gz'
_fetch_file(url, data_dir, resume=resume)
return join(data_dir, 'mask_img.nii.gz')
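# Sketch of the layout expected by the get_credentials helper above: a single
# line with four comma-separated fields, in the order the callers unpack them
# (aws_key, aws_secret, cdb_username, cdb_password). Values are placeholders.
EXAMPLE_CREDENTIALS_LINE = 'MY_AWS_KEY,MY_AWS_SECRET,my_cdb_user,my_cdb_password\n'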
def fetch_hcp(data_dir=None, n_subjects=None, subjects=None,
from_file=False,
on_disk=True):
root = get_data_dirs(data_dir)[0]
mask = fetch_hcp_mask(data_dir)
if not from_file:
rest = fetch_hcp_timeseries(data_dir, data_type='rest',
n_subjects=n_subjects, subjects=subjects,
on_disk=on_disk)
task = fetch_hcp_timeseries(data_dir, data_type='task',
n_subjects=n_subjects, subjects=subjects,
on_disk=on_disk)
contrasts = fetch_hcp_contrasts(data_dir,
output='nistats',
n_subjects=n_subjects,
subjects=subjects,
on_disk=on_disk)
behavioral = fetch_behavioral_data(data_dir)
indices = []
for df in rest, task, contrasts:
if not df.empty:
indices.append(df.index.get_level_values('subject').
unique().values)
if indices:
index = indices[0]
for this_index in indices[1:]:
index = np.union1d(index, this_index)
behavioral = behavioral.loc[index]
else:
behavioral = pd.DataFrame([])
else:
rest = pd.read_csv(join(root, 'parietal', 'rest.csv'))
task = pd.read_csv(join(root, 'parietal', 'task.csv'))
contrasts = pd.read_csv(join(root, 'parietal',
'contrasts.csv'))
behavioral = pd.read_csv(join(root, 'parietal',
'behavioral.csv'))
behavioral.set_index('subject', inplace=True)
rest.set_index(['subject', 'session', 'direction'], inplace=True)
task.set_index(['subject', 'task', 'direction'], inplace=True)
contrasts.set_index(['subject', 'task', 'contrast', 'direction'],
inplace=True)
if subjects is None:
subjects = fetch_subject_list(data_dir=data_dir,
n_subjects=n_subjects)
rest = rest.loc[subjects]
task = task.loc[subjects]
contrasts = contrasts.loc[subjects]
behavioral = behavioral.loc[subjects]
return Bunch(rest=rest,
contrasts=contrasts,
task=task,
behavioral=behavioral,
mask=mask,
root=root)
def dump_hcp_csv(data_dir=None):
dataset = fetch_hcp(data_dir, on_disk=True)
data_dir = get_data_dirs(data_dir)[0]
dataset.rest.to_csv(join(data_dir, 'parietal',
'rest.csv'))
dataset.task.to_csv(join(data_dir, 'parietal',
'task.csv'))
dataset.contrasts.to_csv(join(data_dir, 'parietal',
'contrasts.csv'))
dataset.behavioral.to_csv(join(data_dir, 'parietal',
'behavioral.csv'))
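# Usage sketch (assumes credentials and a local HCP data directory are already
# configured as described in get_credentials / get_data_dirs); it only calls
# functions defined in this module, and the _demo_fetch_hcp name is invented.
def _demo_fetch_hcp():
    # First two subjects with complete rest and task acquisitions
    subjects = fetch_subject_list(n_subjects=2)
    # Time series actually present on disk for those subjects
    rest = fetch_hcp_timeseries(subjects=subjects, data_type='rest')
    task = fetch_hcp_timeseries(subjects=subjects, data_type='task')
    # Bundle of rest/task/contrast/behavioral data plus the brain mask
    data = fetch_hcp(subjects=subjects)
    print(rest.shape, task.shape, data.behavioral.shape)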
|
|
#!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import os
import sys
import errno
import subprocess
import time
import argparse
import re
import json
import ConfigParser
from pprint import pprint
sys.path.insert(0, os.path.realpath('/usr/lib/python2.7/site-packages'))
sys.path.insert(
0,
os.path.realpath('/opt/contrail/api-venv/lib/python2.7/site-packages/vnc_cfg_api_server/'))
from vnc_api.vnc_api import *
from vnc_api.common import exceptions as vnc_exceptions
import vnc_cfg_api_server
class ContrailConfigCmd(object):
def __init__(self, args_str=None):
self._args = None
if not args_str:
args_str = ' '.join(sys.argv[1:])
self._parse_args(args_str)
# connect to vnc server
self._vnc_lib = VncApi('u', 'p',
api_server_host=self._args.listen_ip_addr,
api_server_port=self._args.listen_port)
self.re_parser = re.compile('[ \t\n]+')
self.final_list = []
#end __init__
def _parse_args(self, args_str):
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(args_str.split())
global_defaults = {}
args.conf_file = '/etc/contrail/api_server.conf'
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.read([args.conf_file])
global_defaults.update(dict(config.items("DEFAULTS")))
# Override with CLI options
# Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.set_defaults(**global_defaults)
subparsers = parser.add_subparsers()
restore_parser = subparsers.add_parser('restore')
restore_parser.add_argument("filename",
help="file name of the saved config to restore from")
restore_parser.set_defaults(func=self.restore_contrail_config)
backup_parser = subparsers.add_parser('backup')
backup_parser.add_argument("name",
help="name to backup config database")
backup_parser.set_defaults(func=self.backup_contrail_config)
self._args = parser.parse_args(remaining_argv)
#end _parse_args
def _delete_default_security_groups(self, objs, type):
for obj in objs.values():
if obj['type'] != type:
continue
fq_name = obj['data']['fq_name']
fq_name.append('default')
while 1:
try:
self._vnc_lib.security_group_delete(fq_name=fq_name)
break
except NoIdError:
pass
#end _delete_default_security_groups
def _create_objects(self, objs, type):
for obj in objs.values():
if obj['type'] != type:
continue
if obj['created']:
continue
#create object with these keys
create_obj = {}
for key, val in obj['data'].items():
if key not in ['uuid', 'fq_name', 'id_perms',
'parent_type', 'virtual_network_refs',
'network_ipam_refs', 'network_ipam_mgmt']:
continue
create_obj[key] = val
resource = obj['type'].replace('_', '-')
json_body = json.dumps({resource: create_obj})
try:
self._vnc_lib.restore_config(True, resource, json_body)
obj['created'] = True
except RefsExistError:
obj['created'] = True
except Exception as e:
if hasattr(e, 'status_code'):
print("Error(%s): creating %s %s "
% (e.status_code, obj['type'],
obj['data']['fq_name']))
else:
print("Error: creating %s %s %s\n"
% (obj['type'], obj['data']['fq_name'], e))
#end _create_objects
def _update_objects(self, objs, type):
for obj in objs.values():
if obj['type'] != type:
continue
resource = obj['type'].replace('_', '-')
json_body = json.dumps({resource: obj['data']})
try:
self._vnc_lib.restore_config(False, resource, json_body)
except RefsExistError:
pass
except Exception as e:
if hasattr(e, 'status_code'):
print("Error(%s): updating %s %s "
% (e.status_code, obj['type'],
obj['data']['fq_name']))
else:
print("Error: updating %s %s %s\n"
% (obj['type'], obj['data']['fq_name'], e))
#end _update_objects
#breadth first search
def _bfs(self, tree, root):
bfs_path = []
index = -1
bfs_path.append(root)
while index != len(bfs_path):
index += 1
try:
values = tree[bfs_path[index]]
values.sort()
for value in values:
if value not in bfs_path:
bfs_path.append(value)
except Exception as e:
pass
return bfs_path
#end _bfs
#restore config from a file - overwrite old config with the same uuid
def restore_contrail_config(self):
print "Restoring config from %s" % (self._args.filename)
objs = {}
#scan through the file
f = open(self._args.filename, 'r')
while 1:
line = f.readline()
if not line:
break
line = line.strip().replace('\n', '')
if not line.startswith('type'):
continue
#store objects
type = line.split(':')[1]
line = f.readline()
obj = json.loads(line)
objs[str(obj['fq_name'])] = {'created': False,
'type': type.replace('_', '-'),
'data': obj}
f.close()
#create hierarchy
hierarchy = {}
for obj in objs.values():
if 'parent_type' not in obj['data']:
if obj['type'] not in hierarchy:
hierarchy[obj['type']] = []
elif not obj['data']['parent_type'] in hierarchy:
hierarchy[obj['data']['parent_type']] = []
hierarchy[obj['data']['parent_type']].append(obj['type'])
else:
if obj['type'] not in hierarchy[obj['data']['parent_type']]:
hierarchy[obj['data']['parent_type']].append(obj['type'])
#find top level
top_list = []
for key, val in hierarchy.items():
top_level = True
for values in hierarchy.values():
if key in values:
top_level = False
break
if top_level:
top_list.append(key)
#organize hierarchy
for top_level in top_list:
bfs_list = self._bfs(hierarchy, top_level)
self.final_list.extend(bfs_list)
#post(create) object to api server
print self.final_list
print ("Phase create")
print ("------------")
for type in self.final_list:
print("%-64s -- start" % type)
self._create_objects(objs, type)
if type == 'project':
self._delete_default_security_groups(objs, type)
print("%-64s -- done" % '')
#put(update) object in api server
print ("Phase update")
print ("------------")
for type in self.final_list:
print("%-64s -- start" % type)
self._update_objects(objs, type)
print("%-64s -- done" % '')
print "Config restore complete %s" % (self._args.filename)
#restore_contrail_config
#backup config
def backup_contrail_config(self):
print "Snapshot config database to %s" % (self._args.name)
f = open(self._args.name, 'w')
records = self._vnc_lib.fetch_records()
#store the records
for record in records:
f.write("\ntype:%s\n" % (record['type']))
f.write(json.dumps(record) + '\n')
f.close()
print "Snapshot config database to %s done" % (self._args.name)
#backup_contrail_config
#end class ContrailConfigCmd
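# Illustrative sketch (not from the original script) of the on-disk format
# written by backup_contrail_config and parsed by restore_contrail_config:
# a "type:<object-type>" line followed by the JSON dump of the object.
# The uuids and names below are made up.
EXAMPLE_BACKUP_SNIPPET = """
type:project
{"fq_name": ["default-domain", "demo"], "uuid": "1111", "id_perms": {}}
type:virtual_network
{"fq_name": ["default-domain", "demo", "vn1"], "parent_type": "project", "uuid": "2222", "id_perms": {}}
"""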
def main(args_str=None):
cfg = ContrailConfigCmd(args_str)
cfg._args.func()
#end main
if __name__ == "__main__":
main()
|
|
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
# $Id:setup.py 5158 2007-04-09 00:14:35Z zim $
#
#------------------------------------------------------------------------------
import os, time, shutil, xmlrpclib, socket, pprint
from signal import *
from hodlib.Common.logger import hodLog, hodDummyLogger
from hodlib.Common.socketServers import hodXMLRPCServer
from hodlib.Common.util import local_fqdn
from hodlib.Common.xmlrpc import hodXRClient
class hodBaseService:
"""hodBaseService class - This class provides service registration, logging,
and configuration access methods. It also provides an XML-RPC server.
This class should be extended to create hod services. Methods whose names
begin with _xr_method_ are automatically registered with the XML-RPC
server, with the _xr_method_ prefix stripped from the exposed name.
"""
def __init__(self, name, config, xrtype='threaded'):
""" Initialization requires a name string and a config object of type
hodlib.Common.setup.options or hodlib.Common.setup.config."""
self.name = name
self.hostname = local_fqdn()
self._cfg = config
self._xrc = None
self._http = None
self.logs = {}
self._baseLogger = None
self._serviceID = os.getenv('PBS_JOBID')
self.__logDir = None
self.__svcrgy = None
self.__stop = False
self.__xrtype = xrtype
self._init_logging()
if name != 'serviceRegistry': self._init_signals()
self._init_xrc_server()
def __set_logging_level(self, level):
self.logs['main'].info("Setting log level to %s." % level)
for loggerName in self.loggers.keys():
self.logs['main'].set_logger_level(loggerName, level)
def __get_logging_level(self):
if self._cfg.has_key('stream'):
return self.loggers['main'].get_level('stream', 'main')
elif self._cfg.has_key('log-dir'):
return self.loggers['main'].get_level('file', 'main')
else:
return 0
def _xr_method_stop(self, *args):
"""XML-RPC method, calls stop() on ourselves."""
return self.stop()
def _xr_method_status(self, *args):
"""XML-RPC method, calls status() on ourselves."""
return self.status()
def _init_logging(self):
if self._cfg.has_key('debug'):
if self._cfg['debug'] > 0:
self._baseLogger = hodLog(self.name)
self.logs['main'] = self._baseLogger.add_logger('main')
if self._cfg.has_key('stream'):
if self._cfg['stream']:
self._baseLogger.add_stream(level=self._cfg['debug'],
addToLoggerNames=('main',))
if self._cfg.has_key('log-dir'):
if self._serviceID:
self.__logDir = os.path.join(self._cfg['log-dir'], "%s.%s" % (
self._cfg['userid'], self._serviceID))
else:
self.__logDir = os.path.join(self._cfg['log-dir'],
self._cfg['userid'])
if not os.path.exists(self.__logDir):
os.mkdir(self.__logDir)
self._baseLogger.add_file(logDirectory=self.__logDir,
level=self._cfg['debug'], addToLoggerNames=('main',))
if self._cfg.has_key('syslog-address'):
self._baseLogger.add_syslog(self._cfg['syslog-address'],
level=self._cfg['debug'], addToLoggerNames=('main',))
if not self.logs.has_key('main'):
self.logs['main'] = hodDummyLogger()
else:
self.logs['main'] = hodDummyLogger()
else:
self.logs['main'] = hodDummyLogger()
def _init_signals(self):
def sigStop(sigNum, handler):
self._sig_wrapper(sigNum, self.stop)
def toggleLevel():
currentLevel = self.__get_logging_level()
if currentLevel == 4:
self.__set_logging_level(1)
else:
self.__set_logging_level(currentLevel + 1)
def sigDebug(sigNum, handler):
self._sig_wrapper(sigNum, toggleLevel)
signal(SIGTERM, sigStop)
signal(SIGQUIT, sigStop)
signal(SIGINT, sigStop)
signal(SIGUSR2, sigDebug)
def _sig_wrapper(self, sigNum, handler, *args):
self.logs['main'].info("Caught signal %s." % sigNum)
if args:
handler(args)
else:
handler()
def _init_xrc_server(self):
host = None
ports = None
if self._cfg.has_key('xrs-address'):
(host, port) = (self._cfg['xrs-address'][0], self._cfg['xrs-address'][1])
ports = (port,)
elif self._cfg.has_key('xrs-port-range'):
host = ''
ports = self._cfg['xrs-port-range']
if host != None:
if self.__xrtype == 'threaded':
self._xrc = hodXMLRPCServer(host, ports)
elif self.__xrtype == 'twisted':
try:
from socketServers import twistedXMLRPCServer
self._xrc = twistedXMLRPCServer(host, ports, self.logs['main'])
except ImportError:
self.logs['main'].error("Twisted XML-RPC server not available, "
+ "falling back on threaded server.")
self._xrc = hodXMLRPCServer(host, ports)
for attr in dir(self):
if attr.startswith('_xr_method_'):
self._xrc.register_function(getattr(self, attr),
attr[11:])
self._xrc.register_introspection_functions()
def _register_service(self, port=None, installSignalHandlers=1):
if self.__svcrgy:
self.logs['main'].info(
"Registering service with service registry %s... " % self.__svcrgy)
svcrgy = hodXRClient(self.__svcrgy, None, None, 0, 0, installSignalHandlers)
if self._xrc and self._http:
svcrgy.registerService(self._cfg['userid'], self._serviceID,
self.hostname, self.name, 'hod', {
'xrs' : "http://%s:%s" % (
self._xrc.server_address[0],
self._xrc.server_address[1]),'http' :
"http://%s:%s" % (self._http.server_address[0],
self._http.server_address[1])})
elif self._xrc:
svcrgy.registerService(self._cfg['userid'], self._serviceID,
self.hostname, self.name, 'hod', {
'xrs' : "http://%s:%s" % (
self._xrc.server_address[0],
self._xrc.server_address[1]),})
elif self._http:
svcrgy.registerService(self._cfg['userid'], self._serviceID,
self.hostname, self.name, 'hod', {'http' :
"http://%s:%s" % (self._http.server_address[0],
self._http.server_address[1]),})
else:
svcrgy.registerService(self._cfg['userid'], self._serviceID,
self.hostname, self.name, 'hod', {})
def start(self):
""" Start XML-RPC server and register service."""
self.logs['main'].info("Starting HOD service: %s ..." % self.name)
if self._xrc: self._xrc.serve_forever()
if self._cfg.has_key('register') and self._cfg['register']:
self._register_service()
def stop(self):
""" Stop XML-RPC server, unregister service and set stop flag. """
self.logs['main'].info("Stopping service...")
if self._xrc: self._xrc.stop()
self.__stop = True
return True
def status(self):
"""Returns True; should be overridden."""
return True
def wait(self):
"""Wait until stop method is called."""
while not self.__stop:
time.sleep(.1)
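# Minimal sketch (not part of HOD) of the _xr_method_ convention described in
# the class docstring: any method named _xr_method_<name> on a subclass is
# registered with the XML-RPC server under <name>. The class below is purely
# illustrative and is never instantiated here.
class _echoService(hodBaseService):
    def _xr_method_echo(self, *args):
        """XML-RPC method 'echo', returns its arguments unchanged."""
        return list(args)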
|
|
# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
ZFS Storage Appliance REST API Client Programmatic Interface
"""
import httplib
import json
import StringIO
import time
import urllib2
from oslo_log import log
from cinder.i18n import _LE, _LI
LOG = log.getLogger(__name__)
class Status(object):
"""Result HTTP Status"""
def __init__(self):
pass
#: Request return OK
OK = httplib.OK
#: New resource created successfully
CREATED = httplib.CREATED
#: Command accepted
ACCEPTED = httplib.ACCEPTED
#: Command returned OK but no data will be returned
NO_CONTENT = httplib.NO_CONTENT
#: Bad Request
BAD_REQUEST = httplib.BAD_REQUEST
#: User is not authorized
UNAUTHORIZED = httplib.UNAUTHORIZED
#: The request is not allowed
FORBIDDEN = httplib.FORBIDDEN
#: The requested resource was not found
NOT_FOUND = httplib.NOT_FOUND
#: The request method is not allowed
NOT_ALLOWED = httplib.METHOD_NOT_ALLOWED
#: Request timed out
TIMEOUT = httplib.REQUEST_TIMEOUT
#: Request conflicts with the current state of the resource
CONFLICT = httplib.CONFLICT
#: Service Unavailable
BUSY = httplib.SERVICE_UNAVAILABLE
class RestResult(object):
"""Result from a REST API operation"""
def __init__(self, response=None, err=None):
"""Initialize a RestResult containing the results from a REST call
:param response: HTTP response
"""
self.response = response
self.error = err
self.data = ""
self.status = 0
if self.response:
self.status = self.response.getcode()
result = self.response.read()
while result:
self.data += result
result = self.response.read()
if self.error:
self.status = self.error.code
self.data = httplib.responses[self.status]
LOG.debug('Response code: %s', self.status)
LOG.debug('Response data: %s', self.data)
def get_header(self, name):
"""Get an HTTP header with the given name from the results
:param name: HTTP header name
:return: The header value or None if no value is found
"""
if self.response is None:
return None
info = self.response.info()
return info.getheader(name)
class RestClientError(Exception):
"""Exception for ZFS REST API client errors"""
def __init__(self, status, name="ERR_INTERNAL", message=None):
"""Create a REST Response exception
:param status: HTTP response status
:param name: The name of the REST API error type
:param message: Descriptive error message returned from REST call
"""
super(RestClientError, self).__init__(message)
self.code = status
self.name = name
self.msg = message
if status in httplib.responses:
self.msg = httplib.responses[status]
def __str__(self):
return "%d %s %s" % (self.code, self.name, self.msg)
class RestClientURL(object):
"""ZFSSA urllib2 client"""
def __init__(self, url, **kwargs):
"""Initialize a REST client.
:param url: The ZFSSA REST API URL
:key session: HTTP Cookie value of x-auth-session obtained from a
normal BUI login.
:key timeout: Time in seconds to wait for command to complete.
(Default is 60 seconds)
"""
self.url = url
self.local = kwargs.get("local", False)
self.base_path = kwargs.get("base_path", "/api")
self.timeout = kwargs.get("timeout", 60)
self.headers = {"content-type": "application/json"}
if kwargs.get('session'):
self.headers['x-auth-session'] = kwargs.get('session')
self.do_logout = False
self.auth_str = None
def _path(self, path, base_path=None):
"""build rest url path"""
if path.startswith("http://") or path.startswith("https://"):
return path
if base_path is None:
base_path = self.base_path
if not path.startswith(base_path) and not (
self.local and ("/api" + path).startswith(base_path)):
path = "%s%s" % (base_path, path)
if self.local and path.startswith("/api"):
path = path[4:]
return self.url + path
def _authorize(self):
"""Performs authorization setting x-auth-session"""
self.headers['authorization'] = 'Basic %s' % self.auth_str
if 'x-auth-session' in self.headers:
del self.headers['x-auth-session']
try:
result = self.post("/access/v1")
del self.headers['authorization']
if result.status == httplib.CREATED:
self.headers['x-auth-session'] = \
result.get_header('x-auth-session')
self.do_logout = True
LOG.info(_LI('ZFSSA version: %s'),
result.get_header('x-zfssa-version'))
elif result.status == httplib.NOT_FOUND:
raise RestClientError(result.status, name="ERR_RESTError",
message="REST Not Available: \
Please Upgrade")
except RestClientError:
del self.headers['authorization']
raise
def login(self, auth_str):
"""Login to an appliance using a user name and password.
Start a session like what is done logging into the BUI. This is not a
requirement to run REST commands, since the protocol is stateless.
What it does is set up a cookie session so that some server-side
caching can be done. If login is used remember to call logout when
finished.
:param auth_str: Authorization string (base64)
"""
self.auth_str = auth_str
self._authorize()
def logout(self):
"""Logout of an appliance"""
result = None
try:
result = self.delete("/access/v1", base_path="/api")
except RestClientError:
pass
self.headers.clear()
self.do_logout = False
return result
def islogin(self):
"""Return whether the client is logged in."""
return self.do_logout
@staticmethod
def mkpath(*args, **kwargs):
"""Make a path?query string for making a REST request
:cmd_params args: The path part
:cmd_params kwargs: The query part
"""
buf = StringIO.StringIO()
query = "?"
for arg in args:
buf.write("/")
buf.write(arg)
for k in kwargs:
buf.write(query)
if query == "?":
query = "&"
buf.write(k)
buf.write("=")
buf.write(kwargs[k])
return buf.getvalue()
def request(self, path, request, body=None, **kwargs):
"""Make an HTTP request and return the results
:param path: Path used with the initialized URL to make a request
:param request: HTTP request type (GET, POST, PUT, DELETE)
:param body: HTTP body of request
:key accept: Set HTTP 'Accept' header with this value
:key base_path: Override the base_path for this request
:key content: Set HTTP 'Content-Type' header with this value
"""
out_hdrs = dict.copy(self.headers)
if kwargs.get("accept"):
out_hdrs['accept'] = kwargs.get("accept")
if body:
if isinstance(body, dict):
body = str(json.dumps(body))
if body and len(body):
out_hdrs['content-length'] = len(body)
zfssaurl = self._path(path, kwargs.get("base_path"))
req = urllib2.Request(zfssaurl, body, out_hdrs)
req.get_method = lambda: request
maxreqretries = kwargs.get("maxreqretries", 10)
retry = 0
response = None
LOG.debug('Request: %s %s', request, zfssaurl)
LOG.debug('Out headers: %s', out_hdrs)
if body and body != '':
LOG.debug('Body: %s', body)
while retry < maxreqretries:
try:
response = urllib2.urlopen(req, timeout=self.timeout)
except urllib2.HTTPError as err:
if err.code == httplib.NOT_FOUND:
LOG.debug('REST Not Found: %s', err.code)
else:
LOG.error(_LE('REST Not Available: %s'), err.code)
if err.code == httplib.SERVICE_UNAVAILABLE and \
retry < maxreqretries:
retry += 1
time.sleep(1)
LOG.error(_LE('Server Busy retry request: %s'), retry)
continue
if (err.code == httplib.UNAUTHORIZED or
err.code == httplib.INTERNAL_SERVER_ERROR) and \
'/access/v1' not in zfssaurl:
try:
LOG.error(_LE('Authorizing request: %(zfssaurl)s '
'retry: %(retry)d .'),
{'zfssaurl': zfssaurl, 'retry': retry})
self._authorize()
req.add_header('x-auth-session',
self.headers['x-auth-session'])
except RestClientError:
pass
retry += 1
time.sleep(1)
continue
return RestResult(err=err)
except urllib2.URLError as err:
LOG.error(_LE('URLError: %s'), err.reason)
raise RestClientError(-1, name="ERR_URLError",
message=err.reason)
break
if response and response.getcode() == httplib.SERVICE_UNAVAILABLE and \
retry >= maxreqretries:
raise RestClientError(response.getcode(), name="ERR_HTTPError",
message="REST Not Available: Disabled")
return RestResult(response=response)
def get(self, path, **kwargs):
"""Make an HTTP GET request
:param path: Path to resource.
"""
return self.request(path, "GET", **kwargs)
def post(self, path, body="", **kwargs):
"""Make an HTTP POST request
:param path: Path to resource.
:param body: Post data content
"""
return self.request(path, "POST", body, **kwargs)
def put(self, path, body="", **kwargs):
"""Make an HTTP PUT request
:param path: Path to resource.
:param body: Put data content
"""
return self.request(path, "PUT", body, **kwargs)
def delete(self, path, **kwargs):
"""Make an HTTP DELETE request
:param path: Path to resource that will be deleted.
"""
return self.request(path, "DELETE", **kwargs)
def head(self, path, **kwargs):
"""Make an HTTP HEAD request
:param path: Path to resource.
"""
return self.request(path, "HEAD", **kwargs)
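# Usage sketch with a hypothetical appliance host and placeholder credentials;
# only methods defined above are used. mkpath() builds the "path?query" string
# and login()/get() drive the session handling described in the docstrings.
def _demo_zfssa_client():
    client = RestClientURL('https://zfssa.example.com:215', timeout=60)
    client.login('dXNlcjpwYXNzd29yZA==')  # base64 of "user:password"
    path = RestClientURL.mkpath('storage', 'v1', 'pools', pool='mypool')
    # mkpath -> '/storage/v1/pools?pool=mypool'; request() prefixes base_path
    result = client.get(path)
    print('%s %s' % (result.status, result.data))
    client.logout()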
|
|
#!/usr/bin/env python
# Copyright (c) 2009 Six Apart Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Six Apart Ltd. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
A Twitter API client, implemented using remoteobjects.
"""
__version__ = '1.1'
__date__ = '17 April 2009'
__author__ = 'Brad Choate'
import httplib
from optparse import OptionParser
import sys
from urllib import urlencode, quote_plus
from urlparse import urljoin, urlunsplit
from httplib2 import Http
from remoteobjects import RemoteObject, fields, ListObject
class User(RemoteObject):
"""A Twitter account.
A User can be retrieved from ``http://twitter.com/users/show.json`` with
the appropriate ``id``, ``user_id``, or ``screen_name`` parameter.
"""
id = fields.Field()
name = fields.Field()
screen_name = fields.Field()
location = fields.Field()
description = fields.Field()
profile_image_url = fields.Field()
protected = fields.Field()
followers_count = fields.Field()
status = fields.Object('Status')
@classmethod
def get_user(cls, http=None, **kwargs):
url = '/users/show'
if 'id' in kwargs:
url += '/%s.json' % quote_plus(kwargs['id'])
else:
url += '.json'
query = urlencode(dict((k, v) for k, v in kwargs.items() if k in ('screen_name', 'user_id')))
url = urlunsplit((None, None, url, query, None))
return cls.get(urljoin(Twitter.endpoint, url), http=http)
class DirectMessage(RemoteObject):
"""A Twitter direct message.
The authenticated user's most recent direct messages are at
``http://twitter.com/direct_messages.json``.
"""
id = fields.Field()
sender_id = fields.Field()
text = fields.Field()
recipient_id = fields.Field()
created_at = fields.Field()
sender_screen_name = fields.Field()
recipient_screen_name = fields.Field()
sender = fields.Object(User)
recipient = fields.Object(User)
def __unicode__(self):
return u"%s: %s" % (self.sender.screen_name, self.text)
class Status(RemoteObject):
"""A Twitter update.
Statuses can be fetched from
``http://twitter.com/statuses/show/<id>.json``.
"""
created_at = fields.Field()
id = fields.Field()
text = fields.Field()
source = fields.Field()
truncated = fields.Field()
in_reply_to_status_id = fields.Field()
in_reply_to_user_id = fields.Field()
in_reply_to_screen_name = fields.Field()
favorited = fields.Field()
user = fields.Object(User)
@classmethod
def get_status(cls, id, http=None):
return cls.get(urljoin(Twitter.endpoint, "/statuses/show/%d.json" % int(id)), http=http)
def __unicode__(self):
return u"%s: %s" % (self.user.screen_name, self.text)
class DirectMessageList(ListObject):
entries = fields.List(fields.Object(DirectMessage))
def __getitem__(self, key):
return self.entries.__getitem__(key)
@classmethod
def get_messages(cls, http=None, **kwargs):
url = '/direct_messages.json'
query = urlencode(dict((k, v) for k, v in kwargs.items() if k in ('since_id', 'page')))
url = urlunsplit((None, None, url, query, None))
return cls.get(urljoin(Twitter.endpoint, url), http=http)
@classmethod
def get_sent_messages(cls, http=None, **kwargs):
url = '/direct_messages/sent.json'
query = urlencode(dict((k, v) for k, v in kwargs.items() if k in ('since_id', 'page')))
url = urlunsplit((None, None, url, query, None))
return cls.get(urljoin(Twitter.endpoint, url), http=http)
class UserList(ListObject):
entries = fields.List(fields.Object(User))
def __getitem__(self, key):
return self.entries.__getitem__(key)
@classmethod
def get_friends(cls, http=None, **kwargs):
return cls.get_related("friends", http=http, **kwargs)
@classmethod
def get_followers(cls, http=None, **kwargs):
return cls.get_related("followers", http=http, **kwargs)
@classmethod
def get_related(cls, relation, http=None, **kwargs):
url = '/statuses/%s' % relation
if 'id' in kwargs:
url += '/%s.json' % quote_plus(kwargs['id'])
else:
url += '.json'
query = urlencode(dict((k, v) for k, v in kwargs.items() if k in ('screen_name', 'user_id', 'page')))
url = urlunsplit((None, None, url, query, None))
return cls.get(urljoin(Twitter.endpoint, url), http=http)
class Timeline(ListObject):
entries = fields.List(fields.Object(Status))
def __getitem__(self, key):
return self.entries.__getitem__(key)
@classmethod
def public(cls, http=None):
return cls.get(urljoin(Twitter.endpoint, '/statuses/public_timeline.json'), http=http)
@classmethod
def friends(cls, http=None, **kwargs):
query = urlencode(dict((k, v) for k, v in kwargs.items() if k in ('since_id', 'max_id', 'count', 'page')))
url = urlunsplit((None, None, '/statuses/friends_timeline.json', query, None))
return cls.get(urljoin(Twitter.endpoint, url), http=http)
@classmethod
def user(cls, http=None, **kwargs):
url = '/statuses/user_timeline'
if 'id' in kwargs:
url += '/%s.json' % quote_plus(kwargs['id'])
else:
url += '.json'
query = urlencode(dict((k, v) for k, v in kwargs.items() if k in ('screen_name', 'user_id', 'since_id', 'max_id', 'page')))
url = urlunsplit((None, None, url, query, None))
return cls.get(urljoin(Twitter.endpoint, url), http=http)
@classmethod
def mentions(cls, http=None, **kwargs):
query = urlencode(dict((k, v) for k, v in kwargs.items() if k in ('since_id', 'max_id', 'page')))
url = urlunsplit((None, None, '/statuses/mentions.json', query, None))
return cls.get(urljoin(Twitter.endpoint, url), http=http)
class Twitter(Http):
"""A user agent for interacting with Twitter.
Instances of this class are full ``httplib2.Http`` HTTP user agent
objects, but provide convenience methods for interacting with
Twitter and its data objects.
"""
endpoint = 'http://twitter.com/'
def public_timeline(self):
return Timeline.public(http=self)
def friends_timeline(self, **kwargs):
return Timeline.friends(http=self, **kwargs)
def user_timeline(self, **kwargs):
return Timeline.user(http=self, **kwargs)
def show(self, id):
return Status.get_status(id, http=self)
def user(self, id=None, **kwargs):
if id is not None:
kwargs['id'] = id
return User.get_user(http=self, **kwargs)
def mentions(self, **kwargs):
return Timeline.mentions(http=self, **kwargs)
def friends(self, **kwargs):
return UserList.get_friends(http=self, **kwargs)
def direct_messages_received(self, **kwargs):
return DirectMessageList.get_messages(http=self, **kwargs)
def direct_messages_sent(self, **kwargs):
return DirectMessageList.get_sent_messages(http=self, **kwargs)
def show_public(twitter):
print "## Public timeline ##"
for tweet in twitter.public_timeline():
print unicode(tweet)
def show_dms(twitter):
print "## Direct messages sent to me ##"
for dm in twitter.direct_messages_received():
print unicode(dm)
def show_friends(twitter):
print "## Tweets from my friends ##"
for tweet in twitter.friends_timeline():
print unicode(tweet)
def main(argv=None):
if argv is None:
argv = sys.argv
parser = OptionParser()
parser.add_option("-u", "--username", dest="username",
help="name of user for authentication")
parser.add_option("--public", action="store_const", const=show_public,
dest="action", default=show_public,
help="Show tweets from the public timeline")
parser.add_option("--dms", action="store_const", const=show_dms,
dest="action", help="Show DMs sent to you (requires -u)")
parser.add_option("--friends", action="store_const", const=show_friends,
dest="action", help="Show your friends' recent tweets (requires -u)")
opts, args = parser.parse_args()
twitter = Twitter()
# We'll use regular HTTP authentication, so ask for a password and add
# it in the regular httplib2 way.
if opts.username is not None:
password = raw_input("Password (will echo): ")
twitter.add_credentials(opts.username, password)
try:
print
opts.action(twitter)
print
except httplib.HTTPException, exc:
# The API could be down, or the credentials on an auth-only request
# could be wrong, so show the error to the end user.
print >>sys.stderr, "Error making request: %s: %s" \
% (type(exc).__name__, str(exc))
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
|
|
# -*- coding: UTF-8 -*-
import numpy as np
#import caid.cad_geometry as cg
from caid.cad_geometry import line, square
from caid.utils.extraction import splineRefMat
DIM_1D = 1
DIM_2D = 2
# ...
def test1D1():
spl = splineRefMat(DIM_1D)
list_r = list(np.random.random(20))
for r in list_r:
nx = 7
px = 2
geo = line(n=[nx], p=[px])
nrb = geo[0]
knots = nrb.knots[0]
n = nrb.shape[0]
p = nrb.degree[0]
P = nrb.points
dim = P.shape[1]
Q = spl.refineSpline(dim, r, p, n, knots, P)
M = spl.construct([r], p, n, knots)
R = M.dot(nrb.points[:,0])
geo.refine(id=0, list_t=[[r]])
nrb = geo[0]
#print nrb.knots[0]
Q = np.asarray(Q[:,0])
P = np.asarray(nrb.points[:,0])
assert(np.allclose(P,Q))
assert(np.allclose(P,R))
print("test1D1: OK")
# ...
# ...
def test1D2():
spl = splineRefMat(DIM_1D)
# list_r = list(np.random.random(20))
list_r = [0.1,0.2,0.3]
nx = 3
px = 2
geo = line(n=[nx], p=[px])
nrb = geo[0]
knots = nrb.knots[0]
n = nrb.shape[0]
p = nrb.degree[0]
P = nrb.points
M = spl.construct(list_r, p, n, knots)
from scipy.io import mmwrite
mmwrite('M.mtx', M)
R = M.dot(nrb.points[:,0])
geo = line(n=[nx], p=[px])
geo.refine(id=0, list_t=[list_r])
nrb = geo[0]
P = np.asarray(nrb.points[:,0])
assert(np.allclose(P,R))
print("test1D2: OK")
# ...
# ...
def test1D3():
spl = splineRefMat(DIM_1D)
# list_r = list(np.random.random(20))
list_r = [0.25,0.5,0.75]
nx = 3
px = 2
geo = line(n=[nx], p=[px])
nrb = geo[0]
knots = nrb.knots[0]
# nrb = nrb.clone().unclamp(0)
n = nrb.shape[0]
p = nrb.degree[0]
P = nrb.points
M = spl.construct(list_r, p, n, knots)
from scipy.io import mmwrite
mmwrite('M.mtx', M)
R = M.dot(nrb.points[:,0])
geo = line(n=[nx], p=[px])
geo.refine(id=0, list_t=[list_r])
nrb = geo[0]
P = np.asarray(nrb.points[:,0])
assert(np.allclose(P,R))
print("test1D3: OK")
# ...
# ...
def test2D1():
spl = splineRefMat(DIM_1D)
list_r1 = list(np.random.random(20))
list_r2 = list(np.random.random(20))
nx = 10 ; ny = 15
px = 3 ; py = 2
geo = square(n=[nx, ny], p=[px, py])
dim = geo.dim
nrb = geo[0]
u1,u2 = nrb.knots
n1,n2 = nrb.shape
p1,p2 = nrb.degree
M1 = spl.construct(list_r1, p1, n1, u1)
M2 = spl.construct(list_r2, p2, n2, u2)
tM2 = M2.transpose().tocsr()
Px = nrb.points[:,:,0].copy()
Py = nrb.points[:,:,1].copy()
geo.refine(id=0, list_t=[list_r1, list_r2])
nrb = geo[0]
Qx = np.asarray(nrb.points[:,:,0])
Qy = np.asarray(nrb.points[:,:,1])
from scipy.sparse import csr_matrix
list_P = [Px, Py]
list_Q = [Qx, Qy]
# list_P = [Px]
# list_Q = [Qx]
for (U,Q) in zip(list_P, list_Q):
Us = csr_matrix(U).dot(tM2)
tV = M1.dot(Us).todense()
assert(np.allclose(tV, Q))
print("test2D1: OK")
# ...
# ...
def test2D2():
spl = splineRefMat(DIM_2D)
list_r1 = list(np.random.random(20))
list_r2 = list(np.random.random(20))
# list_r1 = [0.1, 0.2]
# list_r2 = [0.9]
nx = 20 ; ny = 31
px = 3 ; py = 2
geo = square(n=[nx, ny], p=[px, py])
n = nx + px + 1 + len(list_r1)
m = ny + py + 1 + len(list_r2)
dim = geo.dim
nrb = geo[0]
u1,u2 = nrb.knots
n1,n2 = nrb.shape
p1,p2 = nrb.degree
H = spl.construct(list_r1, list_r2, p1, p2, n1, n2, u1, u2)
Px = nrb.points[:,:,0].copy()
Py = nrb.points[:,:,1].copy()
geo.refine(id=0, list_t=[list_r1, list_r2])
nrb = geo[0]
Qx = np.asarray(nrb.points[:,:,0])
Qy = np.asarray(nrb.points[:,:,1])
list_P = [Px, Py]
list_Q = [Qx, Qy]
# list_P = [Px]
# list_Q = [Qx]
for (U,Q) in zip(list_P, list_Q):
nU,mU = U.shape
vecU = U.transpose().reshape(nU*mU)
vecP = H.dot(vecU)
P = vecP.reshape((m,n)).transpose()
assert(np.allclose(P, Q))
print("test2D2: OK")
# ...
# ...
def test2D3():
spl = splineRefMat(DIM_2D, useDecouple=True)
list_r1 = list(np.random.random(20))
list_r2 = list(np.random.random(20))
# list_r1 = [0.1, 0.2]
# list_r2 = [0.9]
nx = 20 ; ny = 31
px = 3 ; py = 2
geo = square(n=[nx, ny], p=[px, py])
n = nx + px + 1 + len(list_r1)
m = ny + py + 1 + len(list_r2)
dim = geo.dim
nrb = geo[0]
u1,u2 = nrb.knots
n1,n2 = nrb.shape
p1,p2 = nrb.degree
H1, H2 = spl.construct(list_r1, list_r2, p1, p2, n1, n2, u1, u2)
assert(np.allclose(np.array(H1.shape), np.array((44,24))))
assert(np.allclose(np.array(H2.shape), np.array((54,34))))
print("test2D3: OK")
# ...
###############################################################################
if __name__=="__main__":
test1D1()
test1D2()
test1D3()
test2D1()
test2D2()
test2D3()
|
|
"""
================================================
:mod:`replacers` -- Survivor replacement methods
================================================
This module provides pre-defined replacers for evolutionary computations.
All replacer functions have the following arguments:
- *random* -- the random number generator object
- *population* -- the population of individuals
- *parents* -- the list of parent individuals
- *offspring* -- the list of offspring individuals
- *args* -- a dictionary of keyword arguments
Each replacer function returns the list of surviving individuals.
.. Copyright 2012 Aaron Garrett
.. This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
.. This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
.. You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
.. module:: replacers
.. moduleauthor:: Aaron Garrett <aaron.lee.garrett@gmail.com>
"""
import math
def default_replacement(random, population, parents, offspring, args):
"""Performs no replacement, returning the original population.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
"""
return population
def truncation_replacement(random, population, parents, offspring, args):
"""Replaces population with the best of the population and offspring.
This function performs truncation replacement, which means that
the entire existing population is replaced by the best from among
the current population and offspring, keeping the existing population
size fixed. This is similar to so-called "plus" replacement in the
evolution strategies literature, except that "plus" replacement
considers only parents and offspring for survival. However, if the
entire population are parents (which is often the case in evolution
strategies), then truncation replacement and plus-replacement are
equivalent approaches.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
"""
psize = len(population)
population.extend(list(offspring))
population.sort(reverse=True)
return population[:psize]
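# Worked sketch of the replacer signature described in the module docstring,
# applied to truncation_replacement with plain numbers (anything orderable
# works, since this replacer only relies on sorting). Purely illustrative.
def _truncation_replacement_example():
    import random
    population = [3, 1, 2]
    offspring = [5, 0]
    survivors = truncation_replacement(random.Random(), population,
                                       parents=list(population),
                                       offspring=offspring, args={})
    assert survivors == [5, 3, 2]
    return survivors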
def steady_state_replacement(random, population, parents, offspring, args):
"""Performs steady-state replacement for the offspring.
This function performs steady-state replacement, which means that
the offspring replace the least fit individuals in the existing
population, even if those offspring are less fit than the individuals
that they replace.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
"""
population.sort()
num_to_replace = min(len(offspring), len(population))
population[:num_to_replace] = offspring[:num_to_replace]
return population
def generational_replacement(random, population, parents, offspring, args):
"""Performs generational replacement with optional weak elitism.
This function performs generational replacement, which means that
the entire existing population is replaced by the offspring,
truncating to the population size if the number of offspring is
larger. Weak elitism may also be specified through the `num_elites`
keyword argument in args. If this is used, the best `num_elites`
individuals in the current population are allowed to survive if
they are better than the worst `num_elites` offspring.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *num_elites* -- number of elites to consider (default 0)
"""
num_elites = args.setdefault('num_elites', 0)
population.sort(reverse=True)
offspring.extend(population[:num_elites])
offspring.sort(reverse=True)
survivors = offspring[:len(population)]
return survivors
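# Worked sketch of the num_elites option described above: with one elite, the
# best current individual survives if it beats the worst offspring. The toy
# fitness values are invented for illustration.
def _generational_elitism_example():
    import random
    population = [5, 4, 3, 2, 1]
    offspring = [6, 0, 0, 0, 0]
    survivors = generational_replacement(random.Random(), population,
                                         parents=list(population),
                                         offspring=offspring,
                                         args={'num_elites': 1})
    # The elite 5 displaces one of the weakest offspring.
    assert survivors == [6, 5, 0, 0, 0]
    return survivors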
def random_replacement(random, population, parents, offspring, args):
"""Performs random replacement with optional weak elitism.
This function performs random replacement, which means that
the offspring replace random members of the population, keeping
the population size constant. Weak elitism may also be specified
through the `num_elites` keyword argument in args. If this is used,
the best `num_elites` individuals in the current population are
allowed to survive if they are better than the worst `num_elites`
offspring.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *num_elites* -- number of elites to consider (default 0)
"""
num_elites = args.setdefault('num_elites', 0)
population.sort(reverse=True)
num_to_replace = min(len(offspring), len(population) - num_elites)
valid_indices = range(num_elites, len(population))
rep_index = random.sample(valid_indices, num_to_replace)
for i, repind in enumerate(rep_index):
population[repind] = offspring[i]
return population
def plus_replacement(random, population, parents, offspring, args):
"""Performs "plus" replacement.
This function performs "plus" replacement, which means that
the entire existing population is replaced by the best
population-many elements from the combined set of parents and
offspring.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
"""
pool = list(offspring)
pool.extend(parents)
pool.sort(reverse=True)
survivors = pool[:len(population)]
return survivors
def comma_replacement(random, population, parents, offspring, args):
"""Performs "comma" replacement.
This function performs "comma" replacement, which means that
the entire existing population is replaced by the best
population-many elements from the offspring. This function
makes the assumption that the size of the offspring is at
least as large as the original population. Otherwise, the
population size will not be constant.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
"""
offspring.sort(reverse=True)
survivors = offspring[:len(population)]
return survivors
def crowding_replacement(random, population, parents, offspring, args):
"""Performs crowding replacement as a form of niching.
This function performs crowding replacement, which means that
the members of the population are replaced one-at-a-time with
each of the offspring. A random sample of `crowding_distance`
individuals is pulled from the current population, and the
closest individual to the current offspring (where "closest"
is determined by the `distance_function`) is replaced by that
offspring, if the offspring is better. It is possible for one
offspring to replace an earlier offspring in the same generation,
given the random sample that is taken of the current survivors
for each offspring.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *distance_function* -- a function that accepts two candidate
solutions and returns the distance between them (default
Euclidean L2 distance)
- *crowding_distance* -- a positive integer representing the
number of closest solutions to consider as a "crowd" (default 2)
"""
def distance(x, y):
return math.sqrt(sum([(a - b)**2 for a, b in zip(x, y)]))
try:
distance_function = args['distance_function']
except KeyError:
distance_function = distance
args['distance_function'] = distance_function
crowding_distance = args.setdefault('crowding_distance', 2)
survivors = population
for o in offspring:
pool = random.sample(survivors, crowding_distance)
closest = min(pool, key=lambda x: distance_function(o.candidate, x.candidate))
if o > closest:
survivors.remove(closest)
survivors.append(o)
return survivors
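# Minimal sketch of crowding_replacement with a tiny stand-in individual class
# (a real evolutionary-computation framework would supply its own): candidates
# are points in the plane and individuals compare by fitness.
def _crowding_replacement_example():
    import random

    class _Ind(object):
        def __init__(self, candidate, fitness):
            self.candidate = candidate
            self.fitness = fitness

        def __lt__(self, other):
            return self.fitness < other.fitness

        def __gt__(self, other):
            return self.fitness > other.fitness

    population = [_Ind([0.0, 0.0], 1.0), _Ind([5.0, 5.0], 2.0)]
    offspring = [_Ind([0.1, 0.1], 3.0)]  # close to the first, and fitter
    survivors = crowding_replacement(random.Random(0), population,
                                     parents=[], offspring=offspring,
                                     args={'crowding_distance': 2})
    # The offspring replaces its nearest neighbour, the individual at (0, 0).
    return survivors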
#-------------------------------------------
# Algorithm-specific Replacement Strategies
#-------------------------------------------
def simulated_annealing_replacement(random, population, parents, offspring, args):
"""Replaces population using the simulated annealing schedule.
This function performs simulated annealing replacement based
on a temperature and a cooling rate. These can be specified
by the keyword arguments `temperature`, which should be the
initial temperature, and `cooling_rate`, which should be the
coefficient by which the temperature is reduced. If these
keyword arguments are not present, then the function will
attempt to base the cooling schedule either on the ratio of
evaluations to the maximum allowed evaluations or on the
ratio of generations to the maximum allowed generations.
Each of these ratios is of the form ``(max - current)/max``
so that the cooling schedule moves smoothly from 1 to 0.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *temperature* -- the initial temperature
- *cooling_rate* -- a real-valued coefficient in the range (0, 1)
by which the temperature should be reduced
"""
try:
temp = args['temperature']
cooling_rate = args['cooling_rate']
temp = temp * cooling_rate
args['temperature'] = temp
except KeyError:
try:
num_evals = args['_ec'].num_evaluations
max_evals = args['max_evaluations']
temp = float(max_evals - num_evals) / float(max_evals)
except KeyError:
num_gens = args['_ec'].num_generations
max_gens = args['max_generations']
temp = float(max_gens - num_gens) / float(max_gens)  # (max - current)/max, cooling from 1 to 0 as documented above
new_pop = []
for p, o in zip(parents, offspring):
if o >= p:
new_pop.append(o)
elif temp > 0 and random.random() < math.exp(-abs(p.fitness - o.fitness) / float(temp)):
new_pop.append(o)
else:
new_pop.append(p)
return new_pop
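# Illustrative sketch (hypothetical helper, not part of the original module): the
# acceptance rule used above, factored out for clarity. A worse offspring replaces
# its parent with probability exp(-|f_parent - f_offspring| / temperature); at or
# below zero temperature, only improvements are accepted. Relies on the `math`
# import already used elsewhere in this module.
def _example_sa_acceptance_probability(parent_fitness, offspring_fitness, temperature):
    """Return the probability of accepting a worse offspring at the given temperature."""
    if temperature <= 0:
        return 0.0
    return math.exp(-abs(parent_fitness - offspring_fitness) / float(temperature))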
def nsga_replacement(random, population, parents, offspring, args):
"""Replaces population using the non-dominated sorting technique from NSGA-II.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
"""
survivors = []
combined = list(population)
combined.extend(offspring)
# Perform the non-dominated sorting to determine the fronts.
fronts = []
pop = set(range(len(combined)))
while len(pop) > 0:
front = []
for p in pop:
dominated = False
for q in pop:
if combined[p] < combined[q]:
dominated = True
break
if not dominated:
front.append(p)
fronts.append([dict(individual=combined[f], index=f) for f in front])
pop = pop - set(front)
# Go through each front and add all the elements until doing so
# would put you above the population limit. At that point, fall
# back to the crowding distance to determine who to put into the
# next population. Individuals with higher crowding distances
# (i.e., more distance between neighbors) are preferred.
for i, front in enumerate(fronts):
if len(survivors) + len(front) > len(population):
# Determine the crowding distance.
distance = [0 for _ in range(len(combined))]
individuals = list(front)
num_individuals = len(individuals)
num_objectives = len(individuals[0]['individual'].fitness)
for obj in range(num_objectives):
individuals.sort(key=lambda x: x['individual'].fitness[obj])
distance[individuals[0]['index']] = float('inf')
distance[individuals[-1]['index']] = float('inf')
for i in range(1, num_individuals-1):
distance[individuals[i]['index']] = (distance[individuals[i]['index']] +
(individuals[i+1]['individual'].fitness[obj] -
individuals[i-1]['individual'].fitness[obj]))
crowd = [dict(dist=distance[f['index']], index=f['index']) for f in front]
crowd.sort(key=lambda x: x['dist'], reverse=True)
last_rank = [combined[c['index']] for c in crowd]
r = 0
num_added = 0
num_left_to_add = len(population) - len(survivors)
while r < len(last_rank) and num_added < num_left_to_add:
if last_rank[r] not in survivors:
survivors.append(last_rank[r])
num_added += 1
r += 1
# If we've filled out our survivor list, then stop.
# Otherwise, process the next front in the list.
if len(survivors) == len(population):
break
else:
for f in front:
if f['individual'] not in survivors:
survivors.append(f['individual'])
return survivors
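# Illustrative sketch (hypothetical helper, not part of the original module): the
# dominance test that the non-dominated sort above relies on. It assumes the
# individuals' comparison operators encode Pareto dominance, as in the sort loop.
def _example_is_dominated(individual, others):
    """Return True if any individual in `others` Pareto-dominates `individual`."""
    return any(individual < other for other in others)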
def paes_replacement(random, population, parents, offspring, args):
"""Replaces population using the Pareto Archived Evolution Strategy method.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
"""
archive = args['_ec'].archive
archiver = args['_ec'].archiver
survivors = []
for p, o in zip(parents, offspring):
if o == p:
survivors.append(p)
elif o in archive:
survivors.append(p)
elif o > p:
archive = archiver(random, [o], archive, args)
survivors.append(o)
elif o >= p:
for a in archive:
if o > a or o < a:
break
if o >= a:
archive = archiver(random, [o], archive, args)
if o > a or archiver.grid_population[o.grid_location] <= archiver.grid_population[p.grid_location]:
survivors.append(o)
else:
survivors.append(p)
else:
survivors.append(p)
else:
survivors.append(p)
return survivors
|
|
"""Configuration for ACLs."""
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import netaddr
from faucet import valve_of
from faucet import valve_acl
from faucet.valve_of import MATCH_FIELDS, OLD_MATCH_FIELDS
from faucet.conf import Conf, test_config_condition, InvalidConfigError
from faucet.valve_table import wildcard_table
from ryu.ofproto import ether
class ACL(Conf):
"""Contains the state for an ACL, including the configuration.
ACL Config
ACLs are configured under the 'acls' configuration block. The acls block
contains a dictionary of individual acls each keyed by its name.
Each acl contains a list of rules; a packet will have the first matching rule
applied to it.
Each rule is a dictionary containing the single key 'rule' whose value holds the
matches and actions for the rule.
The matches are key/values based on the Ryu RESTful API.
The key 'actions' contains a dictionary with keys/values as follows:
* allow (int): if 1 allow the packet to continue through the Faucet \
pipeline, if 0 drop the packet.
* force_port_vlan (int): if 1, do not verify the VLAN/port association \
for this packet and override any VLAN ACL on the forced VLAN.
* meter (str): meter to apply to the packet
* output (dict): used to output a packet directly. details below.
* cookie (int): set flow cookie to this value on this flow
The output action contains a dictionary with the following elements:
* tunnel (dict): the tunnel specification; creates a tunnel from the applied port(s) \
to the specified destination
* port (int or string): the port to output the packet to
* ports (list): a list of the ports (int or string) to output the packet to
* set_fields (list): a list of fields to set with values
* pop_vlans (int): pop the packet vlan before outputting
* vlan_vid (int): push the vlan vid on the packet when outputting
* vlan_vids (list): push the list of vlans on the packet when outputting, with optional eth_type
* swap_vid (int): rewrite the vlan vid of the packet when outputting
* failover (dict): Output with a failover port (experimental)
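An illustrative (hypothetical) configuration sketch, with arbitrary ACL and field
values, showing the shape described above:

acls:
    office-acl:
        - rule:
            eth_type: 0x800
            ipv4_src: 10.0.100.0/24
            actions:
                allow: 1
        - rule:
            actions:
                allow: 0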
"""
defaults = {
'rules': None,
'exact_match': False,
'dot1x_assigned': False,
}
defaults_types = {
'rules': list,
'exact_match': bool,
'dot1x_assigned': bool,
}
rule_types = {
'cookie': int,
'actions': dict,
'description': str,
}
actions_types = {
'meter': str,
'mirror': (str, int),
'output': (dict, list),
'allow': int,
'force_port_vlan': int,
}
output_actions_types = {
'tunnel': dict,
'port': (str, int),
'ports': list,
'failover': dict,
'set_fields': list,
'pop_vlans': int,
'swap_vid': int,
'vlan_vid': int,
'vlan_vids': list,
}
tunnel_types = {
'type': (str, None),
'tunnel_id': (str, int, None),
'dp': str,
'port': (str, int, None),
'exit_instructions': (list, None),
'maintain_encapsulation': bool,
'bi_directional': bool,
'reverse': bool,
}
mutable_attrs = frozenset(['tunnel_sources'])
def __init__(self, _id, dp_id, conf):
self.rules = []
self.exact_match = None
self.dot1x_assigned = None
self.meter = False
self.matches = {}
self.set_fields = set()
self._ports_resolved = False
# Tunnel info maintains the tunnel output information for each tunnel rule
self.tunnel_dests = {}
# Tunnel sources is a list of the sources in the network for this ACL
self.tunnel_sources = {}
# Tunnel rules is the rules for each tunnel in the ACL for each source
self.dyn_tunnel_rules = {}
self.dyn_reverse_tunnel_rules = {}
for match_fields in (MATCH_FIELDS, OLD_MATCH_FIELDS):
self.rule_types.update({match: (str, int) for match in match_fields})
conf = copy.deepcopy(conf)
if isinstance(conf, dict):
rules = conf.get('rules', [])
elif isinstance(conf, list):
rules = conf
conf = {}
else:
raise InvalidConfigError(
'ACL %s conf is an invalid type' % _id)
conf['rules'] = []
for rule in rules:
normalized_rule = rule
if isinstance(rule, dict):
normalized_rule = rule.get('rule', rule)
if normalized_rule is None:
normalized_rule = {k: v for k, v in rule.items() if v is not None}
test_config_condition(not isinstance(normalized_rule, dict), (
'ACL rule is %s not %s (%s)' % (type(normalized_rule), dict, rules)))
conf['rules'].append(normalized_rule)
super(ACL, self).__init__(_id, dp_id, conf)
def finalize(self):
self._ports_resolved = True
super(ACL, self).finalize()
def check_config(self):
test_config_condition(
not self.rules, 'no rules found for ACL %s' % self._id)
for rule in self.rules:
self._check_conf_types(rule, self.rule_types)
for rule_field, rule_conf in rule.items():
if rule_field == 'cookie':
test_config_condition(
rule_conf < 0 or rule_conf > 2**16,
'rule cookie value must be 0-2**16')
elif rule_field == 'actions':
test_config_condition(
not rule_conf,
'Missing rule actions in ACL %s' % self._id)
self._check_conf_types(rule_conf, self.actions_types)
for action_name, action_conf in rule_conf.items():
if action_name == 'output':
if isinstance(action_conf, (list, tuple)):
# New ordered format
for subconf in action_conf:
# Make sure only one specified action per list element
test_config_condition(
len(subconf) > 1,
'ACL ordered output must have only one action per element')
# Ensure correct action format
self._check_conf_types(subconf, self.output_actions_types)
else:
# Old format
self._check_conf_types(
action_conf, self.output_actions_types)
def build(self, meters, vid, port_num):
"""Check that ACL can be built from config."""
self.matches = {}
self.set_fields = set()
self.meter = False
if self.rules:
try:
ofmsgs = valve_acl.build_acl_ofmsgs(
[self], wildcard_table,
[valve_of.goto_table(wildcard_table)],
[valve_of.goto_table(wildcard_table)],
2**16-1, meters, self.exact_match,
vlan_vid=vid, port_num=port_num)
except (netaddr.core.AddrFormatError, KeyError, ValueError) as err:
raise InvalidConfigError from err
test_config_condition(not ofmsgs, 'no OF messages were generated')
for ofmsg in ofmsgs:
try:
valve_of.verify_flowmod(ofmsg)
except (KeyError, ValueError) as err:
raise InvalidConfigError from err
except Exception as err:
raise err
if valve_of.is_flowmod(ofmsg):
apply_actions = []
for inst in ofmsg.instructions:
if valve_of.is_apply_actions(inst):
apply_actions.extend(inst.actions)
elif valve_of.is_meter(inst):
self.meter = True
for action in apply_actions:
if valve_of.is_set_field(action):
self.set_fields.add(action.key)
for match, value in ofmsg.match.items():
has_mask = isinstance(value, (tuple, list))
if has_mask or match not in self.matches:
self.matches[match] = has_mask
for tunnel_rules in self.tunnel_dests.values():
if 'exit_instructions' in tunnel_rules:
exit_inst = tunnel_rules['exit_instructions']
try:
ofmsgs = valve_acl.build_tunnel_ofmsgs(
exit_inst, wildcard_table, 1)
except (netaddr.core.AddrFormatError, KeyError, ValueError) as err:
raise InvalidConfigError from err
test_config_condition(not ofmsgs, 'no OF messages were generated')
for ofmsg in ofmsgs:
try:
valve_of.verify_flowmod(ofmsg)
except (KeyError, ValueError) as err:
raise InvalidConfigError from err
except Exception as err:
raise err
if valve_of.is_flowmod(ofmsg):
apply_actions = []
for inst in ofmsg.instructions:
if valve_of.is_apply_actions(inst):
apply_actions.extend(inst.actions)
elif valve_of.is_meter(inst):
self.meter = True
for action in apply_actions:
if valve_of.is_set_field(action):
self.set_fields.add(action.key)
for match, value in ofmsg.match.items():
has_mask = isinstance(value, (tuple, list))
if has_mask or match not in self.matches:
self.matches[match] = has_mask
return (self.matches, self.set_fields, self.meter)
def get_meters(self):
"""Yield meters for each rule in ACL"""
for rule in self.rules:
if 'actions' not in rule or 'meter' not in rule['actions']:
continue
yield rule['actions']['meter']
def get_mirror_destinations(self):
"""Yield mirror destinations for each rule in ACL"""
for rule in self.rules:
if 'actions' not in rule or 'mirror' not in rule['actions']:
continue
yield rule['actions']['mirror']
def _resolve_ordered_output_ports(self, output_list, resolve_port_cb, resolve_tunnel_objects):
"""Resolve output actions in the ordered list format"""
result = []
for action in output_list:
for key, value in action.items():
if key == 'tunnel':
tunnel = value
# Fetch tunnel items from the tunnel output dict
test_config_condition(
'dp' not in tunnel,
'ACL (%s) tunnel DP not defined' % self._id)
tunnel_dp = tunnel['dp']
tunnel_port = tunnel.get('port', None)
tunnel_id = tunnel.get('tunnel_id', None)
tunnel_type = tunnel.get('type', 'vlan')
tunnel_exit_instructions = tunnel.get('exit_instructions', [])
tunnel_direction = tunnel.get('bi_directional', False)
tunnel_maintain = tunnel.get('maintain_encapsulation', False)
tunnel_reverse = tunnel.get('reverse', False)
test_config_condition(
tunnel_reverse and tunnel_direction,
('Tunnel ACL %s cannot contain values for the fields '
'`bi_directional` and `reverse` at the same time' % self._id))
# Resolve the tunnel items
dst_dp, dst_port, tunnel_id = resolve_tunnel_objects(
tunnel_dp, tunnel_port, tunnel_id)
# Compile the tunnel into an easy-access dictionary
tunnel_dict = {
'dst_dp': dst_dp,
'dst_port': dst_port,
'tunnel_id': tunnel_id,
'type': tunnel_type,
'exit_instructions': tunnel_exit_instructions,
'bi_directional': tunnel_direction,
'maintain_encapsulation': tunnel_maintain,
'reverse': tunnel_reverse,
}
self.tunnel_dests[tunnel_id] = tunnel_dict
result.append({key: tunnel_id})
elif key == 'port':
port_name = value
port = resolve_port_cb(port_name)
test_config_condition(
not port,
'ACL (%s) output port undefined in DP: %s' % (self._id, self.dp_id))
result.append({key: port})
elif key == 'ports':
resolved_ports = [
resolve_port_cb(p) for p in value]
test_config_condition(
None in resolved_ports,
'ACL (%s) output port(s) not defined in DP: %s' % (self._id, self.dp_id))
result.append({key: resolved_ports})
elif key == 'failover':
failover = value
test_config_condition(not isinstance(failover, dict), (
'failover is not a dictionary'))
failover_dict = {}
for failover_name, failover_values in failover.items():
if failover_name == 'ports':
resolved_ports = [
resolve_port_cb(p) for p in failover_values]
test_config_condition(
None in resolved_ports,
'ACL (%s) failover port(s) not defined in DP: %s' % (
self._id, self.dp_id))
failover_dict[failover_name] = resolved_ports
else:
failover_dict[failover_name] = failover_values
result.append({key: failover_dict})
else:
result.append(action)
return result
def _resolve_output_ports(self, action_conf, resolve_port_cb, resolve_tunnel_objects):
"""Resolve the values for output actions in the ACL"""
if isinstance(action_conf, (list, tuple)):
return self._resolve_ordered_output_ports(
action_conf, resolve_port_cb, resolve_tunnel_objects)
result = {}
test_config_condition(
'vlan_vid' in action_conf and 'vlan_vids' in action_conf,
'ACL %s has both vlan_vid and vlan_vids defined' % self._id)
test_config_condition(
'port' in action_conf and 'ports' in action_conf,
'ACL %s has both port and ports defined' % self._id)
for output_action, output_action_values in action_conf.items():
if output_action == 'tunnel':
tunnel = output_action_values
# Fetch tunnel items from the tunnel output dict
test_config_condition(
'dp' not in tunnel,
'ACL (%s) tunnel DP not defined' % self._id)
tunnel_dp = tunnel['dp']
tunnel_port = tunnel.get('port', None)
tunnel_id = tunnel.get('tunnel_id', None)
tunnel_type = tunnel.get('type', 'vlan')
tunnel_exit_instructions = tunnel.get('exit_instructions', [])
tunnel_direction = tunnel.get('bi_directional', False)
tunnel_maintain = tunnel.get('maintain_encapsulation', False)
tunnel_reverse = tunnel.get('reverse', False)
test_config_condition(
tunnel_reverse and tunnel_direction,
('Tunnel ACL %s cannot contain values for the fields '
'`bi_directional` and `reverse` at the same time' % self._id))
# Resolve the tunnel items
dst_dp, dst_port, tunnel_id = resolve_tunnel_objects(
tunnel_dp, tunnel_port, tunnel_id)
# Compile the tunnel into an easy-access dictionary
tunnel_dict = {
'dst_dp': dst_dp,
'dst_port': dst_port,
'tunnel_id': tunnel_id,
'type': tunnel_type,
'exit_instructions': tunnel_exit_instructions,
'bi_directional': tunnel_direction,
'maintain_encapsulation': tunnel_maintain,
'reverse': tunnel_reverse,
}
self.tunnel_dests[tunnel_id] = tunnel_dict
result[output_action] = tunnel_id
elif output_action == 'port':
port_name = output_action_values
port = resolve_port_cb(port_name)
test_config_condition(
not port,
('ACL (%s) output port undefined in DP: %s'\
% (self._id, self.dp_id))
)
result[output_action] = port
elif output_action == 'ports':
resolved_ports = [
resolve_port_cb(p) for p in output_action_values]
test_config_condition(
None in resolved_ports,
('ACL (%s) output port(s) not defined in DP: %s'\
% (self._id, self.dp_id))
)
result[output_action] = resolved_ports
elif output_action == 'failover':
failover = output_action_values
test_config_condition(not isinstance(failover, dict), (
'failover is not a dictionary'))
result[output_action] = {}
for failover_name, failover_values in failover.items():
if failover_name == 'ports':
resolved_ports = [
resolve_port_cb(p) for p in failover_values]
test_config_condition(
None in resolved_ports,
('ACL (%s) failover port(s) not defined in DP: %s'\
% (self._id, self.dp_id))
)
result[output_action][failover_name] = resolved_ports
else:
result[output_action][failover_name] = failover_values
else:
result[output_action] = output_action_values
return result
def resolve_ports(self, resolve_port_cb, resolve_tunnel_objects):
"""Resolve the values for the actions of an ACL"""
if self._ports_resolved:
return
for rule_conf in self.rules:
if 'actions' in rule_conf:
actions_conf = rule_conf['actions']
resolved_actions = {}
test_config_condition(not isinstance(actions_conf, dict), (
'actions value is not a dictionary'))
for action_name, action_conf in actions_conf.items():
if action_name == 'mirror':
resolved_port = resolve_port_cb(action_conf)
test_config_condition(
resolved_port is None,
('ACL (%s) mirror port is not defined in DP: %s'\
% (self._id, self.dp_id))
)
resolved_actions[action_name] = resolved_port
elif action_name == 'output':
resolved_action = self._resolve_output_ports(
action_conf, resolve_port_cb, resolve_tunnel_objects)
resolved_actions[action_name] = resolved_action
else:
resolved_actions[action_name] = action_conf
rule_conf['actions'] = resolved_actions
self._ports_resolved = True
def requires_reverse_tunnel(self, tunnel_id):
"""Returns true if the tunnel requires a reverse pathway"""
return self.tunnel_dests[tunnel_id]['bi_directional']
def get_num_tunnels(self):
"""Returns the number of tunnels specified in the ACL"""
num_tunnels = 0
for rule_conf in self.rules:
if self.does_rule_contain_tunnel(rule_conf):
output_conf = rule_conf['actions']['output']
if isinstance(output_conf, list):
for action in output_conf:
for key in action:
if key == 'tunnel':
num_tunnels += 1
else:
if 'tunnel' in output_conf:
num_tunnels += 1
return num_tunnels
def get_tunnel_rules(self, tunnel_id):
"""Return the list of rules that apply a specific tunnel ID"""
rules = []
for rule_conf in self.rules:
if self.does_rule_contain_tunnel(rule_conf):
output_conf = rule_conf['actions']['output']
if isinstance(output_conf, (list, tuple)):
for action in output_conf:
for key, value in action.items():
if key == 'tunnel' and value == tunnel_id:
rules.append(rule_conf)
continue
else:
if output_conf['tunnel'] == tunnel_id:
rules.append(rule_conf)
return rules
def does_rule_contain_tunnel(self, rule_conf):
"""Return true if the ACL rule contains a tunnel"""
if 'actions' in rule_conf:
if 'output' in rule_conf['actions']:
output_conf = rule_conf['actions']['output']
if isinstance(output_conf, (list, tuple)):
for action in output_conf:
for key in action:
if key == 'tunnel':
return True
else:
if 'tunnel' in output_conf:
return True
return False
def is_tunnel_acl(self):
"""Return true if the ACL contains a tunnel"""
if self.tunnel_dests:
return True
for rule_conf in self.rules:
if self.does_rule_contain_tunnel(rule_conf):
return True
return False
def _tunnel_source_id(self, source):
"""Return ID for a tunnel source."""
return tuple(sorted(source.items()))
def add_tunnel_source(self, dp, port, reverse=False, bi_directional=False):
"""Add a source dp/port pair for the tunnel ACL"""
source = {'dp': dp, 'port': port, 'reverse': reverse, 'bi_directional': bi_directional}
source_id = self._tunnel_source_id(source)
self.tunnel_sources[source_id] = source
for _id in self.tunnel_dests:
self.dyn_tunnel_rules.setdefault(_id, {})
self.dyn_reverse_tunnel_rules.setdefault(_id, {})
def verify_tunnel_rules(self):
"""Make sure that matches & set fields are configured correctly to handle tunnels"""
if 'eth_type' not in self.matches:
self.matches['eth_type'] = False
if 'in_port' not in self.matches:
self.matches['in_port'] = False
if 'vlan_vid' not in self.matches:
self.matches['vlan_vid'] = False
if 'vlan_vid' not in self.set_fields:
self.set_fields.add('vlan_vid')
if 'vlan_pcp' not in self.matches:
self.matches['vlan_pcp'] = False
if 'vlan_pcp' not in self.set_fields:
self.set_fields.add('vlan_pcp')
def update_reverse_tunnel_rules(self, curr_dp, source_id, tunnel_id, out_port, output_table):
"""Update the tunnel rulelist for when the output port has changed (reverse direction)"""
if not self.requires_reverse_tunnel(tunnel_id):
return False
dst_dp = self.tunnel_sources[source_id]['dp']
src_dp = self.tunnel_dests[tunnel_id]['dst_dp']
prev_list = self.dyn_reverse_tunnel_rules[tunnel_id].get(source_id, [])
new_list = []
if curr_dp == src_dp and curr_dp != dst_dp:
# SRC DP: vlan_vid, vlan_pcp, actions=[out_port]
# NOTE: For the bi_directional reverse tunnel, we assume that
# the packet already has the required encapsulation
new_list = [{'port': out_port}]
elif curr_dp == dst_dp and curr_dp != src_dp:
# DST DP: vlan_vid, vlan_pcp, actions=[pop_vlans, output]
new_list = [{'pop_vlans': 1}]
if out_port is None:
# DP dest tunnel, so we fall through into the eth_dst output table
new_list.append({'goto': output_table.table_id})
else:
# Tunnel has port specified, so output to destination
new_list.append({'port': out_port})
elif curr_dp == src_dp and curr_dp == dst_dp:
# SINGLE DP: actions=[pop_vlans, out_port]
new_list = [{'pop_vlans': 1}]
if out_port is None:
# DP dest tunnel, so we fall through into the eth_dst output table
new_list.extend([{'goto': output_table.table_id}])
else:
# Tunnel has port specified, so output to destination
new_list.extend([{'port': out_port}])
else:
# TRANSIT DP: vlan_vid, vlan_pcp, actions=[output]
new_list = [{'port': out_port}]
if new_list != prev_list:
self.dyn_reverse_tunnel_rules[tunnel_id][source_id] = new_list
return True
return True
def update_source_tunnel_rules(self, curr_dp, source_id, tunnel_id, out_port, output_table):
"""Update the tunnel rulelist for when the output port has changed"""
src_dp = self.tunnel_sources[source_id]['dp']
dst_dp = self.tunnel_dests[tunnel_id]['dst_dp']
prev_list = self.dyn_tunnel_rules[tunnel_id].get(source_id, [])
new_list = []
pcp_flag = valve_of.PCP_TUNNEL_FLAG
if self.tunnel_dests[tunnel_id]['reverse']:
pcp_flag = valve_of.PCP_TUNNEL_REVERSE_DIRECTION_FLAG
if curr_dp == src_dp and curr_dp != dst_dp:
# SRC DP: in_port, actions=[push_vlan, output, pop_vlans]
# Ideally, we would be able to detect if the tunnel has an `allow` action clause.
# However, this is difficult as a single ACL can have multiple rules using the same
# tunnel, but with one instance requiring the `allow` clause and another, not.
# This means it is easier to always append the `pop_vlans` in assumption that the
# `allow` action does exist, and then optimize/reduce the redundant rules before
# outputting the flowrule.
# We also set the tunnel VLAN header with a PCP value indicating that we are in
# the tunnel, which will save the VLANs from being reserved.
new_list = [
{'vlan_vids': [{'vid': tunnel_id, 'eth_type': ether.ETH_TYPE_8021Q}]},
{'set_fields': [{'vlan_pcp': pcp_flag}]},
{'port': out_port},
{'pop_vlans': 1}]
elif curr_dp == dst_dp and curr_dp != src_dp:
# DST DP: in_port, vlan_vid, actions=[pop_vlan, additional_instructions, output]
# If exit_instructions are applied, then we want to pop off the tunnel
# VLAN header, then apply the additional instructions, then output
if self.tunnel_dests[tunnel_id]['maintain_encapsulation']:
# We wish to maintain tunnel encapsulation before outputting
# So do not add the pop_vlans rule
new_list = []
else:
new_list = [{'pop_vlans': 1}]
exit_instructions = self.tunnel_dests[tunnel_id].get('exit_instructions', [])
new_list.extend(copy.copy(list(exit_instructions)))
if out_port is None:
# DP dest tunnel, so we fall through into the eth_dst output table
new_list.append({'goto': output_table.table_id})
else:
# Tunnel has port specified, so output to destination
new_list.append({'port': out_port})
elif curr_dp == src_dp and curr_dp == dst_dp:
# SINGLE DP: in_port, actions=[additional_instructions, out_port]
exit_instructions = self.tunnel_dests[tunnel_id].get('exit_instructions', [])
new_list.extend(copy.copy(list(exit_instructions)))
if self.tunnel_dests[tunnel_id].get('maintain_encapsulation', False):
# Maintain encapsulation implies we want the tunnel VID on the packet,
# so ensure it is purposefully put onto the packet, even when
# there would originally be no need to push on a tunnel VID
new_list.extend([
{'vlan_vids': [{'vid': tunnel_id, 'eth_type': ether.ETH_TYPE_8021Q}]},
{'set_fields': [{'vlan_pcp': pcp_flag}]}])
if out_port is None:
# DP dest tunnel, so we fall through into the eth_dst output table
new_list.extend([{'goto': output_table.table_id}])
else:
# Tunnel has port specified, so output to destination
new_list.extend([{'port': out_port}])
else:
# TRANSIT DP: in_port, vlan_vid, actions=[output]
new_list = [{'port': out_port}]
if new_list != prev_list:
self.dyn_tunnel_rules[tunnel_id][source_id] = new_list
return True
return True
# NOTE: 802.1x steals the port ACL table.
PORT_ACL_8021X = ACL(
'port_acl_8021x', 0,
{'rules': [
{'eth_type': 1, 'eth_src': '01:02:03:04:05:06', 'actions': {'output': {
'port': valve_of.ofp.OFPP_LOCAL, 'set_fields': [
{'eth_src': '01:02:03:04:05:06'}, {'eth_dst': '01:02:03:04:05:06'}]}}}]})
PORT_ACL_8021X.build({}, None, 1)
MAB_ACL_8021X = ACL(
'mab_acl_8021x', 0,
{'rules': [{
'eth_type': valve_of.ether.ETH_TYPE_IP, 'eth_src': '01:02:03:04:05:06',
'ip_proto': valve_of.inet.IPPROTO_UDP, 'udp_src': 68, 'udp_dst': 67,
'actions': {'output': {'port': valve_of.ofp.OFPP_LOCAL}}}]})
MAB_ACL_8021X.build({}, None, 1)
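# Illustrative sketch (not part of the original module): a minimal ACL built from a
# plain config dict in the same way as the 802.1x ACLs above. The name, match values
# and ARP-allow / drop-all rule pair are arbitrary examples, assuming such rules are
# accepted by valve_acl.build_acl_ofmsgs.
EXAMPLE_ALLOW_ARP_ACL = ACL(
    'example_allow_arp', 0,
    {'rules': [
        {'eth_type': valve_of.ether.ETH_TYPE_ARP, 'actions': {'allow': 1}},
        {'actions': {'allow': 0}}]})
EXAMPLE_ALLOW_ARP_ACL.build({}, None, 1)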
|
|
# Copyright 2013 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import logging
import os
import socket
import time
import traceback
import zipfile
from oslo.serialization import jsonutils
import muranoclient.common.exceptions as exceptions
import requests
from fuel_health.common.utils.data_utils import rand_name
import fuel_health.nmanager
LOG = logging.getLogger(__name__)
class MuranoTest(fuel_health.nmanager.PlatformServicesBaseClass):
"""Manager that provides access to the Murano python client for
calling Murano API.
"""
@classmethod
def setUpClass(cls):
super(MuranoTest, cls).setUpClass()
cls.packages = []
cls.environments = []
def setUp(self):
super(MuranoTest, self).setUp()
self.check_clients_state()
self.env_name = rand_name("ostf_test-Murano_env")
if not self.config.compute.compute_nodes:
self.skipTest('There are no compute nodes to run tests')
self.min_required_ram_mb = 4096
self.murano_available = True
self.endpoint = self.config.murano.api_url + '/v1/'
self.headers = {'X-Auth-Token': self.murano_client.auth_token,
'content-type': 'application/json'}
try:
self.list_environments()
except exceptions.CommunicationError:
self.murano_available = False
self.skipTest("Murano service is not available")
def tearDown(self):
"""This method allows to clean up the OpenStack environment
after the Murano OSTF tests.
"""
if self.murano_available:
if self.environments:
for environment_id in self.environments:
try:
self.delete_environment(environment_id)
except Exception:
LOG.warning(traceback.format_exc())
if self.packages:
for package in self.packages:
try:
self.delete_package(package.id)
except Exception:
LOG.warning(traceback.format_exc())
super(MuranoTest, self).tearDown()
def zip_dir(self, parent_dir, app_dir):
"""This method allows to zip directory with application
:param parent_dir: Directory, where application lives
:param app_dir: Directory with application
:return:
"""
abs_path = os.path.join(parent_dir, app_dir)
path_len = len(abs_path) + 1
zip_file = abs_path + ".zip"
with contextlib.closing(zipfile.ZipFile(zip_file, "w")) as zf:
for dir_name, _, files in os.walk(abs_path):
for filename in files:
fn = os.path.join(dir_name, filename)
zf.write(fn, fn[path_len:])
return zip_file
def find_murano_image(self, image_type):
"""This method allows to find Windows images with Murano tag.
Returns the image object or None
image_type should be in [linux, windows.2012, cirros.demo]
"""
tag = 'murano_image_info'
for image in self.compute_client.images.list():
if tag in image.metadata:
metadata = jsonutils.loads(image.metadata[tag])
if image_type == metadata['type']:
return image
def list_environments(self):
"""This method allows to get the list of environments.
Returns the list of environments.
"""
resp = requests.get(self.endpoint + 'environments',
headers=self.headers)
return resp.json()
def create_environment(self, name):
"""This method allows to create environment.
Input parameters:
name - Name of new environment
Returns new environment.
"""
environment = self.murano_client.environments.create({'name': name})
self.environments.append(environment.id)
return environment
def get_environment(self, environment_id):
"""This method allows to get specific environment by ID.
Input parameters:
environment_id - ID of environment
session_id - ID of session for this environment (optional)
Returns specific environment.
"""
return self.murano_client.environments.get(environment_id)
def update_environment(self, environment_id, new_name):
"""This method allows to update specific environment by ID.
Input parameters:
environment_id - ID of environment
new_name - New name for environment
Returns new environment.
"""
return self.murano_client.environments.update(environment_id, new_name)
def delete_environment(self, environment_id):
"""This method allows to delete specific environment by ID.
Input parameters:
environment_id - ID of environment
Returns None.
"""
self.murano_client.environments.delete(environment_id)
return self.environments.remove(environment_id)
def environment_delete_check(self, environment_id, timeout=60):
resp = requests.get('{0}environments/{1}'.format(self.endpoint,
environment_id),
headers=self.headers)
self.delete_environment(environment_id)
point = time.time()
while resp.status_code == 200:
if time.time() - point > timeout:
self.fail("Can't delete environment more than {0} seconds".
format(timeout))
resp = requests.get('{0}environments/{1}'.format(self.endpoint,
environment_id),
headers=self.headers)
try:
env = resp.json()
if env["status"] == "delete failure":
self.fail("Environment status: {0}".format(env["status"]))
except Exception:
LOG.debug("Failed to get environment status "
"or environment no more exists")
time.sleep(5)
def create_session(self, environment_id):
"""This method allows to create session for environment.
Input parameters:
environment_id - ID of environment
Returns new session.
"""
return self.murano_client.sessions.configure(environment_id)
def get_session(self, environment_id, session_id):
"""This method allows to get specific session.
Input parameters:
environment_id - ID of environment
session_id - ID of session for this environment
Returns specific session.
"""
return self.murano_client.sessions.get(environment_id, session_id)
def delete_session(self, environment_id, session_id):
"""This method allows to delete session for environment.
Input parameters:
environment_id - ID of environment
session_id - ID of session for this environment
Returns None.
"""
return self.murano_client.sessions.delete(environment_id, session_id)
def deploy_session(self, environment_id, session_id):
"""This method allows to deploy session for environment.
Input parameters:
environment_id - ID of environment
session_id - ID of session for this environment
Returns specific session.
"""
endpoint = '{0}environments/{1}/sessions/{2}/deploy'.format(
self.endpoint, environment_id, session_id)
return requests.post(endpoint, data=None, headers=self.headers)
def create_service(self, environment_id, session_id, json_data):
"""This method allows to create service.
Input parameters:
environment_id - ID of environment
session_id - ID of session for this environment
json_data - JSON with service description
Returns specific service.
"""
headers = self.headers.copy()
headers.update({'x-configuration-session': session_id})
endpoint = '{0}environments/{1}/services'.format(self.endpoint,
environment_id)
return requests.post(endpoint, data=jsonutils.dumps(json_data),
headers=headers).json()
def list_services(self, environment_id, session_id=None):
"""This method allows to get list of services.
Input parameters:
environment_id - ID of environment
session_id - ID of session for this environment (optional)
Returns list of services.
"""
return self.murano_client.services.get(environment_id, '/', session_id)
def get_service(self, environment_id, session_id, service_id):
"""This method allows to get service by ID.
Input parameters:
environment_id - ID of environment
session_id - ID of session for this environment
service_id - ID of service in this environment
Returns specific service.
"""
return self.murano_client.services.get(environment_id,
'/{0}'.format(service_id),
session_id)
def delete_service(self, environment_id, session_id, service_id):
"""This method allows to delete specific service.
Input parameters:
environment_id - ID of environment
session_id - ID of session for this environment
service_id - ID of service in this environment
Returns None.
"""
return self.murano_client.services.delete(environment_id,
'/{0}'.format(service_id),
session_id)
def deploy_check(self, environment):
"""This method allows to wait for deployment of Murano evironments.
Input parameters:
environment - Murano environment
Returns environment.
"""
environment = self.get_environment(environment.id)
while environment.status != 'ready':
time.sleep(5)
environment = self.get_environment(environment.id)
if environment.status == 'deploy failure':
LOG.error(
'Environment has incorrect status'
' %s' % environment.status)
self.fail(
'Environment has incorrect status'
' %s .' % environment.status)
return environment
def deployments_status_check(self, environment_id):
"""This method allows to check that deployment status is 'success'.
Input parameters:
environment_id - ID of environment
Returns 'OK'.
"""
endpoint = '{0}environments/{1}/deployments'.format(self.endpoint,
environment_id)
deployments = requests.get(endpoint,
headers=self.headers).json()['deployments']
for deployment in deployments:
# Save the information about all deployments
LOG.debug("Environment state: {0}".format(deployment['state']))
r = requests.get('{0}/{1}'.format(endpoint, deployment['id']),
headers=self.headers).json()
LOG.debug("Reports: {0}".format(r))
self.assertEqual('success', deployment['state'])
return 'OK'
def check_port_access(self, ip, port):
result = 1
start_time = time.time()
while time.time() - start_time < 600:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((str(ip), port))
sock.close()
if result == 0:
break
time.sleep(5)
self.assertEqual(0, result, '%s port is closed on instance' % port)
def port_status_check(self, environment, configurations):
"""Function which gives opportunity to check multiple instances
:param environment: Murano environment
:param configurations: Array of configurations.
Example: [[instance_name, *ports], [instance_name, *ports]] ...
"""
for configuration in configurations:
inst_name = configuration[0]
ports = configuration[1:]
ip = self.get_ip_by_instance_name(environment, inst_name)
if ip and ports:
for port in ports:
self.check_port_access(ip, port)
else:
self.fail('Instance does not have floating IP')
def get_ip_by_instance_name(self, environment, inst_name):
"""Returns ip of instance using instance name
:param environment: Murano environment
:param name: String, which is substring of name of instance or name of
instance
:return:
"""
for service in environment.services:
if inst_name in service['instance']['name']:
return service['instance']['floatingIpAddress']
def get_list_packages(self):
try:
packages = self.murano_client.packages.list()
except exceptions.ClientException:
self.fail("Can not get list of packages")
packages_list = list(packages)
LOG.debug('Packages List: {0}'.format(packages_list))
self.assertIsInstance(packages_list, list)
return packages_list
def generate_fqn_list(self):
fqn_list = []
packages = self.get_list_packages()
for package in packages:
fqn_list.append(package.to_dict()['fully_qualified_name'])
LOG.debug('FQN List: {0}'.format(fqn_list))
return fqn_list
def upload_package(self, package_name, body, app):
files = {'%s' % package_name: open(app, 'rb')}
package = self.murano_client.packages.create(body, files)
self.packages.append(package)
return package
def package_exists(self, *packages):
fqn_list = self.generate_fqn_list()
LOG.debug("Response for packages is {0}".format(fqn_list))
for package in packages:
if package not in fqn_list:
return False
return True
def get_package(self, package_id):
resp = requests.get(self.endpoint + 'catalog/packages/{0}'.
format(package_id), headers=self.headers)
self.assertEqual(200, resp.status_code)
return resp.json()
def get_package_by_fqdn(self, package_name):
resp = requests.get(self.endpoint + 'catalog/packages',
headers=self.headers)
for package in resp.json()["packages"]:
if package["fully_qualified_name"] == package_name:
return package
def delete_package(self, package_id):
resp = requests.delete(self.endpoint + 'catalog/packages/{0}'.
format(package_id), headers=self.headers)
try:
self.assertEqual(200, resp.status_code)
except Exception:
self.assertEqual(404, resp.status_code)
LOG.debug("Package not exists.")
def get_list_categories(self):
resp = requests.get(self.endpoint + 'catalog/packages/categories',
headers=self.headers)
self.assertEqual(200, resp.status_code)
self.assertIsInstance(resp.json()['categories'], list)
def check_path(self, env, path, inst_name=None):
environment = env.manager.get(env.id)
if inst_name:
ip = self.get_ip_by_instance_name(environment, inst_name)
else:
ip = environment.services[0]['instance']['floatingIpAddress']
resp = requests.get('http://{0}/{1}'.format(ip, path))
if resp.status_code == 200:
pass
else:
self.fail("Service path unavailable")
|
|
"""Code heatmap module."""
import inspect
import fnmatch
import os
import runpy
import sys
import time
from collections import defaultdict
from collections import deque
from vprof import base_profiler
_STDLIB_PATHS = [
os.path.abspath(path) for path in sys.path
if os.path.isdir(path) and path.startswith(sys.prefix)]
def check_standard_dir(module_path):
"""Checks whether path belongs to standard library or installed modules."""
if 'site-packages' in module_path:
return True
for stdlib_path in _STDLIB_PATHS:
if fnmatch.fnmatchcase(module_path, stdlib_path + '*'):
return True
return False
class _CodeHeatmapCalculator:
"""Calculates Python code heatmap.
Class that contains all logic related to calculating code heatmap
for a Python program.
"""
def __init__(self):
self.original_trace_function = sys.gettrace()
self.prev_lineno = None
self.prev_timestamp = None
self.prev_path = None
self.lines = deque()
self._execution_count = defaultdict(lambda: defaultdict(int))
self._heatmap = defaultdict(lambda: defaultdict(float))
def __enter__(self):
"""Enables heatmap calculator."""
sys.settrace(self.record_line)
return self
def __exit__(self, exc_type, exc_val, exc_tbf):
"""Disables heatmap calculator."""
sys.settrace(self.original_trace_function)
if self.prev_timestamp:
runtime = time.time() - self.prev_timestamp
self.lines.append([self.prev_path, self.prev_lineno, runtime])
def record_line(self, frame, event, arg): # pylint: disable=unused-argument
"""Records line execution time."""
if event == 'line':
if self.prev_timestamp:
runtime = time.time() - self.prev_timestamp
self.lines.append([self.prev_path, self.prev_lineno, runtime])
self.prev_lineno = frame.f_lineno
self.prev_path = frame.f_code.co_filename
self.prev_timestamp = time.time()
return self.record_line
@property
def lines_without_stdlib(self):
"""Filters code from standard library from self.lines."""
prev_line = None
current_module_path = inspect.getabsfile(inspect.currentframe())
for module_path, lineno, runtime in self.lines:
module_abspath = os.path.abspath(module_path)
if not prev_line:
prev_line = [module_abspath, lineno, runtime]
else:
if (not check_standard_dir(module_path) and
module_abspath != current_module_path):
yield prev_line
prev_line = [module_abspath, lineno, runtime]
else:
prev_line[2] += runtime
yield prev_line
def fill_heatmap(self):
"""Fills code heatmap and execution count dictionaries."""
for module_path, lineno, runtime in self.lines_without_stdlib:
self._execution_count[module_path][lineno] += 1
self._heatmap[module_path][lineno] += runtime
@property
def heatmap(self):
"""Returns heatmap with absolute path names."""
if not self._heatmap:
self.fill_heatmap()
return self._heatmap
@property
def execution_count(self):
"""Returns execution count map with absolute path names."""
if not self._execution_count:
self.fill_heatmap()
return self._execution_count
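# Illustrative sketch (not part of the original module): using the calculator as a
# context manager around an arbitrary callable. Every line traced while the `with`
# block is active contributes to the per-line heatmap and execution counts.
def _example_heatmap_for_callable(func, *args, **kwargs):
    """Return (heatmap, execution_count) collected while running func."""
    with _CodeHeatmapCalculator() as calc:
        func(*args, **kwargs)
    return calc.heatmap, calc.execution_count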
class CodeHeatmapProfiler(base_profiler.BaseProfiler):
"""Code heatmap wrapper."""
SKIP_LINES = 10
MIN_SKIP_SIZE = 100
def _calc_skips(self, heatmap, num_lines):
"""Calculates skip map for large sources.
Skip map is a list of tuples where first element of tuple is a line
number and second is a length of the skip region:
[(1, 10), (15, 10)] means skipping 10 lines after line 1 and
10 lines after line 15.
"""
if num_lines < self.MIN_SKIP_SIZE:
return []
skips, prev_line = [], 0
for line in sorted(heatmap):
curr_skip = line - prev_line - 1
if curr_skip > self.SKIP_LINES:
skips.append((prev_line, curr_skip))
prev_line = line
if num_lines - prev_line > self.SKIP_LINES:
skips.append((prev_line, num_lines - prev_line))
return skips
@staticmethod
def _skip_lines(src_code, skip_map):
"""Skips lines in src_code specified by a skip map."""
if not skip_map:
return [['line', j + 1, l] for j, l in enumerate(src_code)]
code_with_skips, i = [], 0
for line, length in skip_map:
code_with_skips.extend(
['line', i + j + 1, l] for j, l in enumerate(src_code[i:line]))
if (code_with_skips
and code_with_skips[-1][0] == 'skip'): # Merge skips.
code_with_skips[-1][1] += length
else:
code_with_skips.append(['skip', length])
i = line + length
code_with_skips.extend(
['line', i + j + 1, l] for j, l in enumerate(src_code[i:]))
return code_with_skips
def _profile_package(self):
"""Calculates heatmap for a package."""
with _CodeHeatmapCalculator() as prof:
try:
runpy.run_path(self._run_object, run_name='__main__')
except SystemExit:
pass
heatmaps = []
for filename, heatmap in prof.heatmap.items():
if os.path.isfile(filename):
heatmaps.append(
self._format_heatmap(
filename, heatmap, prof.execution_count[filename]))
run_time = sum(heatmap['runTime'] for heatmap in heatmaps)
return {
'objectName': self._run_object,
'runTime': run_time,
'heatmaps': heatmaps
}
def profile_package(self):
"""Runs package profiler in a separate process."""
return base_profiler.run_in_separate_process(self._profile_package)
def _format_heatmap(self, filename, heatmap, execution_count):
"""Formats heatmap for UI."""
with open(filename) as src_file:
file_source = src_file.read().split('\n')
skip_map = self._calc_skips(heatmap, len(file_source))
run_time = sum(heatmap.values())
return {
'name': filename,
'heatmap': heatmap,
'executionCount': execution_count,
'srcCode': self._skip_lines(file_source, skip_map),
'runTime': run_time
}
def _profile_module(self):
"""Calculates heatmap for a module."""
with open(self._run_object, 'r') as srcfile:
src_code = srcfile.read()
code = compile(src_code, self._run_object, 'exec')
try:
with _CodeHeatmapCalculator() as prof:
exec(code, self._globs, None)
except SystemExit:
pass
heatmaps = []
for filename, heatmap in prof.heatmap.items():
if os.path.isfile(filename):
heatmaps.append(
self._format_heatmap(
filename, heatmap, prof.execution_count[filename]))
run_time = sum(heatmap['runTime'] for heatmap in heatmaps)
return {
'objectName': self._run_object,
'runTime': run_time,
'heatmaps': heatmaps
}
def profile_module(self):
"""Runs module profiler in a separate process."""
return base_profiler.run_in_separate_process(self._profile_module)
def profile_function(self):
"""Calculates heatmap for a function."""
with _CodeHeatmapCalculator() as prof:
result = self._run_object(*self._run_args, **self._run_kwargs)
code_lines, start_line = inspect.getsourcelines(self._run_object)
source_lines = []
for line in code_lines:
source_lines.append(('line', start_line, line))
start_line += 1
filename = os.path.abspath(inspect.getsourcefile(self._run_object))
heatmap = prof.heatmap[filename]
run_time = sum(heatmap.values())
return {
'objectName': self._object_name,
'runTime': run_time,
'result': result,
'timestamp': int(time.time()),
'heatmaps': [{
'name': self._object_name,
'heatmap': heatmap,
'executionCount': prof.execution_count[filename],
'srcCode': source_lines,
'runTime': run_time
}]
}
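# Illustrative sketch (not part of the original module): how a precomputed skip map
# collapses unexecuted regions when formatting source for the UI. The skip map here
# is hand-written; in normal use it comes from CodeHeatmapProfiler._calc_skips.
def _example_skip_lines_usage():
    """Return a 120-line source rendered with one 108-line region skipped."""
    src = ['pass'] * 120
    return CodeHeatmapProfiler._skip_lines(src, [(1, 108)])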
|